init

commit 444f800536
122 changed files with 17137 additions and 0 deletions
shared/Scene/MergeUtil.cpp (new file, 151 lines)
#include "shared/Scene/MergeUtil.h"
|
||||
#include "shared/Scene/Scene.h"
|
||||
|
||||
#include <unordered_map>
|
||||
|
||||
static uint32_t shiftMeshIndices(MeshData& meshData, const std::vector<uint32_t>& meshesToMerge)
|
||||
{
|
||||
uint32_t minVtxOffset = std::numeric_limits<uint32_t>::max();
|
||||
|
||||
for (uint32_t i : meshesToMerge)
|
||||
minVtxOffset = std::min(meshData.meshes[i].vertexOffset, minVtxOffset);
|
||||
|
||||
uint32_t mergeCount = 0u; // calculated by summing index counts in meshesToMerge
|
||||
|
||||
// now shift all the indices in individual index blocks [use minVtxOffset]
|
||||
for (uint32_t i : meshesToMerge) {
|
||||
Mesh& m = meshData.meshes[i];
|
||||
// for how much should we shift the indices in mesh [m]
|
||||
const uint32_t delta = m.vertexOffset - minVtxOffset;
|
||||
const uint32_t idxCount = m.getLODIndicesCount(0);
|
||||
for (uint32_t ii = 0u; ii < idxCount; ii++)
|
||||
meshData.indexData[m.indexOffset + ii] += delta;
|
||||
|
||||
m.vertexOffset = minVtxOffset;
|
||||
|
||||
// sum all the deleted meshes' indices
|
||||
mergeCount += idxCount;
|
||||
}
|
||||
|
||||
return meshData.indexData.size() - mergeCount;
|
||||
}
|
||||
|
||||
// All the meshesToMerge now have the same vertexOffset and individual index values are shifted by appropriate amount
|
||||
// Here we move all the indices to appropriate places in the new index array
|
||||
static void mergeIndexArray(MeshData& md, const std::vector<uint32_t>& meshesToMerge, std::unordered_map<uint32_t, uint32_t>& oldToNew)
|
||||
{
|
||||
std::vector<uint32_t> newIndices(md.indexData.size());
|
||||
// Two offsets in the new indices array (one begins at the start, the second one after all the copied indices)
|
||||
uint32_t copyOffset = 0;
|
||||
uint32_t mergeOffset = shiftMeshIndices(md, meshesToMerge);
|
||||
|
||||
const size_t mergedMeshIndex = md.meshes.size() - meshesToMerge.size();
|
||||
uint32_t newIndex = 0u;
|
||||
for (size_t midx = 0u; midx < md.meshes.size(); midx++) {
|
||||
const bool shouldMerge = std::binary_search(meshesToMerge.begin(), meshesToMerge.end(), midx);
|
||||
|
||||
oldToNew[midx] = shouldMerge ? mergedMeshIndex : newIndex;
|
||||
newIndex += shouldMerge ? 0 : 1;
|
||||
|
||||
Mesh& mesh = md.meshes[midx];
|
||||
const uint32_t idxCount = mesh.getLODIndicesCount(0);
|
||||
// move all indices to the new array at mergeOffset
|
||||
const auto start = md.indexData.begin() + mesh.indexOffset;
|
||||
mesh.indexOffset = copyOffset;
|
||||
uint32_t* const offsetPtr = shouldMerge ? &mergeOffset : ©Offset;
|
||||
std::copy(start, start + idxCount, newIndices.begin() + *offsetPtr);
|
||||
*offsetPtr += idxCount;
|
||||
}
|
||||
|
||||
md.indexData = newIndices;
|
||||
|
||||
// all the merged indices are now in lastMesh
|
||||
Mesh lastMesh = md.meshes[meshesToMerge[0]];
|
||||
lastMesh.indexOffset = copyOffset;
|
||||
lastMesh.lodOffset[0] = copyOffset;
|
||||
lastMesh.lodOffset[1] = mergeOffset;
|
||||
lastMesh.lodCount = 1;
|
||||
md.meshes.push_back(lastMesh);
|
||||
}
|
||||
|
||||
void mergeNodesWithMaterial(Scene& scene, MeshData& meshData, const std::string& materialName)
|
||||
{
|
||||
// Find material index
|
||||
const int oldMaterial = (int)std::distance(
|
||||
std::begin(scene.materialNames), std::find(std::begin(scene.materialNames), std::end(scene.materialNames), materialName));
|
||||
|
||||
std::vector<uint32_t> toDelete;
|
||||
|
||||
for (size_t i = 0u; i < scene.hierarchy.size(); i++)
|
||||
if (scene.meshForNode.contains(i) && scene.materialForNode.contains(i) && (scene.materialForNode.at(i) == oldMaterial))
|
||||
toDelete.push_back(i);
|
||||
|
||||
std::vector<uint32_t> meshesToMerge(toDelete.size());
|
||||
|
||||
// Convert toDelete indices to mesh indices
|
||||
std::transform(toDelete.begin(), toDelete.end(), meshesToMerge.begin(), [&scene](uint32_t i) { return scene.meshForNode.at(i); });
|
||||
|
||||
// TODO: if merged mesh transforms are non-zero, then we should pre-transform individual mesh vertices in meshData using local transform
|
||||
|
||||
// old-to-new mesh indices
|
||||
std::unordered_map<uint32_t, uint32_t> oldToNew;
|
||||
|
||||
// now move all the meshesToMerge to the end of array
|
||||
mergeIndexArray(meshData, meshesToMerge, oldToNew);
|
||||
|
||||
// cutoff all but one of the merged meshes (insert the last saved mesh from meshesToMerge - they are all the same)
|
||||
eraseSelected(meshData.meshes, meshesToMerge);
|
||||
|
||||
for (auto& n : scene.meshForNode)
|
||||
n.second = oldToNew[n.second];
|
||||
|
||||
// reattach the node with merged meshes [identity transforms are assumed]
|
||||
int newNode = addNode(scene, 0, 1);
|
||||
scene.meshForNode[newNode] = (int)meshData.meshes.size() - 1;
|
||||
scene.materialForNode[newNode] = (uint32_t)oldMaterial;
|
||||
|
||||
deleteSceneNodes(scene, toDelete);
|
||||
}
|
||||
|
||||
void mergeMaterialLists(
|
||||
const std::vector<std::vector<Material>*>& oldMaterials, const std::vector<std::vector<std::string>*>& oldTextures,
|
||||
std::vector<Material>& allMaterials, std::vector<std::string>& newTextures)
|
||||
{
|
||||
// map texture names to indices in newTexturesList (calculated as we fill the newTexturesList)
|
||||
std::unordered_map<std::string, int> newTextureNames;
|
||||
std::unordered_map<size_t, size_t> materialToTextureList; // use the index of Material in the allMaterials array
|
||||
|
||||
// create a combined material list [no hashing of materials, just straightforward merging of all lists]
|
||||
for (size_t midx = 0; midx != oldMaterials.size(); midx++) {
|
||||
for (const Material& m : *oldMaterials[midx]) {
|
||||
allMaterials.push_back(m);
|
||||
materialToTextureList[allMaterials.size() - 1] = midx;
|
||||
}
|
||||
}
|
||||
|
||||
// create one combined texture list
|
||||
for (const std::vector<std::string>* tl : oldTextures) {
|
||||
for (const std::string& file : *tl) {
|
||||
newTextureNames[file] = addUnique(newTextures, file);
|
||||
}
|
||||
}
|
||||
|
||||
// a lambda to replace textureID by a new "version" (from the global list)
|
||||
auto replaceTexture = [&materialToTextureList, &oldTextures, &newTextureNames](int mtlId, int* textureID) {
|
||||
if (*textureID == -1)
|
||||
return;
|
||||
|
||||
const size_t listIdx = materialToTextureList[mtlId];
|
||||
const std::vector<std::string>& texList = *oldTextures[listIdx];
|
||||
const std::string& texFile = texList[*textureID];
|
||||
*textureID = newTextureNames[texFile];
|
||||
};
|
||||
|
||||
for (size_t i = 0; i < allMaterials.size(); i++) {
|
||||
Material& m = allMaterials[i];
|
||||
replaceTexture(i, &m.baseColorTexture);
|
||||
replaceTexture(i, &m.emissiveTexture);
|
||||
replaceTexture(i, &m.normalTexture);
|
||||
replaceTexture(i, &m.opacityTexture);
|
||||
}
|
||||
}
|
||||
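A minimal usage sketch for mergeNodesWithMaterial() above, assuming a scene that was already converted to the on-disk format; the file paths and the "Leaves" material name are placeholders, not part of the commit.

#include "shared/Scene/MergeUtil.h"

int main() {
  Scene scene;
  MeshData meshData;
  loadScene("data/scene.scene", scene);              // hypothetical input files
  loadMeshData("data/scene.meshes", meshData);
  // collapse every node that uses this material into one mesh attached to a single new node
  mergeNodesWithMaterial(scene, meshData, "Leaves"); // hypothetical material name
  saveScene("data/scene_merged.scene", scene);
  saveMeshData("data/scene_merged.meshes", meshData);
  return 0;
}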
shared/Scene/MergeUtil.h (new file, 16 lines)
#pragma once

#include "shared/Scene/Scene.h"
#include "shared/Scene/VtxData.h"

void mergeNodesWithMaterial(Scene& scene, MeshData& meshData, const std::string& materialName);

// Merge material lists from multiple scenes (follows the logic of merging in mergeScenes)
void mergeMaterialLists(
    // Input:
    const std::vector<std::vector<Material>*>& oldMaterials,   // all materials
    const std::vector<std::vector<std::string>*>& oldTextures, // all textures from all material lists
    // Output:
    std::vector<Material>& allMaterials,
    std::vector<std::string>& newTextures // all textures (merged from oldTextures, only unique items)
);
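A minimal usage sketch for mergeMaterialLists(), assuming two MeshData containers a and b were filled elsewhere; their material and texture lists are combined into out, and the texture indices inside out.materials are remapped to point into out.textureFiles. The helper name combineMaterials is illustrative only.

#include "shared/Scene/MergeUtil.h"

void combineMaterials(MeshData& a, MeshData& b, MeshData& out) {
  const std::vector<std::vector<Material>*> oldMaterials    = { &a.materials, &b.materials };
  const std::vector<std::vector<std::string>*> oldTextures  = { &a.textureFiles, &b.textureFiles };
  mergeMaterialLists(oldMaterials, oldTextures, out.materials, out.textureFiles);
}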
shared/Scene/Scene.cpp (new file, 493 lines)
#include "shared/Scene/Scene.h"
|
||||
#include "shared/Utils.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <numeric>
|
||||
|
||||
int addNode(Scene &scene, int parent, int level) {
|
||||
const int node = (int)scene.hierarchy.size();
|
||||
{
|
||||
// TODO: resize aux arrays (local/global etc.)
|
||||
scene.localTransform.push_back(glm::mat4(1.0f));
|
||||
scene.globalTransform.push_back(glm::mat4(1.0f));
|
||||
}
|
||||
scene.hierarchy.push_back({.parent = parent, .lastSibling = -1});
|
||||
if (parent > -1) {
|
||||
// find first item (sibling)
|
||||
const int s = scene.hierarchy[parent].firstChild;
|
||||
if (s == -1) {
|
||||
scene.hierarchy[parent].firstChild = node;
|
||||
scene.hierarchy[node].lastSibling = node;
|
||||
} else {
|
||||
int dest = scene.hierarchy[s].lastSibling;
|
||||
if (dest <= -1) {
|
||||
// no cached lastSibling, iterate nextSibling indices
|
||||
for (dest = s; scene.hierarchy[dest].nextSibling != -1;
|
||||
dest = scene.hierarchy[dest].nextSibling)
|
||||
;
|
||||
}
|
||||
scene.hierarchy[dest].nextSibling = node;
|
||||
scene.hierarchy[s].lastSibling = node;
|
||||
}
|
||||
}
|
||||
scene.hierarchy[node].level = level;
|
||||
scene.hierarchy[node].nextSibling = -1;
|
||||
scene.hierarchy[node].firstChild = -1;
|
||||
return node;
|
||||
}
|
||||
|
||||
void markAsChanged(Scene &scene, int node) {
|
||||
const int level = scene.hierarchy[node].level;
|
||||
scene.changedAtThisFrame[level].push_back(node);
|
||||
|
||||
// TODO: use non-recursive iteration with aux stack
|
||||
for (int s = scene.hierarchy[node].firstChild; s != -1;
|
||||
s = scene.hierarchy[s].nextSibling) {
|
||||
markAsChanged(scene, s);
|
||||
}
|
||||
}
|
||||
|
||||
int findNodeByName(const Scene &scene, const std::string &name) {
|
||||
// Extremely simple linear search without any hierarchy reference
|
||||
// To support DFS/BFS searches separate traversal routines are needed
|
||||
|
||||
for (size_t i = 0; i < scene.localTransform.size(); i++)
|
||||
if (scene.nameForNode.contains(i)) {
|
||||
int strID = scene.nameForNode.at(i);
|
||||
if (strID > -1)
|
||||
if (scene.nodeNames[strID] == name)
|
||||
return (int)i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
bool mat4IsIdentity(const glm::mat4 &m);
|
||||
void fprintfMat4(FILE *f, const glm::mat4 &m);
|
||||
|
||||
// CPU version of global transform update []
|
||||
bool recalculateGlobalTransforms(Scene &scene) {
|
||||
bool wasUpdated = false;
|
||||
|
||||
if (!scene.changedAtThisFrame[0].empty()) {
|
||||
const int c = scene.changedAtThisFrame[0][0];
|
||||
scene.globalTransform[c] = scene.localTransform[c];
|
||||
scene.changedAtThisFrame[0].clear();
|
||||
wasUpdated = true;
|
||||
}
|
||||
|
||||
for (int i = 1; i < MAX_NODE_LEVEL; i++) {
|
||||
for (int c : scene.changedAtThisFrame[i]) {
|
||||
const int p = scene.hierarchy[c].parent;
|
||||
scene.globalTransform[c] =
|
||||
scene.globalTransform[p] * scene.localTransform[c];
|
||||
}
|
||||
wasUpdated |= !scene.changedAtThisFrame[i].empty();
|
||||
scene.changedAtThisFrame[i].clear();
|
||||
}
|
||||
|
||||
return wasUpdated;
|
||||
}
|
||||
|
||||
void loadMap(FILE *f, std::unordered_map<uint32_t, uint32_t> &map) {
|
||||
std::vector<uint32_t> ms;
|
||||
|
||||
uint32_t sz = 0;
|
||||
fread(&sz, 1, sizeof(sz), f);
|
||||
|
||||
ms.resize(sz);
|
||||
fread(ms.data(), sizeof(uint32_t), sz, f);
|
||||
|
||||
for (size_t i = 0; i < (sz / 2); i++)
|
||||
map[ms[i * 2 + 0]] = ms[i * 2 + 1];
|
||||
}
|
||||
|
||||
void loadScene(const char *fileName, Scene &scene) {
|
||||
FILE *f = fopen(fileName, "rb");
|
||||
|
||||
if (!f) {
|
||||
printf("Cannot open scene file '%s'. Please run SceneConverter from "
|
||||
"Chapter7 and/or MergeMeshes from Chapter 9",
|
||||
fileName);
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t sz = 0;
|
||||
fread(&sz, sizeof(sz), 1, f);
|
||||
|
||||
scene.hierarchy.resize(sz);
|
||||
scene.globalTransform.resize(sz);
|
||||
scene.localTransform.resize(sz);
|
||||
// TODO: check > -1
|
||||
// TODO: recalculate changedAtThisLevel() - find max depth of a node [or save
|
||||
// scene.maxLevel]
|
||||
fread(scene.localTransform.data(), sizeof(glm::mat4), sz, f);
|
||||
fread(scene.globalTransform.data(), sizeof(glm::mat4), sz, f);
|
||||
fread(scene.hierarchy.data(), sizeof(Hierarchy), sz, f);
|
||||
|
||||
// Mesh for node [index to some list of buffers]
|
||||
loadMap(f, scene.materialForNode);
|
||||
loadMap(f, scene.meshForNode);
|
||||
|
||||
if (!feof(f)) {
|
||||
loadMap(f, scene.nameForNode);
|
||||
loadStringList(f, scene.nodeNames);
|
||||
loadStringList(f, scene.materialNames);
|
||||
}
|
||||
|
||||
fclose(f);
|
||||
|
||||
markAsChanged(scene, 0);
|
||||
recalculateGlobalTransforms(scene);
|
||||
}
|
||||
|
||||
void saveMap(FILE *f, const std::unordered_map<uint32_t, uint32_t> &map) {
|
||||
std::vector<uint32_t> ms;
|
||||
ms.reserve(map.size() * 2);
|
||||
for (const auto &m : map) {
|
||||
ms.push_back(m.first);
|
||||
ms.push_back(m.second);
|
||||
}
|
||||
const uint32_t sz = static_cast<uint32_t>(ms.size());
|
||||
fwrite(&sz, sizeof(sz), 1, f);
|
||||
fwrite(ms.data(), sizeof(uint32_t), ms.size(), f);
|
||||
}
|
||||
|
||||
void saveScene(const char *fileName, const Scene &scene) {
|
||||
FILE *f = fopen(fileName, "wb");
|
||||
|
||||
const uint32_t sz = (uint32_t)scene.hierarchy.size();
|
||||
fwrite(&sz, sizeof(sz), 1, f);
|
||||
|
||||
fwrite(scene.localTransform.data(), sizeof(glm::mat4), sz, f);
|
||||
fwrite(scene.globalTransform.data(), sizeof(glm::mat4), sz, f);
|
||||
fwrite(scene.hierarchy.data(), sizeof(Hierarchy), sz, f);
|
||||
|
||||
// Mesh for node [index to some list of buffers]
|
||||
saveMap(f, scene.materialForNode);
|
||||
saveMap(f, scene.meshForNode);
|
||||
|
||||
if (!scene.nodeNames.empty() && !scene.nameForNode.empty()) {
|
||||
saveMap(f, scene.nameForNode);
|
||||
saveStringList(f, scene.nodeNames);
|
||||
saveStringList(f, scene.materialNames);
|
||||
}
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
bool mat4IsIdentity(const glm::mat4 &m) {
|
||||
return (m[0][0] == 1 && m[0][1] == 0 && m[0][2] == 0 && m[0][3] == 0 &&
|
||||
m[1][0] == 0 && m[1][1] == 1 && m[1][2] == 0 && m[1][3] == 0 &&
|
||||
m[2][0] == 0 && m[2][1] == 0 && m[2][2] == 1 && m[2][3] == 0 &&
|
||||
m[3][0] == 0 && m[3][1] == 0 && m[3][2] == 0 && m[3][3] == 1);
|
||||
}
|
||||
|
||||
void fprintfMat4(FILE *f, const glm::mat4 &m) {
|
||||
if (mat4IsIdentity(m)) {
|
||||
fprintf(f, "Identity\n");
|
||||
} else {
|
||||
fprintf(f, "\n");
|
||||
for (int i = 0; i < 4; i++) {
|
||||
for (int j = 0; j < 4; j++)
|
||||
fprintf(f, "%f ;", m[i][j]);
|
||||
fprintf(f, "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void dumpTransforms(const char *fileName, const Scene &scene) {
|
||||
FILE *f = fopen(fileName, "a+");
|
||||
for (size_t i = 0; i < scene.localTransform.size(); i++) {
|
||||
fprintf(f, "Node[%d].localTransform: ", (int)i);
|
||||
fprintfMat4(f, scene.localTransform[i]);
|
||||
fprintf(f, "Node[%d].globalTransform: ", (int)i);
|
||||
fprintfMat4(f, scene.globalTransform[i]);
|
||||
fprintf(f, "Node[%d].globalDet = %f; localDet = %f\n", (int)i,
|
||||
glm::determinant(scene.globalTransform[i]),
|
||||
glm::determinant(scene.localTransform[i]));
|
||||
}
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
void printChangedNodes(const Scene &scene) {
|
||||
for (int i = 0; i < MAX_NODE_LEVEL && (!scene.changedAtThisFrame[i].empty());
|
||||
i++) {
|
||||
printf("Changed at level(%d):\n", i);
|
||||
|
||||
for (const int &c : scene.changedAtThisFrame[i]) {
|
||||
int p = scene.hierarchy[c].parent;
|
||||
// scene.globalTransform_[c] = scene.globalTransform_[p] *
|
||||
// scene.localTransform_[c];
|
||||
printf(" Node %d. Parent = %d; LocalTransform: ", c, p);
|
||||
fprintfMat4(stdout, scene.localTransform[i]);
|
||||
if (p > -1) {
|
||||
printf(" ParentGlobalTransform: ");
|
||||
fprintfMat4(stdout, scene.globalTransform[p]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shift all hierarchy components in the nodes
|
||||
void shiftNodes(Scene &scene, int startOffset, int nodeCount, int shiftAmount) {
|
||||
auto shiftNode = [shiftAmount](Hierarchy &node) {
|
||||
if (node.parent > -1)
|
||||
node.parent += shiftAmount;
|
||||
if (node.firstChild > -1)
|
||||
node.firstChild += shiftAmount;
|
||||
if (node.nextSibling > -1)
|
||||
node.nextSibling += shiftAmount;
|
||||
if (node.lastSibling > -1)
|
||||
node.lastSibling += shiftAmount;
|
||||
// node->level does not require to be shifted
|
||||
};
|
||||
|
||||
// If there are too many nodes, we can use std::execution::par with
|
||||
// std::transform:
|
||||
// std::transform(scene.hierarchy_.begin() + startOffset,
|
||||
// scene.hierarchy_.begin() + nodeCount,
|
||||
// scene.hierarchy_.begin() + startOffset,
|
||||
// shiftNode);
|
||||
// for (auto i = scene.hierarchy_.begin() + startOffset ; i !=
|
||||
// scene.hierarchy_.begin() + nodeCount ; i++) shiftNode(*i);
|
||||
|
||||
for (int i = 0; i < nodeCount; i++)
|
||||
shiftNode(scene.hierarchy[i + startOffset]);
|
||||
}
|
||||
|
||||
using ItemMap = std::unordered_map<uint32_t, uint32_t>;
|
||||
|
||||
// Add the items from otherMap shifting indices and values along the way
|
||||
void mergeMaps(ItemMap &m, const ItemMap &otherMap, int indexOffset,
|
||||
int itemOffset) {
|
||||
for (const auto &i : otherMap)
|
||||
m[i.first + indexOffset] = i.second + itemOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
There are different use cases for scene merging.
|
||||
The simplest one is the direct "gluing" of multiple scenes into one [all the
|
||||
material lists and mesh lists are merged and indices in all scene nodes are
|
||||
shifted appropriately] The second one is creating a "grid" of objects (or
|
||||
scenes) with the same material and mesh sets. For the second use case we need
|
||||
two flags: 'mergeMeshes' and 'mergeMaterials' to avoid shifting mesh indices
|
||||
*/
|
||||
void mergeScenes(Scene &scene, const std::vector<Scene *> &scenes,
|
||||
const std::vector<glm::mat4> &rootTransforms,
|
||||
const std::vector<uint32_t> &meshCounts, bool mergeMeshes,
|
||||
bool mergeMaterials) {
|
||||
// Create new root node
|
||||
scene.hierarchy = {{
|
||||
.parent = -1,
|
||||
.firstChild = 1,
|
||||
.nextSibling = -1,
|
||||
.lastSibling = -1,
|
||||
.level = 0,
|
||||
}};
|
||||
|
||||
scene.nameForNode[0] = 0;
|
||||
scene.nodeNames = {"NewRoot"};
|
||||
|
||||
scene.localTransform.push_back(glm::mat4(1.f));
|
||||
scene.globalTransform.push_back(glm::mat4(1.f));
|
||||
|
||||
if (scenes.empty())
|
||||
return;
|
||||
|
||||
int offs = 1;
|
||||
int meshOffs = 0;
|
||||
int nameOffs = (int)scene.nodeNames.size();
|
||||
int materialOfs = 0;
|
||||
auto meshCount = meshCounts.begin();
|
||||
|
||||
if (!mergeMaterials)
|
||||
scene.materialNames = scenes[0]->materialNames;
|
||||
|
||||
// FIXME: too much logic (for all the components in a scene, though mesh data
|
||||
// and materials go separately - they're dedicated data lists)
|
||||
for (const Scene *s : scenes) {
|
||||
mergeVectors(scene.localTransform, s->localTransform);
|
||||
mergeVectors(scene.globalTransform, s->globalTransform);
|
||||
|
||||
mergeVectors(scene.hierarchy, s->hierarchy);
|
||||
|
||||
mergeVectors(scene.nodeNames, s->nodeNames);
|
||||
if (mergeMaterials)
|
||||
mergeVectors(scene.materialNames, s->materialNames);
|
||||
|
||||
const int nodeCount = (int)s->hierarchy.size();
|
||||
|
||||
shiftNodes(scene, offs, nodeCount, offs);
|
||||
|
||||
mergeMaps(scene.meshForNode, s->meshForNode, offs,
|
||||
mergeMeshes ? meshOffs : 0);
|
||||
mergeMaps(scene.materialForNode, s->materialForNode, offs,
|
||||
mergeMaterials ? materialOfs : 0);
|
||||
mergeMaps(scene.nameForNode, s->nameForNode, offs, nameOffs);
|
||||
|
||||
offs += nodeCount;
|
||||
|
||||
materialOfs += (int)s->materialNames.size();
|
||||
nameOffs += (int)s->nodeNames.size();
|
||||
|
||||
if (mergeMeshes) {
|
||||
meshOffs += *meshCount;
|
||||
meshCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// fixing 'nextSibling' fields in the old roots (zero-index in all the scenes)
|
||||
offs = 1;
|
||||
int idx = 0;
|
||||
for (const Scene *s : scenes) {
|
||||
const int nodeCount = (int)s->hierarchy.size();
|
||||
const bool isLast = (idx == scenes.size() - 1);
|
||||
// calculate new next sibling for the old scene roots
|
||||
const int next = isLast ? -1 : offs + nodeCount;
|
||||
|
||||
scene.hierarchy[offs].nextSibling = next;
|
||||
// attach to new root
|
||||
scene.hierarchy[offs].parent = 0;
|
||||
|
||||
// transform old root nodes, if the transforms are given
|
||||
if (!rootTransforms.empty())
|
||||
scene.localTransform[offs] =
|
||||
rootTransforms[idx] * scene.localTransform[offs];
|
||||
|
||||
offs += nodeCount;
|
||||
idx++;
|
||||
}
|
||||
|
||||
// now, shift levels of all nodes below the root
|
||||
for (auto i = scene.hierarchy.begin() + 1; i != scene.hierarchy.end(); i++)
|
||||
i->level++;
|
||||
}
|
||||
|
||||
void dumpSceneToDot(const char *fileName, const Scene &scene, int *visited) {
|
||||
FILE *f = fopen(fileName, "w");
|
||||
fprintf(f, "digraph G\n{\n");
|
||||
for (size_t i = 0; i < scene.globalTransform.size(); i++) {
|
||||
std::string name = "";
|
||||
std::string extra = "";
|
||||
if (scene.nameForNode.contains(i)) {
|
||||
int strID = scene.nameForNode.at(i);
|
||||
name = scene.nodeNames[strID];
|
||||
}
|
||||
if (visited) {
|
||||
if (visited[i])
|
||||
extra = ", color = red";
|
||||
}
|
||||
fprintf(f, "n%d [label=\"%s\" %s]\n", (int)i, name.c_str(), extra.c_str());
|
||||
}
|
||||
for (size_t i = 0; i < scene.hierarchy.size(); i++) {
|
||||
int p = scene.hierarchy[i].parent;
|
||||
if (p > -1)
|
||||
fprintf(f, "\t n%d -> n%d\n", p, (int)i);
|
||||
}
|
||||
fprintf(f, "}\n");
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
// A rather long algorithm (and the auxiliary routines) to delete a number of
|
||||
// scene nodes from the hierarchy
|
||||
|
||||
// Add an index to a sorted index array
|
||||
static void addUniqueIdx(std::vector<uint32_t> &v, uint32_t index) {
|
||||
if (!std::binary_search(v.begin(), v.end(), index))
|
||||
v.push_back(index);
|
||||
}
|
||||
|
||||
// Recurse down from a node and collect all nodes which are already marked for
|
||||
// deletion
|
||||
static void collectNodesToDelete(const Scene &scene, int node,
|
||||
std::vector<uint32_t> &nodes) {
|
||||
for (int n = scene.hierarchy[node].firstChild; n != -1;
|
||||
n = scene.hierarchy[n].nextSibling) {
|
||||
addUniqueIdx(nodes, n);
|
||||
collectNodesToDelete(scene, n, nodes);
|
||||
}
|
||||
}
|
||||
|
||||
int findLastNonDeletedItem(const Scene &scene,
|
||||
const std::vector<int> &newIndices, int node) {
|
||||
// we have to be more subtle:
|
||||
// if the (newIndices[firstChild_] == -1), we should follow the link and
|
||||
// extract the last non-removed item
|
||||
// ..
|
||||
if (node == -1)
|
||||
return -1;
|
||||
|
||||
return (newIndices[node] == -1)
|
||||
? findLastNonDeletedItem(scene, newIndices,
|
||||
scene.hierarchy[node].nextSibling)
|
||||
: newIndices[node];
|
||||
}
|
||||
|
||||
void shiftMapIndices(std::unordered_map<uint32_t, uint32_t> &items,
|
||||
const std::vector<int> &newIndices) {
|
||||
std::unordered_map<uint32_t, uint32_t> newItems;
|
||||
for (const auto &m : items) {
|
||||
int newIndex = newIndices[m.first];
|
||||
if (newIndex != -1)
|
||||
newItems[newIndex] = m.second;
|
||||
}
|
||||
items = newItems;
|
||||
}
|
||||
|
||||
// Approximately an O ( N * Log(N) * Log(M)) algorithm (N = scene.size, M =
|
||||
// nodesToDelete.size) to delete a collection of nodes from scene graph
|
||||
void deleteSceneNodes(Scene &scene,
|
||||
const std::vector<uint32_t> &nodesToDelete) {
|
||||
// 0) Add all the nodes down below in the hierarchy
|
||||
auto indicesToDelete = nodesToDelete;
|
||||
for (uint32_t i : indicesToDelete)
|
||||
collectNodesToDelete(scene, i, indicesToDelete);
|
||||
|
||||
// aux array with node indices to keep track of the moved ones [moved =
|
||||
// [](node) { return (node != nodes[node]); ]
|
||||
std::vector<int> nodes(scene.hierarchy.size());
|
||||
std::iota(nodes.begin(), nodes.end(), 0);
|
||||
|
||||
// 1.a) Move all the indicesToDelete to the end of 'nodes' array (and cut them
|
||||
// off, a variation of swap'n'pop for multiple elements)
|
||||
const size_t oldSize = nodes.size();
|
||||
eraseSelected(nodes, indicesToDelete);
|
||||
|
||||
// 1.b) Make a newIndices[oldIndex] mapping table
|
||||
std::vector<int> newIndices(oldSize, -1);
|
||||
for (int i = 0; i < nodes.size(); i++)
|
||||
newIndices[nodes[i]] = i;
|
||||
|
||||
// 2) Replace all non-null parent/firstChild/nextSibling pointers in all the
|
||||
// nodes by new positions
|
||||
auto nodeMover = [&scene, &newIndices](Hierarchy &h) {
|
||||
return Hierarchy{
|
||||
.parent = (h.parent != -1) ? newIndices[h.parent] : -1,
|
||||
.firstChild = findLastNonDeletedItem(scene, newIndices, h.firstChild),
|
||||
.nextSibling = findLastNonDeletedItem(scene, newIndices, h.nextSibling),
|
||||
.lastSibling = findLastNonDeletedItem(scene, newIndices, h.lastSibling),
|
||||
};
|
||||
};
|
||||
std::transform(scene.hierarchy.begin(), scene.hierarchy.end(),
|
||||
scene.hierarchy.begin(), nodeMover);
|
||||
|
||||
// 3) Finally throw away the hierarchy items
|
||||
eraseSelected(scene.hierarchy, indicesToDelete);
|
||||
|
||||
// 4) As in mergeScenes() routine we also have to adjust all the "components"
|
||||
// (i.e., meshes, materials, names and transformations)
|
||||
|
||||
// 4a) Transformations are stored in arrays, so we just erase the items as we
|
||||
// did with the scene.hierarchy_
|
||||
eraseSelected(scene.localTransform, indicesToDelete);
|
||||
eraseSelected(scene.globalTransform, indicesToDelete);
|
||||
|
||||
// 4b) All the maps should change the key values with the newIndices[] array
|
||||
shiftMapIndices(scene.meshForNode, newIndices);
|
||||
shiftMapIndices(scene.materialForNode, newIndices);
|
||||
shiftMapIndices(scene.nameForNode, newIndices);
|
||||
|
||||
// 5) scene node names list is not modified, but in principle it can be
|
||||
// (remove all non-used items and adjust the nameForNode_ map) 6) Material
|
||||
// names list is not modified also, but if some materials fell out of use
|
||||
}
|
||||
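A minimal usage sketch of the "gluing" case described in the mergeScenes() comment, assuming two scenes and their mesh counts were loaded elsewhere; the second scene is offset by 10 units along X, and global transforms are then propagated the same way loadScene() does it. The helper name glueScenes is illustrative only.

#include "shared/Scene/Scene.h"

void glueScenes(Scene& a, Scene& b, uint32_t meshCountA, uint32_t meshCountB, Scene& out) {
  const std::vector<Scene*> scenes = { &a, &b };
  const std::vector<glm::mat4> rootTransforms = {
    glm::mat4(1.0f),
    glm::translate(glm::mat4(1.0f), glm::vec3(10.0f, 0.0f, 0.0f)),
  };
  const std::vector<uint32_t> meshCounts = { meshCountA, meshCountB };
  mergeScenes(out, scenes, rootTransforms, meshCounts); // mergeMeshes = mergeMaterials = true
  markAsChanged(out, 0);
  recalculateGlobalTransforms(out);
}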
shared/Scene/Scene.h (new file, 98 lines)
#pragma once

#include <string>
#include <unordered_map>
#include <vector>

#include <glm/ext.hpp>
#include <glm/glm.hpp>

using glm::mat4;

// we do not define std::vector<Node*> children - this is already present in the aiNode from Assimp

constexpr const int MAX_NODE_LEVEL = 16;

struct Hierarchy {
  // parent of this node (or -1 for the root)
  int parent = -1;
  // first child of this node (or -1)
  int firstChild = -1;
  // next sibling of this node (or -1)
  int nextSibling = -1;
  // cached last added sibling in this node's chain (or -1)
  int lastSibling = -1;
  // cached node level
  int level = 0;
};

/* This scene is converted into descriptorSet(s) in the MultiRenderer class.
   This structure is also used as a storage type in the SceneExporter tool. */
struct Scene {
  // local transformations for each node and global transforms
  // + an array of 'dirty/changed' local transforms
  std::vector<mat4> localTransform;  // indexed by node
  std::vector<mat4> globalTransform; // indexed by node

  // list of nodes that need their global transforms recalculated, bucketed by node level
  std::vector<int> changedAtThisFrame[MAX_NODE_LEVEL];

  // Hierarchy component
  std::vector<Hierarchy> hierarchy;

  // Mesh component: which Mesh belongs to which node (Node -> Mesh)
  std::unordered_map<uint32_t, uint32_t> meshForNode;

  // Material component: which material belongs to which node (Node -> Material)
  std::unordered_map<uint32_t, uint32_t> materialForNode;

  // Node name component: which name is assigned to the node (Node -> Name)
  std::unordered_map<uint32_t, uint32_t> nameForNode;

  // List of scene node names
  std::vector<std::string> nodeNames;

  // Debug list of material names
  std::vector<std::string> materialNames;
};

int addNode(Scene& scene, int parent, int level);

void markAsChanged(Scene& scene, int node);

int findNodeByName(const Scene& scene, const std::string& name);

inline std::string getNodeName(const Scene& scene, int node) {
  int strID = scene.nameForNode.contains(node) ? scene.nameForNode.at(node) : -1;
  return (strID > -1) ? scene.nodeNames[strID] : std::string();
}

inline void setNodeName(Scene& scene, int node, const std::string& name) {
  uint32_t stringID = (uint32_t)scene.nodeNames.size();
  scene.nodeNames.push_back(name);
  scene.nameForNode[node] = stringID;
}

int getNodeLevel(const Scene& scene, int n);

bool recalculateGlobalTransforms(Scene& scene);

void loadScene(const char* fileName, Scene& scene);
void saveScene(const char* fileName, const Scene& scene);

void dumpTransforms(const char* fileName, const Scene& scene);
void printChangedNodes(const Scene& scene);

void dumpSceneToDot(const char* fileName, const Scene& scene, int* visited = nullptr);

void mergeScenes(Scene& scene, const std::vector<Scene*>& scenes, const std::vector<glm::mat4>& rootTransforms,
                 const std::vector<uint32_t>& meshCounts, bool mergeMeshes = true, bool mergeMaterials = true);

// Delete a collection of nodes from a scenegraph
void deleteSceneNodes(Scene& scene, const std::vector<uint32_t>& nodesToDelete);
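A minimal usage sketch of the component arrays declared above: a root and one child are created with addNode(), the child's local transform is changed, and recalculateGlobalTransforms() propagates the change top-down. The function name smallHierarchyDemo and the node names are illustrative only.

#include "shared/Scene/Scene.h"

void smallHierarchyDemo() {
  Scene scene;
  const int root  = addNode(scene, -1, 0);
  const int child = addNode(scene, root, 1);
  setNodeName(scene, root, "Root");
  setNodeName(scene, child, "Child");

  // move the child one unit up and mark its subtree dirty
  scene.localTransform[child] = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 1.0f, 0.0f));
  markAsChanged(scene, child);
  // globalTransform[child] becomes globalTransform[root] * localTransform[child]
  recalculateGlobalTransforms(scene);
}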
shared/Scene/VtxData.cpp (new file, 342 lines)
#include "shared/Scene/VtxData.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
|
||||
bool isMeshDataValid(const char* fileName)
|
||||
{
|
||||
FILE* f = fopen(fileName, "rb");
|
||||
|
||||
if (!f)
|
||||
return false;
|
||||
|
||||
SCOPE_EXIT
|
||||
{
|
||||
fclose(f);
|
||||
};
|
||||
|
||||
MeshFileHeader header;
|
||||
|
||||
if (fread(&header, 1, sizeof(header), f) != sizeof(header))
|
||||
return false;
|
||||
|
||||
if (fseek(f, sizeof(Mesh) * header.meshCount, SEEK_CUR))
|
||||
return false;
|
||||
|
||||
if (fseek(f, sizeof(BoundingBox) * header.meshCount, SEEK_CUR))
|
||||
return false;
|
||||
|
||||
if (fseek(f, header.indexDataSize, SEEK_CUR))
|
||||
return false;
|
||||
|
||||
if (fseek(f, header.vertexDataSize, SEEK_CUR))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool isMeshHierarchyValid(const char* fileName)
|
||||
{
|
||||
FILE* f = fopen(fileName, "rb");
|
||||
|
||||
if (!f)
|
||||
return false;
|
||||
|
||||
SCOPE_EXIT
|
||||
{
|
||||
fclose(f);
|
||||
};
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool isMeshMaterialsValid(const char* fileName)
|
||||
{
|
||||
FILE* f = fopen(fileName, "rb");
|
||||
|
||||
if (!f)
|
||||
return false;
|
||||
|
||||
SCOPE_EXIT
|
||||
{
|
||||
fclose(f);
|
||||
};
|
||||
|
||||
uint64_t numMaterials = 0;
|
||||
uint64_t materialsSize = 0;
|
||||
|
||||
if (fread(&numMaterials, 1, sizeof(numMaterials), f) != sizeof(numMaterials))
|
||||
return false;
|
||||
if (fread(&materialsSize, 1, sizeof(materialsSize), f) != sizeof(materialsSize))
|
||||
return false;
|
||||
if (numMaterials * sizeof(Material) != materialsSize)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
MeshFileHeader loadMeshData(const char* meshFile, MeshData& out)
|
||||
{
|
||||
FILE* f = fopen(meshFile, "rb");
|
||||
|
||||
assert(f);
|
||||
|
||||
if (!f) {
|
||||
printf("Cannot open '%s'.\n", meshFile);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
SCOPE_EXIT
|
||||
{
|
||||
fclose(f);
|
||||
};
|
||||
|
||||
MeshFileHeader header;
|
||||
|
||||
if (fread(&header, 1, sizeof(header), f) != sizeof(header)) {
|
||||
printf("Unable to read mesh file header.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
if (fread(&out.streams, 1, sizeof(out.streams), f) != sizeof(out.streams)) {
|
||||
printf("Unable to read vertex streams description.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
out.meshes.resize(header.meshCount);
|
||||
if (fread(out.meshes.data(), sizeof(Mesh), header.meshCount, f) != header.meshCount) {
|
||||
printf("Could not read mesh descriptors.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
out.boxes.resize(header.meshCount);
|
||||
if (fread(out.boxes.data(), sizeof(BoundingBox), header.meshCount, f) != header.meshCount) {
|
||||
printf("Could not read bounding boxes.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
out.indexData.resize(header.indexDataSize / sizeof(uint32_t));
|
||||
out.vertexData.resize(header.vertexDataSize);
|
||||
|
||||
if (fread(out.indexData.data(), 1, header.indexDataSize, f) != header.indexDataSize) {
|
||||
printf("Unable to read index data.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
if (fread(out.vertexData.data(), 1, header.vertexDataSize, f) != header.vertexDataSize) {
|
||||
printf("Unable to read vertex data.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
return header;
|
||||
}
|
||||
|
||||
void loadMeshDataMaterials(const char* fileName, MeshData& out)
|
||||
{
|
||||
FILE* f = fopen(fileName, "rb");
|
||||
|
||||
if (!f) {
|
||||
printf("Cannot open '%s'.\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
uint64_t numMaterials = 0;
|
||||
uint64_t materialsSize = 0;
|
||||
|
||||
if (fread(&numMaterials, 1, sizeof(numMaterials), f) != sizeof(numMaterials)) {
|
||||
printf("Unable to read numMaterials.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
if (fread(&materialsSize, 1, sizeof(materialsSize), f) != sizeof(materialsSize)) {
|
||||
printf("Unable to read materialsSize.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
if (numMaterials * sizeof(Material) != materialsSize) {
|
||||
printf("Corrupted material file '%s'.\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
out.materials.resize(numMaterials);
|
||||
if (fread(out.materials.data(), 1, materialsSize, f) != materialsSize) {
|
||||
printf("Unable to read material data.\n");
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
loadStringList(f, out.textureFiles);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
void saveMeshData(const char* fileName, const MeshData& m)
|
||||
{
|
||||
FILE* f = fopen(fileName, "wb");
|
||||
|
||||
if (!f) {
|
||||
printf("Error opening file '%s' for writing.\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
const MeshFileHeader header = {
|
||||
.meshCount = (uint32_t)m.meshes.size(),
|
||||
.indexDataSize = (uint32_t)(m.indexData.size() * sizeof(uint32_t)),
|
||||
.vertexDataSize = (uint32_t)(m.vertexData.size()),
|
||||
};
|
||||
|
||||
fwrite(&header, 1, sizeof(header), f);
|
||||
fwrite(&m.streams, 1, sizeof(m.streams), f);
|
||||
fwrite(m.meshes.data(), sizeof(Mesh), header.meshCount, f);
|
||||
fwrite(m.boxes.data(), sizeof(BoundingBox), header.meshCount, f);
|
||||
fwrite(m.indexData.data(), 1, header.indexDataSize, f);
|
||||
fwrite(m.vertexData.data(), 1, header.vertexDataSize, f);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
void saveMeshDataMaterials(const char* fileName, const MeshData& m)
|
||||
{
|
||||
FILE* f = fopen(fileName, "wb");
|
||||
|
||||
if (!f) {
|
||||
printf("Error opening file '%s' for writing.\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
const uint64_t numMaterials = m.materials.size();
|
||||
const uint64_t materialsSize = m.materials.size() * sizeof(Material);
|
||||
|
||||
fwrite(&numMaterials, 1, sizeof(numMaterials), f);
|
||||
fwrite(&materialsSize, 1, sizeof(materialsSize), f);
|
||||
fwrite(m.materials.data(), sizeof(Material), numMaterials, f);
|
||||
|
||||
saveStringList(f, m.textureFiles);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
void saveBoundingBoxes(const char* fileName, const std::vector<BoundingBox>& boxes)
|
||||
{
|
||||
FILE* f = fopen(fileName, "wb");
|
||||
|
||||
if (!f) {
|
||||
printf("Error opening bounding boxes file '%s' for writing.\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
const uint32_t sz = (uint32_t)boxes.size();
|
||||
fwrite(&sz, 1, sizeof(sz), f);
|
||||
fwrite(boxes.data(), sz, sizeof(BoundingBox), f);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
void loadBoundingBoxes(const char* fileName, std::vector<BoundingBox>& boxes)
|
||||
{
|
||||
FILE* f = fopen(fileName, "rb");
|
||||
|
||||
if (!f) {
|
||||
printf("Error opening bounding boxes file '%s'\n", fileName);
|
||||
assert(false);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
uint32_t sz;
|
||||
fread(&sz, 1, sizeof(sz), f);
|
||||
|
||||
// TODO: check file size, divide by bounding box size
|
||||
boxes.resize(sz);
|
||||
fread(boxes.data(), sz, sizeof(BoundingBox), f);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
// combine a collection of meshes into a single MeshData container
|
||||
MeshFileHeader mergeMeshData(MeshData& m, const std::vector<MeshData*> md)
|
||||
{
|
||||
uint32_t numTotalVertices = 0;
|
||||
uint32_t numTotalIndices = 0;
|
||||
|
||||
if (!md.empty()) {
|
||||
m.streams = md[0]->streams;
|
||||
}
|
||||
|
||||
const uint32_t vertexSize = m.streams.getVertexSize();
|
||||
|
||||
uint32_t offset = 0;
|
||||
uint32_t mtlOffset = 0;
|
||||
|
||||
for (const MeshData* i : md) {
|
||||
LVK_ASSERT(m.streams == i->streams);
|
||||
mergeVectors(m.indexData, i->indexData);
|
||||
mergeVectors(m.vertexData, i->vertexData);
|
||||
mergeVectors(m.meshes, i->meshes);
|
||||
mergeVectors(m.boxes, i->boxes);
|
||||
|
||||
for (size_t j = 0; j != i->meshes.size(); j++) {
|
||||
// m.vertexCount, m.lodCount and m.streamCount do not change
|
||||
// m.vertexOffset also does not change, because vertex offsets are local (i.e., baked into the indices)
|
||||
m.meshes[offset + j].indexOffset += numTotalIndices;
|
||||
m.meshes[offset + j].materialID += mtlOffset;
|
||||
}
|
||||
|
||||
// shift individual indices
|
||||
for (size_t j = 0; j != i->indexData.size(); j++) {
|
||||
m.indexData[numTotalIndices + j] += numTotalVertices;
|
||||
}
|
||||
|
||||
offset += (uint32_t)i->meshes.size();
|
||||
mtlOffset += (uint32_t)i->materials.size();
|
||||
|
||||
numTotalIndices += (uint32_t)i->indexData.size();
|
||||
numTotalVertices += (uint32_t)i->vertexData.size() / vertexSize;
|
||||
}
|
||||
|
||||
return MeshFileHeader{
|
||||
.magicValue = 0x12345678,
|
||||
.meshCount = (uint32_t)offset,
|
||||
.indexDataSize = static_cast<uint32_t>(numTotalIndices * sizeof(uint32_t)),
|
||||
.vertexDataSize = static_cast<uint32_t>(m.vertexData.size()),
|
||||
};
|
||||
}
|
||||
|
||||
void recalculateBoundingBoxes(MeshData& m)
|
||||
{
|
||||
LVK_ASSERT(m.streams.attributes[0].format == lvk::VertexFormat::Float3);
|
||||
|
||||
const uint32_t stride = m.streams.getVertexSize();
|
||||
|
||||
m.boxes.clear();
|
||||
m.boxes.reserve(m.meshes.size());
|
||||
|
||||
for (const Mesh& mesh : m.meshes) {
|
||||
const uint32_t numIndices = mesh.getLODIndicesCount(0);
|
||||
|
||||
glm::vec3 vmin(std::numeric_limits<float>::max());
|
||||
glm::vec3 vmax(std::numeric_limits<float>::lowest());
|
||||
|
||||
for (uint32_t i = 0; i != numIndices; i++) {
|
||||
const uint32_t vtxOffset = m.indexData[mesh.indexOffset + i] + mesh.vertexOffset;
|
||||
const float* vf = (const float*)&m.vertexData[vtxOffset * stride];
|
||||
|
||||
vmin = glm::min(vmin, vec3(vf[0], vf[1], vf[2]));
|
||||
vmax = glm::max(vmax, vec3(vf[0], vf[1], vf[2]));
|
||||
}
|
||||
|
||||
m.boxes.emplace_back(vmin, vmax);
|
||||
}
|
||||
}
|
||||
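A minimal usage sketch for mergeMeshData() above, assuming both inputs were loaded with the same vertex stream layout (the function asserts on that); the output file name is a placeholder and the helper name combineAndSave is illustrative only.

#include "shared/Scene/VtxData.h"

#include <stdio.h>

void combineAndSave(MeshData& a, MeshData& b) {
  MeshData merged;
  const MeshFileHeader header = mergeMeshData(merged, { &a, &b });
  // bounding boxes are rebuilt from the merged index/vertex data
  recalculateBoundingBoxes(merged);
  saveMeshData("data/merged.meshes", merged); // hypothetical output path
  printf("Merged %u meshes\n", header.meshCount);
}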
shared/Scene/VtxData.h (new file, 111 lines)
#pragma once

#include <stdint.h>
#include <string.h>

#include <string>
#include <vector>

#include <glm/glm.hpp>

#include "shared/Utils.h"
#include "shared/UtilsMath.h"

constexpr const uint32_t kMaxLODs = 7;

// All offsets are relative to the beginning of the data block (excluding headers with a Mesh list)
struct Mesh final {
  // Number of LODs in this mesh. Strictly less than kMaxLODs; the last LOD offset is used as a marker only
  uint32_t lodCount = 1;

  // Offset of this mesh's first index inside MeshData::indexData
  uint32_t indexOffset = 0;

  // Index of this mesh's first vertex (the total count of all previous vertices in this mesh file)
  uint32_t vertexOffset = 0;

  // Vertex count (for all LODs)
  uint32_t vertexCount = 0;

  // Offsets to LOD index data. The last offset is used as a marker to calculate the size
  uint32_t lodOffset[kMaxLODs + 1] = { 0 };

  uint32_t materialID = 0;

  inline uint32_t getLODIndicesCount(uint32_t lod) const { return lod < lodCount ? lodOffset[lod + 1] - lodOffset[lod] : 0; }

  // Any additional information, such as a mesh name, can be added here...
};

struct MeshFileHeader {
  // Unique 32-bit value to check the integrity of the file
  uint32_t magicValue = 0x12345678;

  // Number of mesh descriptors following this header
  uint32_t meshCount = 0;

  // How much space the index data takes, in bytes
  uint32_t indexDataSize = 0;

  // How much space the vertex data takes, in bytes
  uint32_t vertexDataSize = 0;

  // According to your needs, you may add additional metadata fields...
};

enum MaterialFlags {
  sMaterialFlags_CastShadow = 0x1,
  sMaterialFlags_ReceiveShadow = 0x2,
  sMaterialFlags_Transparent = 0x4,
};

struct Material {
  vec4 emissiveFactor = vec4(0.0f, 0.0f, 0.0f, 0.0f);
  vec4 baseColorFactor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
  float roughness = 1.0f;
  float transparencyFactor = 1.0f;
  float alphaTest = 0.0f;
  float metallicFactor = 0.0f;
  // indices into MeshData::textureFiles
  int baseColorTexture = -1;
  int emissiveTexture = -1;
  int normalTexture = -1;
  int opacityTexture = -1;
  uint32_t flags = sMaterialFlags_CastShadow | sMaterialFlags_ReceiveShadow;
};

struct MeshData {
  lvk::VertexInput streams = {};
  std::vector<uint32_t> indexData;
  std::vector<uint8_t> vertexData;
  std::vector<Mesh> meshes;
  std::vector<BoundingBox> boxes;
  std::vector<Material> materials;
  std::vector<std::string> textureFiles;

  MeshFileHeader getMeshFileHeader() const
  {
    return {
      .meshCount = (uint32_t)meshes.size(),
      .indexDataSize = (uint32_t)(indexData.size() * sizeof(uint32_t)),
      .vertexDataSize = (uint32_t)vertexData.size(),
    };
  }
};

static_assert(sizeof(BoundingBox) == sizeof(float) * 6);

bool isMeshDataValid(const char* fileName);
bool isMeshMaterialsValid(const char* fileName);
bool isMeshHierarchyValid(const char* fileName);
MeshFileHeader loadMeshData(const char* meshFile, MeshData& out);
void loadMeshDataMaterials(const char* meshFile, MeshData& out);
void saveMeshData(const char* fileName, const MeshData& m);
void saveMeshDataMaterials(const char* fileName, const MeshData& m);

void recalculateBoundingBoxes(MeshData& m);

// combine a list of meshes into a single mesh container
MeshFileHeader mergeMeshData(MeshData& m, const std::vector<MeshData*> md);

// use to write values into MeshData::vertexData
template <typename T> inline void put(std::vector<uint8_t>& v, const T& value)
{
  const size_t pos = v.size();
  v.resize(v.size() + sizeof(value));
  memcpy(v.data() + pos, &value, sizeof(value));
}
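A minimal usage sketch for put<T>() above: appending one interleaved vertex to MeshData::vertexData, assuming a hypothetical position + uv layout (Float3 followed by Float2). The bytes written per vertex have to match streams.getVertexSize(); the helper name appendVertex is illustrative only.

#include "shared/Scene/VtxData.h"

void appendVertex(MeshData& md, const glm::vec3& pos, const glm::vec2& uv) {
  put(md.vertexData, pos); // 12 bytes
  put(md.vertexData, uv);  //  8 bytes
}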