mirror of https://github.com/bjornbytes/lovr.git
ModelData: Use union for transform fields
Nodes can have either a transform matrix or decomposed transform properties, but never both. Using a union lets us store both variants in the same piece of memory, using the existing matrix boolean to determine which one is active. This reduces the size of the struct by 48 bytes (152 -> 104), which ends up speeding up some model operations, likely due to improved CPU cache utilization.
This commit is contained in:
parent
155718d161
commit
443376efde
|
@ -157,10 +157,14 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
const char* name;
|
||||
float transform[16];
|
||||
float translation[4];
|
||||
float rotation[4];
|
||||
float scale[4];
|
||||
union {
|
||||
float matrix[16];
|
||||
struct {
|
||||
float translation[4];
|
||||
float rotation[4];
|
||||
float scale[4];
|
||||
} properties;
|
||||
} transform;
|
||||
uint32_t* children;
|
||||
uint32_t childCount;
|
||||
uint32_t primitiveIndex;
|
||||
|
|
|
@ -809,9 +809,9 @@ ModelData* lovrModelDataInitGltf(ModelData* model, Blob* source, ModelDataIO* io
|
|||
jsmntok_t* token = info.nodes;
|
||||
ModelNode* node = model->nodes;
|
||||
for (int i = (token++)->size; i > 0; i--, node++) {
|
||||
vec3 translation = vec3_set(node->translation, 0.f, 0.f, 0.f);
|
||||
quat rotation = quat_set(node->rotation, 0.f, 0.f, 0.f, 1.f);
|
||||
vec3 scale = vec3_set(node->scale, 1.f, 1.f, 1.f);
|
||||
vec3 translation = vec3_set(node->transform.properties.translation, 0.f, 0.f, 0.f);
|
||||
quat rotation = quat_set(node->transform.properties.rotation, 0.f, 0.f, 0.f, 1.f);
|
||||
vec3 scale = vec3_set(node->transform.properties.scale, 1.f, 1.f, 1.f);
|
||||
node->matrix = false;
|
||||
node->primitiveCount = 0;
|
||||
node->skin = ~0u;
|
||||
|
@ -834,7 +834,7 @@ ModelData* lovrModelDataInitGltf(ModelData* model, Blob* source, ModelDataIO* io
|
|||
lovrAssert((token++)->size == 16, "Node matrix needs 16 elements");
|
||||
node->matrix = true;
|
||||
for (int j = 0; j < 16; j++) {
|
||||
node->transform[j] = NOM_FLOAT(json, token);
|
||||
node->transform.matrix[j] = NOM_FLOAT(json, token);
|
||||
}
|
||||
} else if (STR_EQ(key, "translation")) {
|
||||
lovrAssert((token++)->size == 3, "Node translation needs 3 elements");
|
||||
|
@ -898,7 +898,7 @@ ModelData* lovrModelDataInitGltf(ModelData* model, Blob* source, ModelDataIO* io
|
|||
ModelNode* lastNode = &model->nodes[model->rootNode];
|
||||
lastNode->childCount = scenes[rootScene].nodeCount;
|
||||
lastNode->children = &model->children[childIndex];
|
||||
mat4_identity(lastNode->transform);
|
||||
mat4_identity(lastNode->transform.matrix);
|
||||
lastNode->matrix = true;
|
||||
lastNode->primitiveCount = 0;
|
||||
lastNode->skin = ~0u;
|
||||
|
|
|
@ -321,7 +321,7 @@ ModelData* lovrModelDataInitObj(ModelData* model, Blob* source, ModelDataIO* io)
|
|||
}
|
||||
|
||||
model->nodes[0] = (ModelNode) {
|
||||
.transform = MAT4_IDENTITY,
|
||||
.transform.matrix = MAT4_IDENTITY,
|
||||
.primitiveIndex = 0,
|
||||
.primitiveCount = (uint32_t) groups.length,
|
||||
.skin = ~0u,
|
||||
|
|
|
@ -357,13 +357,13 @@ void lovrModelPose(Model* model, uint32_t nodeIndex, float position[4], float ro
|
|||
void lovrModelResetPose(Model* model) {
|
||||
for (uint32_t i = 0; i < model->data->nodeCount; i++) {
|
||||
if (model->data->nodes[i].matrix) {
|
||||
mat4_getPosition(model->data->nodes[i].transform, model->localTransforms[i].properties[PROP_TRANSLATION]);
|
||||
mat4_getOrientation(model->data->nodes[i].transform, model->localTransforms[i].properties[PROP_ROTATION]);
|
||||
mat4_getScale(model->data->nodes[i].transform, model->localTransforms[i].properties[PROP_SCALE]);
|
||||
mat4_getPosition(model->data->nodes[i].transform.matrix, model->localTransforms[i].properties[PROP_TRANSLATION]);
|
||||
mat4_getOrientation(model->data->nodes[i].transform.matrix, model->localTransforms[i].properties[PROP_ROTATION]);
|
||||
mat4_getScale(model->data->nodes[i].transform.matrix, model->localTransforms[i].properties[PROP_SCALE]);
|
||||
} else {
|
||||
vec3_init(model->localTransforms[i].properties[PROP_TRANSLATION], model->data->nodes[i].translation);
|
||||
quat_init(model->localTransforms[i].properties[PROP_ROTATION], model->data->nodes[i].rotation);
|
||||
vec3_init(model->localTransforms[i].properties[PROP_SCALE], model->data->nodes[i].scale);
|
||||
vec3_init(model->localTransforms[i].properties[PROP_TRANSLATION], model->data->nodes[i].transform.properties.translation);
|
||||
quat_init(model->localTransforms[i].properties[PROP_ROTATION], model->data->nodes[i].transform.properties.rotation);
|
||||
vec3_init(model->localTransforms[i].properties[PROP_SCALE], model->data->nodes[i].transform.properties.scale);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -476,7 +476,7 @@ static ModelData* openvr_newModelData(Device device) {
|
|||
};
|
||||
|
||||
model->nodes[0] = (ModelNode) {
|
||||
.transform = MAT4_IDENTITY,
|
||||
.transform.matrix = MAT4_IDENTITY,
|
||||
.primitiveIndex = 0,
|
||||
.primitiveCount = 1,
|
||||
.skin = ~0u,
|
||||
|
|
Loading…
Reference in New Issue