/* DOCUMENTATION
struct rxNodePS2AllMatPvtData
{
    // Callbacks
    RxPipelineNodePS2AllMatMeshInstanceTestCallBack meshInstanceTestCB;
    RxPipelineNodePS2AllMatResEntryAllocCallBack resEntryAllocCB;
    RxPipelineNodePS2AllMatInstanceCallBack instanceCB;
    RxPipelineNodePS2AllMatBridgeCallBack bridgeCB;
    RxPipelineNodePS2AllMatPostMeshCallBack postMeshCB;

    // MatBridge sub-section [first for memory coherence,
    // instance stuff is not fast-path]
    int vifOffset;
    void **vu1CodeArray;
    RwUInt32 codeArrayLength;

    // MatInstance sub-section
    // [Arrays are indexed by CL_xxx defines in PS2ClusterAttribs.h]
    rwPS2AllClusterInstanceInfo clinfo[CL_MAXCL + FMADD];
    RwUInt32 cliIndex[CL_MAXCL + FMADD];
    RpMeshHeaderFlags pipeType;   // primitive this pipe can handle
    RwUInt8 totallyOpaque;        // all clusters are marked CL_ATTRIB_OPAQUE
    RwUInt8 numStripes;           // num broken out clusters
    RwUInt8 sizeOnVU;             // size of one vertex on VU in qwords
    RwUInt8 pad0;
    rwPS2AllResEntryFormat strip;
    rwPS2AllResEntryFormat list;
    RwUInt32 magicValue;          // 0x34F9
};

struct rwPS2AllFieldRec
{
    int numVerts;           // number of vertices for this cluster
#if (defined(FASTMORPH))
    int morphNumVerts;
#endif
    int dataoffset;         // start of first data block in instance data
#if (defined(FASTMORPH))
    int morphDataoffset;
#endif
    short skip;             // number of qwords between two data blocks
#if (defined(FASTMORPH))
    short morphSkip;
#endif
    short reverse;          // negative offset to beginning of last batch
    unsigned char vuoffset; // offset of vertex attribute in VU memory
#if (!defined(FASTMORPH))
    unsigned char pad[3];
#else
    unsigned char pad[1];
#endif
};

struct rwPS2AllResEntryFormat
{
    RwUInt8 batchRound;     // used to round down number of vertices per batch
    RwUInt8 stripReverse;   // number of vertices that need to be repeated between batches
    RwUInt8 pad[2];
    RwUInt32 maxInputSize;  // size of input vertex buffer in qwords
    RwInt32 batchSize;      // number of vertices per batch
    RwInt32 batchesPerTag;  // max number of batches per DMA packet
#if (defined(FASTMORPH))
    RwInt32 morphBatchSize;     // as above for fast-morphing
    RwInt32 morphBatchesPerTag; //
#endif
    rwPS2AllFieldRec fieldRec[CL_MAXCL + FMADD];
};
*/

struct ps2matInitData
{
    RxClusterDefinition *clusters[CL_MAXCL + FMADD];
    RwInt32 stripVifOffset;
    RwInt32 listVifOffset;
    RwInt32 stride;     // on VU
    RwInt32 pipeType;
};

// Not a function in RW
ps2matInitData*
getInitData(RxPipelineNode *self)
{
    ps2matInitData *init;
    init = RxPipelineNodeGetInitData(self);
    if(init)
        return init;
    init = RxPipelineNodeCreateInitData(self, sizeof(ps2matInitData));
    if(init){
        memset(init, 0, sizeof(ps2matInitData));
        init->stripVifOffset = 0x114;
        init->listVifOffset = 0x114;
        init->stride = 4;
        return init;
    }
    return NULL;
}
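/*
 * The construction-time code below derives, for both the strip and the list
 * rwPS2AllResEntryFormat, the per-batch vertex count (batchSize), how many
 * batches can share one DMA tag (batchesPerTag) and, per cluster, a
 * rwPS2AllFieldRec giving its data offset, skip and VU offset within a batch.
 * A worked example with illustrative numbers follows the function.
 */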
RwBool
_rwPS2AllRabinsConstructionTimeCode(rxNodePS2AllMatPvtData *pvtData)
{
    RwInt32 nverts;
    RwInt32 i, numAttribs, atrb;
    rwPS2AllResEntryFormat *fmt;
    rwPS2AllFieldRec *rec;
    int skip, vuoffset, dataoffset, size;

    if(pvtData->pipeType & rpMESHHEADERTRISTRIP ||
       (pvtData->pipeType & rpMESHHEADERPRIMMASK) == 0){
        // vertex count must be a multiple of 4 due to alignment (but only in stripes?)
        // trilist vertex count must be a multiple of 3 obviously
        pvtData->list.batchRound = 3*4;
        pvtData->strip.batchRound = 4;
        pvtData->list.stripReverse = 0;
        pvtData->strip.stripReverse = 2;
    }else if(pvtData->pipeType & (rpMESHHEADERLINELIST|rpMESHHEADERPOLYLINE)){
        pvtData->list.batchRound = 4;
        pvtData->strip.batchRound = 4;
        pvtData->list.stripReverse = 0;
        pvtData->strip.stripReverse = 1;
    }else{
        pvtData->list.batchRound = 4;
        pvtData->strip.batchRound = 4;
        pvtData->list.stripReverse = 0;
        pvtData->strip.stripReverse = 0;
    }

    // morph target XYZ and normals (presumably CL_MAXCL and CL_MAXCL+1)
    pvtData->clinfo[10].attrib = CL_V3_32 | CL_ATTRIB_OPAQUE | CL_ATTRIB_REQUIRED;
    pvtData->clinfo[11].attrib = CL_V4_8 | CL_ATTRIB_OPAQUE | CL_ATTRIB_REQUIRED;

    pvtData->totallyOpaque = 1;
    numAttribs = 0;
    for(i = 0; i < CL_MAXCL; i++){
        if(pvtData->clinfo[i].attrib & CL_ATTRIB_REQUIRED){
            if(!(pvtData->clinfo[i].attrib & CL_ATTRIB_OPAQUE))
                pvtData->totallyOpaque = 0;
            numAttribs++;
        }else if(pvtData->clinfo[i].attrib & CL_ATTRIB_PLACEHOLDER){
            numAttribs++;
        }
    }
    if(numAttribs != pvtData->sizeOnVU)
        return FALSE;

    // figure out res entry format for lists and strips
    fmt = &pvtData->list;
format:
    if(pvtData->totallyOpaque){
        nverts = fmt->maxInputSize / numAttribs;
        fmt->batchSize = nverts/fmt->batchRound*fmt->batchRound;
        if(fmt->batchSize < fmt->batchRound)
            return FALSE;
        // assume maximum possible size: xyzw, uv2, rgba, normals
        fmt->batchesPerTag = 0xFFFF/((fmt->batchSize*2) + 2 + // xyzw and uv2
            (fmt->batchSize*4 + 12)/16 + 1 +                  // rgba
            (fmt->batchSize*3 + 15)/16 + 1 +                  // normals
            1);                                               // kick off

        nverts = fmt->maxInputSize / (numAttribs+2);
        fmt->morphBatchSize = nverts/fmt->batchRound*fmt->batchRound;
        // same as above plus xyz and normals for morphing
        fmt->morphBatchesPerTag = 0xFFFF/((fmt->morphBatchSize*2) + 2 + // xyzw and uv2
            (fmt->morphBatchSize*4 + 12)/16 + 1 +                       // rgba
            (fmt->morphBatchSize*3 + 15)/16 + 1 +                       // normals
            (fmt->morphBatchSize*12 + 12)/16 + 1 +                      // morph xyz
            (fmt->morphBatchSize*3 + 15)/16 + 1 +                       // morph normals
            1);                                                         // kick off
    }else{
        nverts = fmt->maxInputSize / numAttribs;
        fmt->batchSize = (nverts - fmt->stripReverse)/fmt->batchRound*fmt->batchRound + fmt->stripReverse;
        if(fmt->batchSize < nverts - fmt->stripReverse + fmt->batchRound)
            return FALSE;
        fmt->batchesPerTag = 1;

        nverts = fmt->maxInputSize / (numAttribs+2);
        fmt->morphBatchSize = (nverts - fmt->stripReverse)/fmt->batchRound*fmt->batchRound + fmt->stripReverse;
        fmt->morphBatchesPerTag = 1;
    }
    // calculate the size of one batch so we'll be able to skip through
    skip = pvtData->totallyOpaque ?
        1 :     // just to kick off
        2;      // one embedded DMAcnt for opaque
    for(i = 0; i < CL_MAXCL; i++){
        if(!(pvtData->clinfo[i].attrib & CL_ATTRIB_REQUIRED))
            continue;
        if(pvtData->clinfo[i].attrib & CL_ATTRIB_OPAQUE){
            switch(i){
            case CL_XYZ: skip += (fmt->batchSize*12 + 12)/16; break;
            case CL_XYZW:
            case CL_UV2: skip += fmt->batchSize; break;
            case CL_UV: skip += (fmt->batchSize*8 + 12)/16; break;
            case CL_RGBA: skip += (fmt->batchSize*4 + 12)/16; break;
            case CL_NORMAL: skip += (fmt->batchSize*3 + 15)/16; break;
            default: break;
            }
            skip += 1;  // unpack VIFcode
        }else{
            skip += 2;  // DMAref(unpack VIFcode) and empty DMAcnt
        }
    }

    // fill fieldRec for each cluster
    vuoffset = 0;
    size = 0;
    dataoffset = 2*pvtData->numStripes +    // skip DMAref and DMAcnt for each stripe
        1;                                  // DMAcnt of opaque data
    for(i = 0; i < CL_MAXCL; i++){
        rec = &fmt->fieldRec[i];
        if(pvtData->clinfo[i].attrib & CL_ATTRIB_REQUIRED){
            rec->vuoffset = vuoffset++;
            if(pvtData->clinfo[i].attrib & CL_ATTRIB_OPAQUE){
                dataoffset += 1 + size;     // unpack + last unpacked data
                rec->numVerts = fmt->batchSize;
                rec->dataoffset = dataoffset;
                rec->reverse = 0;
                rec->skip = skip;
                switch(i){
                case CL_XYZ: size = (fmt->batchSize*12 + 12)/16; break;
                case CL_XYZW:
                case CL_UV2: size = fmt->batchSize; break;
                case CL_UV: size = (fmt->batchSize*8 + 12)/16; break;
                case CL_RGBA: size = (fmt->batchSize*4 + 12)/16; break;
                case CL_NORMAL: size = (fmt->batchSize*3 + 15)/16; break;
                default: break;
                }
            }else{
                rec->numVerts = 0;
                rec->dataoffset = 0;
                rec->reverse = 0;
                rec->skip = 0;
            }
        }else if(pvtData->clinfo[i].attrib & CL_ATTRIB_PLACEHOLDER){
            rec->vuoffset = vuoffset++;
            rec->numVerts = 0;
            rec->dataoffset = 0;
            rec->reverse = 0;
            rec->skip = 0;
        }
    }

    // same as above, for morphing
    skip = pvtData->totallyOpaque ?
        1 :     // just to kick off
        2;      // one embedded DMAcnt for opaque
    for(i = 0; i < CL_MAXCL+2; i++){
        if(!(pvtData->clinfo[i].attrib & CL_ATTRIB_REQUIRED))
            continue;
        if(pvtData->clinfo[i].attrib & CL_ATTRIB_OPAQUE){
            switch(i){
            case CL_MAXCL:      // morph XYZ
            case CL_XYZ: skip += (fmt->morphBatchSize*12 + 12)/16; break;
            case CL_XYZW:
            case CL_UV2: skip += fmt->morphBatchSize; break;
            case CL_UV: skip += (fmt->morphBatchSize*8 + 12)/16; break;
            case CL_RGBA: skip += (fmt->morphBatchSize*4 + 12)/16; break;
            case CL_MAXCL+1:    // morph normals
            case CL_NORMAL: skip += (fmt->morphBatchSize*3 + 15)/16; break;
            default: break;
            }
            skip += 1;  // unpack VIFcode
        }else{
            skip += 2;  // DMAref(unpack VIFcode) and empty DMAcnt
        }
    }

    vuoffset = 0;
    size = 0;
    dataoffset = 2*pvtData->numStripes +    // skip DMAref and DMAcnt for each stripe
        1;                                  // DMAcnt of opaque data
    for(i = 0; i < CL_MAXCL+2; i++){
        rec = &fmt->fieldRec[i];
        if(pvtData->clinfo[i].attrib & CL_ATTRIB_REQUIRED){
            rec->vuoffset = vuoffset++;
            if(pvtData->clinfo[i].attrib & CL_ATTRIB_OPAQUE){
                dataoffset += 1 + size;     // unpack + last unpacked data
                rec->morphNumVerts = fmt->morphBatchSize;
                rec->morphDataoffset = dataoffset;
                rec->reverse = 0;
                rec->morphSkip = skip;
                switch(i){
                case CL_MAXCL:      // morph XYZ
                case CL_XYZ: size = (fmt->morphBatchSize*12 + 12)/16; break;
                case CL_XYZW:
                case CL_UV2: size = fmt->morphBatchSize; break;
                case CL_UV: size = (fmt->morphBatchSize*8 + 12)/16; break;
                case CL_RGBA: size = (fmt->morphBatchSize*4 + 12)/16; break;
                case CL_MAXCL+1:    // morph normals
                case CL_NORMAL: size = (fmt->morphBatchSize*3 + 15)/16; break;
                default: break;
                }
            }else{
                rec->morphNumVerts = 0;
                rec->morphDataoffset = 0;
                rec->reverse = 0;
                rec->morphSkip = 0;
            }
        }else if(pvtData->clinfo[i].attrib & CL_ATTRIB_PLACEHOLDER){
            rec->vuoffset = vuoffset++;
            rec->morphNumVerts = 0;
            rec->morphDataoffset = 0;
            rec->reverse = 0;
            rec->morphSkip = 0;
        }
    }

    if(fmt == &pvtData->list){
        fmt = &pvtData->strip;
        goto format;
    }
    return TRUE;
}
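/*
 * Worked example of the batch-size arithmetic above. The numbers are
 * illustrative only, assuming a strip input buffer of 0x100 qwords and
 * four required clusters (numAttribs == sizeOnVU == 4):
 *
 *   nverts = 0x100 / 4 = 64 vertices fit in the input buffer
 *   tristrip, all clusters opaque (batchRound = 4):
 *       batchSize = 64/4*4 = 64
 *   tristrip with broken-out clusters (stripReverse = 2):
 *       batchSize = (64-2)/4*4 + 2 = 62
 *
 * batchesPerTag is then bounded by how many such batches fit into the
 * 16-bit QWC field of a DMA tag, hence the 0xFFFF in the divisions above.
 */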
RwBool
PS2AllMatProcessInitData(RxPipelineNode *self)
{
    ps2matInitData *init;
    rxNodePS2AllMatPvtData *pvtData;
    RxClusterDefinition *cl;
    RwInt32 i, numClusters;
    RwChar *s;

    init = (ps2matInitData*)RxPipelineNodeGetInitData(self);
    pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
    if(init == NULL){
        pvtData->sizeOnVU = 4;
        pvtData->strip.maxInputSize = 0x100;
        pvtData->list.maxInputSize = 0x40;
        pvtData->pipeType = rpMESHHEADERTRISTRIP|rpMESHHEADERTRIFAN;
        return FALSE;
    }

    pvtData->sizeOnVU = init->stride;
    if(init->stride == 0)
        return FALSE;
    pvtData->strip.maxInputSize = init->stripVifOffset;
    pvtData->list.maxInputSize = init->listVifOffset;
    pvtData->pipeType = init->pipeType ?
        init->pipeType : rpMESHHEADERTRISTRIP|rpMESHHEADERTRIFAN;

    pvtData->numStripes = 0;
    numClusters = 0;
    for(i = 0; i < CL_MAXCL; i++){
        cl = init->clusters[i];
        if(cl == NULL){
            pvtData->clinfo[i].attrib = 0;
            continue;
        }
        numClusters++;
        if(cl->defaultAttributes & CL_ATTRIB_REQUIRED){
            pvtData->clinfo[i].attrib = cl->defaultAttributes;
            if(cl->defaultAttributes & CL_ATTRIB_PLACEHOLDER){
                printerror("PS2AllMatProcessInitData; cluster %s has both "
                    "CL_ATTRIB_REQUIRED and CL_ATTRIB_PLACEHOLDER "
                    "set in its attributes - illegal!", cl->name);
                return FALSE;
            }
            switch(cl->defaultAttributes & 0xFF000000){
            case CL_V4_32:
            default:
                pvtData->clinfo[i].stride = 4;
                break;
            case CL_V3_32:
                pvtData->clinfo[i].stride = 3;
                break;
            case CL_V2_32:
            case CL_V4_16:
                pvtData->clinfo[i].stride = 2;
                break;
            case CL_V2_16:
            case CL_V4_8:
            case CL_S32:
                pvtData->clinfo[i].stride = 1;
                break;
            }
            if(cl->defaultAttributes & CL_ATTRIB_READWRITE){
                // broken out
                if(cl->defaultAttributes & CL_ATTRIB_OPAQUE){
                    printerror("PS2AllMatProcessInitData; cluster %s has both "
                        "(CL_ATTRIB_READ or CL_ATTRIB_WRITE) and "
                        "CL_ATTRIB_OPAQUE set, these are mutually incompatible",
                        cl->name);
                    return FALSE;
                }
                pvtData->cliIndex[pvtData->numStripes++] = 0;
            }else{
                // not broken out
                if(i >= CL_USER1){
                    printerror("PS2AllMatProcessInitData; user clusters must have "
                        "attributes set to CL_ATTRIB_PLACEHOLDER or "
                        "CL_ATTRIB_WRITE or they will have no effect!");
                    return FALSE;
                }
                if(cl->defaultAttributes & CL_ATTRIB_DONT_FILL){
                    printerror("PS2AllMatProcessInitData; non-user clusters with "
                        "CL_ATTRIB_DONTFILL attributes must also have either "
                        "CL_ATTRIB_PLACEHOLDER or CL_ATTRIB_WRITE attributes, "
                        "or uninitialised junk will be uploaded to VU1");
                    return FALSE;
                }
            }
        }else if(cl->defaultAttributes & CL_ATTRIB_PLACEHOLDER){
            pvtData->clinfo[i].attrib = cl->defaultAttributes;
            switch(cl->defaultAttributes & 0xFF000000){
            case CL_V4_32:
            default:
                pvtData->clinfo[i].stride = 4;
                break;
            case CL_V3_32:
                pvtData->clinfo[i].stride = 3;
                break;
            case CL_V2_32:
            case CL_V4_16:
                pvtData->clinfo[i].stride = 2;
                break;
            case CL_V2_16:
            case CL_V4_8:
            case CL_S32:
                pvtData->clinfo[i].stride = 1;
                break;
            }
        }else{
            switch(i){
            case CL_XYZ: s = "XYZ"; break;
            case CL_XYZW: s = "XYZW"; break;
            case CL_UV: s = "UV"; break;
            case CL_UV2: s = "UV2"; break;
            case CL_RGBA: s = "RGBA"; break;
            case CL_NORMAL: s = "NORMAL"; break;
            case CL_USER1: s = "USER1"; break;
            case CL_USER2: s = "USER2"; break;
            case CL_USER3: s = "USER3"; break;
            case CL_USER4: s = "USER4"; break;
            default: s = "!UNKNOWN!";
            }
            printerror("PS2AllMatProcessInitData; %s "
                "cluster has unrecognised attributes", s);
            return FALSE;
        }
    }

    return numClusters ?
        _rwPS2AllRabinsConstructionTimeCode(pvtData) : FALSE;
}
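/*
 * Summary of the cluster attribute combinations PS2AllMatProcessInitData
 * accepts, derived from the checks above (illustrative reading of this code,
 * not a statement of the RW API contract):
 *
 *   CL_ATTRIB_REQUIRED | CL_ATTRIB_OPAQUE      laid out in the opaque data block
 *                                              (gets dataoffset/skip above)
 *   CL_ATTRIB_REQUIRED | CL_ATTRIB_READ/WRITE  broken out, counted in numStripes
 *   CL_ATTRIB_PLACEHOLDER                      reserves a VU slot, no instance data
 *
 * Rejected: REQUIRED together with PLACEHOLDER, READ/WRITE together with
 * OPAQUE, user clusters that are neither PLACEHOLDER nor WRITE, and non-user
 * clusters with CL_ATTRIB_DONT_FILL but neither PLACEHOLDER nor WRITE.
 */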
RwBool
PS2AllMatNodeBody(RxPipelineNode *self, const RxPipelineNodeParam *params)
{
    return FALSE;
}

RwBool
PS2AllMatPipelineNodeInit(RxPipelineNode *self)
{
    rxNodePS2AllMatPvtData *pvtData;
    if(PS2AllMatProcessInitData(self)){
        pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
        if(pvtData){
            pvtData->vu1CodeArray = skyVU1NullTransforms;
            pvtData->magicValue = 0x34F9;
            pvtData->codeArrayLength = 0x20;
            pvtData->vifOffset = 0x114;
            pvtData->meshInstanceTestCB = NULL;
            pvtData->resEntryAllocCB = NULL;
            pvtData->instanceCB = NULL;
            pvtData->bridgeCB = NULL;
            pvtData->postMeshCB = NULL;
            return TRUE;
        }
    }
    return FALSE;
}

RwChar *_PS2AllMat_csl = "PS2All.csl";  // _PS2AllMat_csl.42

RxNodeDefinition nodePS2AllMatCSL = {   // nodePS2AllMatCSL.43
    _PS2AllMat_csl,
    { PS2AllMatNodeBody,
      NULL,
      NULL,
      PS2AllMatPipelineNodeInit,
      NULL,
      NULL,
      NULL },
    { 0, NULL, NULL, 0, NULL },
    sizeof(rxNodePS2AllMatPvtData),
    rxNODEDEFCONST,
    0
};

RxNodeDefinition*
RxNodeDefinitionGetPS2AllMat(void)
{
    return &nodePS2AllMatCSL;
}

RxPipelineNode*
RxPipelineNodePS2AllMatGenerateCluster(RxPipelineNode *self,
    RxClusterDefinition *cluster2generate, RwUInt32 type)
{
    ps2matInitData *init;
    if(self && cluster2generate && type < CL_MAXCL){
        switch(type){
        case CL_XYZ:
            if((cluster2generate->defaultAttributes & CL_V3_32) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        case CL_XYZW:
            if((cluster2generate->defaultAttributes & CL_V4_32) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        case CL_UV:
            if((cluster2generate->defaultAttributes & CL_V2_32) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        case CL_UV2:
            if((cluster2generate->defaultAttributes & CL_V4_32) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        case CL_RGBA:
            if((cluster2generate->defaultAttributes & CL_V4_8) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        case CL_NORMAL:
            if((cluster2generate->defaultAttributes & CL_V4_8) == 0 ||
               strcmp(cluster2generate->attributeSet, "PS2") != 0)
                return NULL;
            break;
        default:
            break;
        }
        init = getInitData(self);
        if(init){
            init->clusters[type] = cluster2generate;
            return self;
        }
    }
    return NULL;
}

RxPipelineNode*
RxPipelineNodePS2AllMatSetTriangleVUBufferSizes(RxPipelineNode *self,
    RwInt32 strideOfInputVertex, RwInt32 vuTSVertexMaxCount, RwInt32 vuTLTriMaxCount)
{
    ps2matInitData *init;
    if(self && strideOfInputVertex > 0 &&
       vuTSVertexMaxCount > 0 && vuTLTriMaxCount > 0){
        init = getInitData(self);
        if(init == NULL)
            return NULL;
        if(init->pipeType & (rpMESHHEADERLINELIST|rpMESHHEADERPOLYLINE|
                             rpMESHHEADERPOINTLIST))
            return NULL;
        init->pipeType = rpMESHHEADERTRISTRIP|rpMESHHEADERTRIFAN;
        init->stride = strideOfInputVertex;
        init->stripVifOffset = strideOfInputVertex*vuTSVertexMaxCount;
        init->listVifOffset = strideOfInputVertex*3*vuTLTriMaxCount;
        return self;
    }
    return NULL;
}
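/*
 * Example of the buffer-size arithmetic above (hypothetical values, for
 * illustration only): with a 4-qword input vertex, 69 strip vertices and
 * 23 list triangles,
 *
 *   RxPipelineNodePS2AllMatSetTriangleVUBufferSizes(node, 4, 69, 23);
 *
 * yields stripVifOffset = 4*69 = 0x114 and listVifOffset = 4*3*23 = 0x114,
 * i.e. the same values getInitData() fills in by default.
 */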
RxPipelineNode*
RxPipelineNodePS2AllMatSetLineVUBufferSizes(RxPipelineNode *self,
    RwInt32 strideOfInputVertex, RwInt32 vuLSVertexMaxCount, RwInt32 vuLLLineMaxCount)
{
    ps2matInitData *init;
    if(self && strideOfInputVertex > 0 &&
       vuLSVertexMaxCount > 0 && vuLLLineMaxCount > 0){
        init = getInitData(self);
        if(init == NULL)
            return NULL;
        if(init->pipeType & (rpMESHHEADERTRISTRIP|rpMESHHEADERTRIFAN|
                             rpMESHHEADERPOINTLIST))
            return NULL;
        init->pipeType = rpMESHHEADERLINELIST|rpMESHHEADERPOLYLINE;
        init->stride = strideOfInputVertex;
        init->stripVifOffset = strideOfInputVertex*vuLSVertexMaxCount;
        init->listVifOffset = strideOfInputVertex*2*vuLLLineMaxCount;
        return self;
    }
    return NULL;
}

RxPipelineNode*
RxPipelineNodePS2AllMatSetPointListVUBufferSize(RxPipelineNode *self,
    RwInt32 strideOfInputVertex, RwInt32 vuPLVertexMaxCount)
{
    ps2matInitData *init;
    if(self && strideOfInputVertex > 0 && vuPLVertexMaxCount > 0){
        init = getInitData(self);
        if(init == NULL)
            return NULL;
        if(init->pipeType & (rpMESHHEADERTRISTRIP|rpMESHHEADERTRIFAN|
                             rpMESHHEADERLINELIST|rpMESHHEADERPOLYLINE))
            return NULL;
        init->pipeType = rpMESHHEADERPOINTLIST;
        init->stride = strideOfInputVertex;
        init->listVifOffset = strideOfInputVertex*vuPLVertexMaxCount;
        return self;
    }
    return NULL;
}

RwInt32
RxPipelineNodePS2AllMatGetVUBatchSize(RxPipelineNode *self, RpMeshHeaderFlags flags)
{
    rxNodePS2AllMatPvtData *pvtData;
    pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
    if(flags & (rpMESHHEADERTRISTRIP|rpMESHHEADERPOLYLINE))
        return pvtData->strip.batchSize;
    else
        return pvtData->list.batchSize;
}

RxPipelineNode*
RxPipelineNodePS2AllMatSetVIFOffset(RxPipelineNode *self, int vifOffset)
{
    rxNodePS2AllMatPvtData *pvtData;
    if(self){
        pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
        if(pvtData){
            pvtData->vifOffset = vifOffset;
            return self;
        }
    }
    return NULL;
}

RxPipelineNode*
RxPipelineNodePS2AllMatSetVU1CodeArray(RxPipelineNode *self,
    void **vu1CodeArray, RwUInt32 length)
{
    rxNodePS2AllMatPvtData *pvtData;
    if(self){
        pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
        if(pvtData){
            if(vu1CodeArray == NULL){
                // reset to the defaults used by PS2AllMatPipelineNodeInit
                pvtData->codeArrayLength = 0x20;
                pvtData->vu1CodeArray = skyVU1NullTransforms;
            }else{
                pvtData->codeArrayLength = length;
                pvtData->vu1CodeArray = vu1CodeArray;
            }
            return self;
        }
    }
    return NULL;
}

const void**
RxPipelineNodePS2AllMatGetVU1CodeArray(RxPipelineNode *self, RwUInt32 *length)
{
    rxNodePS2AllMatPvtData *pvtData;
    if(self){
        pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
        if(pvtData){
            *length = pvtData->codeArrayLength;
            return (const void**)pvtData->vu1CodeArray;
        }
    }
    return NULL;
}

RxPipelineNode*
RxPipelineNodePS2AllMatSetCallBack(RxPipelineNode *self,
    RxPipelineNodePS2AllMatCallBackType type, void *func)
{
    rxNodePS2AllMatPvtData *pvtData;
    if(self){
        pvtData = (rxNodePS2AllMatPvtData*)self->privateData;
        if(pvtData){
            switch(type){
            case rxPS2ALLMATCALLBACKMESHINSTANCETEST:
                pvtData->meshInstanceTestCB =
                    (RxPipelineNodePS2AllMatMeshInstanceTestCallBack)func;
                break;
            case rxPS2ALLMATCALLBACKRESENTRYALLOC:
                if(func == NULL)
                    return NULL;
                pvtData->resEntryAllocCB =
                    (RxPipelineNodePS2AllMatResEntryAllocCallBack)func;
                break;
            case rxPS2ALLMATCALLBACKINSTANCE:
                if(func == NULL)
                    return NULL;
                pvtData->instanceCB =
                    (RxPipelineNodePS2AllMatInstanceCallBack)func;
                break;
            case rxPS2ALLMATCALLBACKBRIDGE:
                if(func == NULL)
                    return NULL;
                pvtData->bridgeCB =
                    (RxPipelineNodePS2AllMatBridgeCallBack)func;
                break;
            case rxPS2ALLMATCALLBACKPOSTMESH:
                pvtData->postMeshCB =
                    (RxPipelineNodePS2AllMatPostMeshCallBack)func;
                break;
            default:
                return NULL;
            }
            return self;
        }
    }
    return NULL;
}
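/*
 * Usage sketch (not part of the original RW source): one plausible way a
 * driver or plugin could customise a PS2All material node with the functions
 * above. The node pointer, cluster definitions and bridge callback are
 * assumed to come from elsewhere; their names and the PS2ALLMAT_USAGE_EXAMPLE
 * guard are made up for illustration.
 */
#ifdef PS2ALLMAT_USAGE_EXAMPLE
static RxPipelineNode*
exampleCustomisePS2AllMatNode(RxPipelineNode *node,
    RxClusterDefinition *xyzCluster,    // hypothetical CL_V3_32 "PS2" cluster
    RxClusterDefinition *rgbaCluster,   // hypothetical CL_V4_8 "PS2" cluster
    RxPipelineNodePS2AllMatBridgeCallBack bridgeCB)
{
    // register the clusters this pipe should instance
    if(RxPipelineNodePS2AllMatGenerateCluster(node, xyzCluster, CL_XYZ) == NULL)
        return NULL;
    if(RxPipelineNodePS2AllMatGenerateCluster(node, rgbaCluster, CL_RGBA) == NULL)
        return NULL;

    // 4 qwords per vertex on VU1, 69 strip vertices / 23 list triangles
    // (reproduces the 0x114 qword defaults, see the example above)
    if(RxPipelineNodePS2AllMatSetTriangleVUBufferSizes(node, 4, 69, 23) == NULL)
        return NULL;

    // hook a custom bridge callback; keep the default VU1 code array
    if(RxPipelineNodePS2AllMatSetCallBack(node, rxPS2ALLMATCALLBACKBRIDGE,
                                          (void*)bridgeCB) == NULL)
        return NULL;

    return node;
}
#endif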