/* Checks whether the current mesh is the first mesh of the current object */
#define RPMESHPS2ALLISFIRSTMESH(_p2apd)                                     \
    ((_p2apd)->mesh == (RpMesh *)((_p2apd)->meshHeader->firstMeshOffset +   \
                                  (RwUInt8 *)((_p2apd)->meshHeader + 1)) )

/* Used as RpMeshPS2AllAsyncTextureUpload (function in debug) */
#define RpMeshPS2AllAsyncTextureUploadMacro(_ps2AllPipeData)                \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    if (NULL != _p2apd->texture)                                            \
    {                                                                       \
        PS2ALLMACROASSERT(NULL != _p2apd->texture->raster);                 \
        RpSkyTexCacheAccessSpeculate(_p2apd->texture->raster);              \
    }                                                                       \
}                                                                           \
MACRO_STOP

#if (!defined(__MWERKS__))

#define _rxPS2AllTexFilterASM(_filtering)                                   \
MACRO_START                                                                 \
{                                                                           \
    /* We try to force CW to work by using a temp var */                    \
    long tmp = 0;                                                           \
                                                                            \
    /* *INDENT-OFF* */                                                      \
    asm __volatile__ (                                                      \
        ".set noreorder ;                                                   \
         .set noat ;                                                        \
                                                                            \
         ori  $at, $0, 0x6 ;                                                \
         beql $at, %2, 1f ;                                                 \
         ori  %0, %1, 0x160 ;                                               \
                                                                            \
         ori  $at, $0, 0x5 ;                                                \
         beql $at, %2, 1f ;                                                 \
         ori  %0, %1, 0xC0 ;                                                \
                                                                            \
         ori  $at, $0, 0x4 ;                                                \
         beql $at, %2, 1f ;                                                 \
         ori  %0, %1, 0x120 ;                                               \
                                                                            \
         ori  $at, $0, 0x3 ;                                                \
         beql $at, %2, 1f ;                                                 \
         ori  %0, %1, 0x80 ;                                                \
                                                                            \
         ori  $at, $0, 0x2 ;                                                \
         beql $at, %2, 1f ;                                                 \
         ori  %0, %1, 0x60 ;                                                \
                                                                            \
         ori  %0, %1, 0x0 ;                                                 \
                                                                            \
      1: nop ;                                                              \
                                                                            \
         .set reorder ;                                                     \
         .set at                                                            \
        " : "=r" (tmp)                                                      \
          : "r" (skyTex1_1 & ~0x1E0L),                                      \
            "r" (_filtering));                                              \
                                                                            \
    skyTex1_1 = tmp;                                                        \
}                                                                           \
MACRO_STOP

#else /* (!defined(__MWERKS__)) */

#define _rxPS2AllTexFilterASM _rxPS2AllTexFilterASMFunc

#endif /* (!defined(__MWERKS__)) */
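/* For reference, a plain-C sketch of what _rxPS2AllTexFilterASM computes,
 * mirroring the commented-out CodeWarrior fallback kept inside
 * RpMeshPS2AllSyncTextureUploadMacro below. Illustrative only and disabled;
 * the function name _rxPS2AllTexFilterRefC is hypothetical and the asm
 * version above is what actually runs. */
#if 0
static void _rxPS2AllTexFilterRefC(RwTextureFilterMode filtering)
{
    /* Replace the filter field (bits masked by 0x1E0) of the cached TEX1_1 value */
    switch (filtering)
    {
        case rwFILTERLINEAR:
            skyTex1_1 = (skyTex1_1 & ~0x1E0L) | 0x60;  break;
        case rwFILTERMIPNEAREST:
            skyTex1_1 = (skyTex1_1 & ~0x1E0L) | 0x80;  break;
        case rwFILTERMIPLINEAR:
            skyTex1_1 = (skyTex1_1 & ~0x1E0L) | 0x120; break;
        case rwFILTERLINEARMIPNEAREST:
            skyTex1_1 = (skyTex1_1 & ~0x1E0L) | 0xC0;  break;
        case rwFILTERLINEARMIPLINEAR:
            skyTex1_1 = (skyTex1_1 & ~0x1E0L) | 0x160; break;
        default: /* rwFILTERNEAREST */
            skyTex1_1 = (skyTex1_1 & ~0x1E0L);         break;
    }
}
#endif /* 0 */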
/* Used as RpMeshPS2AllSyncTextureUpload (function in debug) */
#define RpMeshPS2AllSyncTextureUploadMacro(_ps2AllPipeData)                 \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
    RwTexture *_nwtx;                                                       \
    RwRaster  *_nwrs = (RwRaster *)NULL;                                    \
                                                                            \
    /* Get appropriate setup done even if (NULL == texture) */              \
    _nwtx = _p2apd->texture;                                                \
    if (NULL != _nwtx) _nwrs = _nwtx->raster;                               \
                                                                            \
    skyAlphaTex = FALSE;                                                    \
    if (NULL != _nwrs)                                                      \
    {                                                                       \
        const RwUInt32 cFormat =                                            \
            _nwrs->cFormat & (rwRASTERFORMATPIXELFORMATMASK >> 8);          \
                                                                            \
        /* If an alpha format texture - enable alpha blending */            \
        skyAlphaTex |= ((rwRASTERFORMAT1555 >> 8) == cFormat) |             \
                       ((rwRASTERFORMAT8888 >> 8) == cFormat);              \
    }                                                                       \
                                                                            \
    if (_nwrs != skyTextureRaster)                                          \
    {                                                                       \
        /* Raster to texture with */                                        \
        skyTextureRaster = _nwrs;                                           \
                                                                            \
        if (NULL != skyTextureRaster)                                       \
        {                                                                   \
            RwBool success;                                                 \
                                                                            \
            /* Enable texturing */                                          \
            skyPrim_State |= 0x10;                                          \
                                                                            \
            /* Do what it takes to get the raster selected */               \
            success = RpSkyTexCacheAccessRaster(skyTextureRaster, FALSE);   \
            PS2ALLMACROASSERT(success/*Texture upload failed*/);            \
        }                                                                   \
        else                                                                \
        {                                                                   \
            skyPrim_State &= ~0x10L;                                        \
        }                                                                   \
                                                                            \
        if (NULL != _nwtx)                                                  \
        {                                                                   \
            /* Old CodeWarrior version from before inline asm worked */     \
            /* Switch statements hurt */                                    \
            /* if (_p2apd->texture->filtering == rwFILTERLINEARMIPNEAREST)      */ \
            /*     skyTex1_1 = (skyTex1_1 & ~0x1e0l) | 0xc0;                    */ \
            /* else if (_p2apd->texture->filtering == rwFILTERLINEAR)           */ \
            /*     skyTex1_1 = (skyTex1_1 & ~0x1e0l) | 0x60;                    */ \
            /* else if (_p2apd->texture->filtering == rwFILTERLINEARMIPLINEAR)  */ \
            /*     skyTex1_1 = (skyTex1_1 & ~0x1e0l) | 0x160;                   */ \
            /* else if (_p2apd->texture->filtering == rwFILTERMIPNEAREST)       */ \
            /*     skyTex1_1 = (skyTex1_1 & ~0x1e0l) | 0x80;                    */ \
            /* else if (_p2apd->texture->filtering == rwFILTERMIPLINEAR)        */ \
            /*     skyTex1_1 = (skyTex1_1 & ~0x1e0l) | 0x120;                   */ \
            /* else / * must be rwFILTERNEAREST * /                             */ \
            /*     skyTex1_1 = skyTex1_1 & ~0x1e0l;                             */ \
                                                                            \
            /* A more efficient texture command production than a switch   */ \
            /* statement, which uses a jump table pulled into the d-cache  */ \
            /* from the text segment.                                      */ \
            /* ASSUMES THESE ENUM VALUES AREN'T GOING ANYWHERE :-)         */ \
            _rxPS2AllTexFilterASM(RwTextureGetFilterMode(_nwtx));           \
                                                                            \
            /* Clamp, wrap, mirror or border. We now have two addressing   */ \
            /* modes, one for the U and one for the V direction. If the    */ \
            /* app has never set the V direction, then default both U and  */ \
            /* V to the setting from the U direction, which will have      */ \
            /* been set.                                                    */ \
            skyClamp_1 = 0; /* default to repeat in U and V */              \
            if (RwTextureGetAddressingU(_nwtx) == rwTEXTUREADDRESSCLAMP)    \
                skyClamp_1 |= 1;                                            \
            if (RwTextureGetAddressingV(_nwtx) == rwTEXTUREADDRESSCLAMP)    \
                skyClamp_1 |= 4;                                            \
        }                                                                   \
    }                                                                       \
                                                                            \
    /* Finally, iff skyAlphaTex, we turn on Alpha test */                   \
    if (skyAlphaTex)                                                        \
    {                                                                       \
        skyTest_1 |= 1;                                                     \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        skyTest_1 &= ~1L;                                                   \
    }                                                                       \
}                                                                           \
MACRO_STOP

/* Used as RpMeshPS2AllStartVIFUploads (function in debug) */
#define RpMeshPS2AllStartVIFUploadsMacro(_initialQW, _extraQW)              \
MACRO_START                                                                 \
{                                                                           \
    RwUInt32 _itQW = (_initialQW);                                          \
    RwUInt32 _xaQW = (_extraQW);                                            \
                                                                            \
    /* Open a VIF packet, with TTE set.                                 */  \
    /* The total number of QW needed for the packet is one for a DMA    */  \
    /* tag (emitted below if initialQW != 0), plus initialQW, plus      */  \
    /* extraQW, plus one for the geometry transfer                      */  \
    /* (i.e. RpMeshPS2AllEndVIFUploads).                                */  \
    _rwDMAOpenVIFPkt(0, (1 + _itQW) + _xaQW + 1);                           \
                                                                            \
    /* Make a standard DMA tag: no embedded VIFTag, no chain, no ref,   */  \
    /* initialQW QWs.                                                   */  \
    if (0 != _itQW)                                                         \
    {                                                                       \
        RwUInt64  tmp;                                                      \
        u_long128 ltmp = 0;                                                 \
                                                                            \
        tmp = (1L << 28) | (_itQW);                                         \
        MAKE128(ltmp, 0L, tmp);                                             \
        RWDMA_ADD_TO_PKT(ltmp);                                             \
    }                                                                       \
}                                                                           \
MACRO_STOP
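/* Worked example, illustrative only (and disabled): if a bridge callback is
 * going to add, say, the GIF tag (2 QW) and the material colour (2 QW)
 * before RpMeshPS2AllEndVIFUploads, it would open the packet like this;
 * _rwDMAOpenVIFPkt then reserves 1 (DMA tag) + 4 (initialQW) + 1 (extraQW)
 * + 1 (geometry transfer) = 7 QWs. The per-upload QW counts are the
 * rpMESH...NUMINITIALQW / ...NUMEXTRAQW constants defined further down
 * this file. */
#if 0
    RpMeshPS2AllStartVIFUploadsMacro(2 + 2 /* initialQW */,
                                     1     /* extraQW, e.g. VU1 code upload */);
#endif /* 0 */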
/* Used as RpMeshPS2AllGIFTagUpload (function in debug) */
#define rpMESHPS2ALLGIFTAGNUMINITIALQW 2
#define RpMeshPS2AllGIFTagUploadMacro(_ps2AllPipeData)                      \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
    u_long128 __ltmp = 0;                                                   \
    RwUInt64  __tmp1;                                                       \
    RwUInt32  __prmTmp;                                                     \
                                                                            \
    /* A VIFTag to transfer a GIFTag to VU1 memory */                       \
    __tmp1 = (((0x6CL << 24) | /* VIF unpack 4-32 */                        \
               (0x01L << 16) | /* Transfer 1 QW   */                        \
               /* vuSDgifTag = 0x03FA, Destination address in VU1 memory (in QWs) */ \
               (vuSDgifTag)  ) << 32) |                                     \
              ((1L << 24) | (4 << 8) | (4)); /* How to unpack, length 4W, stride 4W */ \
    MAKE128(__ltmp, __tmp1, 0L);                                            \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
                                                                            \
    /* Calc these J.I.T. (if we don't upload a GIFTag, it's assumed we don't want to set this state */ \
    /* but if we DO upload a GIFTag, do the state updates here so there's no chance to forget 'em)  */ \
    skyVertexAlpha = (_p2apd->matCol.alpha != 255);                         \
    if (skyVertexAlpha | skyAlphaTex)                                       \
    {                                                                       \
        skyPrim_State |= 0x40L;                                             \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        skyPrim_State &= ~0x40L;                                            \
    }                                                                       \
                                                                            \
    __prmTmp = skyPrim_State | (_ps2AllPipeData)->primType;                 \
    /* Mask out old primitive type in the GIFTag */                         \
    __tmp1 = *((RwUInt64 *)&gifTag128) & ~((RwUInt64)0x7FF << 47);          \
    /* Now set the new primitive type (restoring primitive state masked out above) */ \
    __tmp1 |= (RwUInt64)(__prmTmp & 0x7FF) << 47;                           \
    MAKE128(__ltmp, ((RwUInt64 *)&gifTag128)[1], __tmp1);                   \
    gifTag128 = __ltmp;                                                     \
    /* GIF tag for 1 primitive using packed mode */                         \
    RWDMA_ADD_TO_PKT(gifTag128);                                            \
}                                                                           \
MACRO_STOP

#if (!defined(__MWERKS__))

#define _rxPS2AllMatColASM(_matCol, _colScale)                              \
MACRO_START                                                                 \
{                                                                           \
    u_long128 __ltmp = 0;                                                   \
    float __floattmp1 = 0.0f;                                               \
    float __floattmp2 = 0.0f;                                               \
    long  __longtmp = 0;                                                    \
                                                                            \
    /* *INDENT-OFF* */                                                      \
    asm __volatile__ (                                                      \
        ".set noreorder ;                                                   \
         mul.s %1, %7, %9 ;                                                 \
         mul.s %2, %5, %8 ;                                                 \
                                                                            \
         mfc1 %0, %1 ;                                                      \
         mfc1 %3, %2 ;                                                      \
                                                                            \
         pexew %0, %0 ;                                                     \
         pexew %3, %3 ;                                                     \
                                                                            \
         mul.s %1, %6, %8 ;                                                 \
         mul.s %2, %4, %8 ;                                                 \
                                                                            \
         mfc1 %0, %1 ;                                                      \
         mfc1 %3, %2 ;                                                      \
                                                                            \
         ppacw %0, %0, %3 ;                                                 \
         .set reorder                                                       \
                                                                            \
        " : "=r" (__ltmp),                                                  \
            "=f&" (__floattmp1),                                            \
            "=f&" (__floattmp2),                                            \
            "=r" (__longtmp) :                                              \
            "f" ((RwReal)((_matCol)->red)),                                 \
            "f" ((RwReal)((_matCol)->green)),                               \
            "f" ((RwReal)((_matCol)->blue)),                                \
            "f" ((RwReal)((_matCol)->alpha)),                               \
            "f" (_colScale),                                                \
            "f" (128.1f/(255.0f*255.0f)) );                                 \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
}                                                                           \
MACRO_STOP

#else /* (!defined(__MWERKS__)) */

#define _rxPS2AllMatColASM _rxPS2AllMatColASMFunc

#endif /* (!defined(__MWERKS__)) */

/* Used as RpMeshPS2AllMatColUpload (function in debug) */
#define rpMESHPS2ALLMATCOLNUMINITIALQW 2
#define RpMeshPS2AllMatColUploadMacro(_ps2AllPipeData)                      \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    u_long128 __ltmp = 0;                                                   \
    RwUInt64  __tmp1;                                                       \
    float     __colScale;                                                   \
                                                                            \
    /* A VIFTag to transfer colScale to VU1 memory */                       \
    __tmp1 = (((0x6CL << 24) | /* VIF unpack 4-32 */                        \
               (0x01L << 16) | /* Transfer 1 QW   */                        \
               /* vuSDcolScale = 0x03FB, Destination address in VU1 memory (in QWs) */ \
               (vuSDcolScale)) << 32) |                                     \
              ((1L << 24) | (4 << 8) | (4)); /* How to unpack, length 4W, stride 4W */ \
    MAKE128(__ltmp, __tmp1, 0L);                                            \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
                                                                            \
    /* Old metrowerks version (before inline asm worked)              */    \
    /* if (!(skyPrim_State & 0x10))                                   */    \
    /* {                                                              */    \
    /*     colScale = 255.0f;                                         */    \
    /* }                                                              */    \
    /* ((RwReal *) & ltmp)[0] = (colScale / (255.0f*255.0f)) *        */    \
    /*                          (RwReal)_p2apd->matCol.red;           */    \
    /* ((RwReal *) & ltmp)[1] = (colScale / (255.0f*255.0f)) *        */    \
    /*                          (RwReal)_p2apd->matCol.green;         */    \
    /* ((RwReal *) & ltmp)[2] = (colScale / (255.0f*255.0f)) *        */    \
    /*                          (RwReal)_p2apd->matCol.blue;          */    \
    /* ((RwReal *) & ltmp)[3] = (alphaScale / (255.0f*255.0f)) *      */    \
    /*                          (RwReal)_p2apd->matCol.alpha;         */    \
                                                                            \
    if (skyPrim_State & 0x10)                                               \
    {                                                                       \
        /* [We have a sneaking suspicion that 128.0f should be 128.1f] */   \
        __colScale = 128.1f/(255.0f*255.0f);                                \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        __colScale = 1.0f/255.0f;                                           \
    }                                                                       \
    _rxPS2AllMatColASM(&(_p2apd->matCol), __colScale);                      \
}                                                                           \
MACRO_STOP
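/* The net effect of the _rxPS2AllMatColASM packing, expressed in plain C
 * for reference (disabled). The lane ordering and scaling are taken from
 * the old metrowerks fallback in the comment above; the names _p2apd and
 * __colScale are those used inside RpMeshPS2AllMatColUploadMacro and are
 * not in scope here. */
#if 0
{
    /* The QW added to the packet holds four floats: R, G, B scaled by */
    /* __colScale and alpha scaled by the fixed textured-case factor.  */
    RwReal rgba[4];

    rgba[0] = (RwReal)_p2apd->matCol.red   * __colScale;
    rgba[1] = (RwReal)_p2apd->matCol.green * __colScale;
    rgba[2] = (RwReal)_p2apd->matCol.blue  * __colScale;
    rgba[3] = (RwReal)_p2apd->matCol.alpha * (128.1f/(255.0f*255.0f));
}
#endif /* 0 */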
#if (!defined(__MWERKS__))

#define _rxPS2AllSurfPropsASM(_surfProps, _extra)                           \
MACRO_START                                                                 \
{                                                                           \
    u_long128 __ltmp = 0;                                                   \
    float __floattmp1 = 0.0f;                                               \
    float __floattmp2 = 0.0f;                                               \
    long  __longtmp = 0;                                                    \
                                                                            \
    /* *INDENT-OFF* */                                                      \
    asm __volatile__ (                                                      \
        ".set noreorder ;                                                   \
         mul.s %2, %5, %7 ;                                                 \
                                                                            \
         mfc1 %0, %8 ;                                                      \
         mfc1 %3, %2 ;                                                      \
                                                                            \
         pexew %0, %0 ;                                                     \
         pexew %3, %3 ;                                                     \
                                                                            \
         mul.s %1, %6, %7 ;                                                 \
         mul.s %2, %4, %7 ;                                                 \
                                                                            \
         mfc1 %0, %1 ;                                                      \
         mfc1 %3, %2 ;                                                      \
                                                                            \
         ppacw %0, %0, %3 ;                                                 \
                                                                            \
         .set reorder                                                       \
        " : "=r" (__ltmp),                                                  \
            "=f&" (__floattmp1),                                            \
            "=f&" (__floattmp2),                                            \
            "=r" (__longtmp) :                                              \
            "f" ((RwReal)((_surfProps)->ambient)),                          \
            "f" ((RwReal)((_surfProps)->specular)),                         \
            "f" ((RwReal)((_surfProps)->diffuse)),                          \
            "f" (255.00001f),                                               \
            /* Extra value - e.g. 'scale' for the FASTMORPH plugin */       \
            "f" (_extra) );                                                 \
                                                                            \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
}                                                                           \
MACRO_STOP

#else /* (!defined(__MWERKS__)) */

#define _rxPS2AllSurfPropsASM _rxPS2AllSurfPropsASMFunc

#endif /* (!defined(__MWERKS__)) */

/* Used as RpMeshPS2AllSurfPropsUpload (function in debug) */
#define rpMESHPS2ALLSURFPROPSNUMINITIALQW 2
#define RpMeshPS2AllSurfPropsUploadMacro(_ps2AllPipeData)                   \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    u_long128 __ltmp = 0;                                                   \
    RwUInt64  __tmp1;                                                       \
                                                                            \
    /* A VIFTag to transfer (surfProps+extra) to VU1 memory */              \
    __tmp1 = (((0x6CL << 24) | /* VIF unpack 4-32 */                        \
               (0x01L << 16) | /* Transfer 1 QW   */                        \
               /* vuSDsurfProps = 0x03FC, Destination address in VU1 memory (in QWs) */ \
               (vuSDsurfProps)) << 32) |                                    \
              ((1L << 24) | (4 << 8) | (4)); /* How to unpack, length 4W, stride 4W */ \
    MAKE128(__ltmp, __tmp1, 0L);                                            \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
                                                                            \
    /* Old metrowerks version (before inline asm worked)                                      */ \
    /* ((RwReal *) & ltmp)[0] = 255.00001f * (RwReal) ps2AllPipeData->surfProps->ambient;     */ \
    /* ((RwReal *) & ltmp)[1] = 255.00001f * (RwReal) ps2AllPipeData->surfProps->specular;    */ \
    /* ((RwReal *) & ltmp)[2] = 255.00001f * (RwReal) ps2AllPipeData->surfProps->diffuse;     */ \
    /* ((RwReal *) & ltmp)[3] = 255.00001f * (RwReal) ps2AllPipeData->spExtra;                */ \
                                                                            \
    /* Surface prop */                                                      \
    _rxPS2AllSurfPropsASM(_p2apd->surfProps, _p2apd->spExtra);              \
}                                                                           \
MACRO_STOP
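/* Hypothetical helper, for illustration only (disabled): the one-QW VIF
 * "unpack 4-32" header built by hand in the GIFTag, MatCol and SurfProps
 * upload macros above always has this shape, where destAddr is the VU1
 * data address (e.g. vuSDgifTag, vuSDcolScale, vuSDsurfProps) and numQW is
 * the number of QWs that follow. */
#if 0
static void MyAddUnpackV4_32Header(RwUInt32 destAddr, RwUInt32 numQW)
{
    u_long128 ltmp = 0;
    RwUInt64  tmp;

    tmp = ((((RwUInt64)0x6C << 24) |        /* VIF unpack 4-32 */
            ((RwUInt64)numQW << 16) |       /* Number of QWs to transfer */
            ((RwUInt64)destAddr)) << 32) |  /* Destination address in VU1 memory (in QWs) */
          ((1L << 24) | (4 << 8) | (4));    /* How to unpack, length 4W, stride 4W */
    MAKE128(ltmp, tmp, 0L);
    RWDMA_ADD_TO_PKT(ltmp);
}
#endif /* 0 */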
/* Can't nest # directives in macros, so we have to predicate some
 * separate sub-sections for RpMeshPS2AllClipInfoUploadMacro */
#if (defined(VUCONTINUE))

/* TODO[6]: THE DIFFERENCE BETWEEN VUCONTINUE AND non-VUCONTINUE
 * VERSIONS NEEDS DOCUMENTING */
#define RpPS2AllClipTmp1Setup(_tempone)                                     \
    (_tempone = (1L << 32) | skyUserSwitch1)

#else /* (defined(VUCONTINUE)) */

#define RpPS2AllClipTmp1Setup(_tempone)                                     \
    (_tempone = (((RwUInt64)skyUserSwitch2) << 32) | skyUserSwitch1)

#endif /* (defined(VUCONTINUE)) */

/* Used as RpMeshPS2AllClipInfoUpload (function in debug) */
#define rpMESHPS2ALLCLIPINFONUMINITIALQW 4
#define RpMeshPS2AllClipInfoUploadMacro(_ps2AllPipeData)                    \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    u_long128 __ltmp = 0;                                                   \
    RwUInt64  __tmp1;                                                       \
    RwUInt32  __skySwitchFlag;                                              \
                                                                            \
    __skySwitchFlag = _p2apd->transType &                                   \
        (rxSKYTRANSTYPEFOG | rxSKYTRANSTYPECLIP | rxSKYTRANSTYPELIST);      \
                                                                            \
    /* A VIFTag to transfer the Clip Vectors and SwitchQW to VU1 memory */  \
    __tmp1 = (((0x6CL << 24) | /* VIF unpack 4-32 */                        \
               (0x03L << 16) | /* Transfer 3 QWs  */                        \
               /* vuSDClipvec1 = 0x03FD, Destination address in VU1 memory (in QWs) */ \
               /* [vuSDClipvec2 and vuSDVUSwitch follow directly]         */ \
               (vuSDClipvec1)) << 32) |                                     \
              ((1L << 24) | (4 << 8) | (4)); /* How to unpack, length 4W, stride 4W */ \
    MAKE128(__ltmp, __tmp1, 0L);                                            \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
                                                                            \
    /* Upload camera clipping info */                                       \
    if ((skyTSClipperMode && (!(__skySwitchFlag & rxSKYTRANSTYPELIST))) ||  \
        (skyTLClipperMode &&   (__skySwitchFlag & rxSKYTRANSTYPELIST)) )    \
    {                                                                       \
        /* True clipping. Use small frustum */                              \
        __skySwitchFlag |= 8;                                               \
        RWDMA_ADD_TO_PKT(skyCClipVect1);                                    \
        RWDMA_ADD_TO_PKT(skyCClipVect2);                                    \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* Fast-culling. Use large frustum */                               \
        RWDMA_ADD_TO_PKT(skyClipVect1);                                     \
        RWDMA_ADD_TO_PKT(skyClipVect2);                                     \
    }                                                                       \
                                                                            \
    /* Back/front-face culling flag */                                      \
    skyUserSwitch1 = 0;                                                     \
    if (gSkyCullState == rwCULLMODECULLFRONT)                               \
    {                                                                       \
        skyUserSwitch1 = 0x20;                                              \
    }                                                                       \
                                                                            \
    /* Combine skyUserSwitch1 and skyUserSwitch2 (skyUserSwitch2 ignored */ \
    /* if VUCONTINUE is defined) and upload with __skySwitchFlag         */ \
    RpPS2AllClipTmp1Setup(__tmp1);                                          \
    MAKE128(__ltmp, __tmp1, __skySwitchFlag);                               \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
}                                                                           \
MACRO_STOP

/* Can't nest # directives in macros, so we have to predicate some
 * separate sub-sections for RpMeshPS2AllTextureStateUploadMacro */
#if (defined(LESSEOPS))

#define RpPS2AllTexStateEOP (0L << 15)

#else /* (defined(LESSEOPS)) */

#define RpPS2AllTexStateEOP (1L << 15)

#endif /* (defined(LESSEOPS)) */

/* Used as RpMeshPS2AllTextureStateUpload (function in debug) */
#define rpMESHPS2ALLTEXTURESTATENUMINITIALQW 5
#define RpMeshPS2AllTextureStateUploadMacro(_ps2AllPipeData)                \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    RwUInt64  __tmp, __tmp1;                                                \
    u_long128 __ltmp = 0;                                                   \
                                                                            \
    if (NULL != _p2apd->texture)                                            \
    {                                                                       \
        /* VIF tag sending 4 QWs thru VIF direct to the GS */               \
        __tmp1 = ((0x50L << 24) | 4L) << 32;                                \
        MAKE128(__ltmp, __tmp1, 0L);                                        \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp = /* NLOOP */ 3L |                                            \
                /* EOP   */ RpPS2AllTexStateEOP |                           \
                /* PRE   */ (0L << 46) |                                    \
                /* FLG   */ (0L << 58) |                                    \
                /* NREG  */ (1L << 60);                                     \
        /* '(64 - 64)' is because we're writing to bit 64 of a 128-bit QW, but we're using */ \
        /* a 64-bit variable to do so... helps keep track of which bit we're writing to.   */ \
        __tmp1 = /* A+D */ (0xEL << (64 - 64));                             \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp  = skyTest_1;                                                 \
        __tmp1 = (GS_TEST_1 << (64 - 64));                                  \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp  = skyTex1_1;                                                 \
        __tmp1 = (GS_TEX1_1 << (64 - 64));                                  \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp  = skyClamp_1;                                                \
        __tmp1 = (GS_CLAMP_1 << (64 - 64));                                 \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* This is inlined from _rwSkySetRenderState(rwRENDERSTATETEXTURERASTER, NULL) */ \
        /* Other bits are done in RpMeshPS2AllSyncTextureUploadMacro                   */ \
                                                                            \
        /* Enable / disable Test_1 which is determined in RpMeshPS2AllSyncTextureUploadMacro */ \
                                                                            \
        /* Need to transfer 5 QW in total, add 2 NOPs */                    \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        /* VIF tag sending 2 QWs thru VIF direct to the GS */               \
        __tmp1 = ((0x50L << 24) | 2L) << 32;                                \
        MAKE128(__ltmp, __tmp1, 0L);                                        \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp = /* NLOOP */ 1L |                                            \
                /* EOP   */ RpPS2AllTexStateEOP |                           \
                /* PRE   */ (0L << 46) |                                    \
                /* FLG   */ (0L << 58) |                                    \
                /* NREG  */ (1L << 60);                                     \
        __tmp1 = /* A+D */ (0xEL << (64 - 64));                             \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
                                                                            \
        __tmp  = skyTest_1;                                                 \
        __tmp1 = (GS_TEST_1 << (64 - 64));                                  \
        MAKE128(__ltmp, __tmp1, __tmp);                                     \
        RWDMA_ADD_TO_PKT(__ltmp);                                           \
    }                                                                       \
}                                                                           \
MACRO_STOP

/* Used as RpMeshPS2AllVU1CodeIndexSetup (function in debug) */
#define RpMeshPS2AllVU1CodeIndexSetupMacro(_ps2AllPipeData)                 \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
                                                                            \
    _p2apd->vu1CodeIndex = ((rxSKYTRANSTYPELINE & _p2apd->transType) >> 2) | \
                           ((rxSKYTRANSTYPEISO  & _p2apd->transType) >> 2) | \
                           ((rxSKYTRANSTYPECULL & _p2apd->transType) >> 5);  \
    PS2ALLMACROASSERT(_p2apd->vu1CodeIndex <                                \
        ((rxNodePS2AllMatPvtData *)_p2apd->matPvtData)->codeArrayLength);   \
}                                                                           \
MACRO_STOP
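/* Hypothetical helper, shown only to illustrate the A+D pattern used in
 * RpMeshPS2AllTextureStateUploadMacro above (disabled): each GS register
 * write QW carries the 64-bit data in its lower half and the register
 * address in its upper half, and is added to the open packet with
 * RWDMA_ADD_TO_PKT. */
#if 0
static void MyAddGSRegWrite(RwUInt64 regAddr, RwUInt64 data)
{
    u_long128 ltmp = 0;

    /* Upper 64 bits = register address (e.g. GS_TEST_1), lower 64 = data */
    MAKE128(ltmp, regAddr, data);
    RWDMA_ADD_TO_PKT(ltmp);
}
#endif /* 0 */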
/* Can't nest # directives in macros, so we have to predicate some
 * separate sub-sections for RpMeshPS2AllVU1CodeUploadMacro */
#if (defined(VUCONTINUE))

#define RpMeshNewVU1CodeCont()                                              \
MACRO_START                                                                 \
{                                                                           \
    RwUInt64  __tmp, __tmp1;                                                \
    u_long128 __ltmp = 0;                                                   \
                                                                            \
    /* This DMA tag transfers no QWs, but TTE's enabled so it gets transferred */ \
    /* (along with the embedded VIF tag below).                                */ \
    /* NOTE: we do this as late as possible so that as much transfer as        */ \
    /* possible happens (in parallel with VU1 code execution) before the       */ \
    /* flush, at the cost of this extra DMA tag.                               */ \
    __tmp = 1L << 28;                                                       \
    /* This VIF tag does a flush (waits for VU code execution to complete) */ \
    __tmp1 = 0x15L << 24;                                                   \
    MAKE128(__ltmp, __tmp1, __tmp);                                         \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
}                                                                           \
MACRO_STOP

#else /* (defined(VUCONTINUE)) */

#define RpMeshNewVU1CodeCont()                                              \
MACRO_START                                                                 \
{ /* No op */ }                                                             \
MACRO_STOP

#endif /* (defined(VUCONTINUE)) */

/* Used as RpMeshPS2AllVU1CodeUpload (function in debug) */
#define rpMESHPS2ALLVU1CODEUPLOADNUMEXTRAQW 1
#define RpMeshPS2AllVU1CodeUploadMacro(_ps2AllPipeData)                     \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
    const void *_kohd;                                                      \
                                                                            \
    /* Assumes the VU1CodeArray is always set up (if nothing else, to  */   \
    /* skyVU1NullTransforms in PS2AllPipelineNodeInit) and contains    */   \
    /* no NULLs.                                                       */   \
    PS2ALLMACROASSERT(NULL != _p2apd->matPvtData->vu1CodeArray);            \
    PS2ALLMACROASSERT(_p2apd->vu1CodeIndex <                                \
        ((rxNodePS2AllMatPvtData *)_p2apd->matPvtData)->codeArrayLength);   \
    _kohd = _p2apd->matPvtData->vu1CodeArray[_p2apd->vu1CodeIndex];         \
    PS2ALLMACROASSERT(NULL != _kohd);                                       \
    /* The DMA tag (optionally) created by RpMeshPS2AllStartVIFUploadsMacro finishes  */ \
    /* transferring just before us, so we're free to either add a new DMA tag or not. */ \
    if (skyUploadedCode != _kohd)                                           \
    {                                                                       \
        RwUInt64  tmp;                                                      \
        u_long128 ltmp = 0;                                                 \
                                                                            \
        /* This DMA tag calls the code upload chunk (has its own DMA transfer tag) */ \
        tmp = (5L << 28) | (RwUInt64) ((RwUInt32) _kohd) << 32;             \
        MAKE128(ltmp, 0L, tmp);                                             \
        RWDMA_ADD_TO_PKT(ltmp);                                             \
        skyUploadedCode = _kohd;                                            \
                                                                            \
        /* Do VUCONTINUE stuff */                                           \
        RpMeshNewVU1CodeCont();                                             \
    }                                                                       \
}                                                                           \
MACRO_STOP

/* Used as RpMeshPS2AllEndVIFUploads (function in debug) */
#define RpMeshPS2AllEndVIFUploadsMacro(_ps2AllPipeData)                     \
MACRO_START                                                                 \
{                                                                           \
    RxPS2AllPipeData *_p2apd = (_ps2AllPipeData);                           \
    rwPS2AllResEntryHeader *_p2rh;                                          \
                                                                            \
    RwUInt64  __tmp, __tmp1;                                                \
    u_long128 __ltmp = 0;                                                   \
                                                                            \
    _p2rh = RWPS2ALLRESENTRYHEADERFROMRESENTRY(*(_p2apd->cacheEntryRef));   \
                                                                            \
    /* This DMA tag calls the geometry chain, which (has its own DMA tags and) */ \
    /* returns here when done.                                                 */ \
    __tmp = (((RwUInt64)(RwUInt32)(_p2rh->data)) << 32) | (5L << 28);       \
    /* This sets up the VIF offset (i.e. which batch the geom chain will transfer into) */ \
    __tmp1 = (3L << 24) | 0 |                                               \
             (((2L << 24) | _p2apd->matPvtData->vifOffset) << 32); /* Reset vifOffset */ \
    MAKE128(__ltmp, __tmp1, __tmp);                                         \
    RWDMA_ADD_TO_PKT(__ltmp);                                               \
                                                                            \
    /* Reference count stuff (used when objects are destroyed).  */         \
    /* clrCnt improves efficiency by allowing many reference     */         \
    /* increments in one packet to be performed in one go.       */         \
    RWDMA_CRITICAL_SECTION_BEGIN();                                         \
    _p2rh->refCnt += 1;                                                     \
    RWDMA_CRITICAL_SECTION_END();                                           \
    _p2rh->clrCnt += 1;                                                     \
    if (_p2rh->clrCnt == 1)                                                 \
    {                                                                       \
        _rwDMAAddPURef(&(_p2rh->refCnt));                                   \
    }                                                                       \
}                                                                           \
MACRO_STOP
#if (defined(__cplusplus))
extern "C"
{
#endif /* (defined(__cplusplus)) */

/* Callback components, for use in the BridgeCB */
extern void RpMeshPS2AllAsyncTextureUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllSyncTextureUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllStartVIFUploadsFunc(
                RwUInt32 numInitialQW,
                RwUInt32 numExtraQW);
extern void RpMeshPS2AllGIFTagUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllMatColUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllSurfPropsUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllClipInfoUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllTextureStateUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllVU1CodeIndexSetupFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllVU1CodeUploadFunc(
                RxPS2AllPipeData *ps2AllPipeData);
extern void RpMeshPS2AllEndVIFUploadsFunc(
                RxPS2AllPipeData *ps2AllPipeData);

/* These help prop up CodeWarrior */
extern void _rxPS2AllTexFilterASMFunc(RwTextureFilterMode filtering);
extern void _rxPS2AllMatColASMFunc(RwRGBA *matCol, float colScale);
extern void _rxPS2AllSurfPropsASMFunc(RwSurfaceProperties *surfProps,
                                      RwReal extra);

#if (defined(__cplusplus))
}
#endif /* (defined(__cplusplus)) */

#if (defined(RWDEBUG))

#define RpMeshPS2AllAsyncTextureUpload  RpMeshPS2AllAsyncTextureUploadFunc
#define RpMeshPS2AllSyncTextureUpload   RpMeshPS2AllSyncTextureUploadFunc
#define RpMeshPS2AllStartVIFUploads     RpMeshPS2AllStartVIFUploadsFunc
#define RpMeshPS2AllGIFTagUpload        RpMeshPS2AllGIFTagUploadFunc
#define RpMeshPS2AllMatColUpload        RpMeshPS2AllMatColUploadFunc
#define RpMeshPS2AllSurfPropsUpload     RpMeshPS2AllSurfPropsUploadFunc
#define RpMeshPS2AllClipInfoUpload      RpMeshPS2AllClipInfoUploadFunc
#define RpMeshPS2AllTextureStateUpload  RpMeshPS2AllTextureStateUploadFunc
#define RpMeshPS2AllVU1CodeIndexSetup   RpMeshPS2AllVU1CodeIndexSetupFunc
#define RpMeshPS2AllVU1CodeUpload       RpMeshPS2AllVU1CodeUploadFunc
#define RpMeshPS2AllEndVIFUploads       RpMeshPS2AllEndVIFUploadsFunc

#else /* (defined(RWDEBUG)) */

#define RpMeshPS2AllAsyncTextureUpload  RpMeshPS2AllAsyncTextureUploadMacro
#define RpMeshPS2AllSyncTextureUpload   RpMeshPS2AllSyncTextureUploadMacro
#define RpMeshPS2AllStartVIFUploads     RpMeshPS2AllStartVIFUploadsMacro
#define RpMeshPS2AllGIFTagUpload        RpMeshPS2AllGIFTagUploadMacro
#define RpMeshPS2AllMatColUpload        RpMeshPS2AllMatColUploadMacro
#define RpMeshPS2AllSurfPropsUpload     RpMeshPS2AllSurfPropsUploadMacro
#define RpMeshPS2AllClipInfoUpload      RpMeshPS2AllClipInfoUploadMacro
#define RpMeshPS2AllTextureStateUpload  RpMeshPS2AllTextureStateUploadMacro
#define RpMeshPS2AllVU1CodeIndexSetup   RpMeshPS2AllVU1CodeIndexSetupMacro
#define RpMeshPS2AllVU1CodeUpload       RpMeshPS2AllVU1CodeUploadMacro
#define RpMeshPS2AllEndVIFUploads       RpMeshPS2AllEndVIFUploadsMacro

#endif /* (defined(RWDEBUG)) */
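/* A minimal sketch (disabled) of how a pipeline's bridge callback might
 * drive the components above for one mesh. The exact set of uploads and
 * their ordering is decided by the actual BridgeCB in the pipeline source;
 * MyMeshBridgeRender is a hypothetical name, and the QW budget simply sums
 * the rpMESH...NUM*QW constants for the uploads performed here. */
#if 0
static void MyMeshBridgeRender(RxPS2AllPipeData *ps2AllPipeData)
{
    const RwUInt32 initialQW = rpMESHPS2ALLGIFTAGNUMINITIALQW +
                               rpMESHPS2ALLMATCOLNUMINITIALQW +
                               rpMESHPS2ALLSURFPROPSNUMINITIALQW +
                               rpMESHPS2ALLCLIPINFONUMINITIALQW +
                               rpMESHPS2ALLTEXTURESTATENUMINITIALQW;
    const RwUInt32 extraQW   = rpMESHPS2ALLVU1CODEUPLOADNUMEXTRAQW;

    /* Texture cache work first, then choose the VU1 code for this mesh */
    RpMeshPS2AllAsyncTextureUpload(ps2AllPipeData);
    RpMeshPS2AllSyncTextureUpload(ps2AllPipeData);
    RpMeshPS2AllVU1CodeIndexSetup(ps2AllPipeData);

    /* Open the VIF packet and add the per-mesh state uploads... */
    RpMeshPS2AllStartVIFUploads(initialQW, extraQW);
    RpMeshPS2AllGIFTagUpload(ps2AllPipeData);
    RpMeshPS2AllMatColUpload(ps2AllPipeData);
    RpMeshPS2AllSurfPropsUpload(ps2AllPipeData);
    RpMeshPS2AllClipInfoUpload(ps2AllPipeData);
    RpMeshPS2AllTextureStateUpload(ps2AllPipeData);

    /* ...then the (cached) VU1 code upload and the geometry transfer */
    RpMeshPS2AllVU1CodeUpload(ps2AllPipeData);
    RpMeshPS2AllEndVIFUploads(ps2AllPipeData);
}
#endif /* 0 */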