sm64

A Super Mario 64 decompilation
Log | Files | Refs | README | LICENSE

memory.c (18059B)


      1 #include <PR/ultratypes.h>
      2 
      3 #include "sm64.h"
      4 
      5 #define INCLUDED_FROM_MEMORY_C
      6 
      7 #include "buffers/zbuffer.h"
      8 #include "buffers/buffers.h"
      9 #include "decompress.h"
     10 #include "game_init.h"
     11 #include "main.h"
     12 #include "memory.h"
     13 #include "segments.h"
     14 #include "segment_symbols.h"
     15 
// Round a value up to the next multiple of 4/8/16 bytes (no-op if already aligned).
#define ALIGN4(val) (((val) + 0x3) & ~0x3)
#define ALIGN8(val) (((val) + 0x7) & ~0x7)
#define ALIGN16(val) (((val) + 0xF) & ~0xF)
     20 
/**
 * Snapshot of the main pool's allocation state, saved by main_pool_push_state
 * and restored by main_pool_pop_state. Saved states form a stack linked
 * through `prev`, with the newest state in gMainPoolState.
 */
struct MainPoolState {
    u32 freeSpace;                   // sPoolFreeSpace at the time of the push
    struct MainPoolBlock *listHeadL; // sPoolListHeadL at the time of the push
    struct MainPoolBlock *listHeadR; // sPoolListHeadR at the time of the push
    struct MainPoolState *prev;      // previously pushed state, or NULL
};
     27 
/**
 * Header placed before each main pool allocation (16 bytes are reserved for
 * it, see main_pool_alloc). Blocks on each side of the pool are linked into
 * a doubly linked list ending at that side's list head.
 */
struct MainPoolBlock {
    struct MainPoolBlock *prev;
    struct MainPoolBlock *next;
};
     32 
/**
 * Header preceding each region inside a MemoryPool. Free regions are chained
 * through `next` in address order (see mem_pool_free).
 */
struct MemoryBlock {
    struct MemoryBlock *next;
    u32 size; // size of the region, including this header
};
     37 
/**
 * A sub-pool carved out of the main pool that supports allocating and freeing
 * in arbitrary order (mem_pool_alloc / mem_pool_free).
 */
struct MemoryPool {
    u32 totalSpace;                 // usable bytes following the pool header
    struct MemoryBlock *firstBlock; // start of the pool's memory region
    struct MemoryBlock freeList;    // dummy head node of the free list
};
     43 
// Double declared to preserve US bss ordering.
extern uintptr_t sSegmentTable[32];
extern u32 sPoolFreeSpace;
extern u8 *sPoolStart;
extern u8 *sPoolEnd;
extern struct MainPoolBlock *sPoolListHeadL;
extern struct MainPoolBlock *sPoolListHeadR;


/**
 * Memory pool for small graphical effects that aren't connected to Objects.
 * Used for colored text, paintings, and environmental snow and bubbles.
 */
struct MemoryPool *gEffectsMemoryPool;

// Physical base address for each of the 32 memory segments.
FORCE_BSS uintptr_t sSegmentTable[32];
// Bytes still available between the two inward-growing stacks of the main pool.
FORCE_BSS u32 sPoolFreeSpace;
FORCE_BSS u8 *sPoolStart;
FORCE_BSS u8 *sPoolEnd;
// Current list heads for the left- and right-growing sides of the main pool.
FORCE_BSS struct MainPoolBlock *sPoolListHeadL;
FORCE_BSS struct MainPoolBlock *sPoolListHeadR;

// Top of the stack of saved pool states (see main_pool_push_state).
static struct MainPoolState *gMainPoolState = NULL;
     67 
     68 uintptr_t set_segment_base_addr(s32 segment, void *addr) {
     69     sSegmentTable[segment] = (uintptr_t) addr & 0x1FFFFFFF;
     70     return sSegmentTable[segment];
     71 }
     72 
     73 void *get_segment_base_addr(s32 segment) {
     74     return (void *) (sSegmentTable[segment] | 0x80000000);
     75 }
     76 
     77 #ifndef NO_SEGMENTED_MEMORY
     78 void *segmented_to_virtual(const void *addr) {
     79     size_t segment = (uintptr_t) addr >> 24;
     80     size_t offset = (uintptr_t) addr & 0x00FFFFFF;
     81 
     82     return (void *) ((sSegmentTable[segment] + offset) | 0x80000000);
     83 }
     84 
     85 void *virtual_to_segmented(u32 segment, const void *addr) {
     86     size_t offset = ((uintptr_t) addr & 0x1FFFFFFF) - sSegmentTable[segment];
     87 
     88     return (void *) ((segment << 24) + offset);
     89 }
     90 
/**
 * Append gSPSegment commands for segments 0-15 to the display list, so the
 * RSP's segment table matches the CPU-side sSegmentTable.
 */
void move_segment_table_to_dmem(void) {
    s32 i;

    for (i = 0; i < 16; i++) {
        gSPSegment(gDisplayListHead++, i, sSegmentTable[i]);
    }
}
     98 #else
// NO_SEGMENTED_MEMORY: segmented addresses are plain pointers, so address
// translation is the identity and the DMEM upload is a no-op.
void *segmented_to_virtual(const void *addr) {
    return (void *) addr;
}

void *virtual_to_segmented(u32 segment, const void *addr) {
    return (void *) addr;
}

void move_segment_table_to_dmem(void) {
}
    109 #endif
    110 
/**
 * Initialize the main memory pool. This pool is conceptually a pair of stacks
 * that grow inward from the left and right. It therefore only supports
 * freeing the object that was most recently allocated from a side.
 */
void main_pool_init(UNUSED_CN void *start, void *end) {
#if defined(VERSION_CN) && !defined(USE_EXT_RAM)
    // iQue without expansion RAM ignores `start` and begins right after the Z-buffer.
    sPoolStart = (u8 *) ALIGN16((uintptr_t) &gZBufferEnd) + 16;
#else
    sPoolStart = (u8 *) ALIGN16((uintptr_t) start) + 16;
#endif
    // Both ends are aligned inward; 16 bytes at each end hold the initial
    // list-head blocks.
    sPoolEnd = (u8 *) ALIGN16((uintptr_t) end - 15) - 16;
    sPoolFreeSpace = sPoolEnd - sPoolStart;

    // Empty sentinel blocks marking the current left/right stack tops.
    sPoolListHeadL = (struct MainPoolBlock *) (sPoolStart - 16);
    sPoolListHeadR = (struct MainPoolBlock *) sPoolEnd;
    sPoolListHeadL->prev = NULL;
    sPoolListHeadL->next = NULL;
    sPoolListHeadR->prev = NULL;
    sPoolListHeadR->next = NULL;
}
    132 
/**
 * Allocate a block of memory from the pool of given size, and from the
 * specified side of the pool (MEMORY_POOL_LEFT or MEMORY_POOL_RIGHT).
 * If there is not enough space, return NULL.
 */
void *main_pool_alloc(u32 size, u32 side) {
    struct MainPoolBlock *newListHead;
    void *addr = NULL;

    // Round the request up to 16 bytes and add 16 for the block header.
    size = ALIGN16(size) + 16;
    if (size != 0 && sPoolFreeSpace >= size) {
        sPoolFreeSpace -= size;
        if (side == MEMORY_POOL_LEFT) {
            // Grow rightward: the returned memory starts just past the old
            // head's 16-byte header; the new head is placed after the block.
            newListHead = (struct MainPoolBlock *) ((u8 *) sPoolListHeadL + size);
            sPoolListHeadL->next = newListHead;
            newListHead->prev = sPoolListHeadL;
            newListHead->next = NULL;
            addr = (u8 *) sPoolListHeadL + 16;
            sPoolListHeadL = newListHead;
        } else {
            // Grow leftward: the new head (with its header) is placed below
            // the old one; the returned memory starts past the new header.
            newListHead = (struct MainPoolBlock *) ((u8 *) sPoolListHeadR - size);
            sPoolListHeadR->prev = newListHead;
            newListHead->next = sPoolListHeadR;
            newListHead->prev = NULL;
            sPoolListHeadR = newListHead;
            addr = (u8 *) sPoolListHeadR + 16;
        }
    }
    return addr;
}
    163 
/**
 * Free a block of memory that was allocated from the pool. The block must be
 * the most recently allocated block from its end of the pool, otherwise all
 * newer blocks are freed as well.
 * Return the amount of free space left in the pool.
 */
u32 main_pool_free(void *addr) {
    // The 16-byte block header sits immediately before the returned address.
    struct MainPoolBlock *block = (struct MainPoolBlock *) ((u8 *) addr - 16);
    struct MainPoolBlock *oldListHead = (struct MainPoolBlock *) ((u8 *) addr - 16);

    if (oldListHead < sPoolListHeadL) {
        // Left-side block: walk forward to the current left head, then roll
        // the head back to this block, releasing it and all newer blocks.
        while (oldListHead->next != NULL) {
            oldListHead = oldListHead->next;
        }
        sPoolListHeadL = block;
        sPoolListHeadL->next = NULL;
        sPoolFreeSpace += (uintptr_t) oldListHead - (uintptr_t) sPoolListHeadL;
    } else {
        // Right-side block: walk backward to the current right head, then move
        // the head up past this block, releasing it and all newer blocks.
        while (oldListHead->prev != NULL) {
            oldListHead = oldListHead->prev;
        }
        sPoolListHeadR = block->next;
        sPoolListHeadR->prev = NULL;
        sPoolFreeSpace += (uintptr_t) sPoolListHeadR - (uintptr_t) oldListHead;
    }
    return sPoolFreeSpace;
}
    191 
    192 /**
    193  * Resize a block of memory that was allocated from the left side of the pool.
    194  * If the block is increasing in size, it must be the most recently allocated
    195  * block from the left side.
    196  * The block does not move.
    197  */
    198 void *main_pool_realloc(void *addr, u32 size) {
    199     void *newAddr = NULL;
    200     struct MainPoolBlock *block = (struct MainPoolBlock *) ((u8 *) addr - 16);
    201 
    202     if (block->next == sPoolListHeadL) {
    203         main_pool_free(addr);
    204         newAddr = main_pool_alloc(size, MEMORY_POOL_LEFT);
    205     }
    206     return newAddr;
    207 }
    208 
/**
 * Return the size of the largest block that can currently be allocated from the
 * pool. (16 bytes of free space go to the block header main_pool_alloc adds
 * to every allocation.)
 */
u32 main_pool_available(void) {
    return sPoolFreeSpace - 16;
}
    216 
/**
 * Push pool state, to be restored later. Return the amount of free space left
 * in the pool.
 */
u32 main_pool_push_state(void) {
    // Capture the current state before the state record itself is allocated.
    struct MainPoolState *prevState = gMainPoolState;
    u32 freeSpace = sPoolFreeSpace;
    struct MainPoolBlock *lhead = sPoolListHeadL;
    struct MainPoolBlock *rhead = sPoolListHeadR;

    // NOTE(review): the allocation result is not checked; if the pool is
    // exhausted this dereferences NULL. Callers rely on the pool having room.
    gMainPoolState = main_pool_alloc(sizeof(*gMainPoolState), MEMORY_POOL_LEFT);
    gMainPoolState->freeSpace = freeSpace;
    gMainPoolState->listHeadL = lhead;
    gMainPoolState->listHeadR = rhead;
    gMainPoolState->prev = prevState;
    return sPoolFreeSpace;
}
    234 
    235 /**
    236  * Restore pool state from a previous call to main_pool_push_state. Return the
    237  * amount of free space left in the pool.
    238  */
    239 u32 main_pool_pop_state(void) {
    240     sPoolFreeSpace = gMainPoolState->freeSpace;
    241     sPoolListHeadL = gMainPoolState->listHeadL;
    242     sPoolListHeadR = gMainPoolState->listHeadR;
    243     gMainPoolState = gMainPoolState->prev;
    244     return sPoolFreeSpace;
    245 }
    246 
/**
 * Perform a DMA read from ROM. The transfer is split into 4KB blocks, and this
 * function blocks until completion.
 */
static void dma_read(u8 *dest, u8 *srcStart, u8 *srcEnd) {
    u32 size = ALIGN16(srcEnd - srcStart);

    // Invalidate the destination range so the CPU won't read stale cache
    // lines after the PI writes directly to RDRAM.
    osInvalDCache(dest, size);
    while (size != 0) {
        // Issue at most 0x1000 bytes per PI DMA.
        u32 copySize = (size >= 0x1000) ? 0x1000 : size;

        osPiStartDma(&gDmaIoMesg, OS_MESG_PRI_NORMAL, OS_READ, (uintptr_t) srcStart, dest, copySize,
                     &gDmaMesgQueue);
        // Block until this chunk's completion message arrives.
        osRecvMesg(&gDmaMesgQueue, &gMainReceivedMesg, OS_MESG_BLOCK);

        dest += copySize;
        srcStart += copySize;
        size -= copySize;
    }
}
    267 
    268 /**
    269  * Perform a DMA read from ROM, allocating space in the memory pool to write to.
    270  * Return the destination address.
    271  */
    272 static void *dynamic_dma_read(u8 *srcStart, u8 *srcEnd, u32 side) {
    273     void *dest;
    274     u32 size = ALIGN16(srcEnd - srcStart);
    275 
    276     dest = main_pool_alloc(size, side);
    277     if (dest != NULL) {
    278         dma_read(dest, srcStart, srcEnd);
    279     }
    280     return dest;
    281 }
    282 
    283 #ifndef NO_SEGMENTED_MEMORY
    284 /**
    285  * Load data from ROM into a newly allocated block, and set the segment base
    286  * address to this block.
    287  */
    288 void *load_segment(s32 segment, u8 *srcStart, u8 *srcEnd, u32 side) {
    289     void *addr = dynamic_dma_read(srcStart, srcEnd, side);
    290 
    291     if (addr != NULL) {
    292         set_segment_base_addr(segment, addr);
    293     }
    294     return addr;
    295 }
    296 
/*
 * Allocate a block of memory starting at destAddr and ending at the end of
 * the memory pool. Then copy srcStart through srcEnd from ROM to this block.
 * If this block is not large enough to hold the ROM data, or that portion
 * of the pool is already allocated, return NULL.
 */
void *load_to_fixed_pool_addr(u8 *destAddr, u8 *srcStart, u8 *srcEnd) {
    void *dest = NULL;
    u32 srcSize = ALIGN16(srcEnd - srcStart);
    // Space between destAddr and the pool's current right-side head.
    u32 destSize = ALIGN16((u8 *) sPoolListHeadR - destAddr);

    if (srcSize <= destSize) {
        // Claim everything from destAddr to the end of the pool.
        dest = main_pool_alloc(destSize, MEMORY_POOL_RIGHT);
        if (dest != NULL) {
            bzero(dest, destSize);
            // Flush the zeroed region before the PI DMA overwrites RDRAM.
            osWritebackDCacheAll();
            dma_read(dest, srcStart, srcEnd);
            // The loaded data may contain code, so invalidate both caches.
            osInvalICache(dest, destSize);
            osInvalDCache(dest, destSize);
        }
    } else {
        // (empty branch retained from the original)
    }
    return dest;
}
    321 
    322 /**
    323  * Decompress the block of ROM data from srcStart to srcEnd and return a
    324  * pointer to an allocated buffer holding the decompressed data. Set the
    325  * base address of segment to this address.
    326  */
    327 void *load_segment_decompress(s32 segment, u8 *srcStart, u8 *srcEnd) {
    328     void *dest = NULL;
    329 
    330     u32 compSize = ALIGN16(srcEnd - srcStart);
    331     u8 *compressed = main_pool_alloc(compSize, MEMORY_POOL_RIGHT);
    332 
    333     // Decompressed size from mio0 header
    334     u32 *size = (u32 *) (compressed + 4);
    335 
    336     if (compressed != NULL) {
    337         dma_read(compressed, srcStart, srcEnd);
    338         dest = main_pool_alloc(*size, MEMORY_POOL_LEFT);
    339         if (dest != NULL) {
    340             CN_DEBUG_PRINTF(("start decompress\n"));
    341             decompress(compressed, dest);
    342             CN_DEBUG_PRINTF(("end decompress\n"));
    343 
    344             set_segment_base_addr(segment, dest);
    345             main_pool_free(compressed);
    346         } else {
    347         }
    348     } else {
    349     }
    350     return dest;
    351 }
    352 
    353 void *load_segment_decompress_heap(u32 segment, u8 *srcStart, u8 *srcEnd) {
    354     UNUSED void *dest = NULL;
    355     u32 compSize = ALIGN16(srcEnd - srcStart);
    356     u8 *compressed = main_pool_alloc(compSize, MEMORY_POOL_RIGHT);
    357     UNUSED u32 *pUncSize = (u32 *) (compressed + 4);
    358 
    359     if (compressed != NULL) {
    360         dma_read(compressed, srcStart, srcEnd);
    361         decompress(compressed, gDecompressionHeap);
    362         set_segment_base_addr(segment, gDecompressionHeap);
    363         main_pool_free(compressed);
    364     } else {
    365     }
    366     return gDecompressionHeap;
    367 }
    368 
/**
 * Load the engine segment from ROM into its fixed RAM region
 * (SEG_ENGINE..SEG_FRAMEBUFFERS), zeroing the region first and invalidating
 * both caches afterwards since the loaded data is executable code.
 */
void load_engine_code_segment(void) {
    void *startAddr = (void *) SEG_ENGINE;
    u32 totalSize = SEG_FRAMEBUFFERS - SEG_ENGINE;
    UNUSED u32 alignedSize = ALIGN16(_engineSegmentRomEnd - _engineSegmentRomStart);

    bzero(startAddr, totalSize);
    // Flush the zeroed region to RDRAM before the DMA writes over it.
    osWritebackDCacheAll();
    dma_read(startAddr, _engineSegmentRomStart, _engineSegmentRomEnd);
    osInvalICache(startAddr, totalSize);
    osInvalDCache(startAddr, totalSize);
}
    380 #endif
    381 
    382 /**
    383  * Allocate an allocation-only pool from the main pool. This pool doesn't
    384  * support freeing allocated memory.
    385  * Return NULL if there is not enough space in the main pool.
    386  */
    387 struct AllocOnlyPool *alloc_only_pool_init(u32 size, u32 side) {
    388     void *addr;
    389     struct AllocOnlyPool *subPool = NULL;
    390 
    391     size = ALIGN4(size);
    392     addr = main_pool_alloc(size + sizeof(struct AllocOnlyPool), side);
    393     if (addr != NULL) {
    394         subPool = (struct AllocOnlyPool *) addr;
    395         subPool->totalSpace = size;
    396         subPool->usedSpace = 0;
    397         subPool->startPtr = (u8 *) addr + sizeof(struct AllocOnlyPool);
    398         subPool->freePtr = (u8 *) addr + sizeof(struct AllocOnlyPool);
    399     }
    400     return subPool;
    401 }
    402 
    403 /**
    404  * Allocate from an allocation-only pool.
    405  * Return NULL if there is not enough space.
    406  */
    407 void *alloc_only_pool_alloc(struct AllocOnlyPool *pool, s32 size) {
    408     void *addr = NULL;
    409 
    410     size = ALIGN4(size);
    411     if (size > 0 && pool->usedSpace + size <= pool->totalSpace) {
    412         addr = pool->freePtr;
    413         pool->freePtr += size;
    414         pool->usedSpace += size;
    415     }
    416     return addr;
    417 }
    418 
    419 /**
    420  * Resize an allocation-only pool.
    421  * If the pool is increasing in size, the pool must be the last thing allocated
    422  * from the left end of the main pool.
    423  * The pool does not move.
    424  */
    425 struct AllocOnlyPool *alloc_only_pool_resize(struct AllocOnlyPool *pool, u32 size) {
    426     struct AllocOnlyPool *newPool;
    427 
    428     size = ALIGN4(size);
    429     newPool = main_pool_realloc(pool, size + sizeof(struct AllocOnlyPool));
    430     if (newPool != NULL) {
    431         pool->totalSpace = size;
    432     }
    433     return newPool;
    434 }
    435 
    436 /**
    437  * Allocate a memory pool from the main pool. This pool supports arbitrary
    438  * order for allocation/freeing.
    439  * Return NULL if there is not enough space in the main pool.
    440  */
    441 struct MemoryPool *mem_pool_init(u32 size, u32 side) {
    442     void *addr;
    443     struct MemoryBlock *block;
    444     struct MemoryPool *pool = NULL;
    445 
    446     size = ALIGN4(size);
    447     addr = main_pool_alloc(size + sizeof(struct MemoryPool), side);
    448     if (addr != NULL) {
    449         pool = (struct MemoryPool *) addr;
    450 
    451         pool->totalSpace = size;
    452         pool->firstBlock = (struct MemoryBlock *) ((u8 *) addr + sizeof(struct MemoryPool));
    453         pool->freeList.next = (struct MemoryBlock *) ((u8 *) addr + sizeof(struct MemoryPool));
    454 
    455         block = pool->firstBlock;
    456         block->next = NULL;
    457         block->size = pool->totalSpace;
    458     }
    459     return pool;
    460 }
    461 
/**
 * Allocate from a memory pool. Return NULL if there is not enough space.
 */
void *mem_pool_alloc(struct MemoryPool *pool, u32 size) {
    struct MemoryBlock *freeBlock = &pool->freeList;
    void *addr = NULL;

    // Include room for the MemoryBlock header in the request.
    size = ALIGN4(size) + sizeof(struct MemoryBlock);
    // First-fit scan of the free list.
    while (freeBlock->next != NULL) {
        if (freeBlock->next->size >= size) {
            // Usable memory starts right after the header.
            addr = (u8 *) freeBlock->next + sizeof(struct MemoryBlock);
            if (freeBlock->next->size - size <= sizeof(struct MemoryBlock)) {
                // Remainder can't hold a header; hand out the whole block.
                freeBlock->next = freeBlock->next->next;
            } else {
                // Split: carve the allocation off the front and link the
                // remaining tail back into the free list.
                struct MemoryBlock *newBlock = (struct MemoryBlock *) ((u8 *) freeBlock->next + size);
                newBlock->size = freeBlock->next->size - size;
                newBlock->next = freeBlock->next->next;
                freeBlock->next->size = size;
                freeBlock->next = newBlock;
            }
            break;
        }
        freeBlock = freeBlock->next;
    }
    return addr;
}
    488 
/**
 * Free a block that was allocated using mem_pool_alloc.
 * The free list is kept in address order, and adjacent free blocks are
 * coalesced to limit fragmentation.
 */
BAD_RETURN(s32) mem_pool_free(struct MemoryPool *pool, void *addr) {
    // Recover the block header that precedes the caller's pointer.
    struct MemoryBlock *block = (struct MemoryBlock *) ((u8 *) addr - sizeof(struct MemoryBlock));
    struct MemoryBlock *freeList = pool->freeList.next;

    if (pool->freeList.next == NULL) {
        // Free list was empty; this block becomes its only entry.
        pool->freeList.next = block;
        block->next = NULL;
    } else {
        if (block < pool->freeList.next) {
            // Block precedes the first free entry: prepend it, merging with
            // that entry if the two are contiguous.
            if ((u8 *) pool->freeList.next == (u8 *) block + block->size) {
                block->size += freeList->size;
                block->next = freeList->next;
                pool->freeList.next = block;
            } else {
                block->next = pool->freeList.next;
                pool->freeList.next = block;
            }
        } else {
            // Walk to the free entry after which the block belongs
            // (freeList < block < freeList->next).
            while (freeList->next != NULL) {
                if (freeList < block && block < freeList->next) {
                    break;
                }

                freeList = freeList->next;
            }

            if (block == (struct MemoryBlock *) ((u8 *) freeList + freeList->size)) {
                // Contiguous with the predecessor: absorb the block into it.
                freeList->size += block->size;
                block = freeList;
            } else {
                block->next = freeList->next;
                freeList->next = block;
            }

            // Merge with the following entry if contiguous.
            if (block->next != NULL && (u8 *) block->next == (u8 *) block + block->size) {
                block->size = block->size + block->next->size;
                block->next = block->next->next;
            }
        }
    }
    // nothing is returned, but must have non-void return type for render_text_labels to match on iQue
}
    534 
    535 void *alloc_display_list(u32 size) {
    536     void *ptr = NULL;
    537 
    538     size = ALIGN8(size);
    539     if (gGfxPoolEnd - size >= (u8 *) gDisplayListHead) {
    540         gGfxPoolEnd -= size;
    541         ptr = gGfxPoolEnd;
    542     } else {
    543     }
    544     return ptr;
    545 }
    546 
/**
 * Load a DmaTable from ROM. The table's length isn't known up front, so the
 * first u32 (the entry count) is DMA'd alone to compute the full size, that
 * temporary is freed, and then the whole table is DMA'd into a new block.
 */
static struct DmaTable *load_dma_table_address(u8 *srcAddr) {
    struct DmaTable *table = dynamic_dma_read(srcAddr, srcAddr + sizeof(u32),
                                                             MEMORY_POOL_LEFT);
    // One OffsetSizePair is already part of sizeof(struct DmaTable).
    u32 size = table->count * sizeof(struct OffsetSizePair) +
        sizeof(struct DmaTable) - sizeof(struct OffsetSizePair);
    main_pool_free(table);

    table = dynamic_dma_read(srcAddr, srcAddr + size, MEMORY_POOL_LEFT);
    // Remember where the table came from so entries can be located in ROM.
    table->srcAddr = srcAddr;
    return table;
}
    558 
    559 void setup_dma_table_list(struct DmaHandlerList *list, void *srcAddr, void *buffer) {
    560     if (srcAddr != NULL) {
    561         list->dmaTable = load_dma_table_address(srcAddr);
    562     }
    563     list->currentAddr = NULL;
    564     list->bufTarget = buffer;
    565 }
    566 
    567 s32 load_patchable_table(struct DmaHandlerList *list, s32 index) {
    568     s32 ret = FALSE;
    569     struct DmaTable *table = list->dmaTable;
    570 
    571     if ((u32)index < table->count) {
    572         u8 *addr = table->srcAddr + table->anim[index].offset;
    573         s32 size = table->anim[index].size;
    574 
    575         if (addr != list->currentAddr) {
    576             dma_read(list->bufTarget, addr, addr + size);
    577             list->currentAddr = addr;
    578             ret = TRUE;
    579         }
    580     }
    581     return ret;
    582 }