#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "tlsf.h"
#include "tlsfbits.h"

/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public
{
    /* log2 of number of linear subdivisions of block sizes. */
    SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
#if defined (TLSF_64BIT)
    /* All allocation sizes and addresses are aligned to 8 bytes. */
    ALIGN_SIZE_LOG2 = 3,
#else
    /* All allocation sizes and addresses are aligned to 4 bytes. */
    ALIGN_SIZE_LOG2 = 2,
#endif
    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

    /*
    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
    ** However, because we linearly subdivide the second-level lists, and
    ** our minimum size granularity is 4 bytes, it doesn't make sense to
    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
    ** trying to split size ranges into more slots than we have available.
    ** Instead, we calculate the minimum threshold size, and place all
    ** blocks below that size into the 0th first-level list.
    */

#if defined (TLSF_64BIT)
    /*
    ** TODO: We can increase this to support larger sizes, at the expense
    ** of more overhead in the TLSF structure.
    */
    FL_INDEX_MAX = 32,
#else
    FL_INDEX_MAX = 30,
#endif
    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};

/*
** Cast and min/max macros.
*/

#define tlsf_cast(t, exp)   ((t) (exp))
#define tlsf_min(a, b)      ((a) < (b) ? (a) : (b))
#define tlsf_max(a, b)      ((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/

#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
    typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
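/*
** Illustrative arithmetic (not part of the allocator), assuming the 32-bit
** configuration above (ALIGN_SIZE_LOG2 = 2, SL_INDEX_COUNT_LOG2 = 5):
**   ALIGN_SIZE       = 4
**   SL_INDEX_COUNT   = 32 second-level lists per first-level range
**   FL_INDEX_SHIFT   = 7, so SMALL_BLOCK_SIZE = 128; every request below
**                      128 bytes shares first-level list 0
**   FL_INDEX_COUNT   = 30 - 7 + 1 = 24 first-level lists: list 0 for sizes
**                      below 128, lists 1..23 for the power-of-two ranges
**                      covering [2^7, 2^30)
*/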

/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored at the end of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t
{
    /* Points to the previous physical block. */
    struct block_header_t* prev_phys_block;

    /* The size of this block, excluding the block header. */
    size_t size;

    /* Next and previous free blocks. */
    struct block_header_t* next_free;
    struct block_header_t* prev_free;
} block_header_t;

/*
** Since block sizes are always at least a multiple of 4, the two least
** significant bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
    offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min =
    sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;


/* The TLSF control structure. */
typedef struct control_t
{
    /* Empty lists point at this block to indicate they are free. */
    block_header_t block_null;

    /* Bitmaps for free lists. */
    unsigned int fl_bitmap;
    unsigned int sl_bitmap[FL_INDEX_COUNT];

    /* Head of free lists. */
    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;

/* A type used for casting when doing pointer arithmetic. */
typedef ptrdiff_t tlsfptr_t;
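/*
** Physical layout sketch implied by the comments above (illustrative):
**
**   ... previous block's payload | prev_phys_block | size | payload ...
**                                 ^                        ^
**                                 block_header_t*          block_to_ptr()
**
** The prev_phys_block word overlaps the tail of the previous block and is
** written only while that block is free; a used block therefore pays only
** block_header_overhead (the size field) per allocation.
*/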

/*
** block_header_t member functions.
*/

static size_t block_size(const block_header_t* block)
{
    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static void block_set_size(block_header_t* block, size_t size)
{
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static int block_is_last(const block_header_t* block)
{
    return 0 == block_size(block);
}

static int block_is_free(const block_header_t* block)
{
    return tlsf_cast(int, block->size & block_header_free_bit);
}

static void block_set_free(block_header_t* block)
{
    block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t* block)
{
    block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t* block)
{
    return tlsf_cast(int, block->size & block_header_prev_free_bit);
}

static void block_set_prev_free(block_header_t* block)
{
    block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t* block)
{
    block->size &= ~block_header_prev_free_bit;
}

static block_header_t* block_from_ptr(const void* ptr)
{
    return tlsf_cast(block_header_t*,
        tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

static void* block_to_ptr(const block_header_t* block)
{
    return tlsf_cast(void*,
        tlsf_cast(unsigned char*, block) + block_start_offset);
}

/* Return location of next block after block of given size. */
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
    return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
}

/* Return location of previous block. */
static block_header_t* block_prev(const block_header_t* block)
{
    return block->prev_phys_block;
}

/* Return location of next existing block. */
static block_header_t* block_next(const block_header_t* block)
{
    block_header_t* next = offset_to_block(block_to_ptr(block),
        block_size(block) - block_header_overhead);
    tlsf_assert(!block_is_last(block));
    return next;
}

/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t* block_link_next(block_header_t* block)
{
    block_header_t* next = block_next(block);
    next->prev_phys_block = block;
    return next;
}
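/*
** Worked example of the flag packing (illustrative, 32-bit build): a
** 104-byte free block whose previous physical block is in use stores
**   block->size = 104 | block_header_free_bit = 0x69
** and block_size() masks the two flag bits back off, returning 104.
*/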
static void block_mark_as_free(block_header_t* block)
{
    /* Link the block to the next block, first. */
    block_header_t* next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

static void block_mark_as_used(block_header_t* block)
{
    block_header_t* next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}

static size_t align_up(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return (x + (align - 1)) & ~(align - 1);
}

static size_t align_down(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return x - (x & (align - 1));
}

static void* align_ptr(const void* ptr, size_t align)
{
    const tlsfptr_t aligned =
        (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return tlsf_cast(void*, aligned);
}

/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if (size && size < block_size_max)
    {
        const size_t aligned = align_up(size, align);
        adjust = tlsf_max(aligned, block_size_min);
    }
    return adjust;
}

/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int* fli, int* sli)
{
    int fl, sl;
    if (size < SMALL_BLOCK_SIZE)
    {
        /* Store small blocks in first list. */
        fl = 0;
        sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
    }
    else
    {
        fl = tlsf_fls_sizet(size);
        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
        fl -= (FL_INDEX_SHIFT - 1);
    }
    *fli = fl;
    *sli = sl;
}

/* This version rounds up to the next block size (for allocations) */
static void mapping_search(size_t size, int* fli, int* sli)
{
    if (size >= (1 << SL_INDEX_COUNT_LOG2))
    {
        const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
    }
    mapping_insert(size, fli, sli);
}
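/*
** Worked example for mapping_insert (illustrative, 32-bit configuration):
** size = 460. Since 460 >= SMALL_BLOCK_SIZE:
**   fl = fls(460) = 8                  (256 <= 460 < 512)
**   sl = (460 >> (8 - 5)) ^ (1 << 5)
**      = 57 ^ 32 = 25                  (25th of 32 slots in [256, 512))
**   fl = 8 - (FL_INDEX_SHIFT - 1) = 2
** So a 460-byte free block lives at blocks[2][25].
*/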
static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
{
    int fl = *fli;
    int sl = *sli;

    /*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
    unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
    if (!sl_map)
    {
        /* No block exists. Search in the next largest first-level list. */
        const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
        if (!fl_map)
        {
            /* No free blocks available, memory has been exhausted. */
            return 0;
        }

        fl = tlsf_ffs(fl_map);
        *fli = fl;
        sl_map = control->sl_bitmap[fl];
    }
    tlsf_assert(sl_map && "internal error - second level bitmap is null");
    sl = tlsf_ffs(sl_map);
    *sli = sl;

    /* Return the first block in the free list. */
    return control->blocks[fl][sl];
}

/* Remove a free block from the free list. */
static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
    block_header_t* prev = block->prev_free;
    block_header_t* next = block->next_free;
    tlsf_assert(prev && "prev_free field can not be null");
    tlsf_assert(next && "next_free field can not be null");
    next->prev_free = prev;
    prev->next_free = next;

    /* If this block is the head of the free list, set new head. */
    if (control->blocks[fl][sl] == block)
    {
        control->blocks[fl][sl] = next;

        /* If the new head is null, clear the bitmap. */
        if (next == &control->block_null)
        {
            control->sl_bitmap[fl] &= ~(1 << sl);

            /* If the second bitmap is now empty, clear the fl bitmap. */
            if (!control->sl_bitmap[fl])
            {
                control->fl_bitmap &= ~(1 << fl);
            }
        }
    }
}

/* Insert a free block into the free block list. */
static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
    block_header_t* current = control->blocks[fl][sl];
    tlsf_assert(current && "free list cannot have a null entry");
    tlsf_assert(block && "cannot insert a null entry into the free list");
    block->next_free = current;
    block->prev_free = &control->block_null;
    current->prev_free = block;

    tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
        && "block not aligned properly");
    /*
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
    control->blocks[fl][sl] = block;
    control->fl_bitmap |= (1 << fl);
    control->sl_bitmap[fl] |= (1 << sl);
}

/* Remove a given block from the free list. */
static void block_remove(control_t* control, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}

/* Insert a given block into the free list. */
static void block_insert(control_t* control, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    insert_free_block(control, block, fl, sl);
}

static int block_can_split(block_header_t* block, size_t size)
{
    return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size)
{
    /* Calculate the amount of space left in the remaining block. */
    block_header_t* remaining =
        offset_to_block(block_to_ptr(block), size - block_header_overhead);

    const size_t remain_size = block_size(block) - (size + block_header_overhead);

    tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
        && "remaining block not aligned properly");

    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

    block_set_size(block, size);
    block_mark_as_free(remaining);

    return remaining;
}
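/*
** Illustrative split arithmetic: on a 32-bit build (block_header_overhead
** is 4), splitting a 512-byte free block to satisfy size = 128 leaves
**   remain_size = 512 - (128 + 4) = 380
** bytes in the remaining block, which block_mark_as_free links back into
** the physical chain.
*/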
/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
    tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);
    return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t* block_merge_prev(control_t* control, block_header_t* block)
{
    if (block_is_prev_free(block))
    {
        block_header_t* prev = block_prev(block);
        tlsf_assert(prev && "prev physical block can't be null");
        tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
        block_remove(control, prev);
        block = block_absorb(prev, block);
    }

    return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t* block_merge_next(control_t* control, block_header_t* block)
{
    block_header_t* next = block_next(block);
    tlsf_assert(next && "next physical block can't be null");

    if (block_is_free(next))
    {
        tlsf_assert(!block_is_last(block) && "block can't be last!");
        block_remove(control, next);
        block = block_absorb(block, next);
    }

    return block;
}

/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(control_t* control, block_header_t* block, size_t size)
{
    tlsf_assert(block_is_free(block) && "block must be free");
    if (block_can_split(block, size))
    {
        block_header_t* remaining_block = block_split(block, size);
        block_link_next(block);
        block_set_prev_free(remaining_block);
        block_insert(control, remaining_block);
    }
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(control_t* control, block_header_t* block, size_t size)
{
    tlsf_assert(!block_is_free(block) && "block must be used");
    if (block_can_split(block, size))
    {
        /* If the next block is free, we must coalesce. */
        block_header_t* remaining_block = block_split(block, size);
        block_set_prev_used(remaining_block);

        remaining_block = block_merge_next(control, remaining_block);
        block_insert(control, remaining_block);
    }
}
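/*
** Illustrative consequence of the two merge helpers: freeing a used block
** that sits between two free neighbors removes both neighbors from their
** lists and yields a single coalesced block in constant time, e.g.
**   [free 64][used 32][free 128]  ->  [free 64 + 4 + 32 + 4 + 128 = 232]
** (4 bytes of header overhead reclaimed per absorbed boundary, 32-bit build).
*/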
static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
{
    block_header_t* remaining_block = block;
    if (block_can_split(block, size))
    {
        /* We want the 2nd block. */
        remaining_block = block_split(block, size - block_header_overhead);
        block_set_prev_free(remaining_block);

        block_link_next(block);
        block_insert(control, block);
    }

    return remaining_block;
}

static block_header_t* block_locate_free(control_t* control, size_t size)
{
    int fl = 0, sl = 0;
    block_header_t* block = 0;

    if (size)
    {
        mapping_search(size, &fl, &sl);
        block = search_suitable_block(control, &fl, &sl);

        /* Defensive check: never hand out a block whose raw size field is zero. */
        if (block && !block->size)
        {
            block = NULL;
        }
    }

    if (block)
    {
        tlsf_assert(block_size(block) >= size);
        remove_free_block(control, block, fl, sl);
    }

    return block;
}

static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
{
    void* p = 0;
    if (block)
    {
        block_trim_free(control, block, size);
        block_mark_as_used(block);
        p = block_to_ptr(block);
    }
    return p;
}

/* Clear structure and point all empty lists at the null block. */
static void control_construct(control_t* control)
{
    int i, j;

    control->block_null.next_free = &control->block_null;
    control->block_null.prev_free = &control->block_null;

    control->fl_bitmap = 0;
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        control->sl_bitmap[i] = 0;
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            control->blocks[i][j] = &control->block_null;
        }
    }
}
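/*
** Illustrative allocation path, assuming a 32-bit build:
** tlsf_malloc(t, 13)
**   -> adjust_request_size(13, 4) = 16   (aligned up, >= block_size_min of 12)
**   -> block_locate_free(t, 16)          (mapping_search rounds the class up)
**   -> block_prepare_used(...)           (trim the tail, mark used, return ptr)
*/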

/*
** Debugging utilities.
*/

typedef struct integrity_t
{
    int prev_status;
    int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
    (void) used;
    block_header_t* block = block_from_ptr(ptr);
    integrity_t* integ = tlsf_cast(integrity_t*, user);
    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
    const int this_status = block_is_free(block) ? 1 : 0;
    const size_t this_block_size = block_size(block);

    int status = 0;
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

    integ->prev_status = this_status;
    integ->status += status;
}

int tlsf_check(tlsf_t tlsf)
{
    int i, j;

    control_t* control = tlsf_cast(control_t*, tlsf);
    int status = 0;

    /* Check that the free lists and bitmaps are accurate. */
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            const int fl_map = control->fl_bitmap & (1 << i);
            const int sl_list = control->sl_bitmap[i];
            const int sl_map = sl_list & (1 << j);
            const block_header_t* block = control->blocks[i][j];

            /* Check that first- and second-level lists agree. */
            if (!fl_map)
            {
                tlsf_insist(!sl_map && "second-level map must be null");
            }

            if (!sl_map)
            {
                tlsf_insist(block == &control->block_null && "block list must be null");
                continue;
            }

            /* Check that there is at least one free block. */
            tlsf_insist(sl_list && "no free blocks in second-level map");
            tlsf_insist(block != &control->block_null && "block should not be null");

            while (block != &control->block_null)
            {
                int fli, sli;
                tlsf_insist(block_is_free(block) && "block should be free");
                tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
                tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
                tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

                mapping_insert(block_size(block), &fli, &sli);
                tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
                block = block->next_free;
            }
        }
    }

    return status;
}

#undef tlsf_insist

static void default_walker(void* ptr, size_t size, int used, void* user)
{
    (void)user;
    printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
}

void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
    tlsf_walker pool_walker = walker ? walker : default_walker;
    block_header_t* block =
        offset_to_block(pool, -(int)block_header_overhead);

    while (block && !block_is_last(block))
    {
        pool_walker(
            block_to_ptr(block),
            block_size(block),
            !block_is_free(block),
            user);
        block = block_next(block);
    }
}

size_t tlsf_block_size(void* ptr)
{
    size_t size = 0;
    if (ptr)
    {
        const block_header_t* block = block_from_ptr(ptr);
        size = block_size(block);
    }
    return size;
}

int tlsf_check_pool(pool_t pool)
{
    /* Check that the blocks are physically correct. */
    integrity_t integ = { 0, 0 };
    tlsf_walk_pool(pool, integrity_walker, &integ);

    return integ.status;
}

/*
** Size of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a control_t.
*/
size_t tlsf_size()
{
    return sizeof(control_t);
}

size_t tlsf_align_size()
{
    return ALIGN_SIZE;
}

size_t tlsf_block_size_min()
{
    return block_size_min;
}

size_t tlsf_block_size_max()
{
    return block_size_max;
}
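/*
** Illustrative tlsf_walk_pool usage (hypothetical walker, not part of this
** file): tally the free bytes in a pool.
**
**   static void count_free(void* ptr, size_t size, int used, void* user)
**   {
**       (void)ptr;
**       if (!used)
**           *(size_t*)user += size;
**   }
**   ...
**   size_t free_bytes = 0;
**   tlsf_walk_pool(pool, count_free, &free_bytes);
*/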

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t tlsf_pool_overhead()
{
    return 2 * block_header_overhead;
}

size_t tlsf_alloc_overhead()
{
    return block_header_overhead;
}

pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
{
    block_header_t* block;
    block_header_t* next;

    const size_t pool_overhead = tlsf_pool_overhead();
    const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

    if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
    {
        printf("tlsf_add_pool: Memory must be aligned to %u bytes.\n",
            (unsigned int)ALIGN_SIZE);
        return 0;
    }

    if (pool_bytes < block_size_min || pool_bytes > block_size_max)
    {
#if defined (TLSF_64BIT)
        printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
            (unsigned int)(pool_overhead + block_size_min),
            (unsigned int)((pool_overhead + block_size_max) / 256));
#else
        printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
            (unsigned int)(pool_overhead + block_size_min),
            (unsigned int)(pool_overhead + block_size_max));
#endif
        return 0;
    }

    /*
    ** Create the main free block. Offset the start of the block slightly
    ** so that the prev_phys_block field falls outside of the pool -
    ** it will never be used.
    */
    block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
    block_set_size(block, pool_bytes);
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(tlsf_cast(control_t*, tlsf), block);

    /* Split the block to create a zero-size sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
    block_set_used(next);
    block_set_prev_free(next);

    return mem;
}

void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);

    int fl = 0, sl = 0;

    tlsf_assert(block_is_free(block) && "block should be free");
    tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
    tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}
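/*
** Illustrative usage (hypothetical buffer name): growing an existing
** allocator with a second memory region.
**
**   static char region2[64 * 1024];
**   ...
**   pool_t p = tlsf_add_pool(t, region2, sizeof(region2));
**   // p is 0 if region2 is misaligned or outside the legal size range
*/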

/*
** TLSF main interface.
*/

#if _DEBUG
int test_ffs_fls()
{
    /* Verify ffs/fls work properly. */
    int rv = 0;
    rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
    rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
    rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
    rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
    rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
    rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
    rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
    rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;

#if defined (TLSF_64BIT)
    rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
    rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
    rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
#endif

    if (rv)
    {
        printf("tlsf_create: %x ffs/fls tests failed!\n", rv);
    }
    return rv;
}
#endif

tlsf_t tlsf_create(void* mem)
{
#if _DEBUG
    if (test_ffs_fls())
    {
        return 0;
    }
#endif

    if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
    {
        printf("tlsf_create: Memory must be aligned to %u bytes.\n",
            (unsigned int)ALIGN_SIZE);
        return 0;
    }

    control_construct(tlsf_cast(control_t*, mem));

    return tlsf_cast(tlsf_t, mem);
}

tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
{
    tlsf_t tlsf = tlsf_create(mem);
    tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
    return tlsf;
}

void tlsf_destroy(tlsf_t tlsf)
{
    /* Nothing to do. */
    (void)tlsf;
}

pool_t tlsf_get_pool(tlsf_t tlsf)
{
    return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
}

void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    block_header_t* block = block_locate_free(control, adjust);
    return block_prepare_used(control, block, adjust);
}

void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the pool. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const size_t gap_minimum = sizeof(block_header_t);
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

    /* If alignment is less than or equal to base alignment, we're done. */
    const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

    block_header_t* block = block_locate_free(control, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if (block)
    {
        void* ptr = block_to_ptr(block);
        void* aligned = align_ptr(ptr, align);
        size_t gap = tlsf_cast(size_t,
            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /* If gap size is too small, offset to next aligned boundary. */
        if (gap && gap < gap_minimum)
        {
            const size_t gap_remain = gap_minimum - gap;
            const size_t offset = tlsf_max(gap_remain, align);
            const void* next_aligned = tlsf_cast(void*,
                tlsf_cast(tlsfptr_t, aligned) + offset);

            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(size_t,
                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if (gap)
        {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            block = block_trim_free_leading(control, block, gap);
        }
    }

    return block_prepare_used(control, block, adjust);
}
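/*
** Worked example for the gap logic above (illustrative, 32-bit build, so
** gap_minimum = sizeof(block_header_t) = 16): for tlsf_memalign(t, 64, n),
** if block_to_ptr(block) lands 8 bytes before a 64-byte boundary, gap = 8
** is nonzero but below gap_minimum, so the code advances to the next
** 64-byte boundary, making gap = 72 >= 16, and block_trim_free_leading
** returns those 72 leading bytes to the pool.
*/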
void tlsf_free(tlsf_t tlsf, void* ptr)
{
    /* Don't attempt to free a NULL pointer. */
    if (ptr)
    {
        control_t* control = tlsf_cast(control_t*, tlsf);
        block_header_t* block = block_from_ptr(ptr);
        tlsf_assert(!block_is_free(block) && "block already marked as free");
        block_mark_as_free(block);
        block = block_merge_prev(control, block);
        block = block_merge_next(control, block);
        block_insert(control, block);
    }
}

/*
** The TLSF block information provides us with enough information to
** provide a reasonably intelligent implementation of realloc, growing or
** shrinking the currently allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    void* p = 0;

    /* Zero-size requests are treated as free. */
    if (ptr && size == 0)
    {
        tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if (!ptr)
    {
        p = tlsf_malloc(tlsf, size);
    }
    else
    {
        block_header_t* block = block_from_ptr(ptr);
        block_header_t* next = block_next(block);

        const size_t cursize = block_size(block);
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        tlsf_assert(!block_is_free(block) && "block already marked as free");

        /*
        ** If the next block is used, or when combined with the current
        ** block, does not offer enough space, we must reallocate and copy.
        */
        if (adjust > cursize && (!block_is_free(next) || adjust > combined))
        {
            p = tlsf_malloc(tlsf, size);
            if (p)
            {
                const size_t minsize = tlsf_min(cursize, size);
                memcpy(p, ptr, minsize);
                tlsf_free(tlsf, ptr);
            }
        }
        else
        {
            /* Do we need to expand to the next block? */
            if (adjust > cursize)
            {
                block_merge_next(control, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(control, block, adjust);
            p = ptr;
        }
    }

    return p;
}
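/*
** Illustrative in-place growth (32-bit build): for a 64-byte used block
** followed by a 128-byte free block, tlsf_realloc(t, ptr, 150) computes
**   adjust = 152, combined = 64 + 128 + 4 = 196 >= 152,
** so the two blocks are merged and trimmed back to 152 bytes; ptr is
** returned unchanged and no memcpy occurs.
*/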