A platform for high-performance distributed tool and library development, written in C++. It can be deployed in two cluster modes: standalone or distributed. API documentation for v0.5.0, released on June 13, 2018.
tlsf.cc
1 #include <assert.h>
2 #include <limits.h>
3 #include <stddef.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <string.h>
7 
8 #include "tlsf.h"
9 
10 #if defined(__cplusplus)
11 #define tlsf_decl inline
12 #else
13 #define tlsf_decl static
14 #endif
15 
16 /*
17 ** Architecture-specific bit manipulation routines.
18 **
19 ** TLSF achieves O(1) cost for malloc and free operations by limiting
20 ** the search for a free block to a free list of guaranteed size
21 ** adequate to fulfill the request, combined with efficient free list
22 ** queries using bitmasks and architecture-specific bit-manipulation
23 ** routines.
24 **
25 ** Most modern processors provide instructions to count leading zeroes
26 ** in a word, find the lowest and highest set bit, etc. These
27 ** specific implementations will be used when available, falling back
28 ** to a reasonably efficient generic implementation.
29 **
30 ** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
31 ** ffs/fls return 1-32 by default, returning 0 for error.
32 */
33 
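For reference, the 0-based indexing convention described in the note above produces the following values (these are exactly the cases verified by test_ffs_fls() near the end of this file):

    tlsf_ffs(0x00000000) == -1   /* no bit set                */
    tlsf_ffs(0x00000001) ==  0   /* lowest set bit is bit 0   */
    tlsf_ffs(0x80008000) == 15   /* lowest set bit is bit 15  */
    tlsf_fls(0x80000000) == 31   /* highest set bit is bit 31 */
    tlsf_fls(0x7FFFFFFF) == 30   /* highest set bit is bit 30 */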
34 /*
35 ** Detect whether or not we are building for a 32- or 64-bit (LP/LLP)
36 ** architecture. There is no reliable portable method at compile-time.
37 */
38 #if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) || defined(_WIN64) || \
39  defined(__LP64__) || defined(__LLP64__)
40 #define TLSF_64BIT
41 #endif
42 
43 /*
44 ** gcc 3.4 and above have builtin support, specialized for architecture.
45 ** Some compilers masquerade as gcc; patchlevel test filters them out.
46 */
47 #if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) && \
48  defined(__GNUC_PATCHLEVEL__)
49 
50 
51 tlsf_decl int tlsf_ffs(unsigned int word) {
52  return __builtin_ffs(word) - 1;
53 }
54 
55 
56 tlsf_decl int tlsf_fls(unsigned int word) {
57  const int bit = word ? 32 - __builtin_clz(word) : 0;
58  return bit - 1;
59 }
60 
61 #if defined(TLSF_64BIT)
62 tlsf_decl int tlsf_fls_sizet(size_t size) {
63  const int bit = size ? 64 - __builtin_clzl(size) : 0;
64  return bit - 1;
65 }
66 #endif
67 
68 #else
69 /* Fall back to generic implementation. */
70 
71 tlsf_decl int tlsf_fls_generic(unsigned int word) {
72  int bit = 32;
73 
74  if (!word)
75  bit -= 1;
76  if (!(word & 0xffff0000)) {
77  word <<= 16;
78  bit -= 16;
79  }
80  if (!(word & 0xff000000)) {
81  word <<= 8;
82  bit -= 8;
83  }
84  if (!(word & 0xf0000000)) {
85  word <<= 4;
86  bit -= 4;
87  }
88  if (!(word & 0xc0000000)) {
89  word <<= 2;
90  bit -= 2;
91  }
92  if (!(word & 0x80000000)) {
93  word <<= 1;
94  bit -= 1;
95  }
96 
97  return bit;
98 }
99 
100 /* Implement ffs in terms of fls. */
101 tlsf_decl int tlsf_ffs(unsigned int word) {
102  return tlsf_fls_generic(word & (~word + 1)) - 1;
103 }
104 
105 tlsf_decl int tlsf_fls(unsigned int word) {
106  return tlsf_fls_generic(word) - 1;
107 }
108 
109 /* Possibly 64-bit version of tlsf_fls. */
110 #if defined(TLSF_64BIT)
111 tlsf_decl int tlsf_fls_sizet(size_t size) {
112  int high = (int)(size >> 32);
113  int bits = 0;
114  if (high) {
115  bits = 32 + tlsf_fls(high);
116  } else {
117  bits = tlsf_fls((int)size & 0xffffffff);
118  }
119  return bits;
120 }
121 #endif
122 #endif
123 
124 #if !defined(TLSF_64BIT)
125 #define tlsf_fls_sizet tlsf_fls
126 #endif
127 
128 #undef tlsf_decl
129 
130 /*
131 ** Constants.
132 */
133 
134 /* Public constants: may be modified. */
135 enum tlsf_public {
136  /* log2 of number of linear subdivisions of block sizes. Larger
137  ** values require more memory in the control structure. Values of
138  ** 4 or 5 are typical.
139  */
141 };
142 
143 /* Private constants: do not modify. */
144 enum tlsf_private {
145 #if defined(TLSF_64BIT)
146  /* All allocation sizes and addresses are aligned to 8 bytes. */
147  ALIGN_SIZE_LOG2 = 3,
148 #else
149  /* All allocation sizes and addresses are aligned to 4 bytes. */
150  ALIGN_SIZE_LOG2 = 2,
151 #endif
152  ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
153 
154 /*
155 ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
156 ** However, because we linearly subdivide the second-level lists, and
157 ** our minimum size granularity is 4 bytes, it doesn't make sense to
158 ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
159 ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
160 ** trying to split size ranges into more slots than we have available.
161 ** Instead, we calculate the minimum threshold size, and place all
162 ** blocks below that size into the 0th first-level list.
163 */
164 
165 #if defined(TLSF_64BIT)
166  /*
167  ** TODO: We can increase this to support larger sizes, at the expense
168  ** of more overhead in the TLSF structure.
169  */
170  FL_INDEX_MAX = 39,
171 #else
173 #endif
174  SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
175  FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
176  FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
177 
178  SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
179 };
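As a worked example of the threshold described in the comment above, assume the typical tuning SL_INDEX_COUNT_LOG2 == 5 (its value is not visible in this listing) on a 64-bit build where ALIGN_SIZE_LOG2 == 3:

    SL_INDEX_COUNT   == 1 << 5   == 32    /* second-level slots per first-level list */
    FL_INDEX_SHIFT   == 5 + 3    == 8
    SMALL_BLOCK_SIZE == 1 << 8   == 256   /* bytes */

Under these assumptions, every request smaller than 256 bytes is placed in first-level list 0 and subdivided into 256 / 32 == 8-byte-wide second-level ranges, while larger requests are mapped through tlsf_fls_sizet().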
180 
181 /*
182 ** Cast and min/max macros.
183 */
184 
185 #define tlsf_cast(t, exp) ((t)(exp))
186 #define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
187 #define tlsf_max(a, b) ((a) > (b) ? (a) : (b))
188 
189 /*
190 ** Set assert macro, if it has not been provided by the user.
191 */
192 #if !defined(tlsf_assert)
193 #define tlsf_assert assert
194 #endif
195 
196 /*
197 ** Static assertion mechanism.
198 */
199 
200 #define _tlsf_glue2(x, y) x##y
201 #define _tlsf_glue(x, y) _tlsf_glue2(x, y)
202 #define tlsf_static_assert(exp) typedef char _tlsf_glue(static_assert, __LINE__)[(exp) ? 1 : -1]
203 
204 /* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
205 tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
206 tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
207 tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
208 
209 /* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
210 tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
211 
212 /* Ensure we've properly tuned our sizes. */
213 tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
214 
215 /*
216 ** Data structures and associated constants.
217 */
218 
219 /*
220 ** Block header structure.
221 **
222 ** There are several implementation subtleties involved:
223 ** - The prev_phys_block field is only valid if the previous block is free.
224 ** - The prev_phys_block field is actually stored at the end of the
225 ** previous block. It appears at the beginning of this structure only to
226 ** simplify the implementation.
227 ** - The next_free / prev_free fields are only valid if the block is free.
228 */
229 typedef struct block_header_t {
230  /* Points to the previous physical block. */
231  struct block_header_t* prev_phys_block;
232 
233  /* The size of this block, excluding the block header. */
234  size_t size;
235 
236  /* Next and previous free blocks. */
237  struct block_header_t* next_free;
238  struct block_header_t* prev_free;
239 } block_header_t;
240 
241 /*
242 ** Since block sizes are always at least a multiple of 4, the two least
243 ** significant bits of the size field are used to store the block status:
244 ** - bit 0: whether block is busy or free
245 ** - bit 1: whether previous block is busy or free
246 */
247 static const size_t block_header_free_bit = 1 << 0;
248 static const size_t block_header_prev_free_bit = 1 << 1;
249 
250 /*
251 ** The size of the block header exposed to used blocks is the size field.
252 ** The prev_phys_block field is stored *inside* the previous free block.
253 */
254 static const size_t block_header_overhead = sizeof(size_t);
255 
256 /* User data starts directly after the size field in a used block. */
257 static const size_t block_start_offset = offsetof(block_header_t, size) + sizeof(size_t);
258 
259 /*
260 ** A free block must be large enough to store its header minus the size of
261 ** the prev_phys_block field, and no larger than the number of addressable
262 ** bits for FL_INDEX.
263 */
264 static const size_t block_size_min = sizeof(block_header_t) - sizeof(block_header_t*);
265 static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;
266 
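To make the header layout above concrete, here is a sketch of the resulting constants on a typical LP64 build (8-byte pointers and size_t); these figures are illustrative and are not asserted anywhere in this file:

    sizeof(block_header_t) == 32   /* prev_phys_block + size + next_free + prev_free */
    block_header_overhead  ==  8   /* only the size field precedes user data in a used block */
    block_start_offset     == 16   /* user data begins 16 bytes after the header start */
    block_size_min         == 24   /* sizeof(block_header_t) - sizeof(block_header_t*) */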
267 
268 #define TLSF_INCREASE_REAL_USED(control, increment) \
269  do { \
270  control->real_used += (increment); \
271  control->max_used = tlsf_max(control->real_used, control->max_used); \
272  } while (0)
273 #define TLSF_INCREASE_FRAGMENTS(control) \
274  do { \
275  control->fragments++; \
276  control->max_fragments = tlsf_max(control->fragments, control->max_fragments); \
277  } while (0)
278 
279 /* The TLSF control structure. */
280 typedef struct control_t {
281  /* Empty lists point at this block to indicate they are free. */
282  block_header_t block_null;
283  size_t total_size;
284  size_t allocated;
285  size_t real_used;
286  size_t max_used;
287  size_t fragments;
288  size_t max_fragments;
289 
290  /* Bitmaps for free lists. */
291  unsigned int fl_bitmap;
292  unsigned int sl_bitmap[FL_INDEX_COUNT];
293 
294  /* Head of free lists. */
295  block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
296 } control_t;
297 
298 /* A type used for casting when doing pointer arithmetic. */
299 typedef ptrdiff_t tlsfptr_t;
300 
301 /*
302 ** block_header_t member functions.
303 */
304 
305 static size_t block_size(const block_header_t* block) {
306  return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
307 }
308 
309 static void block_set_size(block_header_t* block, size_t size) {
310  const size_t oldsize = block->size;
311  block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
312 }
313 
314 static int block_is_last(const block_header_t* block) {
315  return block_size(block) == 0;
316 }
317 
318 static int block_is_free(const block_header_t* block) {
319  return tlsf_cast(int, block->size & block_header_free_bit);
320 }
321 
322 static void block_set_free(block_header_t* block) {
323  block->size |= block_header_free_bit;
324 }
325 
326 static void block_set_used(block_header_t* block) {
327  block->size &= ~block_header_free_bit;
328 }
329 
330 static int block_is_prev_free(const block_header_t* block) {
331  return tlsf_cast(int, block->size & block_header_prev_free_bit);
332 }
333 
334 static void block_set_prev_free(block_header_t* block) {
335  block->size |= block_header_prev_free_bit;
336 }
337 
338 static void block_set_prev_used(block_header_t* block) {
339  block->size &= ~block_header_prev_free_bit;
340 }
341 
342 static block_header_t* block_from_ptr(const void* ptr) {
343  return tlsf_cast(block_header_t*, tlsf_cast(unsigned char*, ptr) - block_start_offset);
344 }
345 
346 static void* block_to_ptr(const block_header_t* block) {
347  return tlsf_cast(void*, tlsf_cast(unsigned char*, block) + block_start_offset);
348 }
349 
350 /* Return location of next block after block of given size. */
351 static block_header_t* offset_to_block(const void* ptr, size_t size) {
352  return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
353 }
354 
355 /* Return location of previous block. */
356 static block_header_t* block_prev(const block_header_t* block) {
357  // tlsf_assert(block_is_prev_free(block) && "previous block must be free");
358  return block->prev_phys_block;
359 }
360 
361 /* Return location of next existing block. */
362 static block_header_t* block_next(const block_header_t* block) {
363  block_header_t* next =
364  offset_to_block(block_to_ptr(block), block_size(block) - block_header_overhead);
365  tlsf_assert(!block_is_last(block));
366  return next;
367 }
368 
369 /* Link a new block with its physical neighbor, return the neighbor. */
370 static block_header_t* block_link_next(block_header_t* block) {
371  block_header_t* next = block_next(block);
372  next->prev_phys_block = block;
373  return next;
374 }
375 
376 static void block_mark_as_free(block_header_t* block) {
377  /* Link the block to the next block, first. */
378  block_header_t* next = block_link_next(block);
379  block_set_prev_free(next);
380  block_set_free(block);
381 }
382 
383 static void block_mark_as_used(block_header_t* block) {
384  block_header_t* next = block_next(block);
385  block_set_prev_used(next);
386  block_set_used(block);
387 }
388 
389 static size_t align_up(size_t x, size_t align) {
390  tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
391  return (x + (align - 1)) & ~(align - 1);
392 }
393 
394 static size_t align_down(size_t x, size_t align) {
395  tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
396  return x - (x & (align - 1));
397 }
398 
399 static void* align_ptr(const void* ptr, size_t align) {
400  const tlsfptr_t aligned = (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
401  tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
402  return tlsf_cast(void*, aligned);
403 }
404 
405 /*
406 ** Adjust an allocation size to be aligned to word size, and no smaller
407 ** than internal minimum.
408 */
409 static size_t adjust_request_size(size_t size, size_t align) {
410  size_t adjust = 0;
411  if (size && size < block_size_max) {
412  const size_t aligned = align_up(size, align);
413  adjust = tlsf_max(aligned, block_size_min);
414  }
415  return adjust;
416 }
417 
418 /*
419 ** TLSF utility functions. In most cases, these are direct translations of
420 ** the documentation found in the white paper.
421 */
422 
423 static void mapping_insert(size_t size, int* fli, int* sli) {
424  int fl, sl;
425  if (size < SMALL_BLOCK_SIZE) {
426  /* Store small blocks in first list. */
427  fl = 0;
428  sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
429  } else {
430  fl = tlsf_fls_sizet(size);
431  sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
432  fl -= (FL_INDEX_SHIFT - 1);
433  }
434  *fli = fl;
435  *sli = sl;
436 }
437 
438 /* This version rounds up to the next block size (for allocations) */
439 static void mapping_search(size_t size, int* fli, int* sli) {
440  if (size >= SMALL_BLOCK_SIZE) {
441  const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
442  size += round;
443  }
444  mapping_insert(size, fli, sli);
445 }
446 
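For illustration, a worked pass through mapping_insert() for a 460-byte block, assuming SL_INDEX_COUNT_LOG2 == 5 and FL_INDEX_SHIFT == 8 (so SMALL_BLOCK_SIZE == 256; these constant values are assumptions, as noted earlier):

    /* 460 >= SMALL_BLOCK_SIZE, so the large-block path is taken */
    fl = tlsf_fls_sizet(460);          /* == 8, since 256 <= 460 < 512 */
    sl = (460 >> (8 - 5)) ^ (1 << 5);  /* == 57 ^ 32 == 25             */
    fl -= (FL_INDEX_SHIFT - 1);        /* == 8 - 7 == 1                */
    /* the block is therefore filed under blocks[1][25] */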
447 static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli) {
448  int fl = *fli;
449  int sl = *sli;
450 
451  /*
452  ** First, search for a block in the list associated with the given
453  ** fl/sl index.
454  */
455  unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
456  if (!sl_map) {
457  /* No block exists. Search in the next largest first-level list. */
458  const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
459  if (!fl_map) {
460  /* No free blocks available, memory has been exhausted. */
461  return 0;
462  }
463 
464  fl = tlsf_ffs(fl_map);
465  *fli = fl;
466  sl_map = control->sl_bitmap[fl];
467  }
468  tlsf_assert(sl_map && "internal error - second level bitmap is null");
469  sl = tlsf_ffs(sl_map);
470  *sli = sl;
471 
472  /* Return the first block in the free list. */
473  return control->blocks[fl][sl];
474 }
475 
476 /* Remove a free block from the free list.*/
477 static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl) {
478  block_header_t* prev = block->prev_free;
479  block_header_t* next = block->next_free;
480  tlsf_assert(prev && "prev_free field can not be null");
481  tlsf_assert(next && "next_free field can not be null");
482  next->prev_free = prev;
483  prev->next_free = next;
484 
485  /* If this block is the head of the free list, set new head. */
486  if (control->blocks[fl][sl] == block) {
487  control->blocks[fl][sl] = next;
488 
489  /* If the new head is null, clear the bitmap. */
490  if (next == &control->block_null) {
491  control->sl_bitmap[fl] &= ~(1 << sl);
492 
493  /* If the second bitmap is now empty, clear the fl bitmap. */
494  if (!control->sl_bitmap[fl]) {
495  control->fl_bitmap &= ~(1 << fl);
496  }
497  }
498  }
499 }
500 
501 /* Insert a free block into the free block list. */
502 static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl) {
503  block_header_t* current = control->blocks[fl][sl];
504  tlsf_assert(current && "free list cannot have a null entry");
505  tlsf_assert(block && "cannot insert a null entry into the free list");
506  block->next_free = current;
507  block->prev_free = &control->block_null;
508  current->prev_free = block;
509 
510  tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE) &&
511  "block not aligned properly");
512  /*
513  ** Insert the new block at the head of the list, and mark the first-
514  ** and second-level bitmaps appropriately.
515  */
516  control->blocks[fl][sl] = block;
517  control->fl_bitmap |= (1 << fl);
518  control->sl_bitmap[fl] |= (1 << sl);
519  TLSF_INCREASE_FRAGMENTS(control);
520 }
521 
522 /* Remove a given block from the free list. */
523 static void block_remove(control_t* control, block_header_t* block) {
524  int fl, sl;
525  mapping_insert(block_size(block), &fl, &sl);
526  remove_free_block(control, block, fl, sl);
527 }
528 
529 /* Insert a given block into the free list. */
530 static void block_insert(control_t* control, block_header_t* block) {
531  int fl, sl;
532  mapping_insert(block_size(block), &fl, &sl);
533  insert_free_block(control, block, fl, sl);
534 }
535 
536 static int block_can_split(block_header_t* block, size_t size) {
537  return block_size(block) >= sizeof(block_header_t) + size;
538 }
539 
540 /* Split a block into two, the second of which is free. */
541 static block_header_t* block_split(block_header_t* block, size_t size) {
542  /* Calculate the amount of space left in the remaining block. */
543  block_header_t* remaining = offset_to_block(block_to_ptr(block), size - block_header_overhead);
544 
545  const size_t remain_size = block_size(block) - (size + block_header_overhead);
546 
547  tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE) &&
548  "remaining block not aligned properly");
549 
550  tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
551  block_set_size(remaining, remain_size);
552  tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
553 
554  block_set_size(block, size);
555  block_mark_as_free(remaining);
556 
557  return remaining;
558 }
559 
560 /* Absorb a free block's storage into an adjacent previous free block. */
561 static block_header_t* block_absorb(block_header_t* prev, block_header_t* block) {
562  tlsf_assert(!block_is_last(prev) && "previous block can't be last");
563  /* Note: Leaves flags untouched. */
564  prev->size += block_size(block) + block_header_overhead;
565  block_link_next(prev);
566  return prev;
567 }
568 
569 /* Merge a just-freed block with an adjacent previous free block. */
570 static block_header_t* block_merge_prev(control_t* control, block_header_t* block) {
571  if (block_is_prev_free(block)) {
572  block_header_t* prev = block_prev(block);
573  tlsf_assert(prev && "prev physical block can't be null");
574  tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
575  block_remove(control, prev);
576  block = block_absorb(prev, block);
577  }
578 
579  return block;
580 }
581 
582 /* Merge a just-freed block with an adjacent free block. */
583 static block_header_t* block_merge_next(control_t* control, block_header_t* block) {
584  block_header_t* next = block_next(block);
585  tlsf_assert(next && "next physical block can't be null");
586 
587  if (block_is_free(next)) {
588  tlsf_assert(!block_is_last(block) && "previous block can't be last");
589  block_remove(control, next);
590  block = block_absorb(block, next);
591  }
592 
593  return block;
594 }
595 
596 /* Trim any trailing block space off the end of a block, return to pool. */
597 static void block_trim_free(control_t* control, block_header_t* block, size_t size) {
598  tlsf_assert(block_is_free(block) && "block must be free");
599  if (block_can_split(block, size)) {
600  block_header_t* remaining_block = block_split(block, size);
601  block_link_next(block);
602  block_set_prev_free(remaining_block);
603  block_insert(control, remaining_block);
604  }
605 }
606 
607 /* Trim any trailing block space off the end of a used block, return to pool. */
608 static void block_trim_used(control_t* control, block_header_t* block, size_t size) {
609  tlsf_assert(!block_is_free(block) && "block must be used");
610  if (block_can_split(block, size)) {
611  /* If the next block is free, we must coalesce. */
612  block_header_t* remaining_block = block_split(block, size);
613  block_set_prev_used(remaining_block);
614 
615  remaining_block = block_merge_next(control, remaining_block);
616  block_insert(control, remaining_block);
617  }
618 }
619 
619 
620 static block_header_t* block_trim_free_leading(control_t* control,
621  block_header_t* block,
622  size_t size) {
623  block_header_t* remaining_block = block;
624  if (block_can_split(block, size)) {
625  /* We want the 2nd block. */
626  remaining_block = block_split(block, size - block_header_overhead);
627  block_set_prev_free(remaining_block);
628 
629  block_link_next(block);
630  block_insert(control, block);
631  }
632 
633  return remaining_block;
634 }
635 
636 static block_header_t* block_locate_free(control_t* control, size_t size) {
637  int fl = 0, sl = 0;
638  block_header_t* block = 0;
639 
640  if (size) {
641  mapping_search(size, &fl, &sl);
642  block = search_suitable_block(control, &fl, &sl);
643  }
644 
645  if (block) {
646  tlsf_assert(block_size(block) >= size);
647  remove_free_block(control, block, fl, sl);
648  }
649 
650  return block;
651 }
652 
653 static void* block_prepare_used(control_t* control, block_header_t* block, size_t size) {
654  void* p = 0;
655  if (block) {
656  block_trim_free(control, block, size);
657  block_mark_as_used(block);
658  p = block_to_ptr(block);
659  TLSF_INCREASE_REAL_USED(control,
660  block_size(block) + ((char*)p - (char*)block
661  /* prev_phys_block is melted in the previous
662  block when the current block is used */
663  +
664  sizeof(block->prev_phys_block)));
665  control->allocated += block_size(block);
666  }
667  return p;
668 }
669 
670 /* Clear structure and point all empty lists at the null block. */
671 static void control_construct(control_t* control) {
672  int i, j;
673 
674  control->block_null.next_free = &control->block_null;
675  control->block_null.prev_free = &control->block_null;
676 
677  control->fl_bitmap = 0;
678  for (i = 0; i < FL_INDEX_COUNT; ++i) {
679  control->sl_bitmap[i] = 0;
680  for (j = 0; j < SL_INDEX_COUNT; ++j) {
681  control->blocks[i][j] = &control->block_null;
682  }
683  }
684 }
685 
686 /*
687 ** Debugging utilities.
688 */
689 
690 typedef struct integrity_t {
691  int prev_status;
692  int status;
693 } integrity_t;
694 
695 #define tlsf_insist(x) \
696  { \
697  tlsf_assert(x); \
698  if (!(x)) { \
699  status--; \
700  } \
701  }
702 
703 static void integrity_walker(void* ptr, size_t size, int used, void* user) {
704  block_header_t* block = block_from_ptr(ptr);
705  integrity_t* integ = tlsf_cast(integrity_t*, user);
706  const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
707  const int this_status = block_is_free(block) ? 1 : 0;
708  const size_t this_block_size = block_size(block);
709 
710  int status = 0;
711  (void)used;
712  tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
713  tlsf_insist(size == this_block_size && "block size incorrect");
714 
715  integ->prev_status = this_status;
716  integ->status += status;
717 }
718 
719 int tlsfAllocator::tlsf_check(tlsf_t tlsf) {
720  int i, j;
721 
722  control_t* control = tlsf_cast(control_t*, tlsf);
723  int status = 0;
724 
725  /* Check that the free lists and bitmaps are accurate. */
726  for (i = 0; i < FL_INDEX_COUNT; ++i) {
727  for (j = 0; j < SL_INDEX_COUNT; ++j) {
728  const int fl_map = control->fl_bitmap & (1 << i);
729  const int sl_list = control->sl_bitmap[i];
730  const int sl_map = sl_list & (1 << j);
731  const block_header_t* block = control->blocks[i][j];
732 
733  /* Check that first- and second-level lists agree. */
734  if (!fl_map) {
735  tlsf_insist(!sl_map && "second-level map must be null");
736  }
737 
738  if (!sl_map) {
739  tlsf_insist(block == &control->block_null && "block list must be null");
740  continue;
741  }
742 
743  /* Check that there is at least one free block. */
744  tlsf_insist(sl_list && "no free blocks in second-level map");
745  tlsf_insist(block != &control->block_null && "block should not be null");
746 
747  while (block != &control->block_null) {
748  int fli, sli;
749  tlsf_insist(block_is_free(block) && "block should be free");
750  tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
751  tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
752  tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
753  tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
754 
755  mapping_insert(block_size(block), &fli, &sli);
756  tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
757  block = block->next_free;
758  }
759  }
760  }
761 
762  return status;
763 }
764 
765 #undef tlsf_insist
766 
767 static void default_walker(void* ptr, size_t size, int used, void* user) {
768  (void)user;
769  printf("\t%p %s size: %x (%p)\n",
770  ptr,
771  used ? "used" : "free",
772  (unsigned int)size,
773  block_from_ptr(ptr));
774 }
775 
776 void tlsfAllocator::tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user) {
777  tlsf_walker pool_walker = walker ? walker : default_walker;
778  block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
779 
780  while (block && !block_is_last(block)) {
781  pool_walker(block_to_ptr(block), block_size(block), !block_is_free(block), user);
782  block = block_next(block);
783  }
784 }
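For illustration, a caller can pass its own tlsf_walker to inspect a pool. A minimal sketch, assuming the interface functions are callable as static members of tlsfAllocator (the class declaration lives in tlsf.h and is not shown here) and that tlsf comes from an earlier tlsf_create call; count_used and total are illustrative names, not part of tlsf.cc:

    /* Sum the bytes held by used blocks in a pool. */
    static void count_used(void* ptr, size_t size, int used, void* user) {
        (void)ptr;
        if (used)
            *static_cast<size_t*>(user) += size;
    }

    size_t total = 0;
    tlsfAllocator::tlsf_walk_pool(tlsfAllocator::tlsf_get_pool(tlsf), count_used, &total);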
785 
786 size_t tlsfAllocator::tlsf_block_size(void* ptr) {
787  size_t size = 0;
788  if (ptr) {
789  const block_header_t* block = block_from_ptr(ptr);
790  size = block_size(block);
791  }
792  return size;
793 }
794 
794 
795 int tlsfAllocator::tlsf_check_pool(pool_t pool) {
796  /* Check that the blocks are physically correct. */
797  integrity_t integ = {0, 0};
798  tlsf_walk_pool(pool, integrity_walker, &integ);
799 
800  return integ.status;
801 }
802 
803 /*
804 ** Size of the TLSF structures in a given memory block passed to
805 ** tlsf_create, equal to the size of a control_t
806 */
807 size_t tlsfAllocator::tlsf_size(void) {
808  return sizeof(control_t);
809 }
810 
811 size_t tlsfAllocator::tlsf_align_size(void) {
812  return ALIGN_SIZE;
813 }
814 
815 size_t tlsfAllocator::tlsf_block_size_min(void) {
816  return block_size_min;
817 }
818 
819 size_t tlsfAllocator::tlsf_block_size_max(void) {
820  return block_size_max;
821 }
822 
823 /*
824 ** Overhead of the TLSF structures in a given memory block passed to
825 ** tlsf_add_pool, equal to the overhead of a free block and the
826 ** sentinel block.
827 */
828 size_t tlsfAllocator::tlsf_pool_overhead(void) {
829  return 2 * block_header_overhead;
830 }
831 
832 size_t tlsfAllocator::tlsf_alloc_overhead(void) {
833  return block_header_overhead;
834 }
835 
836 pool_t tlsfAllocator::tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes) {
837  block_header_t* block;
838  block_header_t* next;
839 
840  const size_t pool_overhead = tlsf_pool_overhead();
841  const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
842 
843  if (((ptrdiff_t)mem % ALIGN_SIZE) != 0) {
844  printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n", (unsigned int)ALIGN_SIZE);
845  return 0;
846  }
847 
848  if (pool_bytes < block_size_min || pool_bytes > block_size_max) {
849 #if defined(TLSF_64BIT)
850  printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
851  (unsigned int)(pool_overhead + block_size_min),
852  (unsigned int)((pool_overhead + block_size_max) / 256));
853 #else
854  printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
855  (unsigned int)(pool_overhead + block_size_min),
856  (unsigned int)(pool_overhead + block_size_max));
857 #endif
858  return 0;
859  }
860 
861  /*
862  ** Create the main free block. Offset the start of the block slightly
863  ** so that the prev_phys_block field falls outside of the pool -
864  ** it will never be used.
865  */
866  block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
867  block_set_size(block, pool_bytes);
868  block_set_free(block);
869  block_set_prev_used(block);
870  block_insert(tlsf_cast(control_t*, tlsf), block);
871  tlsf_cast(control_t*, tlsf)->total_size += block_size(block);
872  /* Split the block to create a zero-size sentinel block. */
873  next = block_link_next(block);
874  block_set_size(next, 0);
875  block_set_used(next);
876  block_set_prev_free(next);
877 
878  return mem;
879 }
880 
881 void tlsfAllocator::tlsf_remove_pool(tlsf_t tlsf, pool_t pool) {
882  control_t* control = tlsf_cast(control_t*, tlsf);
883  block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
884 
885  int fl = 0, sl = 0;
886 
887  tlsf_assert(block_is_free(block) && "block should be free");
888  tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
889  tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");
890 
891  mapping_insert(block_size(block), &fl, &sl);
892  remove_free_block(control, block, fl, sl);
893  tlsf_cast(control_t*, tlsf)->total_size -= block_size(block);
894 }
895 
896 /*
897 ** TLSF main interface.
898 */
899 
900 #if _DEBUG
901 int test_ffs_fls() {
902  /* Verify ffs/fls work properly. */
903  int rv = 0;
904  rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
905  rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
906  rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
907  rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
908  rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
909  rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
910  rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
911  rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;
912 
913 #if defined(TLSF_64BIT)
914  rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
915  rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
916  rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
917 #endif
918 
919  if (rv) {
920  printf("test_ffs_fls: %x ffs/fls tests failed.\n", rv);
921  }
922  return rv;
923 }
924 #endif
925 
926 tlsf_t tlsfAllocator::tlsf_create(void* mem) {
927 #if _DEBUG
928  if (test_ffs_fls()) {
929  return 0;
930  }
931 #endif
932 
933  if (((tlsfptr_t)mem % ALIGN_SIZE) != 0) {
934  printf("tlsf_create: Memory must be aligned to %u bytes.\n", (unsigned int)ALIGN_SIZE);
935  return 0;
936  }
937 
938  control_construct(tlsf_cast(control_t*, mem));
939  tlsf_cast(control_t*, mem)->real_used = tlsf_size();
940  tlsf_cast(control_t*, mem)->max_used = tlsf_size();
941  tlsf_cast(control_t*, mem)->allocated = 0;
942  tlsf_cast(control_t*, mem)->total_size = tlsf_size();
943  tlsf_cast(control_t*, mem)->fragments = 0;
944  tlsf_cast(control_t*, mem)->max_fragments = 0;
945  return tlsf_cast(tlsf_t, mem);
946 }
947 
948 tlsf_t tlsfAllocator::tlsf_create_with_pool(void* mem, size_t bytes) {
949  tlsf_t tlsf = tlsf_create(mem);
950  tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
951  return tlsf;
952 }
953 
954 void tlsfAllocator::tlsf_destroy(tlsf_t tlsf) {
955  /* Nothing to do. */
956  (void)tlsf;
957 }
958 
959 pool_t tlsfAllocator::tlsf_get_pool(tlsf_t tlsf) {
960  return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
961 }
962 
963 void* tlsfAllocator::tlsf_malloc(tlsf_t tlsf, size_t size) {
964  control_t* control = tlsf_cast(control_t*, tlsf);
965  const size_t adjust = adjust_request_size(size ? size : 4, ALIGN_SIZE);
966  block_header_t* block = block_locate_free(control, adjust);
967  return block_prepare_used(control, block, adjust);
968 }
969 
970 void* tlsfAllocator::tlsf_mallocxz(tlsf_t tlsf, size_t size) {
971  void* p;
972  p = tlsf_malloc(tlsf, size);
973  if (p)
974  memset(p, 0, size);
975 
976  return p;
977 }
978 
979 
980 void tlsfAllocator::tlsf_free(tlsf_t tlsf, void* ptr) {
981  /* Don't attempt to free a NULL pointer. */
982  if (ptr) {
983  control_t* control = tlsf_cast(control_t*, tlsf);
984  block_header_t* block = block_from_ptr(ptr);
985  control->allocated -= block_size(block);
986  control->real_used -=
987  (block_size(block) +
988  ((char*)ptr - (char*)block
989  /* prev_phys_block is melted in the previous block when the current block is used */
990  +
991  sizeof(block->prev_phys_block)));
992  block_mark_as_free(block);
993  block = block_merge_prev(control, block);
994  block = block_merge_next(control, block);
995  block_insert(control, block);
996  }
997 }
998 
999 /*
1000 ** The TLSF block information provides us with enough information to
1001 ** provide a reasonably intelligent implementation of realloc, growing or
1002 ** shrinking the currently allocated block as required.
1003 **
1004 ** This routine handles the somewhat esoteric edge cases of realloc:
1005 ** - a non-zero size with a null pointer will behave like malloc
1006 ** - a zero size with a non-null pointer will behave like free
1007 ** - a request that cannot be satisfied will leave the original buffer
1008 ** untouched
1009 ** - an extended buffer size will leave the newly-allocated area with
1010 ** contents undefined
1011 */
1012 void* tlsfAllocator::tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size) {
1013  control_t* control = tlsf_cast(control_t*, tlsf);
1014  void* p = 0;
1015 
1016  /* Zero-size requests are treated as free. */
1017  if (ptr && size == 0) {
1018  tlsf_free(tlsf, ptr);
1019  }
1020  /* Requests with NULL pointers are treated as malloc. */
1021  else if (!ptr) {
1022  p = tlsf_malloc(tlsf, size);
1023  } else {
1024  block_header_t* block = block_from_ptr(ptr);
1025  block_header_t* next = block_next(block);
1026 
1027  const size_t cursize = block_size(block);
1028  const size_t combined = cursize + block_size(next) + block_header_overhead;
1029  const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
1030 
1031  tlsf_assert(!block_is_free(block) && "block already marked as free");
1032 
1033  /*
1034  ** If the next block is used, or when combined with the current
1035  ** block, does not offer enough space, we must reallocate and copy.
1036  */
1037  if (adjust > cursize && (!block_is_free(next) || adjust > combined)) {
1038  p = tlsf_malloc(tlsf, size);
1039  if (p) {
1040  const size_t minsize = tlsf_min(cursize, size);
1041  memcpy(p, ptr, minsize);
1042  tlsf_free(tlsf, ptr);
1043  }
1044  } else {
1045  control->allocated -= block_size(block);
1046  control->real_used -= block_size(block);
1047  /* Do we need to expand to the next block? */
1048  if (adjust > cursize) {
1049  block_merge_next(control, block);
1050  block_mark_as_used(block);
1051  }
1052 
1053  /* Trim the resulting block and return the original pointer. */
1054  block_trim_used(control, block, adjust);
1055  p = ptr;
1056  control->allocated += block_size(block);
1057  TLSF_INCREASE_REAL_USED(control, block_size(block));
1058  }
1059  }
1060 
1061  return p;
1062 }
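The realloc edge cases listed in the comment above can be exercised directly; a brief illustration, using the same assumed static-member call style as the other sketches on this page, with tlsf created earlier:

    void* p = tlsfAllocator::tlsf_realloc(tlsf, NULL, 64);  /* NULL pointer: behaves like tlsf_malloc        */
    p = tlsfAllocator::tlsf_realloc(tlsf, p, 128);          /* grow: first 64 bytes of contents preserved    */
    p = tlsfAllocator::tlsf_realloc(tlsf, p, 0);            /* zero size: behaves like tlsf_free, returns 0  */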
1063 
1064 
1065 void* tlsfAllocator::tlsf_reallocxf(tlsf_t tlsf, void* ptr, size_t size) {
1066  void* r;
1067  r = tlsf_realloc(tlsf, ptr, size);
1068  if (!r && ptr) {
1069  tlsf_free(tlsf, ptr);
1070  }
1071  return r;
1072 }
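Putting the interface together, a minimal usage sketch. It assumes the functions above are exposed as static members of tlsfAllocator and that the caller supplies a sufficiently large, suitably aligned buffer; backing_store and the sizes used are illustrative only:

    alignas(8) static char backing_store[1 << 20];  /* 1 MiB buffer, aligned for ALIGN_SIZE */

    /* Carve the control structure plus one pool out of the buffer. */
    tlsf_t tlsf = tlsfAllocator::tlsf_create_with_pool(backing_store, sizeof(backing_store));

    void* p = tlsfAllocator::tlsf_malloc(tlsf, 128);   /* O(1) allocation                             */
    p = tlsfAllocator::tlsf_realloc(tlsf, p, 256);     /* grown in place when the next block is free  */
    tlsfAllocator::tlsf_free(tlsf, p);                 /* O(1) free with immediate coalescing         */

    /* Optional consistency checks, useful in debug builds. */
    tlsfAllocator::tlsf_check(tlsf);
    tlsfAllocator::tlsf_check_pool(tlsfAllocator::tlsf_get_pool(tlsf));

    tlsfAllocator::tlsf_destroy(tlsf);                 /* no-op; the caller owns backing_store */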