A platform for high-performance distributed tool and library development written in C++. It can be deployed in two different cluster modes: standalone or distributed. API for v0.5.0, released on June 13, 2018.
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
SlabAllocator.cc
Go to the documentation of this file.
1 
6 #ifndef SLAB_ALLOCATOR_CC
7 #define SLAB_ALLOCATOR_CC
8 
9 /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
10 /*
11  * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
12  * and are divided into chunks. The chunk sizes start off at the size of the
13  * "item" structure plus space for a small key and value. They increase by
14  * a multiplier factor from there, up to half the maximum slab size. The last
15  * slab size is always 1MB, since that's the maximum item size allowed by the
16  * memcached protocol.
17  */
18 #include <sys/stat.h>
19 #include <sys/socket.h>
20 #include <sys/signal.h>
21 #include <sys/resource.h>
22 #include <fcntl.h>
23 #include <netinet/in.h>
24 #include <errno.h>
25 #include <stdlib.h>
26 #include <stdio.h>
27 #include <string.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <iostream>
31 #include "SlabAllocator.h"
32 
33 SlabAllocator::SlabAllocator(const size_t limit, bool opt4hashmap) {
34  this->opt4hashmap = opt4hashmap;
35  if (this->opt4hashmap == true) {
36  settings.verbose = 0;
37  settings.factor = 3 * 128 * 1024;
38  settings.chunk_size = 24;
39  settings.item_size_max = 9 * 1024 * 1024;
40  settings.slab_reassign = false;
41  } else {
42  settings.verbose = 0;
43  settings.factor = 1.25;
44  settings.chunk_size = 8;
45  settings.item_size_max = 1024;
46  settings.slab_reassign = false;
47  }
48  this->init(limit, settings.factor, true);
49  pthread_mutex_init(&slabs_lock, nullptr);
50 }
51 
52 SlabAllocator::SlabAllocator(void* memPool, const size_t limit, size_t pageSize, size_t alignment) {
53  settings.verbose = 0;
54  settings.factor = 2;
55  settings.chunk_size = pageSize + alignment;
56  settings.item_size_max = pageSize + alignment;
57  settings.slab_reassign = false;
58  this->opt4sharedmem = true;
59  this->mem_base = memPool;
60  this->init(limit, settings.factor, true);
61  pthread_mutex_init(&slabs_lock, nullptr);
62 }
63 
64 SlabAllocator::SlabAllocator(void* memPool, const size_t limit, bool opt4hashmap) {
65  this->opt4hashmap = opt4hashmap;
66  if (this->opt4hashmap == true) {
67  settings.verbose = 0;
68  settings.factor = 3 * 128 * 1024;
69  settings.chunk_size = 24;
70  settings.item_size_max = 9 * 1024 * 1024;
71  settings.slab_reassign = false;
72  } else {
73  settings.verbose = 0;
74  settings.factor = 2;
75  settings.chunk_size = 8;
76  settings.item_size_max = 1 * 1024 * 1024;
77  settings.slab_reassign = false;
78  }
79  this->useExternalMemory = true;
80  this->mem_base = memPool;
81  this->init(limit, settings.factor, true);
82  pthread_mutex_init(&slabs_lock, nullptr);
83 }
84 
85 
// Destructor body — the signature line (doxygen line 86) is elided in
// this generated listing.
// Releases the big backing chunk when the allocator malloc'ed it
// itself, then frees each class's page-pointer array and the lock.
// NOTE(review): slab pages obtained via malloc() inside
// memory_allocate() (the mem_base == nullptr path) do not appear to be
// freed anywhere here — possible leak; confirm against the full source.
 87  if ((mem_base != nullptr) && (useExternalMemory == false)) {
// Only free the pool we own; externally supplied memory belongs to the caller.
 88  free(mem_base);
 89  }
// Free each class's slab_list array (the page-pointer bookkeeping,
// not the pages themselves).
 90  for (int i = POWER_SMALLEST; i < MAX_NUMBER_OF_SLAB_CLASSES - 1; i++) {
 91  slabclass_t* p = &slabclass[i];
 92  if (p->slab_list != nullptr) {
 93  free(p->slab_list);
 94  }
 95  }
 96  pthread_mutex_destroy(&slabs_lock);
 97 }
98 
// Build the slab-class table: chunk sizes start at
// sizeof(item) + chunk_size and grow geometrically by `factor` until
// they would exceed item_size_max / factor, each rounded up to
// CHUNK_ALIGN_BYTES. Optionally pre-allocates the whole memory pool in
// one malloc. NOTE(review): doxygen elided source lines 112-113,
// 134 and 154-155 from this listing; presumably they set up
// mem_current/mem_avail, the small-class perslab count, and the
// largest (item_size_max) class — confirm against the full source.
 99 void SlabAllocator::init(const size_t limit, const double factor, const bool prealloc) {
 100  int i = POWER_SMALLEST - 1;
 101  unsigned int size = sizeof(item) + settings.chunk_size;
 102  // cout << "sizeof(item)="<<sizeof(item)<<"\n";
 103  // cout << "settings.chunk_size="<<settings.chunk_size<<"\n";
 104  mem_limit = limit;
 105 
 106  if (prealloc) {
 107  /* Allocate everything in a big chunk with malloc */
// Respect an externally supplied pool: only malloc when none was given.
 108  if (this->mem_base == nullptr) {
 109  mem_base = malloc(mem_limit);
 110  }
// [elided lines 112-113: presumably initialize mem_current/mem_avail
// from mem_base/mem_limit — TODO confirm]
 111  if (mem_base != nullptr) {
 114  } else {
// Allocation failure is non-fatal: fall back to per-slab mallocs.
 115  fprintf(stderr,
 116  "Warning: Failed to allocate requested memory in"
 117  " one large chunk.\nWill allocate in smaller chunks\n");
 118  }
 119  }
 120  memset(slabclass, 0, sizeof(slabclass));
// Fill classes while the payload still fits below item_size_max/factor
// and a table slot remains (the last slot is reserved).
 121  while (++i < MAX_NUMBER_OF_SLAB_CLASSES - 1 &&
 122  (size - sizeof(item)) <= settings.item_size_max / factor) {
 123  /* Make sure items are always n-byte aligned */
 124  if (size % CHUNK_ALIGN_BYTES)
 125  size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
 126  // cout << "initialize slab i="<<i<<"\n";
 127 
 128  slabclass[i].size = size;
 129  if (opt4sharedmem == true) {
// Shared-memory mode: split the pool evenly over two page-sized slabs.
 130  slabclass[i].perslab = mem_limit / (2 * (settings.item_size_max + sizeof(item)));
 131  } else {
 132  if (size <= 64) {
// [elided line 134: perslab for small chunks when opt4hashmap is
// false — TODO confirm]
 133  if (this->opt4hashmap == false) {
 135  } else {
 136  slabclass[i].perslab = 10000;
 137  }
 138  } else {
 139  slabclass[i].perslab = 1;
 140  }
 141  }
 142  // cout << "size="<<size<<"\n";
 143  // cout << "perslab="<<slabclass[i].perslab<<"\n";
 144  size *= factor;
 145  if (settings.verbose > 1) {
 146  fprintf(stderr,
 147  "slab class %3d: chunk size %9u perslab %7u\n",
 148  i,
 149  slabclass[i].size,
 150  slabclass[i].perslab);
 151  }
 152  }
// The loop exits with i pointing at the reserved final class.
// [elided lines 154-155: presumably size/perslab setup for
// slabclass[power_largest] — TODO confirm]
 153  power_largest = i;
 156  // cout << "initialize for slab i="<< i<<"\n";
 157  // cout << "size="<<slabclass[i].size<<"\n";
 158  // cout << "perslab="<<slabclass[i].perslab<<"\n";
 159  if (settings.verbose > 1) {
 160  fprintf(stderr,
 161  "slab class %3d: chunk size %9u perslab %7u\n",
 162  i,
 163  slabclass[i].size,
 164  slabclass[i].perslab);
 165  }
 166  /*
 167  if (prealloc) {
 168  slabs_preallocate(power_largest);
 169  }
 170  */
 171 }
172 
173 void SlabAllocator::slabs_preallocate(const unsigned int maxslabs) {
174  int i;
175  unsigned int prealloc = 0;
176 
177  /* pre-allocate a 1MB slab in every size class so people don't get
178  confused by non-intuitive "SERVER_ERROR out of memory"
179  messages. this is the most common question on the mailing
180  list. if you really don't want this, you can rebuild without
181  these three lines. */
182 
183  for (i = POWER_SMALLEST; i < MAX_NUMBER_OF_SLAB_CLASSES; i++) {
184  if (++prealloc > maxslabs)
185  return;
186  if (do_slabs_newslab(i) == 0) {
187  fprintf(stderr,
188  "Error while preallocating slab memory!\n"
189  "If using -L or other prealloc options, max memory must be "
190  "at least %d megabytes.\n",
191  power_largest);
192  std::cout << "Fatal Error: SlabAllocator::slabs_preallocate()" << std::endl;
193  exit(1);
194  }
195  }
196 }
197 
// Grow class `id` by one slab page: enforce the memory limit, grow the
// page list, take a page from the global pool or allocate a fresh one,
// zero it, and record it. Returns 1 on success, 0 on failure.
// NOTE(review): doxygen elided source lines 200 and 217 from this
// listing; `g` used below is presumably declared on elided line 200
// (the SLAB_GLOBAL_PAGE_POOL class), and the call that splits the new
// page into the freelist is presumably on elided line 217 — confirm
// against the full source.
 198 int SlabAllocator::do_slabs_newslab(const unsigned int id) {
 199  slabclass_t* p = &slabclass[id];
 201  int len = p->size * p->perslab;
 202  // cout << "len for newslab="<<len<<"\n";
 203  char* ptr;
 204 
// Refuse to grow past mem_limit — but only once the class already has
// a page and the global pool is empty.
 205  if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0 && g->slabs == 0)) {
 206  mem_limit_reached = true;
 207  return 0;
 208  }
 209 
// Ensure slab_list has room, then source the page: global pool first,
// fresh allocation second.
 210  if ((grow_slab_list(id) == 0) || (((ptr = (char*)get_page_from_global_pool()) == nullptr) &&
 211  ((ptr = (char*)memory_allocate((size_t)len)) == 0))) {
 212 
 213  return 0;
 214  }
 215 
 216  memset(ptr, 0, (size_t)len);
 218 
 219  p->slab_list[p->slabs++] = ptr;
 220 
 221  return 1;
 222 }
223 
224 int SlabAllocator::grow_slab_list(const unsigned int id) {
225  slabclass_t* p = &slabclass[id];
226  if (p->slabs == p->list_size) {
227  size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
228  void* new_list = realloc(p->slab_list, new_size * sizeof(void*));
229  if (new_list == 0)
230  return 0;
231  p->list_size = new_size;
232  p->slab_list = (void**)new_list;
233  }
234  return 1;
235 }
236 
// NOTE(review): the function signature and the declaration of `p`
// (doxygen lines 238-239, presumably
// SlabAllocator::get_page_from_global_pool with
// p = &slabclass[SLAB_GLOBAL_PAGE_POOL]) are elided in this generated
// listing — confirm against the full source.
// NOTE(review): the comment says FIFO, but the code pops the most
// recently added page (tail of slab_list) — that is LIFO/stack order.
 237 /* Fast FIFO queue */
 240  if (p->slabs < 1) {
 241  return nullptr;
 242  }
// Take the last recorded page and shrink the count.
 243  void* ret = p->slab_list[p->slabs - 1];
 244  p->slabs--;
 245  return ret;
 246 }
247 
248 void SlabAllocator::split_slab_page_into_freelist(char* ptr, const unsigned int id) {
249  slabclass_t* p = &slabclass[id];
250  int x;
251  for (x = 0; x < p->perslab; x++) {
252  do_slabs_free(ptr, 0, id);
253  ptr += p->size;
254  }
255 }
256 
257 void* SlabAllocator::memory_allocate(size_t size) {
258  void* ret;
259 
260  if (mem_base == nullptr) {
261  /* We are not using a preallocated large memory chunk */
262  ret = malloc(size);
263  } else {
264  ret = mem_current;
265 
266  if (size > mem_avail) {
267  return nullptr;
268  }
269 
270  /* mem_current pointer _must_ be aligned!!! */
271  if (size % CHUNK_ALIGN_BYTES) {
272  size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
273  }
274 
275  mem_current = ((char*)mem_current) + size;
276  if (size < mem_avail) {
277  mem_avail -= size;
278  } else {
279  mem_avail = 0;
280  }
281  }
282  mem_malloced += size;
283 
284  return ret;
285 }
286 
287 void* SlabAllocator::slabs_alloc(const size_t size) {
288  pthread_mutex_lock(&slabs_lock);
289  void* ret = slabs_alloc_unsafe(size);
290  pthread_mutex_unlock(&slabs_lock);
291  return ret;
292 }
293 
294 void* SlabAllocator::slabs_alloc_unsafe(const size_t size) {
295  void* ret;
296  ret = do_slabs_alloc(size);
297  return ret;
298 }
299 unsigned int SlabAllocator::slabs_clsid(const size_t size) {
300  int res = POWER_SMALLEST;
301  if (size == 0) {
302  return 0;
303  }
304  while (size > slabclass[res].size) {
305  if (res++ == power_largest) {
306  return 0;
307  }
308  }
309  return res;
310 }
311 void* SlabAllocator::do_slabs_alloc(const size_t size) {
312  // cout << "size to alloc="<< size <<"\n";
313  slabclass_t* p;
314  void* ret = nullptr;
315  item* it = nullptr;
316  unsigned int id = slabs_clsid(size);
317  // cout << "slab id=" << id << "\n";
318  if (id < POWER_SMALLEST || id > power_largest) {
319  return nullptr;
320  }
321  p = &slabclass[id];
322  // assert(p->sl_curr == 0 || ((item *)p->slots)->slabs_clsid == 0);
323  // assert(p->sl_curr == 0);
324  /* fail unless we have space at the end of a recently allocated page,
325  we have something on our freelist, or we could allocate a new page */
326  if (p->sl_curr == 0) {
327  do_slabs_newslab(id);
328  }
329 
330  if (p->sl_curr != 0) {
331  /* return off our freelist */
332  it = (item*)p->slots;
333  p->slots = it->next;
334  if (it->next)
335  it->next->prev = 0;
336  p->sl_curr--;
337  ret = (void*)it;
338  } else {
339  ret = nullptr;
340  }
341 
342  if (ret) {
343  p->requested += size;
344  }
345 
346  return ret;
347 }
348 
349 void SlabAllocator::slabs_free(void* ptr, size_t size) {
350  pthread_mutex_lock(&slabs_lock);
351  do_slabs_free(ptr, size);
352  pthread_mutex_unlock(&slabs_lock);
353 }
354 
355 void SlabAllocator::slabs_free_unsafe(void* ptr, size_t size) {
356  do_slabs_free(ptr, size);
357 }
358 
359 void SlabAllocator::do_slabs_free(void* ptr, const size_t size, unsigned int id) {
360  slabclass_t* p;
361  item* it;
362 
363  assert(id >= POWER_SMALLEST && id <= power_largest);
364  if (id < POWER_SMALLEST || id > power_largest)
365  return;
366 
367  p = &slabclass[id];
368 
369  it = (item*)ptr;
370  it->prev = 0;
371  it->next = (item*)p->slots;
372  if (it->next)
373  it->next->prev = it;
374  p->slots = it;
375 
376  p->sl_curr++;
377  p->requested -= size;
378  return;
379 }
380 
381 void SlabAllocator::do_slabs_free(void* ptr, const size_t size) {
382  slabclass_t* p;
383  item* it;
384  unsigned int id = slabs_clsid(size);
385  assert(id >= POWER_SMALLEST && id <= power_largest);
386  if (id < POWER_SMALLEST || id > power_largest)
387  return;
388 
389  p = &slabclass[id];
390 
391  it = (item*)ptr;
392  // it->slabs_clsid = 0;
393  it->prev = 0;
394  it->next = (item*)p->slots;
395  if (it->next)
396  it->next->prev = it;
397  p->slots = it;
398 
399  p->sl_curr++;
400  p->requested -= size;
401  return;
402 }
403 
404 
405 #endif
void * get_page_from_global_pool(void)
void * slabs_alloc_unsafe(const size_t size)
struct _stritem * prev
Definition: SlabAllocator.h:48
#define POWER_SMALLEST
Definition: SlabAllocator.h:22
pthread_mutex_t slabs_lock
void do_slabs_free(void *ptr, const size_t size)
void * memory_allocate(size_t size)
bool useExternalMemory
slabclass_t slabclass[MAX_NUMBER_OF_SLAB_CLASSES]
int grow_slab_list(const unsigned int id)
void slabs_free(void *ptr, size_t size)
#define CHUNK_ALIGN_BYTES
Definition: SlabAllocator.h:25
unsigned int perslab
Definition: SlabAllocator.h:59
size_t mem_malloced
void init(const size_t limit, const double factor, const bool prealloc)
void * mem_current
unsigned int slabs_clsid(const size_t size)
int do_slabs_newslab(const unsigned int id)
struct settings_t settings
SlabAllocator(const size_t limit, bool opt4hashmap=false)
struct _stritem * next
Definition: SlabAllocator.h:47
#define SLAB_GLOBAL_PAGE_POOL
Definition: SlabAllocator.h:24
void slabs_free_unsafe(void *ptr, size_t size)
void * slots
Definition: SlabAllocator.h:61
unsigned int slabs
Definition: SlabAllocator.h:64
#define MAX_NUMBER_OF_SLAB_CLASSES
Definition: SlabAllocator.h:27
size_t requested
Definition: SlabAllocator.h:69
void * do_slabs_alloc(const size_t size)
void slabs_preallocate(const unsigned int maxslabs)
bool mem_limit_reached
struct _stritem item
double factor
Definition: SlabAllocator.h:35
int item_size_max
Definition: SlabAllocator.h:37
unsigned int size
Definition: SlabAllocator.h:58
void split_slab_page_into_freelist(char *ptr, const unsigned int id)
unsigned int sl_curr
Definition: SlabAllocator.h:62
unsigned int list_size
Definition: SlabAllocator.h:67
void * slabs_alloc(const size_t size)
bool slab_reassign
Definition: SlabAllocator.h:38
void ** slab_list
Definition: SlabAllocator.h:66