PoolChunked.cc
/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * DEBUG: section 63    Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"
#include "mem/PoolChunked.h"

#include <cassert>
#include <cstring>

#define MEM_MAX_MMAP_CHUNKS 2048
/*
 * Old way:
 * xmalloc each item separately; upon free, stack it into an idle pool array.
 * Each item is individually malloc()ed from the system, imposing libmalloc
 * overhead, and additionally we add our own overhead of one pointer per item
 * because we keep a list of pointers to free items.
 *
 * Chunking:
 * xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
 * limit the Chunk size to MEM_CHUNK_MAX_SIZE (256K). The Chunk size is rounded
 * up to MEM_PAGE_SIZE (4K), trying to keep chunks in multiples of the VM page
 * size. The minimum Chunk size is MEM_CHUNK_SIZE (16K).
 * The number of items that fit into a single chunk depends on the item size.
 * The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 *
 * We populate each Chunk with a linked list, each node stored in the first
 * word of an item and pointing at the next free item. Chunk->freeList points
 * at the first free node. Thus we keep the free-list housekeeping inside the
 * Chunk itself and avoid per-item pointer overhead.
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed closer
 * to the list head. The head is a hotspot that services most requests, so
 * slow sorting occurs and Chunks in the highest memory tend to become idle
 * and freeable.
 *
 * An event is registered that runs every 15 secs and checks the reference
 * time of each idle chunk. If a chunk has not been referenced for 15 secs,
 * it is released.
 *
 * [If mem_idle_limit is exceeded with pools, every chunk that becomes idle
 * is immediately considered for release, unless it is the only chunk with
 * free items in it.] (not implemented)
 *
 * In cachemgr output, there are new columns for chunking. A special item,
 * Frag, is shown to roughly estimate the fragmentation of chunked pools.
 * Fragmentation is calculated by taking the number of items in use, computing
 * the number of chunks needed to fit them all, and comparing that with the
 * actual number of chunks in use. The Frag number, in percent, shows what
 * percentage of chunks are in use beyond what is needed; 100% means that
 * twice the needed number of chunks are in use.
 * The "part" item shows the number of partially filled chunks, which
 * indicates how badly fragmentation is spread across all chunks.
 *
 * Andres Kroonmaa.
 */
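
/*
 * Illustrative example (hypothetical numbers, not taken from this file) of
 * the Frag estimate described above: with 500 items in use and 409 items per
 * chunk, ceil(500/409) = 2 chunks would suffice; if 4 chunks actually hold
 * items, Frag = (4 - 2) / 2 = 100%, i.e. twice the needed number of chunks
 * is in use. "part" would count how many of those 4 chunks are only
 * partially filled.
 */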

extern time_t squid_curtime;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

/* Compare chunks by the address of their arena */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare an object to a chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunk's arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the chunk */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the chunk */
    return 1;
}

MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks' first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    if (pool->doZero)
        objCache = xcalloc(1, pool->chunk_size);
    else
        objCache = xmalloc(pool->chunk_size);

    freeList = objCache;
    void **Free = (void **)freeList;

    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    pool->getMeter().alloc += pool->chunk_capacity;
    pool->getMeter().idle += pool->chunk_capacity;
    ++pool->chunkCount;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}
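
/*
 * Illustrative sketch (for exposition only) of what the loop above builds
 * inside a freshly allocated chunk of N = chunk_capacity items:
 *
 *   objCache -> [ item 0 | item 1 | item 2 | ... | item N-1 ]
 *   freeList -> item 0, whose first word points to item 1, whose first word
 *               points to item 2, and so on. Only the first word of each
 *               idle item is used, so the free-list bookkeeping costs no
 *               memory beyond the chunk itself.
 */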

MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
    MemImplementingAllocator(aLabel, aSize), chunk_size(0),
    chunk_capacity(0), chunkCount(0), freeCache(0), nextFreeChunk(0),
    Chunks(0), allChunks(Splay<MemChunk *>())
{
    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

MemChunk::~MemChunk()
{
    pool->getMeter().alloc -= pool->chunk_capacity;
    pool->getMeter().idle -= pool->chunk_capacity;
    -- pool->chunkCount;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf
     * do not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZero)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * The new chunk is inserted ahead of chunks in higher RAM, making lower
 * chunks preferred in the future and resulting in slow compaction towards
 * the lowest RAM area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    ++saved_calls;

    /* first, try the pool-global free cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try the per-chunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with free items, so create a new one */
        -- saved_calls; // compensate for the ++ above
        createChunk();
    }
    /* now we have at least one chunk in the per-chunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free item in this chunk, so remove it from the per-chunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}
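
/*
 * Note (added for clarity): get() and push() form a two-level free list.
 * get() serves allocations from the pool-global freeCache first and only
 * then from the per-chunk freeList chain; push() merely prepends freed
 * objects to freeCache. Objects migrate back to their owning chunk's
 * freeList in convertFreeCacheToChunkFreeCache(), invoked from clean(),
 * which is when empty chunks can be detected and released.
 */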

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {    /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are the lowest-RAM chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower RAM, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in the chain, add as last */
    chunk->next = newChunk;
}

void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks) /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}
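
/*
 * Worked example (hypothetical object size) of the arithmetic above,
 * assuming MEM_PAGE_SIZE is 4K and the default MEM_CHUNK_SIZE of 16K is
 * requested for a 40-byte object:
 *
 *   csize = 16384 (already page-aligned), cap = 16384 / 40 = 409
 *   409 >= MEM_MIN_FREE, 409 * 40 = 16360 <= MEM_CHUNK_MAX_SIZE,
 *   409 <= MEM_MAX_FREE, so cap stays 409
 *   csize = 409 * 40 = 16360, rounded up to a page multiple -> 16384
 *   result: chunk_capacity = 409, chunk_size = 16384
 */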

/*
 * Warning: we do not remove this pool from the global Pools registry,
 * assuming destruction happens only at the end of the program.
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.currentLevel() == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO: we should be doing something about the original Chunks pointer here. */

}

int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.currentLevel();
}

void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.currentLevel() > 0);
    --meter.idle;
    ++meter.inuse;
    return p;
}

void
MemPoolChunked::deallocate(void *obj, bool)
{
    push(obj);
    assert(meter.inuse.currentLevel() > 0);
    --meter.inuse;
    ++meter.idle;
}
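
/*
 * Illustrative usage sketch (not part of this file; assumes the MemPools
 * front end declared in mem/Pool.h). Callers normally reach allocate() and
 * deallocate() above through the generic MemAllocator interface:
 *
 *   MemAllocator *pool = memPoolCreate("someLabel", 64);  // hypothetical pool
 *   void *obj = pool->alloc();     // ends up in MemPoolChunked::allocate()
 *   // ... use obj ...
 *   pool->freeOne(obj);            // ends up in MemPoolChunked::deallocate()
 */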

void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to walk through the entire global freeCache, find the
     * Chunk that each Free item belongs to, and stuff it into that Chunk's
     * freelist.
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList;   /* stuff into the chunk's freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from the pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so the first chunk is never released */
    /* Recreate the nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate the nextFreeChunk list from scratch */
    /* Populate the nextFreeChunk list in order of "most filled chunk first" */
    /* In case of an equal fill, put the chunk in lower RAM first */
    /* The first (creation-time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from the 2nd chunk. If the first chunk is full, remove it from the free chain */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}
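
/*
 * Illustrative example (hypothetical counts, chunk_capacity = 100) of the
 * nextFreeChunk order rebuilt above: the creation-time chunk stays at the
 * head unless full, in which case the final check unlinks it; behind it,
 * chunks are ordered most-filled first, ties broken by lower address, e.g.
 * inuse_count 97, 60, 60 (higher address), 12. Allocations therefore refill
 * the fullest chunks first, giving sparse chunks a chance to empty out and
 * be released on a later clean().
 */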

bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.currentLevel() > (chunk_capacity << shift);
}
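
/*
 * Clarifying note: the trigger fires once the idle item count exceeds
 * (chunk_capacity << shift), i.e. more than 2^shift chunks' worth of idle
 * objects. For example, idleTrigger(2) with a chunk_capacity of 409 is true
 * only when more than 1636 objects sit idle in this pool.
 */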

/*
 * Update the MemPoolStats struct for a single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate)    /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.currentLevel();
    stats->items_inuse += meter.inuse.currentLevel();
    stats->items_idle += meter.idle.currentLevel();

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.currentLevel();
}
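
/*
 * Note (added for clarity): the cachemgr report derives the Frag and "part"
 * columns described in the header comment from the counters gathered here:
 * "part" is chunks_partial, and Frag compares chunks_inuse against the
 * minimum number of chunks needed to hold items_inuse objects at
 * chunk_capacity items per chunk.
 */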