PoolChunked.cc
/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"
#include "mem/PoolChunked.h"

#include <cassert>
#include <cstring>

#define MEM_MAX_MMAP_CHUNKS 2048
/*
 * Old way:
 * xmalloc each item separately, upon free stack into idle pool array.
 * Each item is individually malloc()ed from the system, imposing libmalloc
 * overhead, and we additionally add an overhead of one pointer per item,
 * as we keep a list of pointers to free items.
 *
 * Chunking:
 * xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array,
 * but limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded
 * up to MEM_PAGE_SIZE (4K), trying to keep chunks in multiples of the VM
 * page size. Minimum Chunk size is MEM_CHUNK_SIZE (16K).
 * A number of items fits into a single chunk, depending on the item size.
 * The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 *
 * We populate the Chunk with a linked list, each node at the first word of
 * an item, pointing at the next free item. Chunk->freeList points at the
 * first free node. Thus we stuff the free-list housekeeping into the Chunk
 * itself and avoid a pointer of overhead per item.
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed
 * closer to the list head. The head is a hotspot servicing most of the
 * requests, so slow sorting occurs and Chunks in higher memory tend to
 * become idle and freeable.
 *
 * An event is registered that runs every 15 seconds and checks the
 * reference time of each idle chunk. If a chunk has not been referenced
 * for 15 seconds, it is released.
 *
 * [If mem_idle_limit is exceeded with pools, every chunk that becomes
 * idle is immediately considered for release, unless this is the only
 * chunk with free items in it.] (not implemented)
 *
 * In cachemgr output, there are new columns for chunking. A special item,
 * Frag, is shown to approximate the fragmentation of chunked pools.
 * Fragmentation is calculated by taking the number of items in use,
 * computing the number of chunks needed to fit them all, and comparing
 * that to the actual number of chunks in use. The Frag number, in percent,
 * shows how many chunks are in use beyond what is needed: 100% means that
 * twice the needed number of chunks are in use.
 * The "part" item shows the number of partially filled chunks, i.e. how
 * badly fragmentation is spread across all chunks.
 *
 * Andres Kroonmaa.
 */
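
/*
 * An illustrative sketch of the Frag estimate described above. This helper
 * is not part of Squid; fragPercent and its parameters are hypothetical
 * names, added only to make the arithmetic concrete.
 */
static inline int
fragPercent(int itemsInuse, int chunksInuse, int itemsPerChunk)
{
    /* chunks that would suffice if the used items were packed tightly */
    const int chunksNeeded = (itemsInuse + itemsPerChunk - 1) / itemsPerChunk;
    if (chunksNeeded < 1 || chunksInuse <= chunksNeeded)
        return 0;
    /* e.g. 1000 items at 100 per chunk need 10 chunks; 20 in use => 100% */
    return 100 * (chunksInuse - chunksNeeded) / chunksNeeded;
}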

extern time_t squid_curtime;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

/* Compare chunks */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare object to chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunk's arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the chunk's pool */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the pool */
    return 1;
}

MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks's first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    if (pool->doZero)
        objCache = xcalloc(1, pool->chunk_size);
    else
        objCache = xmalloc(pool->chunk_size);

    freeList = objCache;
    void **Free = (void **)freeList;

    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    pool->getMeter().alloc += pool->chunk_capacity;
    pool->getMeter().idle += pool->chunk_capacity;
    ++pool->chunkCount;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}
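
/*
 * A minimal sketch (not Squid code) of the free-list threading done by the
 * MemChunk constructor above: each free item's first word stores the address
 * of the next free item, so no separate bookkeeping memory is needed. All
 * names here are hypothetical, and the sketch null-terminates the last node
 * explicitly for self-containment.
 */
static inline void *
demoThreadFreeList(void *buf, size_t itemSize, int itemCount)
{
    void **node = (void **)buf;
    for (int i = 1; i < itemCount; ++i) {
        *node = (void *)((char *)node + itemSize); /* link to the next item */
        node = (void **)*node;
    }
    *node = NULL; /* terminate the list at the last item */
    return buf; /* head of the threaded free list */
}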

MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
    MemImplementingAllocator(aLabel, aSize), chunk_size(0),
    chunk_capacity(0), chunkCount(0), freeCache(0), nextFreeChunk(0),
    Chunks(0), allChunks(Splay<MemChunk *>())
{
    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

MemChunk::~MemChunk()
{
    pool->getMeter().alloc -= pool->chunk_capacity;
    pool->getMeter().idle -= pool->chunk_capacity;
    -- pool->chunkCount;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf
     * do not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZero)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of the lowest-RAM chunk, making it preferred
 * in the future, resulting in slow compaction towards the lowest RAM area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    ++saved_calls;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try the per-chunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with frees, so create a new one */
        -- saved_calls; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in the per-chunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free in this chunk, so remove us from the per-chunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}
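
/*
 * The freeCache fast path above is a plain LIFO pointer pop. A standalone
 * sketch of that step (hypothetical names, not Squid code):
 */
static inline void *
demoPopFree(void **freeListHead)
{
    void **item = (void **)*freeListHead;
    if (!item)
        return NULL; /* nothing cached */
    *freeListHead = *item; /* head now points at the next free item */
    *item = NULL; /* scrub the link word before handing the item out */
    return item;
}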

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) { /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are the lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in the chain, add as last */
    chunk->next = newChunk;
}

void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks) /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}
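
/*
 * A worked example of the sizing above, assuming obj_size == 40 and the
 * default MEM_CHUNK_SIZE (16K) request: 16384 is already page-aligned, so
 * cap = 16384 / 40 = 409; all limit checks pass; csize = 409 * 40 = 16360
 * rounds back up to 16384, and the chunk ends up holding 409 items in 16K.
 */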

/*
 * warning: we do not clean this entry from Pools, assuming destruction
 * is used only at the end of the program
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.currentLevel() == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */

}

int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.currentLevel();
}

void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.currentLevel() > 0);
    --meter.idle;
    ++meter.inuse;
    return p;
}

void
MemPoolChunked::deallocate(void *obj, bool)
{
    push(obj);
    assert(meter.inuse.currentLevel() > 0);
    --meter.inuse;
    ++meter.idle;
}
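
/*
 * Sketch of the intended call pattern through the generic pool interface
 * (a hedged example; the "demo" label and size are hypothetical):
 *
 *   MemImplementingAllocator *pool = MemPools::GetInstance().create("demo", 40);
 *   void *obj = pool->alloc();   // ends up in allocate() above
 *   pool->freeOne(obj);          // ends up in deallocate() above
 */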

void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through the whole global freeCache, find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList; /* stuff into the chunk's freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We now start checking all chunks to see if we can release any */
    /* We start from Chunks->next, so the first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put the chunk in lower ram first */
    /* First (creation time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from the 2nd chunk. If the first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}

bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.currentLevel() > (chunk_capacity << shift);
}
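
/*
 * Worked example of the shift argument above: with chunk_capacity == 409,
 * idleTrigger(2) fires only once more than 409 << 2 == 1636 items (four
 * chunks' worth) sit idle, so callers can demand a larger idle backlog
 * before initiating a clean.
 */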

/*
 * Update MemPoolStats struct for a single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate) /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.currentLevel();
    stats->items_inuse += meter.inuse.currentLevel();
    stats->items_idle += meter.idle.currentLevel();

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.currentLevel();
}