RockSwapDir.cc
1 /*
2  * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "cache_cf.h"
13 #include "CollapsedForwarding.h"
14 #include "ConfigOption.h"
15 #include "DiskIO/DiskIOModule.h"
16 #include "DiskIO/DiskIOStrategy.h"
17 #include "DiskIO/ReadRequest.h"
18 #include "DiskIO/WriteRequest.h"
19 #include "fs/rock/RockHeaderUpdater.h"
20 #include "fs/rock/RockIoRequests.h"
21 #include "fs/rock/RockIoState.h"
22 #include "fs/rock/RockRebuild.h"
23 #include "fs/rock/RockSwapDir.h"
24 #include "globals.h"
25 #include "ipc/mem/Pages.h"
26 #include "MemObject.h"
27 #include "Parsing.h"
28 #include "SquidConfig.h"
29 #include "SquidMath.h"
30 #include "tools.h"
31 
32 #include <cstdlib>
33 #include <iomanip>
34 #include <limits>
35 
36 #if HAVE_SYS_STAT_H
37 #include <sys/stat.h>
38 #endif
39 
40 const int64_t Rock::SwapDir::HeaderSize = 16*1024;
41 
42 Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
43  slotSize(HeaderSize), filePath(NULL), map(NULL), io(NULL),
44  waitingForPage(NULL)
45 {
46 }
47 
48 Rock::SwapDir::~SwapDir()
49 {
50  delete io;
51  delete map;
52  safe_free(filePath);
53 }
54 
55 // called when Squid core needs a StoreEntry with a given key
56 StoreEntry *
57 Rock::SwapDir::get(const cache_key *key)
58 {
59  if (!map || !theFile || !theFile->canRead())
60  return NULL;
61 
62  sfileno filen;
63  const Ipc::StoreMapAnchor *const slot = map->openForReading(key, filen);
64  if (!slot)
65  return NULL;
66 
67  // create a brand new store entry and initialize it with stored basics
68  StoreEntry *e = new StoreEntry();
69  anchorEntry(*e, filen, *slot);
70 
71  e->hashInsert(key);
72  trackReferences(*e);
73 
74  return e;
75  // the disk entry remains open for reading, protected from modifications
76 }
77 
78 bool
79 Rock::SwapDir::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
80 {
81  if (!map || !theFile || !theFile->canRead())
82  return false;
83 
84  sfileno filen;
85  const Ipc::StoreMapAnchor *const slot = map->openForReading(
86  reinterpret_cast<cache_key*>(collapsed.key), filen);
87  if (!slot)
88  return false;
89 
90  anchorEntry(collapsed, filen, *slot);
91  inSync = updateCollapsedWith(collapsed, *slot);
92  return true; // even if inSync is false
93 }
94 
95 bool
96 Rock::SwapDir::updateCollapsed(StoreEntry &collapsed)
97 {
98  if (!map || !theFile || !theFile->canRead())
99  return false;
100 
101  if (collapsed.swap_filen < 0) // no longer using a disk cache
102  return true;
103  assert(collapsed.swap_dirn == index);
104 
105  const Ipc::StoreMapAnchor &s = map->readableEntry(collapsed.swap_filen);
106  return updateCollapsedWith(collapsed, s);
107 }
108 
109 bool
110 Rock::SwapDir::updateCollapsedWith(StoreEntry &collapsed, const Ipc::StoreMapAnchor &anchor)
111 {
112  collapsed.swap_file_sz = anchor.basics.swap_file_sz;
113  return true;
114 }
115 
116 void
117 Rock::SwapDir::anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor)
118 {
119  const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;
120 
121  e.swap_file_sz = basics.swap_file_sz;
122  e.lastref = basics.lastref;
123  e.timestamp = basics.timestamp;
124  e.expires = basics.expires;
125  e.lastModified(basics.lastmod);
126  e.refcount = basics.refcount;
127  e.flags = basics.flags;
128 
129  if (anchor.complete()) {
130  e.store_status = STORE_OK;
131  e.swap_status = SWAPOUT_DONE;
132  } else {
133  e.store_status = STORE_PENDING;
134  e.swap_status = SWAPOUT_WRITING; // even though another worker writes?
135  }
136 
137  e.ping_status = PING_NONE;
138 
139  EBIT_CLR(e.flags, RELEASE_REQUEST);
140  e.clearPrivate();
141  EBIT_SET(e.flags, ENTRY_VALIDATED);
142 
143  e.swap_dirn = index;
144  e.swap_filen = filen;
145 }
146 
147 void Rock::SwapDir::disconnect(StoreEntry &e)
148 {
149  assert(e.swap_dirn == index);
150  assert(e.swap_filen >= 0);
151  // cannot have SWAPOUT_NONE entry with swap_filen >= 0
152  assert(e.swap_status != SWAPOUT_NONE);
153 
154  // do not rely on e.swap_status here because there is an async delay
155  // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
156 
157  // since e has swap_filen, its slot is locked for reading and/or writing
158  // but it is difficult to know whether THIS worker is reading or writing e,
159  // especially since we may switch from writing to reading. This code relies
160  // on Rock::IoState::writeableAnchor_ being set when we locked for writing.
161  if (e.mem_obj && e.mem_obj->swapout.sio != NULL &&
162  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
163  map->abortWriting(e.swap_filen);
164  e.swap_dirn = -1;
165  e.swap_filen = -1;
166  e.swap_status = SWAPOUT_NONE;
167  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_ = NULL;
168  Store::Root().transientsAbandon(e); // broadcasts after the change
169  } else {
170  map->closeForReading(e.swap_filen);
171  e.swap_dirn = -1;
172  e.swap_filen = -1;
173  e.swap_status = SWAPOUT_NONE;
174  }
175 }
176 
177 uint64_t
178 Rock::SwapDir::currentSize() const
179 {
180  const uint64_t spaceSize = !freeSlots ?
181  maxSize() : (slotSize * freeSlots->size());
182  // everything that is not free is in use
183  return maxSize() - spaceSize;
184 }
185 
186 uint64_t
187 Rock::SwapDir::currentCount() const
188 {
189  return map ? map->entryCount() : 0;
190 }
191 
194 bool
195 Rock::SwapDir::doReportStat() const
196 {
197  return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
198 }
199 
200 void
201 Rock::SwapDir::swappedOut(const StoreEntry &)
202 {
203  // stats are not stored but computed when needed
204 }
205 
206 int64_t
207 Rock::SwapDir::slotLimitAbsolute() const
208 {
209  // the max value is an invalid one; all values must be below the limit
210  assert(std::numeric_limits<Ipc::StoreMapSliceId>::max() ==
211  std::numeric_limits<SlotId>::max());
212  return std::numeric_limits<SlotId>::max();
213 }
214 
215 int64_t
216 Rock::SwapDir::slotLimitActual() const
217 {
218  const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
219  const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
220  const int64_t sLimitHi = slotLimitAbsolute();
221  return min(max(sLimitLo, sWanted), sLimitHi);
222 }
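// Worked example for the calculation above (assuming the default 16 KB
// slot size and a 1024 MB cache_dir): sWanted = (1073741824 - 16384) / 16384
// = 65535 slots, which is then clamped between the map's existing slice
// limit and slotLimitAbsolute().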
223 
224 int64_t
225 Rock::SwapDir::entryLimitActual() const
226 {
227  return min(slotLimitActual(), entryLimitAbsolute());
228 }
229 
230 // TODO: encapsulate as a tool
231 void
232 Rock::SwapDir::create()
233 {
234  assert(path);
235  assert(filePath);
236 
237  if (UsingSmp() && !IamDiskProcess()) {
238  debugs (47,3, HERE << "disker will create in " << path);
239  return;
240  }
241 
242  debugs (47,3, HERE << "creating in " << path);
243 
244  struct stat dir_sb;
245  if (::stat(path, &dir_sb) == 0) {
246  struct stat file_sb;
247  if (::stat(filePath, &file_sb) == 0) {
248  debugs (47, DBG_IMPORTANT, "Skipping existing Rock db: " << filePath);
249  return;
250  }
251  // else the db file is not there or is not accessible, and we will try
252  // to create it later below, generating a detailed error on failures.
253  } else { // path does not exist or is inaccessible
254  // If path exists but is not accessible, mkdir() below will fail, and
255  // the admin should see the error and act accordingly, so there is
256  // no need to distinguish ENOENT from other possible stat() errors.
257  debugs (47, DBG_IMPORTANT, "Creating Rock db directory: " << path);
258  const int res = mkdir(path, 0700);
259  if (res != 0)
260  createError("mkdir");
261  }
262 
263  debugs (47, DBG_IMPORTANT, "Creating Rock db: " << filePath);
264  const int swap = open(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
265  if (swap < 0)
266  createError("create");
267 
268 #if SLOWLY_FILL_WITH_ZEROS
269  char block[1024];
270  Must(maxSize() % sizeof(block) == 0);
271  memset(block, '\0', sizeof(block));
272 
273  for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
274  if (write(swap, block, sizeof(block)) != sizeof(block))
275  createError("write");
276  }
277 #else
278  if (ftruncate(swap, maxSize()) != 0)
279  createError("truncate");
280 
281  char header[HeaderSize];
282  memset(header, '\0', sizeof(header));
283  if (write(swap, header, sizeof(header)) != sizeof(header))
284  createError("write");
285 #endif
286 
287  close(swap);
288 }
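// Resulting db file layout, as created above: a file of maxSize() bytes whose
// first HeaderSize (16 KB) bytes form the zero-filled db header, followed by
// fixed-size slots addressed via diskOffset() further below.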
289 
290 // report Rock DB creation error and exit
291 void
292 Rock::SwapDir::createError(const char *const msg)
293 {
294  int xerrno = errno; // XXX: where does errno come from?
295  debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
296  filePath << "; " << msg << " error: " << xstrerr(xerrno));
297  fatal("Rock Store db creation error");
298 }
299 
300 void
301 Rock::SwapDir::init()
302 {
303  debugs(47,2, HERE);
304 
305  // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
306  // are refcounted. We up our count once to avoid implicit delete's.
307  lock();
308 
309  freeSlots = shm_old(Ipc::Mem::PageStack)(freeSlotsPath());
310 
311  Must(!map);
312  map = new DirMap(inodeMapPath());
313  map->cleaner = this;
314 
315  const char *ioModule = needsDiskStrand() ? "IpcIo" : "Blocking";
316  if (DiskIOModule *m = DiskIOModule::Find(ioModule)) {
317  debugs(47,2, HERE << "Using DiskIO module: " << ioModule);
318  io = m->createStrategy();
319  io->init();
320  } else {
321  debugs(47, DBG_CRITICAL, "FATAL: Rock store is missing DiskIO module: " <<
322  ioModule);
323  fatal("Rock Store missing a required DiskIO module");
324  }
325 
326  theFile = io->newFile(filePath);
327  theFile->configure(fileConfig);
328  theFile->open(O_RDWR, 0644, this);
329 
330  // Increment early. Otherwise, if one SwapDir finishes rebuild before
331  // others start, storeRebuildComplete() will think the rebuild is over!
332  // TODO: move store_dirs_rebuilding hack to store modules that need it.
333  ++StoreController::store_dirs_rebuilding;
334 }
335 
336 bool
337 Rock::SwapDir::needsDiskStrand() const
338 {
339  const bool wontEvenWorkWithoutDisker = Config.workers > 1;
340  const bool wouldWorkBetterWithDisker = DiskIOModule::Find("IpcIo");
341  return InDaemonMode() && (wontEvenWorkWithoutDisker ||
342  wouldWorkBetterWithDisker);
343 }
344 
345 void
346 Rock::SwapDir::parse(int anIndex, char *aPath)
347 {
348  index = anIndex;
349 
350  path = xstrdup(aPath);
351 
352  // cache store is located at path/db
353  String fname(path);
354  fname.append("/rock");
355  filePath = xstrdup(fname.termedBuf());
356 
357  parseSize(false);
358  parseOptions(0);
359 
360  // Current openForWriting() code overwrites the old slot if needed
361  // and possible, so proactively removing old slots is probably useless.
362  assert(!repl); // repl = createRemovalPolicy(Config.replPolicy);
363 
364  validateOptions();
365 }
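// For context, a squid.conf line of the shape consumed by parse() and the
// option parsers below might look like this (the path and numbers are
// arbitrary illustrative values):
//
//   cache_dir rock /var/spool/squid/rock 1024 slot-size=16384 max-swap-rate=200 swap-timeout=300
//
// where 1024 is the size in MB read by parseSize(), and slot-size,
// max-swap-rate, and swap-timeout are handled by the parse*Option() methods
// further below.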
366 
367 void
368 Rock::SwapDir::reconfigure()
369 {
370  parseSize(true);
371  parseOptions(1);
372  // TODO: can we reconfigure the replacement policy (repl)?
373  validateOptions();
374 }
375 
376 /// parses anonymous cache_dir size option
377 void
378 Rock::SwapDir::parseSize(const bool reconfig)
379 {
380  const int i = GetInteger();
381  if (i < 0)
382  fatal("negative Rock cache_dir size value");
383  const uint64_t new_max_size =
384  static_cast<uint64_t>(i) << 20; // MBytes to Bytes
385  if (!reconfig)
386  max_size = new_max_size;
387  else if (new_max_size != max_size) {
388  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir '" << path << "' size "
389  "cannot be changed dynamically, value left unchanged (" <<
390  (max_size >> 20) << " MB)");
391  }
392 }
393 
394 ConfigOption *
395 Rock::SwapDir::getOptionTree() const
396 {
397  ConfigOption *copt = ::SwapDir::getOptionTree();
398  ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(copt);
399  if (vector) {
400  // if copt is actually a ConfigOptionVector
401  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
402  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
403  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
404  } else {
405  // we don't know how to handle copt, as it's not a ConfigOptionVector.
406  // free it (and return nullptr)
407  delete copt;
408  copt = nullptr;
409  }
410  return copt;
411 }
412 
413 bool
414 Rock::SwapDir::allowOptionReconfigure(const char *const option) const
415 {
416  return strcmp(option, "slot-size") != 0 &&
417  ::SwapDir::allowOptionReconfigure(option);
418 }
419 
420 /// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
421 bool
422 Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfig)
423 {
424  // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
425  // including time unit handling. Same for size and rate.
426 
427  time_msec_t *storedTime;
428  if (strcmp(option, "swap-timeout") == 0)
429  storedTime = &fileConfig.ioTimeout;
430  else
431  return false;
432 
433  if (!value) {
434  self_destruct();
435  return false;
436  }
437 
438  // TODO: handle time units and detect parsing errors better
439  const int64_t parsedValue = strtoll(value, NULL, 10);
440  if (parsedValue < 0) {
441  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
442  self_destruct();
443  return false;
444  }
445 
446  const time_msec_t newTime = static_cast<time_msec_t>(parsedValue);
447 
448  if (!reconfig)
449  *storedTime = newTime;
450  else if (*storedTime != newTime) {
451  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
452  << " cannot be changed dynamically, value left unchanged: " <<
453  *storedTime);
454  }
455 
456  return true;
457 }
458 
459 /// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
460 void
461 Rock::SwapDir::dumpTimeOption(StoreEntry * e) const
462 {
463  if (fileConfig.ioTimeout)
464  storeAppendPrintf(e, " swap-timeout=%" PRId64,
465  static_cast<int64_t>(fileConfig.ioTimeout));
466 }
467 
468 /// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
469 bool
470 Rock::SwapDir::parseRateOption(char const *option, const char *value, int isaReconfig)
471 {
472  int *storedRate;
473  if (strcmp(option, "max-swap-rate") == 0)
474  storedRate = &fileConfig.ioRate;
475  else
476  return false;
477 
478  if (!value) {
479  self_destruct();
480  return false;
481  }
482 
483  // TODO: handle time units and detect parsing errors better
484  const int64_t parsedValue = strtoll(value, NULL, 10);
485  if (parsedValue < 0) {
486  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
487  self_destruct();
488  return false;
489  }
490 
491  const int newRate = static_cast<int>(parsedValue);
492 
493  if (newRate < 0) {
494  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << newRate);
495  self_destruct();
496  return false;
497  }
498 
499  if (!isaReconfig)
500  *storedRate = newRate;
501  else if (*storedRate != newRate) {
502  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
503  << " cannot be changed dynamically, value left unchanged: " <<
504  *storedRate);
505  }
506 
507  return true;
508 }
509 
510 /// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
511 void
512 Rock::SwapDir::dumpRateOption(StoreEntry * e) const
513 {
514  if (fileConfig.ioRate >= 0)
515  storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
516 }
517 
518 /// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
519 bool
520 Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfig)
521 {
522  uint64_t *storedSize;
523  if (strcmp(option, "slot-size") == 0)
524  storedSize = &slotSize;
525  else
526  return false;
527 
528  if (!value) {
529  self_destruct();
530  return false;
531  }
532 
533  // TODO: handle size units and detect parsing errors better
534  const uint64_t newSize = strtoll(value, NULL, 10);
535  if (newSize <= 0) {
536  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
537  self_destruct();
538  return false;
539  }
540 
541  if (newSize <= sizeof(DbCellHeader)) {
542  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
543  self_destruct();
544  return false;
545  }
546 
547  if (!reconfig)
548  *storedSize = newSize;
549  else if (*storedSize != newSize) {
550  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
551  << " cannot be changed dynamically, value left unchanged: " <<
552  *storedSize);
553  }
554 
555  return true;
556 }
557 
558 /// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
559 void
560 Rock::SwapDir::dumpSizeOption(StoreEntry * e) const
561 {
562  storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
563 }
564 
565 /// warns of configuration problems; may quit
566 void
567 Rock::SwapDir::validateOptions()
568 {
569  if (slotSize <= 0)
570  fatal("Rock store requires a positive slot-size");
571 
572  const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
573  const int64_t slotSizeRoundingWaste = slotSize;
574  const int64_t maxRoundingWaste =
575  max(maxSizeRoundingWaste, slotSizeRoundingWaste);
576 
577  // an entry consumes at least one slot; round up to reduce false warnings
578  const int64_t blockSize = static_cast<int64_t>(slotSize);
579  const int64_t maxObjSize = max(blockSize,
580  ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);
581 
582  // Does the "sfileno*max-size" limit match configured db capacity?
583  const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
584  if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
585  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
586  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
587  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
588  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
589  "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
590  "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
591  "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
592  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
593  }
594 
595  // Does the "absolute slot count" limit match configured db capacity?
596  const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
597  if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
598  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(slotsMayOccupy);
599  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
600  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
601  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
602  "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
603  "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
604  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
605  }
606 }
607 
608 void
609 Rock::SwapDir::rebuild()
610 {
611  //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init()
612  AsyncJob::Start(new Rebuild(this));
613 }
614 
615 bool
616 Rock::SwapDir::canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const
617 {
618  if (diskSpaceNeeded >= 0)
619  diskSpaceNeeded += sizeof(DbCellHeader);
620  if (!::SwapDir::canStore(e, diskSpaceNeeded, load))
621  return false;
622 
623  if (!theFile || !theFile->canWrite())
624  return false;
625 
626  if (!map)
627  return false;
628 
629  // Do not start I/O transaction if there are less than 10% free pages left.
630  // TODO: reserve page instead
631  if (needsDiskStrand() &&
632  Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
633  debugs(47, 5, HERE << "too few shared pages for IPC I/O left");
634  return false;
635  }
636 
637  if (io->shedLoad())
638  return false;
639 
640  load = io->load();
641  return true;
642 }
643 
644 StoreIOState::Pointer
645 Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
646 {
647  if (!theFile || theFile->error()) {
648  debugs(47,4, HERE << theFile);
649  return NULL;
650  }
651 
652  sfileno filen;
653  Ipc::StoreMapAnchor *const slot =
654  map->openForWriting(reinterpret_cast<const cache_key *>(e.key), filen);
655  if (!slot) {
656  debugs(47, 5, HERE << "map->add failed");
657  return NULL;
658  }
659 
660  assert(filen >= 0);
661  slot->set(e);
662 
663  // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
664  // If that does not happen, the entry will not decrement the read level!
665 
666  Rock::SwapDir::Pointer self(this);
667  IoState *sio = new IoState(self, &e, cbFile, cbIo, data);
668 
669  sio->swap_dirn = index;
670  sio->swap_filen = filen;
671  sio->writeableAnchor_ = slot;
672 
673  debugs(47,5, HERE << "dir " << index << " created new filen " <<
674  std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
675  sio->swap_filen << std::dec << " starting at " <<
676  diskOffset(sio->swap_filen));
677 
678  sio->file(theFile);
679 
680  trackReferences(e);
681  return sio;
682 }
683 
684 StoreIOState::Pointer
685 Rock::SwapDir::createUpdateIO(const Ipc::StoreMapUpdate &update, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
686 {
687  if (!theFile || theFile->error()) {
688  debugs(47,4, theFile);
689  return nullptr;
690  }
691 
692  Must(update.fresh);
693  Must(update.fresh.fileNo >= 0);
694 
695  Rock::SwapDir::Pointer self(this);
696  IoState *sio = new IoState(self, update.entry, cbFile, cbIo, data);
697 
698  sio->swap_dirn = index;
699  sio->swap_filen = update.fresh.fileNo;
700  sio->writeableAnchor_ = update.fresh.anchor;
701 
702  debugs(47,5, "dir " << index << " updating filen " <<
703  std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
704  sio->swap_filen << std::dec << " starting at " <<
705  diskOffset(sio->swap_filen));
706 
707  sio->file(theFile);
708  return sio;
709 }
710 
711 int64_t
712 Rock::SwapDir::diskOffset(const SlotId sid) const
713 {
714  assert(sid >= 0);
715  return HeaderSize + slotSize*sid;
716 }
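// Illustration of the offset math above (assuming the default 16 KB slot
// size): slot 0 starts right after the 16 KB header at offset 16384, and
// slot 2 starts at 16384 + 2*16384 = 49152.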
717 
718 int64_t
719 Rock::SwapDir::diskOffset(Ipc::Mem::PageId &pageId) const
720 {
721  assert(pageId);
722  return diskOffset(pageId.number - 1);
723 }
724 
725 int64_t
726 Rock::SwapDir::diskOffsetLimit() const
727 {
728  assert(map);
729  return diskOffset(map->sliceLimit());
730 }
731 
732 bool
733 Rock::SwapDir::useFreeSlot(Ipc::Mem::PageId &pageId)
734 {
735  if (freeSlots->pop(pageId)) {
736  debugs(47, 5, "got a previously free slot: " << pageId);
737  return true;
738  }
739 
740  // catch free slots delivered to noteFreeMapSlice()
741  assert(!waitingForPage);
742  waitingForPage = &pageId;
743  if (map->purgeOne()) {
744  assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
745  assert(pageId.set());
746  debugs(47, 5, "got a previously busy slot: " << pageId);
747  return true;
748  }
749  assert(waitingForPage == &pageId);
750  waitingForPage = NULL;
751 
752  debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
753  return false;
754 }
755 
756 bool
757 Rock::SwapDir::validSlotId(const SlotId slotId) const
758 {
759  return 0 <= slotId && slotId < slotLimitActual();
760 }
761 
762 void
763 Rock::SwapDir::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
764 {
765  Ipc::Mem::PageId pageId;
766  pageId.pool = index+1;
767  pageId.number = sliceId+1;
768  if (waitingForPage) {
769  *waitingForPage = pageId;
770  waitingForPage = NULL;
771  } else {
772  freeSlots->push(pageId);
773  }
774 }
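// Note on the mapping above: free-slot PageIds are 1-based handles
// (pool = cache_dir index + 1, number = slice ID + 1) onto the 0-based
// slot/slice IDs, which is why diskOffset(pageId) subtracts one.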
775 
776 // tries to open an old entry with swap_filen for reading
777 StoreIOState::Pointer
778 Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreIOState::STIOCB *cbIo, void *data)
779 {
780  if (!theFile || theFile->error()) {
781  debugs(47,4, HERE << theFile);
782  return NULL;
783  }
784 
785  if (e.swap_filen < 0) {
786  debugs(47,4, HERE << e);
787  return NULL;
788  }
789 
790  // Do not start I/O transaction if there are less than 10% free pages left.
791  // TODO: reserve page instead
792  if (needsDiskStrand() &&
793  Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
794  debugs(47, 5, HERE << "too few shared pages for IPC I/O left");
795  return NULL;
796  }
797 
798  // There are two ways an entry can get swap_filen: our get() locked it for
799  // reading or our storeSwapOutStart() locked it for writing. Peeking at our
800  // locked entry is safe, but no support for reading the entry we swap out.
801  const Ipc::StoreMapAnchor *slot = map->peekAtReader(e.swap_filen);
802  if (!slot)
803  return NULL; // we were writing after all
804 
805  Rock::SwapDir::Pointer self(this);
806  IoState *sio = new IoState(self, &e, cbFile, cbIo, data);
807 
808  sio->swap_dirn = index;
809  sio->swap_filen = e.swap_filen;
810  sio->readableAnchor_ = slot;
811  sio->file(theFile);
812 
813  debugs(47,5, HERE << "dir " << index << " has old filen: " <<
814  std::setfill('0') << std::hex << std::uppercase << std::setw(8) <<
815  sio->swap_filen);
816 
817  assert(slot->sameKey(static_cast<const cache_key*>(e.key)));
818  // For collapsed disk hits: e.swap_file_sz and slot->basics.swap_file_sz
819  // may still be zero and basics.swap_file_sz may grow.
820  assert(slot->basics.swap_file_sz >= e.swap_file_sz);
821 
822  return sio;
823 }
824 
825 void
826 Rock::SwapDir::ioCompletedNotification()
827 {
828  if (!theFile)
829  fatalf("Rock cache_dir failed to initialize db file: %s", filePath);
830 
831  if (theFile->error()) {
832  int xerrno = errno; // XXX: where does errno come from
833  fatalf("Rock cache_dir at %s failed to open db file: %s", filePath,
834  xstrerr(xerrno));
835  }
836 
837  debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
838  std::setw(12) << maxSize() << " disk bytes, " <<
839  std::setw(7) << map->entryLimit() << " entries, and " <<
840  std::setw(7) << map->sliceLimit() << " slots");
841 
842  rebuild();
843 }
844 
845 void
846 Rock::SwapDir::closeCompleted()
847 {
848  theFile = NULL;
849 }
850 
851 void
852 Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::ReadRequest> r)
853 {
854  ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
855  assert(request);
856  IoState::Pointer sio = request->sio;
857 
858  if (errflag == DISK_OK && rlen > 0)
859  sio->offset_ += rlen;
860 
861  sio->callReaderBack(r->buf, rlen);
862 }
863 
864 void
865 Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
866 {
867  Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
868  assert(request);
869  assert(request->sio != NULL);
870  IoState &sio = *request->sio;
871 
872  // quit if somebody called IoState::close() while we were waiting
873  if (!sio.stillWaiting()) {
874  debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
875  noteFreeMapSlice(request->sidNext);
876  return;
877  }
878 
879  debugs(79, 7, "errflag=" << errflag << " rlen=" << request->len << " eof=" << request->eof);
880 
881  // TODO: Fail if disk dropped one of the previous write requests.
882 
883  if (errflag == DISK_OK) {
884  // do not increment sio.offset_ because we do it in sio->write()
885 
886  // finalize the shared slice info after writing slice contents to disk
887  Ipc::StoreMap::Slice &slice =
888  map->writeableSlice(sio.swap_filen, request->sidCurrent);
889  slice.size = request->len - sizeof(DbCellHeader);
890  slice.next = request->sidNext;
891 
892  if (request->eof) {
893  assert(sio.e);
894  assert(sio.writeableAnchor_);
895  if (sio.touchingStoreEntry()) {
896  sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
897  sio.offset_;
898 
899  // close, the entry gets the read lock
900  map->closeForWriting(sio.swap_filen, true);
901  }
902  sio.writeableAnchor_ = NULL;
903  sio.splicingPoint = request->sidCurrent;
904  sio.finishedWriting(errflag);
905  }
906  } else {
907  noteFreeMapSlice(request->sidNext);
908 
909  writeError(sio);
910  sio.finishedWriting(errflag);
911  // and hope that Core will call disconnect() to close the map entry
912  }
913 
914  if (sio.touchingStoreEntry())
915  CollapsedForwarding::Broadcast(*sio.e);
916 }
917 
918 void
919 Rock::SwapDir::writeError(StoreIOState &sio)
920 {
921  // Do not abortWriting here. The entry should keep the write lock
922  // instead of losing association with the store and confusing core.
923  map->freeEntry(sio.swap_filen); // will mark as unusable, just in case
924 
925  if (sio.touchingStoreEntry())
926  CollapsedForwarding::Broadcast(*sio.e);
927  // else noop: a fresh entry update error does not affect stale entry readers
928 
929  // All callers must also call IoState callback, to propagate the error.
930 }
931 
932 void
933 Rock::SwapDir::updateHeaders(StoreEntry *updatedE)
934 {
935  if (!map)
936  return;
937 
938  Ipc::StoreMapUpdate update(updatedE);
939  if (!map->openForUpdating(update, updatedE->swap_filen))
940  return;
941 
942  try {
943  AsyncJob::Start(new HeaderUpdater(this, update));
944  } catch (const std::exception &ex) {
945  debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
946  map->abortUpdating(update);
947  }
948 }
949 
950 bool
951 Rock::SwapDir::full() const
952 {
953  return freeSlots != NULL && !freeSlots->size();
954 }
955 
956 // storeSwapOutFileClosed calls this method on DISK_NO_SPACE_LEFT,
957 // but it should not happen for us
958 void
959 Rock::SwapDir::diskFull()
960 {
961  debugs(20, DBG_IMPORTANT, "BUG: No space left with rock cache_dir: " <<
962  filePath);
963 }
964 
965 /// purge while full(); it should be sufficient to purge just one
966 void
967 Rock::SwapDir::maintain()
968 {
969  // The Store calls this to free some db space, but there is nothing wrong
970  // with a full() db, except when db has to shrink after reconfigure, and
971  // we do not support shrinking yet (it would have to purge specific slots).
972  // TODO: Disable maintain() requests when they are pointless.
973 }
974 
975 void
976 Rock::SwapDir::reference(StoreEntry &e)
977 {
978  debugs(47, 5, HERE << &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
979  if (repl && repl->Referenced)
980  repl->Referenced(repl, &e, &e.repl);
981 }
982 
983 bool
984 Rock::SwapDir::dereference(StoreEntry &e)
985 {
986  debugs(47, 5, HERE << &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
987  if (repl && repl->Dereferenced)
988  repl->Dereferenced(repl, &e, &e.repl);
989 
990  // no need to keep e in the global store_table for us; we have our own map
991  return false;
992 }
993 
994 bool
995 Rock::SwapDir::unlinkdUseful() const
996 {
997  // no entry-specific files to unlink
998  return false;
999 }
1000 
1001 void
1002 Rock::SwapDir::unlink(StoreEntry &e)
1003 {
1004  debugs(47, 5, HERE << e);
1005  ignoreReferences(e);
1006  map->freeEntry(e.swap_filen);
1007  disconnect(e);
1008 }
1009 
1010 void
1011 Rock::SwapDir::markForUnlink(StoreEntry &e)
1012 {
1013  debugs(47, 5, e);
1014  map->freeEntry(e.swap_filen);
1015 }
1016 
1017 void
1018 Rock::SwapDir::trackReferences(StoreEntry &e)
1019 {
1020  debugs(47, 5, HERE << e);
1021  if (repl)
1022  repl->Add(repl, &e, &e.repl);
1023 }
1024 
1025 void
1026 Rock::SwapDir::ignoreReferences(StoreEntry &e)
1027 {
1028  debugs(47, 5, HERE << e);
1029  if (repl)
1030  repl->Remove(repl, &e, &e.repl);
1031 }
1032 
1033 void
1034 Rock::SwapDir::statfs(StoreEntry &e) const
1035 {
1036  storeAppendPrintf(&e, "\n");
1037  storeAppendPrintf(&e, "Maximum Size: %" PRIu64 " KB\n", maxSize() >> 10);
1038  storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
1039  currentSize() / 1024.0,
1040  Math::doublePercent(currentSize(), maxSize()));
1041 
1042  const int entryLimit = entryLimitActual();
1043  const int slotLimit = slotLimitActual();
1044  storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
1045  if (map && entryLimit > 0) {
1046  const int entryCount = map->entryCount();
1047  storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
1048  entryCount, (100.0 * entryCount / entryLimit));
1049  }
1050 
1051  storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
1052  if (map && slotLimit > 0) {
1053  const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
1054  if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
1055  const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
1056  storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
1057  usedSlots, (100.0 * usedSlots / slotLimit));
1058  }
1059  if (slotLimit < 100) { // XXX: otherwise too expensive to count
1060  Ipc::ReadWriteLockStats stats;
1061  map->updateStats(stats);
1062  stats.dump(e);
1063  }
1064  }
1065 
1066  storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
1067  store_open_disk_fd, Config.max_open_disk_fds);
1068 
1069  storeAppendPrintf(&e, "Flags:");
1070 
1071  if (flags.selected)
1072  storeAppendPrintf(&e, " SELECTED");
1073 
1074  if (flags.read_only)
1075  storeAppendPrintf(&e, " READ-ONLY");
1076 
1077  storeAppendPrintf(&e, "\n");
1078 
1079 }
1080 
1081 SBuf
1082 Rock::SwapDir::inodeMapPath() const
1083 {
1084  return Ipc::Mem::Segment::Name(SBuf(path), "map");
1085 }
1086 
1087 const char *
1088 Rock::SwapDir::freeSlotsPath() const
1089 {
1090  static String spacesPath;
1091  spacesPath = path;
1092  spacesPath.append("_spaces");
1093  return spacesPath.termedBuf();
1094 }
1095 
1096 namespace Rock
1097 {
1098 RunnerRegistrationEntry(SwapDirRr);
1099 }
1100 
1101 void Rock::SwapDirRr::create()
1102 {
1103  Must(mapOwners.empty() && freeSlotsOwners.empty());
1104  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1105  if (const Rock::SwapDir *const sd = dynamic_cast<Rock::SwapDir *>(INDEXSD(i))) {
1106  const int64_t capacity = sd->slotLimitActual();
1107 
1108  SwapDir::DirMap::Owner *const mapOwner =
1109  SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
1110  mapOwners.push_back(mapOwner);
1111 
1112  // TODO: somehow remove pool id and counters from PageStack?
1113  Ipc::Mem::Owner<Ipc::Mem::PageStack> *const freeSlotsOwner =
1114  shm_new(Ipc::Mem::PageStack)(sd->freeSlotsPath(),
1115  i+1, capacity, 0);
1116  freeSlotsOwners.push_back(freeSlotsOwner);
1117 
1118  // TODO: add method to initialize PageStack with no free pages
1119  while (true) {
1120  Ipc::Mem::PageId pageId;
1121  if (!freeSlotsOwner->object()->pop(pageId))
1122  break;
1123  }
1124  }
1125  }
1126 }
1127 
1128 Rock::SwapDirRr::~SwapDirRr()
1129 {
1130  for (size_t i = 0; i < mapOwners.size(); ++i) {
1131  delete mapOwners[i];
1132  delete freeSlotsOwners[i];
1133  }
1134 }
1135 