Avoid "FATAL: Squid has attempted to read data from memory that is not present" crashes. Improve related code. Shared memory caching code was not checking whether the response is generally cachable and, hence, tried to store responses that Squid already refused to cache (e.g., release-requested entries). The shared memory cache should also check that the response is contiguous, because the disk store may not check that (e.g., when disk caching is disabled). The problem was exacerbated by the broken entry dump code accompanying the FATAL message. The Splay tree iterator is evidently not iterating a portion of the tree. I added visitor-pattern code to work around that, but did not try to fix the Splay iterator itself because, in part, the whole iterator design felt inappropriate (storing and flattening the tree before iterating it?) for its intended purpose. I could not check quickly enough where the broken iterator is used besides mem_hdr::debugDump(), so more bugs like this are possible. Optimized StoreEntry::checkCachable() a little and removed a wrong TODO comment: Disk-only mayStartSwapOut() should not accumulate "general" cachability checks, which are used by RAM caches as well. Added more mem_hdr debugging and polished method descriptions. Fixed NullStoreEntry::mayStartSwapout() spelling/case. It should override StoreEntry::mayStartSwapOut(). 
=== modified file 'include/splay.h' --- include/splay.h 2014-03-17 21:03:04 +0000 +++ include/splay.h 2014-03-19 03:40:16 +0000 @@ -14,80 +14,86 @@ public: typedef int SPLAYCMP(Value const &a, Value const &b); typedef void SPLAYFREE(Value &); typedef void SPLAYWALKEE(Value const & nodedata, void *state); static void DefaultFree (Value &aValue) {delete aValue;} SplayNode (Value const &); Value data; mutable SplayNode *left; mutable SplayNode *right; void destroy(SPLAYFREE *); void walk(SPLAYWALKEE *, void *callerState); bool empty() const { return this == NULL; } SplayNode const * start() const; SplayNode const * finish() const; SplayNode * remove(const Value data, SPLAYCMP * compare); SplayNode * insert(Value data, SPLAYCMP * compare); template SplayNode * splay(const FindValue &data, int( * compare)(FindValue const &a, Value const &b)) const; + + /// recursively visit left nodes, this node, and then right nodes + template void visit(Visitor &v) const; }; typedef SplayNode splayNode; template class SplayConstIterator; template class SplayIterator; template class Splay { public: typedef V Value; typedef int SPLAYCMP(Value const &a, Value const &b); typedef void SPLAYFREE(Value &); typedef SplayIterator iterator; typedef const SplayConstIterator const_iterator; Splay():head(NULL), elements (0) {} mutable SplayNode * head; template Value const *find (FindValue const &, int( * compare)(FindValue const &a, Value const &b)) const; void insert(Value const &, SPLAYCMP *compare); void remove(Value const &, SPLAYCMP *compare); void destroy(SPLAYFREE *); SplayNode const * start() const; SplayNode const * finish() const; size_t size() const; const_iterator begin() const; const_iterator end() const; + /// recursively visit all nodes, in left-to-right order + template void visit(Visitor &v) const; + size_t elements; }; SQUIDCEXTERN int splayLastResult; SQUIDCEXTERN splayNode *splay_insert(void *, splayNode *, splayNode::SPLAYCMP *); SQUIDCEXTERN splayNode *splay_delete(const 
void *, splayNode *, splayNode::SPLAYCMP *); SQUIDCEXTERN splayNode *splay_splay(const void **, splayNode *, splayNode::SPLAYCMP *); SQUIDCEXTERN void splay_destroy(splayNode *, splayNode::SPLAYFREE *); SQUIDCEXTERN void splay_walk(splayNode *, splayNode::SPLAYWALKEE *, void *callerState); /* inline methods */ template SplayNode::SplayNode (Value const &someData) : data(someData), left(NULL), right (NULL) {} template @@ -258,40 +264,59 @@ SplayNode::splay(FindValue const &dat if (top->right == NULL) break; } l->right = top; /* link left */ l = top; top = top->right; } else { break; } } l->right = top->left; /* assemble */ r->left = top->right; top->left = N.right; top->right = N.left; return top; } template +template +void +SplayNode::visit(Visitor &visitor) const { + if (left) + left->visit(visitor); + visitor(data); + if (right) + right->visit(visitor); +} + +template +template +void +Splay::visit(Visitor &visitor) const { + if (head) + head->visit(visitor); +} + +template template typename Splay::Value const * Splay::find (FindValue const &value, int( * compare)(FindValue const &a, Value const &b)) const { head = head->splay(value, compare); if (splayLastResult != 0) return NULL; return &head->data; } template void Splay::insert(Value const &value, SPLAYCMP *compare) { assert (!find (value, compare)); head = head->insert(value, compare); ++elements; } @@ -343,40 +368,41 @@ template size_t Splay::size() const { return elements; } template const SplayConstIterator Splay::begin() const { return const_iterator(head); } template const SplayConstIterator Splay::end() const { return const_iterator(NULL); } +// XXX: This does not seem to iterate the whole thing in some cases. 
template class SplayConstIterator { public: typedef const V value_type; SplayConstIterator (SplayNode *aNode); bool operator == (SplayConstIterator const &right) const; SplayConstIterator operator ++ (int dummy); SplayConstIterator &operator ++ (); V const & operator * () const; private: void advance(); void addLeftPath(SplayNode *aNode); void init(SplayNode *); Stack *> toVisit; }; template === modified file 'src/MemStore.cc' --- src/MemStore.cc 2014-02-21 16:14:05 +0000 +++ src/MemStore.cc 2014-03-19 03:58:03 +0000 @@ -394,41 +394,41 @@ MemStore::copyFromShmSlice(StoreEntry &e debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e); return false; } else { // more slices are needed assert(!eof); } } debugs(20, 7, "rep pstate: " << rep->pstate); // local memory stores both headers and body so copy regardless of pstate const int64_t offBefore = e.mem_obj->endOffset(); assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write() const int64_t offAfter = e.mem_obj->endOffset(); // expect to write the entire buf because StoreEntry::write() never fails assert(offAfter >= 0 && offBefore <= offAfter && static_cast(offAfter - offBefore) == buf.length); return true; } /// whether we should cache the entry bool -MemStore::shouldCache(const StoreEntry &e) const +MemStore::shouldCache(StoreEntry &e) const { if (e.mem_status == IN_MEMORY) { debugs(20, 5, "already loaded from mem-cache: " << e); return false; } if (e.mem_obj && e.mem_obj->memCache.offset > 0) { debugs(20, 5, "already written to mem-cache: " << e); return false; } if (!e.memoryCachable()) { debugs(20, 7, HERE << "Not memory cachable: " << e); return false; // will not cache due to entry state or properties } assert(e.mem_obj); if (e.mem_obj->vary_headers) { // XXX: We must store/load SerialisedMetaData to cache Vary in RAM @@ -436,40 +436,45 @@ MemStore::shouldCache(const StoreEntry & return false; } const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0 // objects of unknown 
size are not allowed into memory cache, for now if (expectedSize < 0) { debugs(20, 5, "Unknown expected size: " << e); return false; } const int64_t loadedSize = e.mem_obj->endOffset(); const int64_t ramSize = max(loadedSize, expectedSize); if (ramSize > maxObjectSize()) { debugs(20, 5, HERE << "Too big max(" << loadedSize << ", " << expectedSize << "): " << e); return false; // will not cache due to cachable entry size limits } + if (!e.mem_obj->isContiguous()) { + debugs(20, 5, "not contiguous"); + return false; + } + if (!map) { debugs(20, 5, HERE << "No map to mem-cache " << e); return false; } if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) { debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e); return false; } return true; } /// locks map anchor and preps to store the entry in shared memory bool MemStore::startCaching(StoreEntry &e) { sfileno index = 0; Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast(e.key), index); if (!slot) { === modified file 'src/MemStore.h' --- src/MemStore.h 2013-12-31 18:49:41 +0000 +++ src/MemStore.h 2014-03-19 01:33:53 +0000 @@ -41,41 +41,41 @@ public: virtual void get(String const key , STOREGETCLIENT callback, void *cbdata); virtual void init(); virtual uint64_t maxSize() const; virtual uint64_t minSize() const; virtual uint64_t currentSize() const; virtual uint64_t currentCount() const; virtual int64_t maxObjectSize() const; virtual void getStats(StoreInfoStats &stats) const; virtual void stat(StoreEntry &) const; virtual StoreSearch *search(String const url, HttpRequest *); virtual void markForUnlink(StoreEntry &e); virtual void reference(StoreEntry &); virtual bool dereference(StoreEntry &, bool); virtual void maintain(); virtual bool anchorCollapsed(StoreEntry &collapsed, bool &inSync); virtual bool updateCollapsed(StoreEntry &collapsed); static int64_t EntryLimit(); protected: - bool shouldCache(const StoreEntry &e) const; + bool shouldCache(StoreEntry &e) const; bool startCaching(StoreEntry &e); void 
copyToShm(StoreEntry &e); void copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor); bool copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor); bool copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof); void anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor); bool updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor); sfileno reserveSapForWriting(Ipc::Mem::PageId &page); // Ipc::StoreMapCleaner API virtual void noteFreeMapSlice(const sfileno sliceId); private: // TODO: move freeSlots into map Ipc::Mem::Pointer freeSlots; ///< unused map slot IDs MemStoreMap *map; ///< index of mem-cached entries === modified file 'src/Store.h' --- src/Store.h 2014-02-21 10:46:19 +0000 +++ src/Store.h 2014-03-19 03:53:05 +0000 @@ -103,45 +103,48 @@ public: virtual bool mayStartSwapOut(); virtual void trimMemory(const bool preserveSwappable); void abort(); void unlink(); void makePublic(); void makePrivate(); void setPublicKey(); void setPrivateKey(); void expireNow(); void releaseRequest(); void negativeCache(); void cacheNegatively(); /** \todo argh, why both? */ void invokeHandlers(); void purgeMem(); void cacheInMemory(); ///< start or continue storing in memory cache void swapOut(); /// whether we are in the process of writing this entry to disk bool swappingOut() const { return swap_status == SWAPOUT_WRITING; } void swapOutFileClose(int how); const char *url() const; + /// generally cachable (checks agnostic to disk/RAM-specific requirements) + /// common part of mayStartSwapOut() and memoryCachable() + /// TODO: Make private so only those two methods can call this. 
int checkCachable(); int checkNegativeHit() const; int locked() const; int validToSend() const; - bool memoryCachable() const; ///< may be cached in memory + bool memoryCachable(); ///< checkCachable() and can be cached in memory /// if needed, initialize mem_obj member w/o URI-related information MemObject *makeMemObject(); /// initialize mem_obj member (if needed) and supply URI-related info void createMemObject(const char *storeId, const char *logUri, const HttpRequestMethod &aMethod); void dump(int debug_lvl) const; void hashDelete(); void hashInsert(const cache_key *); void registerAbort(STABH * cb, void *); void reset(); void setMemStatus(mem_status_t); void timestampsSet(); void unregisterAbort(); void destroyMemObject(); int checkTooSmall(); void delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback); @@ -255,41 +258,41 @@ public: static NullStoreEntry *getInstance(); bool isNull() { return true; } const char *getMD5Text() const; HttpReply const *getReply() const { return NULL; } void write (StoreIOBuffer) {} bool isEmpty () const {return true;} virtual size_t bytesWanted(Range const aRange, bool ignoreDelayPool = false) const { return aRange.end; } void operator delete(void *address); void complete() {} private: store_client_t storeClientType() const {return STORE_MEM_CLIENT;} char const *getSerialisedMetaData(); - bool mayStartSwapout() {return false;} + virtual bool mayStartSwapOut() { return false; } void trimMemory(const bool preserveSwappable) {} static NullStoreEntry _instance; }; /// \ingroup StoreAPI typedef void (*STOREGETCLIENT) (StoreEntry *, void *cbdata); /** \ingroup StoreAPI * Abstract base class that will replace the whole store and swapdir interface. 
*/ class Store : public RefCountable { public: /** The root store */ static Store &Root() { if (CurrentRoot == NULL) === modified file 'src/SwapDir.h' --- src/SwapDir.h 2013-07-02 19:23:49 +0000 +++ src/SwapDir.h 2014-03-19 01:32:16 +0000 @@ -87,41 +87,41 @@ public: virtual int64_t maxObjectSize() const; virtual void getStats(StoreInfoStats &stats) const; virtual void stat(StoreEntry &) const; virtual void sync(); /* Sync the store prior to shutdown */ virtual StoreSearch *search(String const url, HttpRequest *); virtual void reference(StoreEntry &); /* Reference this object */ virtual bool dereference(StoreEntry &, bool); /* Unreference this object */ /* the number of store dirs being rebuilt. */ static int store_dirs_rebuilding; private: void createOneStore(Store &aStore); StoreEntry *find(const cache_key *key); - bool keepForLocalMemoryCache(const StoreEntry &e) const; + bool keepForLocalMemoryCache(StoreEntry &e) const; bool anchorCollapsed(StoreEntry &collapsed, bool &inSync); bool anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync); StorePointer swapDir; ///< summary view of all disk caches MemStore *memStore; ///< memory cache /// A shared table of public store entries that do not know whether they /// will belong to a memory cache, a disk cache, or will be uncachable /// when the response header comes. Used for SMP collapsed forwarding. 
Transients *transients; }; /* migrating from the Config based list of swapdirs */ void allocate_new_swapdir(SquidConfig::_cacheSwap *); void free_cachedir(SquidConfig::_cacheSwap * swap); extern OBJH storeDirStats; char *storeDirSwapLogFile(int, const char *); char *storeSwapFullPath(int, char *); char *storeSwapSubSubDir(int, char *); const char *storeSwapPath(int); === modified file 'src/stmem.cc' --- src/stmem.cc 2014-03-18 22:49:20 +0000 +++ src/stmem.cc 2014-03-19 03:40:24 +0000 @@ -78,48 +78,50 @@ mem_hdr::endOffset () const return result; } void mem_hdr::freeContent() { nodes.destroy(SplayNode::DefaultFree); inmem_hi = 0; debugs(19, 9, HERE << this << " hi: " << inmem_hi); } bool mem_hdr::unlink(mem_node *aNode) { if (aNode->write_pending) { debugs(0, DBG_CRITICAL, "cannot unlink mem_node " << aNode << " while write_pending"); return false; } + debugs(19, 8, this << " removing " << aNode); nodes.remove (aNode, NodeCompare); delete aNode; return true; } int64_t mem_hdr::freeDataUpto(int64_t target_offset) { + debugs(19, 8, this << " up to " << target_offset); /* keep the last one to avoid change to other part of code */ SplayNode const * theStart; while ((theStart = nodes.start())) { if (theStart == nodes.finish()) break; if (theStart->data->end() > target_offset ) break; if (!unlink(theStart->data)) break; } assert (lowestOffset () <= target_offset); return lowestOffset (); } int @@ -215,41 +217,41 @@ mem_hdr::copyAvailable(mem_node *aNode, assert (aNode->nodeBuffer.offset <= location); assert (aNode->end() > location); size_t copyOffset = location - aNode->nodeBuffer.offset; size_t copyLen = min(amount, aNode->nodeBuffer.length - copyOffset); memcpy(target, aNode->nodeBuffer.data + copyOffset, copyLen); return copyLen; } void mem_hdr::debugDump() const { debugs (19, 0, "mem_hdr::debugDump: lowest offset: " << lowestOffset() << " highest offset + 1: " << endOffset() << "."); std::ostringstream result; PointerPrinter foo(result, " - "); - for_each 
(getNodes().begin(), getNodes().end(), foo); + getNodes().visit(foo); debugs (19, 0, "mem_hdr::debugDump: Current available data is: " << result.str() << "."); } /* FIXME: how do we deal with sparse results - * where we have (say) * 0-500 and 1000-1500, but are asked for * 0-2000 * Partial answer: * we supply 0-500 and stop. */ ssize_t mem_hdr::copy(StoreIOBuffer const &target) const { assert(target.range().end > target.range().start); debugs(19, 6, "memCopy: " << this << " " << target.range()); /* we shouldn't ever ask for absent offsets */ if (nodes.size() == 0) { === modified file 'src/store.cc' --- src/store.cc 2014-03-18 00:51:48 +0000 +++ src/store.cc 2014-03-19 03:43:59 +0000 @@ -932,58 +932,67 @@ storeTooManyDiskFilesOpen(void) return 0; } int StoreEntry::checkTooSmall() { if (EBIT_TEST(flags, ENTRY_SPECIAL)) return 0; if (STORE_OK == store_status) if (mem_obj->object_sz < 0 || mem_obj->object_sz < Config.Store.minObjectSize) return 1; if (getReply()->content_length > -1) if (getReply()->content_length < Config.Store.minObjectSize) return 1; return 0; } -// TODO: remove checks already performed by swapoutPossible() // TODO: move "too many open..." checks outside -- we are called too early/late int StoreEntry::checkCachable() { + // XXX: This method is used for both memory and disk caches, but some + // checks are specific to disk caches. Move them to mayStartSwapOut(). + + // XXX: This method may be called several times, sometimes with different + // outcomes, making store_check_cachable_hist counters misleading. + + // check this first to optimize handling of repeated calls for uncachables + if (EBIT_TEST(flags, RELEASE_REQUEST)) { + debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable"); + ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename? 
+ return 0; // avoid rerequesting release below + } + #if CACHE_ALL_METHODS if (mem_obj->method != Http::METHOD_GET) { debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method"); ++store_check_cachable_hist.no.non_get; } else #endif if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) { debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length"); ++store_check_cachable_hist.no.wrong_content_length; - } else if (EBIT_TEST(flags, RELEASE_REQUEST)) { - debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable"); - ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename? } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) { debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached"); ++store_check_cachable_hist.no.negative_cached; return 0; /* avoid release call below */ } else if ((getReply()->content_length > 0 && getReply()->content_length > store_maxobjsize) || mem_obj->endOffset() > store_maxobjsize) { debugs(20, 2, "StoreEntry::checkCachable: NO: too big"); ++store_check_cachable_hist.no.too_big; } else if (checkTooSmall()) { debugs(20, 2, "StoreEntry::checkCachable: NO: too small"); ++store_check_cachable_hist.no.too_small; } else if (EBIT_TEST(flags, KEY_PRIVATE)) { debugs(20, 3, "StoreEntry::checkCachable: NO: private key"); ++store_check_cachable_hist.no.private_key; } else if (swap_status != SWAPOUT_NONE) { /* * here we checked the swap_status because the remaining * cases are only relevant only if we haven't started swapping * out the object yet. 
@@ -1383,42 +1392,45 @@ storeInit(void) storeDigestInit(); storeLogOpen(); eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1); Store::Root().init(); storeRebuildStart(); storeRegisterWithCacheManager(); } void storeConfigure(void) { store_swap_high = (long) (((float) Store::Root().maxSize() * (float) Config.Swap.highWaterMark) / (float) 100); store_swap_low = (long) (((float) Store::Root().maxSize() * (float) Config.Swap.lowWaterMark) / (float) 100); store_pages_max = Config.memMaxSize / sizeof(mem_node); } bool -StoreEntry::memoryCachable() const +StoreEntry::memoryCachable() { + if (!checkCachable()) + return 0; + if (mem_obj == NULL) return 0; if (mem_obj->data_hdr.size() == 0) return 0; if (mem_obj->inmem_lo != 0) return 0; if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1) return 0; return 1; } int StoreEntry::checkNegativeHit() const { if (!EBIT_TEST(flags, ENTRY_NEGCACHED)) return 0; === modified file 'src/store_dir.cc' --- src/store_dir.cc 2014-03-18 00:51:48 +0000 +++ src/store_dir.cc 2014-03-19 01:32:16 +0000 @@ -848,41 +848,41 @@ StoreController::anchorCollapsedOnDisk(S } debugs(20, 4, "none of " << Config.cacheSwap.n_configured << " cache_dirs have " << collapsed); return false; } void StoreController::markForUnlink(StoreEntry &e) { if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) transients->markForUnlink(e); if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0) memStore->markForUnlink(e); if (e.swap_filen >= 0) e.store()->markForUnlink(e); } // move this into [non-shared] memory cache class when we have one /// whether e should be kept in local RAM for possible future caching bool -StoreController::keepForLocalMemoryCache(const StoreEntry &e) const +StoreController::keepForLocalMemoryCache(StoreEntry &e) const { if (!e.memoryCachable()) return false; // does the current and expected size obey memory caching limits? 
assert(e.mem_obj); const int64_t loadedSize = e.mem_obj->endOffset(); const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0 const int64_t ramSize = max(loadedSize, expectedSize); const int64_t ramLimit = min( static_cast(Config.memMaxSize), static_cast(Config.Store.maxInMemObjSize)); return ramSize <= ramLimit; } void StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable) { bool keepInLocalMemory = false; if (memStore) === modified file 'src/tests/stub_store.cc' --- src/tests/stub_store.cc 2014-01-03 10:32:53 +0000 +++ src/tests/stub_store.cc 2014-03-19 04:01:26 +0000 @@ -33,41 +33,41 @@ void StoreEntry::replaceHttpReply(HttpRe bool StoreEntry::mayStartSwapOut() STUB_RETVAL(false) void StoreEntry::trimMemory(const bool preserveSwappable) STUB void StoreEntry::abort() STUB void StoreEntry::unlink() STUB void StoreEntry::makePublic() STUB void StoreEntry::makePrivate() STUB void StoreEntry::setPublicKey() STUB void StoreEntry::setPrivateKey() STUB void StoreEntry::expireNow() STUB void StoreEntry::releaseRequest() STUB void StoreEntry::negativeCache() STUB void StoreEntry::cacheNegatively() STUB void StoreEntry::purgeMem() STUB void StoreEntry::swapOut() STUB void StoreEntry::swapOutFileClose(int how) STUB const char *StoreEntry::url() const STUB_RETVAL(NULL) int StoreEntry::checkCachable() STUB_RETVAL(0) int StoreEntry::checkNegativeHit() const STUB_RETVAL(0) int StoreEntry::locked() const STUB_RETVAL(0) int StoreEntry::validToSend() const STUB_RETVAL(0) -bool StoreEntry::memoryCachable() const STUB_RETVAL(false) +bool StoreEntry::memoryCachable() STUB_RETVAL(false) MemObject *StoreEntry::makeMemObject() STUB_RETVAL(NULL) void StoreEntry::createMemObject(const char *, const char *, const HttpRequestMethod &aMethod) STUB void StoreEntry::dump(int debug_lvl) const STUB void StoreEntry::hashDelete() STUB void StoreEntry::hashInsert(const cache_key *) STUB void StoreEntry::registerAbort(STABH * cb, void *) STUB void 
StoreEntry::reset() STUB void StoreEntry::setMemStatus(mem_status_t) STUB void StoreEntry::timestampsSet() STUB void StoreEntry::unregisterAbort() STUB void StoreEntry::destroyMemObject() STUB int StoreEntry::checkTooSmall() STUB_RETVAL(0) void StoreEntry::delayAwareRead(const Comm::ConnectionPointer&, char *buf, int len, AsyncCall::Pointer callback) STUB void StoreEntry::setNoDelay (bool const) STUB bool StoreEntry::modifiedSince(HttpRequest * request) const STUB_RETVAL(false) bool StoreEntry::hasIfMatchEtag(const HttpRequest &request) const STUB_RETVAL(false) bool StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const STUB_RETVAL(false) RefCount StoreEntry::store() const STUB_RETVAL(NULL) size_t StoreEntry::inUseCount() STUB_RETVAL(0) void StoreEntry::getPublicByRequestMethod(StoreClient * aClient, HttpRequest * request, const HttpRequestMethod& method) STUB