Do not assert on native FTP ERR_TOO_BIG. Do not check for ERR_TOO_BIG twice.

The assertion occurred because both the FTP request parser and the generic
ConnStateData::checkHeaderLimits() code would try to write their own error
message to the user.

Reworked all error reporting code in the FTP parser to avoid writing early
responses (which bypassed the overall transaction flow, with various negative
side effects such as lack of logging).

Removed ConnStateData::checkHeaderLimits(): We already have protocol-specific
checks for huge HTTP and FTP requests. There is no point in duplicating them.
Centralizing them sounds like a good idea, but a general checkHeaderLimits()
cannot produce the protocol-specific error messages that we need, so it hurts
more than it helps. Moreover, checkHeaderLimits() was handling errors
differently than the protocol parsing code, making the code more complex
overall. All that remains of the checkHeaderLimits() code now is a single
Must(), checking that the protocol parsers did what they were supposed to do:
return NULL to request more data after checking any applicable limits. If a
parser does not (a Squid bug!), the ConnStateData job gets killed (and the
connection gets closed) as the last resort.

Added clientReplyContext::setReplyToReply() and StoreEntry::storeErrorResponse()
to handle storing a response to an FTP command parsing error. The old code used
ErrorState to store parsing errors, but ErrorState is still HTTP-specific and
cannot relay the right FTP codes/reasons to the user. The setReplyToReply()
name sounds silly, but it matches the existing setReplyTo*() naming scheme well.

Make sure parsed native FTP command tokens are not even close to the String
buffer limit. These checks are not a firm guarantee, but they are better than
nothing until we replace String.

Handle ClientSocketContext registration centrally because all parsers need it.

Call quitAfterError() on fatal native FTP errors. This is probably not
necessary due to the fssError handling code that closes the FTP control
connection, but it adds helpful debugging and brings us closer to the HTTP
error handling code.

Described ConnStateData::clientParseRequests().

=== modified file 'src/Store.h' --- src/Store.h 2014-06-25 00:14:36 +0000 +++ src/Store.h 2014-08-12 17:42:51 +0000 @@ -80,40 +80,42 @@ public: StoreEntry(); virtual ~StoreEntry(); virtual HttpReply const *getReply() const; virtual void write (StoreIOBuffer); /** Check if the Store entry is emtpty * \retval true Store contains 0 bytes of data. * \retval false Store contains 1 or more bytes of data. * \retval false Store contains negative content !!!!!! */ virtual bool isEmpty() const { assert (mem_obj); return mem_obj->endOffset() == 0; } virtual bool isAccepting() const; virtual size_t bytesWanted(Range const aRange, bool ignoreDelayPool = false) const; virtual void complete(); virtual store_client_t storeClientType() const; virtual char const *getSerialisedMetaData(); + /// Store a prepared error response. MemObject locks the reply object.
+ void storeErrorResponse(HttpReply *reply); void replaceHttpReply(HttpReply *, bool andStartWriting = true); void startWriting(); ///< pack and write reply headers and, maybe, body /// whether we may start writing to disk (now or in the future) virtual bool mayStartSwapOut(); virtual void trimMemory(const bool preserveSwappable); // called when a decision to cache in memory has been made void memOutDecision(const bool willCacheInRam); // called when a decision to cache on disk has been made void swapOutDecision(const MemObject::SwapOut::Decision &decision); void abort(); void unlink(); void makePublic(); void makePrivate(); void setPublicKey(); void setPrivateKey(); void expireNow(); void releaseRequest(); void negativeCache(); === modified file 'src/client_side.cc' --- src/client_side.cc 2014-08-11 16:09:06 +0000 +++ src/client_side.cc 2014-08-12 23:20:15 +0000 @@ -180,41 +180,40 @@ public: private: AnyP::PortCfgPointer portCfg; ///< from HttpPortList Ipc::FdNoteId portTypeNote; ///< Type of IPC socket being opened Subscription::Pointer sub; ///< The handler to be subscribed for this connetion listener }; static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub); /* our socket-related context */ CBDATA_CLASS_INIT(ClientSocketContext); /* Local functions */ static IOCB clientWriteComplete; static IOCB clientWriteBodyComplete; static IOACB httpAccept; #if USE_OPENSSL static IOACB httpsAccept; #endif static CTCB clientLifetimeTimeout; -static ClientSocketContext *parseHttpRequestAbort(ConnStateData * conn, const char *uri); #if USE_IDENT static IDCB clientIdentDone; #endif static int clientIsContentLengthValid(HttpRequest * r); static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength); static void clientUpdateStatHistCounters(LogTags logType, int svc_time); static void clientUpdateStatCounters(LogTags logType); static void clientUpdateHierCounters(HierarchyLogEntry *); static bool clientPingHasFinished(ping_data const *aPing); void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &); #ifndef PURIFY static bool connIsUsable(ConnStateData * conn); #endif static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest, ConnStateData * conn); static void clientUpdateSocketStats(LogTags logType, size_t size); char *skipLeadingSpace(char *aString); static void connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount); @@ -1889,51 +1888,49 @@ ClientSocketContext::writeComplete(const debugs(33, 5, conn << "Stream complete, keepalive is " << http->request->flags.proxyKeepalive); if (http->request->flags.proxyKeepalive) keepaliveNextRequest(); else initiateClose("STREAM_COMPLETE NOKEEPALIVE"); return; case STREAM_UNPLANNED_COMPLETE: initiateClose("STREAM_UNPLANNED_COMPLETE"); return; case STREAM_FAILED: initiateClose("STREAM_FAILED"); return; default: fatal("Hit unreachable code in clientWriteComplete\n"); } } -static ClientSocketContext * -parseHttpRequestAbort(ConnStateData * csd, const char *uri) +ClientSocketContext * +ConnStateData::abortRequestParsing(const char *const uri) { - ClientHttpRequest *http; - ClientSocketContext *context; - StoreIOBuffer tempBuffer; - http = new ClientHttpRequest(csd); - http->req_sz = csd->in.buf.length(); + ClientHttpRequest *http = new ClientHttpRequest(this); + http->req_sz = in.buf.length(); http->uri = xstrdup(uri); setLogUri (http, uri); - context = new ClientSocketContext(csd->clientConnection, http); + 
ClientSocketContext *context = new ClientSocketContext(clientConnection, http); + StoreIOBuffer tempBuffer; tempBuffer.data = context->reqbuf; tempBuffer.length = HTTP_REQBUF_SZ; clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach, clientReplyStatus, new clientReplyContext(http), clientSocketRecipient, clientSocketDetach, context, tempBuffer); return context; } char * skipLeadingSpace(char *aString) { char *result = aString; while (xisspace(*aString)) ++aString; return result; } /** @@ -2024,41 +2021,41 @@ prepareAcceleratedURL(ConnStateData * co char ipbuf[MAX_IPSTRLEN]; http->flags.accel = true; /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */ if (strncasecmp(url, "cache_object://", 15) == 0) return; /* already in good shape */ if (*url != '/') { if (conn->port->vhost) return; /* already in good shape */ /* else we need to ignore the host name */ url = strstr(url, "//"); #if SHOULD_REJECT_UNKNOWN_URLS if (!url) { hp->request_parse_status = Http::scBadRequest; - return parseHttpRequestAbort(conn, "error:invalid-request"); + return conn->abortRequestParsing("error:invalid-request"); } #endif if (url) url = strchr(url + 2, '/'); if (!url) url = (char *) "/"; } if (vport < 0) vport = http->getConn()->clientConnection->local.port(); const bool switchedToHttps = conn->switchedToHttps(); const bool tryHostHeader = vhost || switchedToHttps; if (tryHostHeader && (host = mime_get_header(req_hdr, "Host")) != NULL) { debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport); char thost[256]; if (vport > 0) { thost[0] = '\0'; @@ -2152,105 +2149,105 @@ parseHttpRequest(ConnStateData *csd, Htt { char *req_hdr = NULL; char *end; size_t req_sz; ClientHttpRequest *http; ClientSocketContext *result; StoreIOBuffer tempBuffer; int r; /* pre-set these values to make aborting simpler */ *method_p = Http::METHOD_NONE; /* NP: don't be tempted to move this down or remove again. * It's the only DDoS protection old-String has against long URL */ if ( hp->bufsiz <= 0) { debugs(33, 5, "Incomplete request, waiting for end of request line"); return NULL; } else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) { debugs(33, 5, "parseHttpRequest: Too large request"); hp->request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); + return csd->abortRequestParsing("error:request-too-large"); } /* Attempt to parse the first line; this'll define the method, url, version and header begin */ r = HttpParserParseReqLine(hp); if (r == 0) { debugs(33, 5, "Incomplete request, waiting for end of request line"); return NULL; } if (r == -1) { - return parseHttpRequestAbort(csd, "error:invalid-request"); + return csd->abortRequestParsing("error:invalid-request"); } /* Request line is valid here .. */ *http_ver = Http::ProtocolVersion(hp->req.v_maj, hp->req.v_min); /* This call scans the entire request, not just the headers */ if (hp->req.v_maj > 0) { if ((req_sz = headersEnd(hp->buf, hp->bufsiz)) == 0) { debugs(33, 5, "Incomplete request, waiting for end of headers"); return NULL; } } else { debugs(33, 3, "parseHttpRequest: Missing HTTP identifier"); req_sz = HttpParserReqSz(hp); } /* We know the whole request is in hp->buf now */ assert(req_sz <= (size_t) hp->bufsiz); /* Will the following be true with HTTP/0.9 requests? probably not .. 
*/ /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */ assert(req_sz > 0); hp->hdr_end = req_sz - 1; hp->hdr_start = hp->req.end + 1; /* Enforce max_request_size */ if (req_sz >= Config.maxRequestHeaderSize) { debugs(33, 5, "parseHttpRequest: Too large request"); hp->request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); + return csd->abortRequestParsing("error:request-too-large"); } /* Set method_p */ *method_p = HttpRequestMethod(&hp->buf[hp->req.m_start], &hp->buf[hp->req.m_end]+1); /* deny CONNECT via accelerated ports */ if (*method_p == Http::METHOD_CONNECT && csd->port != NULL && csd->port->flags.accelSurrogate) { debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << csd->port->transport.protocol << " Accelerator port " << csd->port->s.port()); /* XXX need a way to say "this many character length string" */ debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->buf); hp->request_parse_status = Http::scMethodNotAllowed; - return parseHttpRequestAbort(csd, "error:method-not-allowed"); + return csd->abortRequestParsing("error:method-not-allowed"); } if (*method_p == Http::METHOD_NONE) { /* XXX need a way to say "this many character length string" */ debugs(33, DBG_IMPORTANT, "clientParseRequestMethod: Unsupported method in request '" << hp->buf << "'"); hp->request_parse_status = Http::scMethodNotAllowed; - return parseHttpRequestAbort(csd, "error:unsupported-request-method"); + return csd->abortRequestParsing("error:unsupported-request-method"); } /* * Process headers after request line * TODO: Use httpRequestParse here. */ /* XXX this code should be modified to take a const char * later! */ req_hdr = (char *) hp->buf + hp->req.end + 1; debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr << "}"); end = (char *) hp->buf + hp->hdr_end; debugs(33, 3, "parseHttpRequest: end = {" << end << "}"); debugs(33, 3, "parseHttpRequest: prefix_sz = " << (int) HttpParserRequestLen(hp) << ", req_line_sz = " << HttpParserReqSz(hp)); /* Ok, all headers are received */ @@ -2398,61 +2395,40 @@ ConnStateData::connFinishedWithConn(int } return 0; } void ConnStateData::consumeInput(const size_t byteCount) { assert(byteCount > 0 && byteCount <= in.buf.length()); in.buf.consume(byteCount); debugs(33, 5, "in.buf has " << in.buf.length() << " unused bytes"); } // TODO: Remove when renaming ConnStateData void connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount) { conn->consumeInput(byteCount); } -/// respond with ERR_TOO_BIG if request header exceeds request_header_max_size -void -ConnStateData::checkHeaderLimits() -{ - if (in.buf.length() < Config.maxRequestHeaderSize) - return; // can accumulte more header data - - debugs(33, 3, "Request header is too large (" << in.buf.length() << " > " << - Config.maxRequestHeaderSize << " bytes)"); - - ClientSocketContext *context = parseHttpRequestAbort(this, "error:request-too-large"); - clientStreamNode *node = context->getClientReplyContext(); - clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); - assert (repContext); - repContext->setReplyToError(ERR_TOO_BIG, - Http::scBadRequest, Http::METHOD_NONE, NULL, - clientConnection->remote, NULL, NULL, NULL); - context->registerWithConn(); - context->pullData(); -} - void ConnStateData::clientAfterReadingRequests() { // Were we expecting to read more request body from half-closed connection? 
if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) { debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection); clientConnection->close(); return; } if (flags.readMore) readSomeData(); } void ConnStateData::quitAfterError(HttpRequest *request) { // From HTTP p.o.v., we do not have to close after every error detected // at the client-side, but many such errors do require closure and the // client-side code is bad at handling errors so we play it safe. @@ -2879,89 +2855,85 @@ bool ConnStateData::concurrentRequestQueueFilled() const { const int existingRequestCount = getConcurrentRequestCount(); // default to the configured pipeline size. // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue const int concurrentRequestLimit = pipelinePrefetchMax() + 1; // when queue filled already we cant add more. if (existingRequestCount >= concurrentRequestLimit) { debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")"); debugs(33, 5, clientConnection << " deferring new request until one is done"); return true; } return false; } /** * Attempt to parse one or more requests from the input buffer. - * If a request is successfully parsed, even if the next request - * is only partially parsed, it will return TRUE. + * Returns true after completing parsing of at least one request [header]. That + * includes cases where parsing ended with an error (e.g., a huge request). */ bool ConnStateData::clientParseRequests() { bool parsed_req = false; debugs(33, 5, HERE << clientConnection << ": attempting to parse"); // Loop while we have read bytes that are not needed for producing the body // On errors, bodyPipe may become nil, but readMore will be cleared while (!in.buf.isEmpty() && !bodyPipe && flags.readMore) { connStripBufferWhitespace(this); /* Don't try to parse if the buffer is empty */ if (in.buf.isEmpty()) break; /* Limit the number of concurrent requests */ if (concurrentRequestQueueFilled()) break; Http::ProtocolVersion http_ver; - ClientSocketContext *context = parseOneRequest(http_ver); - - /* partial or incomplete request */ - if (!context) { - // TODO: why parseHttpRequest can just return parseHttpRequestAbort - // (which becomes context) but checkHeaderLimits cannot? - checkHeaderLimits(); - break; - } - - /* status -1 or 1 */ - if (context) { - debugs(33, 5, HERE << clientConnection << ": parsed a request"); + if (ClientSocketContext *context = parseOneRequest(http_ver)) { + debugs(33, 5, clientConnection << ": done parsing a request"); AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout", CommTimeoutCbPtrFun(clientLifetimeTimeout, context->http)); commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall); + context->registerWithConn(); + processParsedRequest(context, http_ver); parsed_req = true; // XXX: do we really need to parse everything right NOW ? if (context->mayUseConnection()) { debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection"); break; } + } else { + debugs(33, 5, clientConnection << ": not enough request data: " << + in.buf.length() << " < " << Config.maxRequestHeaderSize); + Must(in.buf.length() < Config.maxRequestHeaderSize); + break; } } /* XXX where to 'finish' the parsing pass? 
*/ return parsed_req; } void ConnStateData::clientReadRequest(const CommIoCbParams &io) { debugs(33,5, io.conn); Must(reading()); reader = NULL; /* Bail out quickly on Comm::ERR_CLOSING - close handlers will tidy up */ if (io.flag == Comm::ERR_CLOSING) { debugs(33,5, io.conn << " closing Bailout."); return; } === modified file 'src/client_side.h' --- src/client_side.h 2014-08-10 02:28:33 +0000 +++ src/client_side.h 2014-08-12 19:19:06 +0000 @@ -192,41 +192,40 @@ class ServerBump; * If the above can be confirmed accurate we can call this object PipelineManager or similar */ class ConnStateData : public BodyProducer, public HttpControlMsgSink { public: explicit ConnStateData(const MasterXaction::Pointer &xact); virtual ~ConnStateData(); void readSomeData(); bool areAllContextsForThisConnection() const; void freeAllContexts(); void notifyAllContexts(const int xerrno); ///< tell everybody about the err /// Traffic parsing bool clientParseRequests(); void readNextRequest(); ClientSocketContext::Pointer getCurrentContext() const; void addContextToQueue(ClientSocketContext * context); int getConcurrentRequestCount() const; bool isOpen() const; - void checkHeaderLimits(); // HttpControlMsgSink API virtual void sendControlMsg(HttpControlMsg msg); // Client TCP connection details from comm layer. Comm::ConnectionPointer clientConnection; struct In { In(); ~In(); bool maybeMakeSpaceAvailable(); ChunkedCodingParser *bodyParser; ///< parses chunked request body SBuf buf; } in; /** number of body bytes we need to comm_read for the "current" request * * \retval 0 We do not need to read any [more] body bytes * \retval negative May need more but do not know how many; could be zero! @@ -390,50 +389,57 @@ public: Ssl::BumpMode sslBumpMode; ///< ssl_bump decision (Ssl::bumpEnd if n/a). #else bool switchedToHttps() const { return false; } #endif /* clt_conn_tag=tag annotation access */ const SBuf &connectionTag() const { return connectionTag_; } void connectionTag(const char *aTag) { connectionTag_ = aTag; } /// handle a control message received by context from a peer and call back virtual void writeControlMsgAndCall(ClientSocketContext *context, HttpReply *rep, AsyncCall::Pointer &call) = 0; /// ClientStream calls this to supply response header (once) and data /// for the current ClientSocketContext. virtual void handleReply(HttpReply *header, StoreIOBuffer receivedData) = 0; /// remove no longer needed leading bytes from the input buffer void consumeInput(const size_t byteCount); + /* TODO: Make the methods below (at least) non-public when possible. 
*/ + + /// stop parsing the request and create context for relaying error info + ClientSocketContext *abortRequestParsing(const char *const errUri); + protected: void startDechunkingRequest(); void finishDechunkingRequest(bool withSuccess); void abortChunkedRequestBody(const err_type error); err_type handleChunkedRequestBody(size_t &putSize); void startPinnedConnectionMonitoring(); void clientPinnedConnectionRead(const CommIoCbParams &io); /// parse input buffer prefix into a single transfer protocol request + /// return NULL to request more header bytes (after checking any limits) + /// use abortRequestParsing() to handle parsing errors w/o creating request virtual ClientSocketContext *parseOneRequest(Http::ProtocolVersion &ver) = 0; /// start processing a freshly parsed request virtual void processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &ver) = 0; /// returning N allows a pipeline of 1+N requests (see pipeline_prefetch) virtual int pipelinePrefetchMax() const; /// timeout to use when waiting for the next request virtual time_t idleTimeout() const = 0; BodyPipe::Pointer bodyPipe; ///< set when we are reading request body private: int connFinishedWithConn(int size); void clientAfterReadingRequests(); bool concurrentRequestQueueFilled() const; void pinNewConnection(const Comm::ConnectionPointer &pinServer, HttpRequest *request, CachePeer *aPeer, bool auth); === modified file 'src/client_side_reply.cc' --- src/client_side_reply.cc 2014-07-31 08:54:46 +0000 +++ src/client_side_reply.cc 2014-08-12 17:47:56 +0000 @@ -124,40 +124,58 @@ clientReplyContext::setReplyToError( setReplyToError(method, errstate); } void clientReplyContext::setReplyToError(const HttpRequestMethod& method, ErrorState *errstate) { if (errstate->httpStatus == Http::scNotImplemented && http->request) /* prevent confusion over whether we default to persistent or not */ http->request->flags.proxyKeepalive = false; http->al->http.code = errstate->httpStatus; if (http->request) http->request->ignoreRange("responding with a Squid-generated error"); createStoreEntry(method, RequestFlags()); assert(errstate->callback_data == NULL); errorAppendEntry(http->storeEntry(), errstate); /* Now the caller reads to get this */ } +void +clientReplyContext::setReplyToReply(HttpReply *futureReply) +{ + Must(futureReply); + http->al->http.code = futureReply->sline.status(); + + HttpRequestMethod method; + if (http->request) { // nil on responses to unparsable requests + http->request->ignoreRange("responding with a Squid-generated reply"); + method = http->request->method; + } + + createStoreEntry(method, RequestFlags()); + + http->storeEntry()->storeErrorResponse(futureReply); + /* Now the caller reads to get futureReply */ +} + // Assumes that the entry contains an error response without Content-Range. // To use with regular entries, make HTTP Range header removal conditional. 
void clientReplyContext::setReplyToStoreEntry(StoreEntry *entry, const char *reason) { entry->lock("clientReplyContext::setReplyToStoreEntry"); // removeClientStoreReference() unlocks sc = storeClientListAdd(entry, this); #if USE_DELAY_POOLS sc->setDelayId(DelayId::DelayClient(http)); #endif reqofs = 0; reqsize = 0; if (http->request) http->request->ignoreRange(reason); flags.storelogiccomplete = 1; http->storeEntry(entry); } void clientReplyContext::removeStoreReference(store_client ** scp, StoreEntry ** ep) === modified file 'src/client_side_reply.h' --- src/client_side_reply.h 2014-08-04 21:44:31 +0000 +++ src/client_side_reply.h 2014-08-12 17:29:20 +0000 @@ -62,40 +62,42 @@ public: void purgeFoundObject(StoreEntry *entry); void sendClientUpstreamResponse(); void purgeDoPurgeGet(StoreEntry *entry); void purgeDoPurgeHead(StoreEntry *entry); void doGetMoreData(); void identifyStoreObject(); void identifyFoundObject(StoreEntry *entry); int storeOKTransferDone() const; int storeNotOKTransferDone() const; /// replaces current response store entry with the given one void setReplyToStoreEntry(StoreEntry *e, const char *reason); /// builds error using clientBuildError() and calls setReplyToError() below void setReplyToError(err_type, Http::StatusCode, const HttpRequestMethod&, char const *, Ip::Address &, HttpRequest *, const char *, #if USE_AUTH Auth::UserRequest::Pointer); #else void * unused); #endif /// creates a store entry for the reply and appends err to it void setReplyToError(const HttpRequestMethod& method, ErrorState *err); + /// creates a store entry for the reply and appends error reply to it + void setReplyToReply(HttpReply *reply); void createStoreEntry(const HttpRequestMethod& m, RequestFlags flags); void removeStoreReference(store_client ** scp, StoreEntry ** ep); void removeClientStoreReference(store_client **scp, ClientHttpRequest *http); void startError(ErrorState * err); void processExpired(); clientStream_status_t replyStatus(); void processMiss(); void traceReply(clientStreamNode * node); const char *storeId() const { return (http->store_id.size() > 0 ? 
http->store_id.termedBuf() : http->uri); } Http::StatusCode purgeStatus; /* state variable - replace with class to handle storeentries at some point */ int lookingforstore; virtual void created (StoreEntry *newEntry); ClientHttpRequest *http; int headers_sz; store_client *sc; /* The store_client we're using */ StoreIOBuffer tempBuffer; /* For use in validating requests via IMS */ === modified file 'src/errorpage.cc' --- src/errorpage.cc 2014-08-05 00:17:16 +0000 +++ src/errorpage.cc 2014-08-12 17:41:22 +0000 @@ -626,48 +626,41 @@ errorAppendEntry(StoreEntry * entry, Err if (entry->store_status != STORE_PENDING) { debugs(4, 2, "Skipping error page due to store_status: " << entry->store_status); /* * If the entry is not STORE_PENDING, then no clients * care about it, and we don't need to generate an * error message */ assert(EBIT_TEST(entry->flags, ENTRY_ABORTED)); assert(entry->mem_obj->nclients == 0); delete err; return; } if (err->page_id == TCP_RESET) { if (err->request) { debugs(4, 2, "RSTing this reply"); err->request->flags.resetTcp = true; } } - entry->lock("errorAppendEntry"); - entry->buffer(); - entry->replaceHttpReply( err->BuildHttpReply() ); - entry->flush(); - entry->complete(); - entry->negativeCache(); - entry->releaseRequest(); - entry->unlock("errorAppendEntry"); + entry->storeErrorResponse(err->BuildHttpReply()); delete err; } void errorSend(const Comm::ConnectionPointer &conn, ErrorState * err) { HttpReply *rep; debugs(4, 3, HERE << conn << ", err=" << err); assert(Comm::IsConnOpen(conn)); rep = err->BuildHttpReply(); MemBuf *mb = rep->pack(); AsyncCall::Pointer call = commCbCall(78, 5, "errorSendComplete", CommIoCbPtrFun(&errorSendComplete, err)); Comm::Write(conn, mb, call); delete mb; delete rep; } === modified file 'src/servers/FtpServer.cc' --- src/servers/FtpServer.cc 2014-08-10 23:18:33 +0000 +++ src/servers/FtpServer.cc 2014-08-12 23:29:14 +0000 @@ -104,70 +104,64 @@ Ftp::Server::maybeReadUploadData() debugs(33, 4, dataConn << ": reading FTP data..."); typedef CommCbMemFunT Dialer; reader = JobCallback(33, 5, Dialer, this, Ftp::Server::readUploadData); comm_read(dataConn, uploadBuf + uploadAvailSize, availSpace, reader); } /// react to the freshly parsed request void Ftp::Server::doProcessRequest() { // zero pipelinePrefetchMax() ensures that there is only parsed request ClientSocketContext::Pointer context = getCurrentContext(); Must(context != NULL); Must(getConcurrentRequestCount() == 1); ClientHttpRequest *const http = context->http; assert(http != NULL); - HttpRequest *const request = http->request; - assert(request != NULL); - debugs(33, 9, request); - HttpHeader &header = request->header; - assert(header.has(HDR_FTP_COMMAND)); - String &cmd = header.findEntry(HDR_FTP_COMMAND)->value; - assert(header.has(HDR_FTP_ARGUMENTS)); - String ¶ms = header.findEntry(HDR_FTP_ARGUMENTS)->value; - - const bool fwd = !http->storeEntry() && handleRequest(cmd, params); + HttpRequest *const request = http->request; + Must(http->storeEntry() || request); + const bool mayForward = !http->storeEntry() && handleRequest(request); if (http->storeEntry() != NULL) { debugs(33, 4, "got an immediate response"); - assert(http->storeEntry() != NULL); clientSetKeepaliveFlag(http); context->pullData(); - } else if (fwd) { + } else if (mayForward) { debugs(33, 4, "forwarding request to server side"); assert(http->storeEntry() == NULL); clientProcessRequest(this, NULL /*parser*/, context.getRaw(), request->method, request->http_ver); } else { debugs(33, 4, "will resume processing later"); 
} } void Ftp::Server::processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &) { + Must(getConcurrentRequestCount() == 1); + // Process FTP request asynchronously to make sure FTP // data connection accept callback is fired first. CallJobHere(33, 4, CbcPointer(this), Ftp::Server, doProcessRequest); } /// imports more upload data from the data connection void Ftp::Server::readUploadData(const CommIoCbParams &io) { debugs(33, 5, io.conn << " size " << io.size); Must(reader != NULL); reader = NULL; assert(Comm::IsConnOpen(dataConn)); assert(io.conn->fd == dataConn->fd); if (io.flag == Comm::OK && bodyPipe != NULL) { if (io.size > 0) { kb_incr(&(statCounter.client_http.kbytes_in), io.size); @@ -528,169 +522,246 @@ Ftp::CommandHasPathParameter(const SBuf PathedCommands.insert(cmdMlst()); PathedCommands.insert(cmdMlsd()); PathedCommands.insert(cmdStat()); PathedCommands.insert(cmdNlst()); PathedCommands.insert(cmdList()); PathedCommands.insert(cmdMkd()); PathedCommands.insert(cmdRmd()); PathedCommands.insert(cmdDele()); PathedCommands.insert(cmdRnto()); PathedCommands.insert(cmdRnfr()); PathedCommands.insert(cmdAppe()); PathedCommands.insert(cmdStor()); PathedCommands.insert(cmdRetr()); PathedCommands.insert(cmdSmnt()); PathedCommands.insert(cmdCwd()); } return PathedCommands.find(cmd) != PathedCommands.end(); } +/// creates a context filled with an error message for a given early error +ClientSocketContext * +Ftp::Server::earlyError(const EarlyErrorKind eek) +{ + /* Default values, to be updated by the switch statement below */ + int scode = 421; + const char *reason = "Internal error"; + const char *errUri = "error:ftp-internal-early-error"; + + switch (eek) { + case eekHugeRequest: + scode = 421; + reason = "Huge request"; + errUri = "error:ftp-huge-request"; + break; + + case eekMissingLogin: + scode = 530; + reason = "Must login first"; + errUri = "error:ftp-must-login-first"; + break; + + case eekMissingUsername: + scode = 501; + reason = "Missing username"; + errUri = "error:ftp-missing-username"; + break; + + case eekMissingHost: + scode = 501; + reason = "Missing host"; + errUri = "error:ftp-missing-host"; + break; + + case eekUnsupportedCommand: + scode = 502; + reason = "Unknown or unsupported command"; + errUri = "error:ftp-unsupported-command"; + break; + + case eekInvalidUri: + scode = 501; + reason = "Invalid URI"; + errUri = "error:ftp-invalid-uri"; + break; + + case eekMalformedCommand: + scode = 421; + reason = "Malformed command"; + errUri = "error:ftp-malformed-command"; + break; + + // no default so that a compiler can check that we have covered all cases + } + + ClientSocketContext *context = abortRequestParsing(errUri); + clientStreamNode *node = context->getClientReplyContext(); + Must(node); + clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); + + // We cannot relay FTP scode/reason via HTTP-specific ErrorState. + // TODO: When/if ErrorState can handle native FTP errors, use it instead. + HttpReply *reply = Ftp::HttpReplyWrapper(scode, reason, Http::scBadRequest, -1); + repContext->setReplyToReply(reply); + return context; +} + /// Parses a single FTP request on the control connection. -/// Returns NULL on errors and incomplete requests. +/// Returns a new ClientSocketContext on valid requests and all errors. +/// Returns NULL on incomplete requests that may still succeed given more data. 
ClientSocketContext * Ftp::Server::parseOneRequest(Http::ProtocolVersion &ver) { + flags.readMore = false; // common for all but one case below + // OWS [ RWS ] OWS LF // InlineSpaceChars are isspace(3) or RFC 959 Section 3.1.1.5.2, except // for the LF character that we must exclude here (but see FullWhiteSpace). static const char * const InlineSpaceChars = " \f\r\t\v"; static const CharacterSet InlineSpace = CharacterSet("Ftp::Inline", InlineSpaceChars); static const CharacterSet FullWhiteSpace = (InlineSpace + CharacterSet::LF).rename("Ftp::FWS"); static const CharacterSet CommandChars = FullWhiteSpace.complement("Ftp::Command"); static const CharacterSet TailChars = CharacterSet::LF.complement("Ftp::Tail"); // This set is used to ignore empty commands without allowing an attacker // to keep us endlessly busy by feeding us whitespace or empty commands. static const CharacterSet &LeadingSpace = FullWhiteSpace; SBuf cmd; SBuf params; Parser::Tokenizer tok(in.buf); (void)tok.skipAll(LeadingSpace); // leading OWS and empty commands const bool parsed = tok.prefix(cmd, CommandChars); // required command // note that the condition below will eat either RWS or trailing OWS if (parsed && tok.skipAll(InlineSpace) && tok.prefix(params, TailChars)) { // now params may include trailing OWS // TODO: Support right-trimming using CharacterSet in Tokenizer instead static const SBuf bufWhiteSpace(InlineSpaceChars); params.trim(bufWhiteSpace, false, true); } + // Why limit command line and parameters size? Did not we just parse them? + // XXX: Our good old String cannot handle very long strings. + const SBuf::size_type tokenMax = min( + static_cast(32*1024), // conservative + static_cast(Config.maxRequestHeaderSize)); + if (cmd.length() > tokenMax || params.length() > tokenMax) { + changeState(fssError, "huge req token"); + quitAfterError(NULL); + return earlyError(eekHugeRequest); + } + // technically, we may skip multiple NLs below, but that is OK if (!parsed || !tok.skipAll(CharacterSet::LF)) { // did not find terminating LF yet // we need more data, but can we buffer more? if (in.buf.length() >= Config.maxRequestHeaderSize) { changeState(fssError, "huge req"); - writeEarlyReply(421, "Huge request"); - return NULL; + quitAfterError(NULL); + return earlyError(eekHugeRequest); } else { + flags.readMore = true; debugs(33, 5, "Waiting for more, up to " << (Config.maxRequestHeaderSize - in.buf.length())); return NULL; } } Must(parsed && cmd.length()); consumeInput(tok.parsedSize()); // TODO: Would delaying optimize copying? debugs(33, 2, ">>ftp " << cmd << (params.isEmpty() ? 
"" : " ") << params); cmd.toUpper(); // this should speed up and simplify future comparisons // interception cases do not need USER to calculate the uri if (!transparent()) { if (!master->clientReadGreeting) { // the first command must be USER - if (!pinning.pinned && cmd != cmdUser()) { - writeEarlyReply(530, "Must login first"); - return NULL; - } + if (!pinning.pinned && cmd != cmdUser()) + return earlyError(eekMissingLogin); } // process USER request now because it sets FTP peer host name - if (cmd == cmdUser() && !handleUserRequest(cmd, params)) - return NULL; + if (cmd == cmdUser()) { + if (ClientSocketContext *errCtx = handleUserRequest(cmd, params)) + return errCtx; + } } - if (!Ftp::SupportedCommand(cmd)) { - writeEarlyReply(502, "Unknown or unsupported command"); - return NULL; - } + if (!Ftp::SupportedCommand(cmd)) + return earlyError(eekUnsupportedCommand); const HttpRequestMethod method = cmd == cmdAppe() || cmd == cmdStor() || cmd == cmdStou() ? Http::METHOD_PUT : Http::METHOD_GET; const SBuf *path = (params.length() && CommandHasPathParameter(cmd)) ? ¶ms : NULL; calcUri(path); char *newUri = xstrdup(uri.c_str()); HttpRequest *const request = HttpRequest::CreateFromUrlAndMethod(newUri, method); if (!request) { debugs(33, 5, "Invalid FTP URL: " << uri); - writeEarlyReply(501, "Invalid host"); uri.clear(); safe_free(newUri); - return NULL; + return earlyError(eekInvalidUri); } ver = Http::ProtocolVersion(Ftp::ProtocolVersion().major, Ftp::ProtocolVersion().minor); request->flags.ftpNative = true; request->http_ver = ver; // Our fake Request-URIs are not distinctive enough for caching to work request->flags.cachable = false; // XXX: reset later by maybeCacheable() request->flags.noCache = true; request->header.putStr(HDR_FTP_COMMAND, cmd.c_str()); request->header.putStr(HDR_FTP_ARGUMENTS, params.c_str()); // may be "" if (method == Http::METHOD_PUT) { request->header.putStr(HDR_EXPECT, "100-continue"); request->header.putStr(HDR_TRANSFER_ENCODING, "chunked"); } ClientHttpRequest *const http = new ClientHttpRequest(this); http->request = request; HTTPMSGLOCK(http->request); http->req_sz = tok.parsedSize(); http->uri = newUri; ClientSocketContext *const result = new ClientSocketContext(clientConnection, http); StoreIOBuffer tempBuffer; tempBuffer.data = result->reqbuf; tempBuffer.length = HTTP_REQBUF_SZ; ClientStreamData newServer = new clientReplyContext(http); ClientStreamData newClient = result; clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach, clientReplyStatus, newServer, clientSocketRecipient, clientSocketDetach, newClient, tempBuffer); - Must(!getConcurrentRequestCount()); - result->registerWithConn(); result->flags.parsed_ok = 1; flags.readMore = false; return result; } void Ftp::Server::handleReply(HttpReply *reply, StoreIOBuffer data) { // the caller guarantees that we are dealing with the current context only ClientSocketContext::Pointer context = getCurrentContext(); assert(context != NULL); if (context->http && context->http->al != NULL && !context->http->al->reply && reply) { context->http->al->reply = reply; HTTPMSGLOCK(context->http->al->reply); } static ReplyHandler handlers[] = { NULL, // fssBegin @@ -1022,40 +1093,41 @@ Ftp::Server::writeErrorReply(const HttpR mb.Printf("%i-Information: %s\r\n", scode, info.termedBuf()); if (desc.size()) mb.Printf("%i-Description: %s\r\n", scode, desc.termedBuf()); } assert(reply != NULL); const char *reason = reply->header.has(HDR_FTP_REASON) ? 
reply->header.getStr(HDR_FTP_REASON): reply->sline.reason(); mb.Printf("%i %s\r\n", scode, reason); // error terminating line // TODO: errorpage.cc should detect FTP client and use // configurable FTP-friendly error templates which we should // write to the client "as is" instead of hiding most of the info writeReply(mb); } /// writes FTP response based on HTTP reply that is not an FTP-response wrapper +/// for example, internally-generated Squid "errorpages" end up here (for now) void Ftp::Server::writeForwardedForeign(const HttpReply *reply) { changeState(fssConnected, "foreign reply"); closeDataConnection(); // 451: We intend to keep the control connection open. writeErrorReply(reply, 451); } void Ftp::Server::writeControlMsgAndCall(ClientSocketContext *context, HttpReply *reply, AsyncCall::Pointer &call) { // the caller guarantees that we are dealing with the current context only // the caller should also make sure reply->header.has(HDR_FTP_STATUS) writeForwardedReplyAndCall(reply, call); } void Ftp::Server::writeForwardedReplyAndCall(const HttpReply *reply, AsyncCall::Pointer &call) { @@ -1177,45 +1249,51 @@ Ftp::Server::wroteReply(const CommIoCbPa const clientStream_status_t socketState = context->socketState(); debugs(33, 5, "FTP client stream state " << socketState); switch (socketState) { case STREAM_UNPLANNED_COMPLETE: case STREAM_FAILED: io.conn->close(); return; case STREAM_NONE: case STREAM_COMPLETE: flags.readMore = true; changeState(fssConnected, "Ftp::Server::wroteReply"); if (in.bodyParser) finishDechunkingRequest(false); context->keepaliveNextRequest(); return; } } bool -Ftp::Server::handleRequest(String &cmd, String ¶ms) +Ftp::Server::handleRequest(HttpRequest *request) { - HttpRequest *request = getCurrentContext()->http->request; + debugs(33, 9, request); Must(request); + HttpHeader &header = request->header; + Must(header.has(HDR_FTP_COMMAND)); + String &cmd = header.findEntry(HDR_FTP_COMMAND)->value; + Must(header.has(HDR_FTP_ARGUMENTS)); + String ¶ms = header.findEntry(HDR_FTP_ARGUMENTS)->value; + if (do_debug(9, 2)) { MemBuf mb; Packer p; mb.init(); packerToMemInit(&p, &mb); request->pack(&p); packerClean(&p); debugs(9, 2, "FTP Client " << clientConnection); debugs(9, 2, "FTP Client REQUEST:\n---------\n" << mb.buf << "\n----------"); } // TODO: When HttpHeader uses SBuf, change keys to SBuf typedef std::map RequestHandlers; static RequestHandlers handlers; if (!handlers.size()) { handlers["LIST"] = &Ftp::Server::handleDataRequest; handlers["NLST"] = &Ftp::Server::handleDataRequest; handlers["MLSD"] = &Ftp::Server::handleDataRequest; @@ -1231,91 +1309,87 @@ Ftp::Server::handleRequest(String &cmd, } RequestHandler handler = NULL; if (request->method == Http::METHOD_PUT) handler = &Ftp::Server::handleUploadRequest; else { const RequestHandlers::const_iterator hi = handlers.find(cmd.termedBuf()); if (hi != handlers.end()) handler = hi->second; } if (!handler) { debugs(9, 7, "forwarding " << cmd << " as is, no post-processing"); return true; } return (this->*handler)(cmd, params); } /// Called to parse USER command, which is required to create an HTTP request -/// wrapper. Thus, errors are handled with writeEarlyReply() here. -bool +/// wrapper. W/o request, the errors are handled by returning earlyError(). 
+ClientSocketContext * Ftp::Server::handleUserRequest(const SBuf &cmd, SBuf ¶ms) { - if (params.isEmpty()) { - writeEarlyReply(501, "Missing username"); - return false; - } + if (params.isEmpty()) + return earlyError(eekMissingUsername); // find the [end of] user name const SBuf::size_type eou = params.rfind('@'); - if (eou == SBuf::npos || eou + 1 >= params.length()) { - writeEarlyReply(501, "Missing host"); - return false; - } + if (eou == SBuf::npos || eou + 1 >= params.length()) + return earlyError(eekMissingHost); // Determine the intended destination. host = params.substr(eou + 1, params.length()); // If we can parse it as raw IPv6 address, then surround with "[]". // Otherwise (domain, IPv4, [bracketed] IPv6, garbage, etc), use as is. if (host.find(':') != SBuf::npos) { const Ip::Address ipa(host.c_str()); if (!ipa.isAnyAddr()) { char ipBuf[MAX_IPSTRLEN]; ipa.toHostStr(ipBuf, MAX_IPSTRLEN); host = ipBuf; } } // const SBuf login = params.substr(0, eou); params.chop(0, eou); // leave just the login part for the peer SBuf oldUri; if (master->clientReadGreeting) oldUri = uri; master->workingDir.clear(); calcUri(NULL); if (!master->clientReadGreeting) { debugs(9, 3, "set URI to " << uri); } else if (oldUri.caseCmp(uri) == 0) { debugs(9, 5, "kept URI as " << oldUri); } else { debugs(9, 3, "reset URI from " << oldUri << " to " << uri); closeDataConnection(); unpinConnection(true); // close control connection to peer resetLogin("URI reset"); } - return true; + return NULL; // no early errors } bool Ftp::Server::handleFeatRequest(String &cmd, String ¶ms) { changeState(fssHandleFeat, "handleFeatRequest"); return true; } bool Ftp::Server::handlePasvRequest(String &cmd, String ¶ms) { if (gotEpsvAll) { setReply(500, "Bad PASV command"); return false; } if (params.size() > 0) { setReply(501, "Unexpected parameter"); return false; === modified file 'src/servers/FtpServer.h' --- src/servers/FtpServer.h 2014-08-10 23:18:33 +0000 +++ src/servers/FtpServer.h 2014-08-12 00:01:41 +0000 @@ -40,87 +40,99 @@ public: Ip::Address clientDataAddr; ///< address of our FTP client data connection SBuf workingDir; ///< estimated current working directory for URI formation ServerState serverState; ///< what our FTP server is doing bool clientReadGreeting; ///< whether our FTP client read their FTP server greeting }; /// Manages a control connection from an FTP client. class Server: public ConnStateData { public: explicit Server(const MasterXaction::Pointer &xact); virtual ~Server(); // This is a pointer in hope to minimize future changes when MasterState // becomes a part of MasterXaction. Guaranteed not to be nil. 
MasterState::Pointer master; ///< info shared among our FTP client and server jobs protected: friend void StartListening(); + // errors detected before it is possible to create an HTTP request wrapper + typedef enum { + eekHugeRequest, + eekMissingLogin, + eekMissingUsername, + eekMissingHost, + eekUnsupportedCommand, + eekInvalidUri, + eekMalformedCommand + } EarlyErrorKind; + /* ConnStateData API */ virtual ClientSocketContext *parseOneRequest(Http::ProtocolVersion &ver); virtual void processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &ver); virtual void notePeerConnection(Comm::ConnectionPointer conn); virtual void clientPinnedConnectionClosed(const CommCloseCbParams &io); virtual void handleReply(HttpReply *header, StoreIOBuffer receivedData); virtual int pipelinePrefetchMax() const; virtual void writeControlMsgAndCall(ClientSocketContext *context, HttpReply *rep, AsyncCall::Pointer &call); virtual time_t idleTimeout() const; /* BodyPipe API */ virtual void noteMoreBodySpaceAvailable(BodyPipe::Pointer); virtual void noteBodyConsumerAborted(BodyPipe::Pointer ptr); /* AsyncJob API */ virtual void start(); /* Comm callbacks */ static void AcceptCtrlConnection(const CommAcceptCbParams ¶ms); void acceptDataConnection(const CommAcceptCbParams ¶ms); void readUploadData(const CommIoCbParams &io); void wroteEarlyReply(const CommIoCbParams &io); void wroteReply(const CommIoCbParams &io); void wroteReplyData(const CommIoCbParams &io); void connectedForData(const CommConnectCbParams ¶ms); unsigned int listenForDataConnection(); bool createDataConnection(Ip::Address cltAddr); void closeDataConnection(); void calcUri(const SBuf *file); void changeState(const Ftp::ServerState newState, const char *reason); - bool handleUserRequest(const SBuf &cmd, SBuf ¶ms); + ClientSocketContext *handleUserRequest(const SBuf &cmd, SBuf ¶ms); bool checkDataConnPost() const; void replyDataWritingCheckpoint(); void maybeReadUploadData(); void setReply(const int code, const char *msg); void writeCustomReply(const int code, const char *msg, const HttpReply *reply = NULL); void writeEarlyReply(const int code, const char *msg); void writeErrorReply(const HttpReply *reply, const int status); void writeForwardedForeign(const HttpReply *reply); void writeForwardedReply(const HttpReply *reply); void writeForwardedReplyAndCall(const HttpReply *reply, AsyncCall::Pointer &call); void writeReply(MemBuf &mb); - bool handleRequest(String &cmd, String ¶ms); + ClientSocketContext *earlyError(const EarlyErrorKind eek); + bool handleRequest(HttpRequest *); void setDataCommand(); bool checkDataConnPre(); /// a method handling an FTP command; selected by handleRequest() typedef bool (Ftp::Server::*RequestHandler)(String &cmd, String ¶ms); bool handleFeatRequest(String &cmd, String ¶ms); bool handlePasvRequest(String &cmd, String ¶ms); bool handlePortRequest(String &cmd, String ¶ms); bool handleDataRequest(String &cmd, String ¶ms); bool handleUploadRequest(String &cmd, String ¶ms); bool handleEprtRequest(String &cmd, String ¶ms); bool handleEpsvRequest(String &cmd, String ¶ms); bool handleCwdRequest(String &cmd, String ¶ms); bool handlePassRequest(String &cmd, String ¶ms); bool handleCdupRequest(String &cmd, String ¶ms); /// a method handling an FTP response; selected by handleReply() typedef void (Ftp::Server::*ReplyHandler)(const HttpReply *reply, StoreIOBuffer data); void handleFeatReply(const HttpReply *header, StoreIOBuffer receivedData); void handlePasvReply(const HttpReply *header, StoreIOBuffer 
receivedData); === modified file 'src/servers/HttpServer.cc' --- src/servers/HttpServer.cc 2014-08-08 03:07:03 +0000 +++ src/servers/HttpServer.cc 2014-08-12 23:21:09 +0000 @@ -99,43 +99,40 @@ Http::Server::noteMoreBodySpaceAvailable if (!isOpen() || stoppedReceiving()) return; readSomeData(); } ClientSocketContext * Http::Server::parseOneRequest(Http::ProtocolVersion &ver) { ClientSocketContext *context = NULL; PROF_start(HttpServer_parseOneRequest); HttpParserInit(&parser_, in.buf.c_str(), in.buf.length()); context = parseHttpRequest(this, &parser_, &method_, &ver); PROF_stop(HttpServer_parseOneRequest); return context; } void Http::Server::processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &ver) { - /* We have an initial client stream in place should it be needed */ - /* setup our private context */ - context->registerWithConn(); clientProcessRequest(this, &parser_, context, method_, ver); } void Http::Server::noteBodyConsumerAborted(BodyPipe::Pointer ptr) { ConnStateData::noteBodyConsumerAborted(ptr); stopReceiving("virgin request body consumer aborted"); // closes ASAP } void Http::Server::handleReply(HttpReply *rep, StoreIOBuffer receivedData) { // the caller guarantees that we are dealing with the current context only ClientSocketContext::Pointer context = getCurrentContext(); Must(context != NULL); const ClientHttpRequest *http = context->http; Must(http != NULL); // After sending Transfer-Encoding: chunked (at least), always send === modified file 'src/store.cc' --- src/store.cc 2014-06-25 00:14:36 +0000 +++ src/store.cc 2014-08-12 17:43:54 +0000 @@ -1829,40 +1829,53 @@ createRemovalPolicy(RemovalPolicySetting void storeSwapFileNumberSet(StoreEntry * e, sfileno filn) { if (e->swap_file_number == filn) return; if (filn < 0) { assert(-1 == filn); storeDirMapBitReset(e->swap_file_number); storeDirLRUDelete(e); e->swap_file_number = -1; } else { assert(-1 == e->swap_file_number); storeDirMapBitSet(e->swap_file_number = filn); storeDirLRUAdd(e); } } #endif +void +StoreEntry::storeErrorResponse(HttpReply *reply) +{ + lock("StoreEntry::storeErrorResponse"); + buffer(); + replaceHttpReply(reply); + flush(); + complete(); + negativeCache(); + releaseRequest(); + unlock("StoreEntry::storeErrorResponse"); +} + /* * Replace a store entry with * a new reply. This eats the reply. 
*/ void StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting) { debugs(20, 3, "StoreEntry::replaceHttpReply: " << url()); if (!mem_obj) { debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation"); return; } mem_obj->replaceHttpReply(rep); if (andStartWriting) startWriting(); } === modified file 'src/tests/stub_client_side.cc' --- src/tests/stub_client_side.cc 2014-07-30 17:33:14 +0000 +++ src/tests/stub_client_side.cc 2014-08-12 18:26:49 +0000 @@ -20,41 +20,40 @@ void ClientSocketContext::noteSentBodyBy void ClientSocketContext::buildRangeHeader(HttpReply * rep) STUB clientStreamNode * ClientSocketContext::getTail() const STUB_RETVAL(NULL) clientStreamNode * ClientSocketContext::getClientReplyContext() const STUB_RETVAL(NULL) void ClientSocketContext::connIsFinished() STUB void ClientSocketContext::removeFromConnectionList(ConnStateData * conn) STUB void ClientSocketContext::deferRecipientForLater(clientStreamNode * node, HttpReply * rep, StoreIOBuffer receivedData) STUB bool ClientSocketContext::multipartRangeRequest() const STUB_RETVAL(false) void ClientSocketContext::registerWithConn() STUB void ClientSocketContext::noteIoError(const int xerrno) STUB void ClientSocketContext::writeControlMsg(HttpControlMsg &msg) STUB void ConnStateData::readSomeData() STUB bool ConnStateData::areAllContextsForThisConnection() const STUB_RETVAL(false) void ConnStateData::freeAllContexts() STUB void ConnStateData::notifyAllContexts(const int xerrno) STUB bool ConnStateData::clientParseRequests() STUB_RETVAL(false) void ConnStateData::readNextRequest() STUB void ConnStateData::addContextToQueue(ClientSocketContext * context) STUB int ConnStateData::getConcurrentRequestCount() const STUB_RETVAL(0) bool ConnStateData::isOpen() const STUB_RETVAL(false) -void ConnStateData::checkHeaderLimits() STUB void ConnStateData::sendControlMsg(HttpControlMsg msg) STUB int64_t ConnStateData::mayNeedToReadMoreBody() const STUB_RETVAL(0) #if USE_AUTH void ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *cause) STUB #endif bool ConnStateData::transparent() const STUB_RETVAL(false) bool ConnStateData::reading() const STUB_RETVAL(false) void ConnStateData::stopReading() STUB void ConnStateData::stopReceiving(const char *error) STUB void ConnStateData::stopSending(const char *error) STUB void ConnStateData::expectNoForwarding() STUB void ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer) STUB void ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer) STUB bool ConnStateData::handleReadData() STUB_RETVAL(false) bool ConnStateData::handleRequestBodyData() STUB_RETVAL(false) void ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServerConn, HttpRequest *request, CachePeer *peer, bool auth, bool monitor) STUB void ConnStateData::unpinConnection(const bool andClose) STUB const Comm::ConnectionPointer ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *peer) STUB_RETVAL(NULL) void ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io) STUB void ConnStateData::clientReadRequest(const CommIoCbParams &io) STUB