Polished request reading code to fix the CONNECT double-read assertion at
comm.cc:216: "fd_table[fd].halfClosedReader != NULL".

ConnStateData::flags.readMoreRequests, the do_next_read variables, and the
ClientSocketContext::mayUseConnection() methods were used (or unused!)
incorrectly or inconsistently. This change removes all do_next_read variables
to simplify the state. Instead, the renamed ConnStateData::flags.readMore
indicates whether client_side.cc should call comm_read. The mayUseConnection()
methods are now used to indicate whether the next client-sent byte (buffered
or read) should be reserved for the current request rather than being
interpreted as the beginning of the next request.

Usually,

                              flags.readMore    mayUseConnection
    regular requests:         true              false
    requests with bodies:     true              true
    errors:                   false             false
    tunnels:                  false             true

=== modified file 'src/client_side.cc' --- src/client_side.cc 2011-04-05 21:08:43 +0000 +++ src/client_side.cc 2011-04-06 19:53:52 +0000 @@ -742,41 +742,41 @@ ConnStateData::notifyAllContexts(int xerrno) { typedef ClientSocketContext::Pointer CSCP; for (CSCP c = getCurrentContext(); c.getRaw(); c = c->next) c->noteIoError(xerrno); } /* This is a handler normally called by comm_close() */ void ConnStateData::connStateClosed(const CommCloseCbParams &io) { assert (fd == io.fd); deleteThis("ConnStateData::connStateClosed"); } // cleans up before destructor is called void ConnStateData::swanSong() { debugs(33, 2, "ConnStateData::swanSong: FD " << fd); fd = -1; - flags.readMoreRequests = false; + flags.readMore = false; clientdbEstablished(peer, -1); /* decrement */ assert(areAllContextsForThisConnection()); freeAllContexts(); #if USE_AUTH if (auth_user_request != NULL) { debugs(33, 4, "ConnStateData::swanSong: freeing auth_user_request '" << auth_user_request << "' (this is '" << this << "')"); auth_user_request->onConnectionClose(this); } #endif if (pinning.fd >= 0) comm_close(pinning.fd); BodyProducer::swanSong(); flags.swanSang = true; } bool ConnStateData::isOpen() const { return cbdataReferenceValid(this) && // XXX: checking "this" in a method @@ -1494,94 +1494,96 @@ if (deferredRequest->flags.deferred) { /** NO data is allowed to have been sent. */ assert(deferredRequest->http->out.size == 0); /** defer now. */ clientSocketRecipient(deferredRequest->deferredparams.node, deferredRequest->http, deferredRequest->deferredparams.rep, deferredRequest->deferredparams.queuedBuffer); } /** otherwise, the request is still active in a callbacksomewhere, * and we are done */ } void ClientSocketContext::keepaliveNextRequest() { ConnStateData * conn = http->getConn(); - bool do_next_read = false; debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: FD " << conn->fd); connIsFinished(); if (conn->pinning.pinned && conn->pinning.fd == -1) { debugs(33, 2, "clientKeepaliveNextRequest: FD " << conn->fd << " Connection was pinned but server side gone. Terminating client connection"); comm_close(conn->fd); return; } /** \par * Attempt to parse a request from the request buffer. * If we've been fed a pipelined request it may already * be in our read buffer. * \par * This needs to fall through - if we're unlucky and parse the _last_ request * from our read buffer we may never re-register for another client read. 
*/ - if (conn->clientParseRequest(do_next_read)) { + if (conn->clientParseRequests()) { debugs(33, 3, "clientSocketContext::keepaliveNextRequest: FD " << conn->fd << ": parsed next request from buffer"); } /** \par * Either we need to kick-start another read or, if we have * a half-closed connection, kill it after the last request. * This saves waiting for half-closed connections to finished being * half-closed _AND_ then, sometimes, spending "Timeout" time in * the keepalive "Waiting for next request" state. */ if (commIsHalfClosed(conn->fd) && (conn->getConcurrentRequestCount() == 0)) { debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing"); comm_close(conn->fd); return; } ClientSocketContext::Pointer deferredRequest; /** \par * At this point we either have a parsed request (which we've * kicked off the processing for) or not. If we have a deferred * request (parsed but deferred for pipeling processing reasons) * then look at processing it. If not, simply kickstart * another read. */ if ((deferredRequest = conn->getCurrentContext()).getRaw()) { debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling PushDeferredIfNeeded"); ClientSocketContextPushDeferredIfNeeded(deferredRequest, conn); - } else { + } else if (conn->flags.readMore) { debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling conn->readNextRequest()"); conn->readNextRequest(); + } else { + // XXX: Can this happen? CONNECT tunnels have deferredRequest set. + debugs(33, DBG_IMPORTANT, HERE << "abandoning FD " << conn->fd); } } void clientUpdateSocketStats(log_type logType, size_t size) { if (size == 0) return; kb_incr(&statCounter.client_http.kbytes_out, size); if (logTypeIsATcpHit(logType)) kb_incr(&statCounter.client_http.hit_kbytes_out, size); } /** * increments iterator "i" * used by clientPackMoreRanges * \retval true there is still data available to pack more ranges @@ -2376,146 +2378,138 @@ ConnStateData::checkHeaderLimits() { if (in.notYetUsed < Config.maxRequestHeaderSize) return; // can accumulte more header data debugs(33, 3, "Request header is too large (" << in.notYetUsed << " > " << Config.maxRequestHeaderSize << " bytes)"); ClientSocketContext *context = parseHttpRequestAbort(this, "error:request-too-large"); clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_TOO_BIG, HTTP_BAD_REQUEST, METHOD_NONE, NULL, peer, NULL, NULL, NULL); context->registerWithConn(); context->pullData(); } void -ConnStateData::clientMaybeReadData(int do_next_read) -{ - if (do_next_read) { - flags.readMoreRequests = true; - readSomeData(); - } -} - -void -ConnStateData::clientAfterReadingRequests(int do_next_read) +ConnStateData::clientAfterReadingRequests() { // Were we expecting to read more request body from half-closed connection? 
if (mayNeedToReadMoreBody() && commIsHalfClosed(fd)) { debugs(33, 3, HERE << "truncated body: closing half-closed FD " << fd); comm_close(fd); return; } - clientMaybeReadData (do_next_read); + if (flags.readMore) + readSomeData(); } static void clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *context, const HttpRequestMethod& method, HttpVersion http_ver) { ClientHttpRequest *http = context->http; HttpRequest *request = NULL; bool notedUseOfBuffer = false; bool chunked = false; bool mustReplyToOptions = false; bool unsupportedTe = false; bool expectBody = false; /* We have an initial client stream in place should it be needed */ /* setup our private context */ context->registerWithConn(); if (context->flags.parsed_ok == 0) { clientStreamNode *node = context->getClientReplyContext(); debugs(33, 2, "clientProcessRequest: Invalid Request"); // setLogUri should called before repContext->setReplyToError setLogUri(http, http->uri, true); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); switch (hp->request_parse_status) { case HTTP_HEADER_TOO_LARGE: repContext->setReplyToError(ERR_TOO_BIG, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, conn->in.buf, NULL); break; case HTTP_METHOD_NOT_ALLOWED: repContext->setReplyToError(ERR_UNSUP_REQ, HTTP_METHOD_NOT_ALLOWED, method, http->uri, conn->peer, NULL, conn->in.buf, NULL); break; default: repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, conn->in.buf, NULL); } assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } if ((request = HttpRequest::CreateFromUrlAndMethod(http->uri, method)) == NULL) { clientStreamNode *node = context->getClientReplyContext(); debugs(33, 5, "Invalid URL: " << http->uri); // setLogUri should called before repContext->setReplyToError setLogUri(http, http->uri, true); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_INVALID_URL, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } /* RFC 2616 section 10.5.6 : handle unsupported HTTP versions cleanly. */ /* We currently only accept 0.9, 1.0, 1.1 */ if ( (http_ver.major == 0 && http_ver.minor != 9) || (http_ver.major == 1 && http_ver.minor > 1 ) || (http_ver.major > 1) ) { clientStreamNode *node = context->getClientReplyContext(); debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp)); // setLogUri should called before repContext->setReplyToError setLogUri(http, http->uri, true); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_UNSUP_HTTPVERSION, HTTP_HTTP_VERSION_NOT_SUPPORTED, method, http->uri, conn->peer, NULL, HttpParserHdrBuf(hp), NULL); assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } /* compile headers */ /* we should skip request line! 
*/ /* XXX should actually know the damned buffer size here */ if (http_ver.major >= 1 && !request->parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp))) { clientStreamNode *node = context->getClientReplyContext(); debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp)); // setLogUri should called before repContext->setReplyToError setLogUri(http, http->uri, true); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } request->flags.accelerated = http->flags.accel; request->flags.ignore_cc = conn->port->ignore_cc; request->flags.no_direct = request->flags.accelerated ? !conn->port->allow_direct : 0; /** \par * If transparent or interception mode is working clone the transparent and interception flags * from the port settings to the request. */ if (Ip::Interceptor.InterceptActive()) { request->flags.intercepted = http->flags.intercepted; } if (Ip::Interceptor.TransparentActive()) { request->flags.spoof_client_ip = conn->port->spoof_client_ip; } if (internalCheck(request->urlpath.termedBuf())) { if (internalHostnameIs(request->GetHost()) && @@ -2549,181 +2543,184 @@ if (request->header.chunked()) { chunked = true; } else if (request->header.has(HDR_TRANSFER_ENCODING)) { const String te = request->header.getList(HDR_TRANSFER_ENCODING); // HTTP/1.1 requires chunking to be the last encoding if there is one unsupportedTe = te.size() && te != "identity"; } // else implied identity coding mustReplyToOptions = (method == METHOD_OPTIONS) && (request->header.getInt64(HDR_MAX_FORWARDS) == 0); if (!urlCheckRequest(request) || mustReplyToOptions || unsupportedTe) { clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_UNSUP_REQ, HTTP_NOT_IMPLEMENTED, request->method, NULL, conn->peer, request, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } if (!chunked && !clientIsContentLengthValid(request)) { clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_INVALID_REQ, HTTP_LENGTH_REQUIRED, request->method, NULL, conn->peer, request, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; goto finish; } if (request->header.has(HDR_EXPECT)) { const String expect = request->header.getList(HDR_EXPECT); const bool supportedExpect = (expect.caseCmp("100-continue") == 0); if (!supportedExpect) { clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_INVALID_REQ, HTTP_EXPECTATION_FAILED, request->method, http->uri, conn->peer, request, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); + conn->flags.readMore = false; goto finish; } } http->request = HTTPMSGLOCK(request); clientSetKeepaliveFlag(http); - /* If this is a CONNECT, don't schedule a read - ssl.c will handle it */ - if (http->request->method == 
METHOD_CONNECT) + // Let tunneling code be fully responsible for CONNECT requests + if (http->request->method == METHOD_CONNECT) { context->mayUseConnection(true); + conn->flags.readMore = false; + } /* Do we expect a request-body? */ expectBody = chunked || request->content_length > 0; if (!context->mayUseConnection() && expectBody) { request->body_pipe = conn->expectRequestBody( chunked ? -1 : request->content_length); // consume header early so that body pipe gets just the body connNoteUseOfBuffer(conn, http->req_sz); notedUseOfBuffer = true; /* Is it too large? */ if (!chunked && // if chunked, we will check as we accumulate clientIsRequestBodyTooLargeForPolicy(request->content_length)) { clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert (repContext); repContext->setReplyToError(ERR_TOO_BIG, HTTP_REQUEST_ENTITY_TOO_LARGE, METHOD_NONE, NULL, conn->peer, http->request, NULL, NULL); assert(context->http->out.offset == 0); context->pullData(); + conn->flags.readMore = false; goto finish; } // We may stop producing, comm_close, and/or call setReplyToError() // below, so quit on errors to avoid http->doCallouts() if (!conn->handleRequestBodyData()) goto finish; - if (!request->body_pipe->productionEnded()) - conn->readSomeData(); - - context->mayUseConnection(!request->body_pipe->productionEnded()); + if (!request->body_pipe->productionEnded()) { + debugs(33, 5, HERE << "need more request body"); + context->mayUseConnection(true); + assert(conn->flags.readMore); + } } http->calloutContext = new ClientRequestContext(http); http->doCallouts(); finish: if (!notedUseOfBuffer) connNoteUseOfBuffer(conn, http->req_sz); /* * DPW 2007-05-18 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData * to here because calling comm_reset_close() causes http to * be freed and the above connNoteUseOfBuffer() would hit an * assertion, not to mention that we were accessing freed memory. */ if (http->request->flags.resetTCP() && conn->fd > -1) { debugs(33, 3, HERE << "Sending TCP RST on FD " << conn->fd); - conn->flags.readMoreRequests = false; + conn->flags.readMore = false; comm_reset_close(conn->fd); return; } } static void connStripBufferWhitespace (ConnStateData * conn) { while (conn->in.notYetUsed > 0 && xisspace(conn->in.buf[0])) { memmove(conn->in.buf, conn->in.buf + 1, conn->in.notYetUsed - 1); --conn->in.notYetUsed; } } static int connOkToAddRequest(ConnStateData * conn) { int result = conn->getConcurrentRequestCount() < (Config.onoff.pipeline_prefetch ? 2 : 1); if (!result) { debugs(33, 3, "connOkToAddRequest: FD " << conn->fd << " max concurrent requests reached"); debugs(33, 5, "connOkToAddRequest: FD " << conn->fd << " defering new request until one is done"); } return result; } /** * Attempt to parse one or more requests from the input buffer. * If a request is successfully parsed, even if the next request * is only partially parsed, it will return TRUE. - * do_next_read is updated to indicate whether a read should be - * scheduled. 
*/ bool -ConnStateData::clientParseRequest(bool &do_next_read) +ConnStateData::clientParseRequests() { HttpRequestMethod method; bool parsed_req = false; HttpVersion http_ver; debugs(33, 5, HERE << "FD " << fd << ": attempting to parse"); // Loop while we have read bytes that are not needed for producing the body - // On errors, bodyPipe may become nil, but readMoreRequests will be cleared - while (in.notYetUsed > 0 && !bodyPipe && flags.readMoreRequests) { + // On errors, bodyPipe may become nil, but readMore will be cleared + while (in.notYetUsed > 0 && !bodyPipe && flags.readMore) { connStripBufferWhitespace(this); /* Don't try to parse if the buffer is empty */ if (in.notYetUsed == 0) break; /* Limit the number of concurrent requests to 2 */ if (!connOkToAddRequest(this)) { break; } /* Should not be needed anymore */ /* Terminate the string */ in.buf[in.notYetUsed] = '\0'; /* Begin the parsing */ PROF_start(parseHttpRequest); HttpParserInit(&parser_, in.buf, in.notYetUsed); /* Process request */ @@ -2732,57 +2729,56 @@ /* partial or incomplete request */ if (!context) { // TODO: why parseHttpRequest can just return parseHttpRequestAbort // (which becomes context) but checkHeaderLimits cannot? checkHeaderLimits(); break; } /* status -1 or 1 */ if (context) { debugs(33, 5, HERE << "FD " << fd << ": parsed a request"); commSetTimeout(fd, Config.Timeout.lifetime, clientLifetimeTimeout, context->http); clientProcessRequest(this, &parser_, context, method, http_ver); parsed_req = true; // XXX: do we really need to parse everything right NOW ? if (context->mayUseConnection()) { - debugs(33, 3, HERE << "Not reading, as this request may need the connection"); - return false; + debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection"); + break; } } } /* XXX where to 'finish' the parsing pass? */ return parsed_req; } void ConnStateData::clientReadRequest(const CommIoCbParams &io) { debugs(33,5,HERE << "clientReadRequest FD " << io.fd << " size " << io.size); Must(reading()); reader = NULL; - bool do_next_read = 1; /* the default _is_ to read data! - adrian */ assert (io.fd == fd); /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */ if (io.flag == COMM_ERR_CLOSING) { debugs(33,5, HERE << " FD " << fd << " closing Bailout."); return; } /* * Don't reset the timeout value here. The timeout value will be * set to Config.Timeout.request by httpAccept() and * clientWriteComplete(), and should apply to the request as a * whole, not individual read() calls. Plus, it breaks our * lame half-close detection */ if (connReadWasError(io.flag, io.size, io.xerrno)) { notifyAllContexts(io.xerrno); comm_close(fd); @@ -2793,78 +2789,76 @@ if (io.size > 0) { kb_incr(&statCounter.client_http.kbytes_in, io.size); // may comm_close or setReplyToError if (!handleReadData(io.buf, io.size)) return; } else if (io.size == 0) { debugs(33, 5, "clientReadRequest: FD " << fd << " closed?"); if (connFinishedWithConn(io.size)) { comm_close(fd); return; } /* It might be half-closed, we can't tell */ fd_table[fd].flags.socket_eof = 1; commMarkHalfClosed(fd); - do_next_read = 0; - fd_note(fd, "half-closed"); /* There is one more close check at the end, to detect aborted * (partial) requests. At this point we can't tell if the request * is partial. 
*/ /* Continue to process previously read data */ } } /* Process next request */ if (getConcurrentRequestCount() == 0) fd_note(fd, "Reading next request"); - if (!clientParseRequest(do_next_read)) { + if (!clientParseRequests()) { if (!isOpen()) return; /* * If the client here is half closed and we failed * to parse a request, close the connection. * The above check with connFinishedWithConn() only * succeeds _if_ the buffer is empty which it won't * be if we have an incomplete request. * XXX: This duplicates ClientSocketContext::keepaliveNextRequest */ if (getConcurrentRequestCount() == 0 && commIsHalfClosed(fd)) { debugs(33, 5, "clientReadRequest: FD " << fd << ": half-closed connection, no completed request parsed, connection closing."); comm_close(fd); return; } } if (!isOpen()) return; - clientAfterReadingRequests(do_next_read); + clientAfterReadingRequests(); } /** * called when new request data has been read from the socket * * \retval false called comm_close or setReplyToError (the caller should bail) * \retval true we did not call comm_close or setReplyToError */ bool ConnStateData::handleReadData(char *buf, size_t size) { char *current_buf = in.addressToReadInto(); if (buf != current_buf) memmove(current_buf, buf, size); in.notYetUsed += size; in.buf[in.notYetUsed] = '\0'; /* Terminate the string */ @@ -2981,41 +2975,41 @@ clientStreamNode *node = context->getClientReplyContext(); clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); assert(repContext); const http_status scode = (error == ERR_TOO_BIG) ? HTTP_REQUEST_ENTITY_TOO_LARGE : HTTP_BAD_REQUEST; repContext->setReplyToError(error, scode, repContext->http->request->method, repContext->http->uri, peer, repContext->http->request, in.buf, NULL); context->pullData(); } else { // close or otherwise we may get stuck as nobody will notice the error? comm_reset_close(fd); } #else debugs(33, 3, HERE << "aborting chunked request without error " << error); comm_reset_close(fd); #endif - flags.readMoreRequests = false; + flags.readMore = false; } void ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer ) { if (!handleRequestBodyData()) return; readSomeData(); } void ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer ) { if (!closing()) startClosing("body consumer aborted"); } /** general lifetime handler for HTTP requests */ void @@ -3125,41 +3119,41 @@ if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF && (result->transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) { #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT) int i = IP_PMTUDISC_DONT; setsockopt(fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof i); #else static int reported = 0; if (!reported) { debugs(33, 1, "Notice: httpd_accel_no_pmtu_disc not supported on your platform"); reported = 1; } #endif } - result->flags.readMoreRequests = true; + result->flags.readMore = true; return result; } /** Handle a new connection on HTTP socket. 
*/ void httpAccept(int, int newfd, ConnectionDetail *details, comm_err_t flag, int xerrno, void *data) { http_port_list *s = (http_port_list *)data; ConnStateData *connState = NULL; if (flag != COMM_OK) { // Its possible the call was still queued when the client disconnected debugs(33, 2, "httpAccept: FD " << s->listenFd << ": accept failure: " << xstrerr(xerrno)); return; } debugs(33, 4, "httpAccept: FD " << newfd << ": accepted"); fd_note(newfd, "client http connect"); connState = connStateCreate(&details->peer, &details->me, newfd, s); === modified file 'src/client_side.h' --- src/client_side.h 2011-03-26 02:03:49 +0000 +++ src/client_side.h 2011-04-06 18:36:18 +0000 @@ -135,41 +135,41 @@ }; class ConnectionDetail; /** A connection to a socket */ class ConnStateData : public BodyProducer, public HttpControlMsgSink { public: ConnStateData(); ~ConnStateData(); void readSomeData(); int getAvailableBufferLength() const; bool areAllContextsForThisConnection() const; void freeAllContexts(); void notifyAllContexts(const int xerrno); ///< tell everybody about the err /// Traffic parsing - bool clientParseRequest(bool &do_next_read); + bool clientParseRequests(); void readNextRequest(); bool maybeMakeSpaceAvailable(); ClientSocketContext::Pointer getCurrentContext() const; void addContextToQueue(ClientSocketContext * context); int getConcurrentRequestCount() const; bool isOpen() const; void checkHeaderLimits(); // HttpControlMsgSink API virtual void sendControlMsg(HttpControlMsg msg); int fd; struct In { In(); ~In(); char *addressToReadInto() const; ChunkedCodingParser *bodyParser; ///< parses chunked request body char *buf; @@ -196,41 +196,41 @@ /** * used by the owner of the connection, opaque otherwise * TODO: generalise the connection owner concept. */ ClientSocketContext::Pointer currentobject; Ip::Address peer; Ip::Address me; Ip::Address log_addr; char rfc931[USER_IDENT_SZ]; int nrequests; #if USE_SQUID_EUI Eui::Eui48 peer_eui48; Eui::Eui64 peer_eui64; #endif struct { - bool readMoreRequests; + bool readMore; ///< needs comm_read (for this request or new requests) bool swanSang; // XXX: temporary flag to check proper cleanup } flags; struct { int fd; /* pinned server side connection */ char *host; /* host name of pinned connection */ int port; /* port of pinned connection */ bool pinned; /* this connection was pinned */ bool auth; /* pinned for www authentication */ struct peer *peer; /* peer the connection goes via */ AsyncCall::Pointer closeHandler; /*The close handler for pinned server side connection*/ } pinning; http_port_list *port; bool transparent() const; void transparent(bool const); bool reading() const; void stopReading(); ///< cancels comm_read if it is scheduled bool closing() const; @@ -289,42 +289,41 @@ /// Callback function. It is called when squid receive message from ssl_crtd. static void sslCrtdHandleReplyWrapper(void *data, char *reply); /// Proccess response from ssl_crtd. 
void sslCrtdHandleReply(const char * reply); bool switchToHttps(const char *host); bool switchedToHttps() const { return switchedToHttps_; } #else bool switchedToHttps() const { return false; } #endif protected: void startDechunkingRequest(); void finishDechunkingRequest(bool withSuccess); void abortChunkedRequestBody(const err_type error); err_type handleChunkedRequestBody(size_t &putSize); private: int connReadWasError(comm_err_t flag, int size, int xerrno); int connFinishedWithConn(int size); - void clientMaybeReadData(int do_next_read); - void clientAfterReadingRequests(int do_next_read); + void clientAfterReadingRequests(); private: HttpParser parser_; // XXX: CBDATA plays with public/private and leaves the following 'private' fields all public... :( CBDATA_CLASS2(ConnStateData); bool transparent_; bool closing_; bool switchedToHttps_; String sslHostName; ///< Host name for SSL certificate generation AsyncCall::Pointer reader; ///< set when we are reading BodyPipe::Pointer bodyPipe; // set when we are reading request body }; /* convenience class while splitting up body handling */ /* temporary existence only - on stack use expected */ void setLogUri(ClientHttpRequest * http, char const *uri, bool cleanUrl = false);
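
Reviewer note, not part of the patch: a small, self-contained C++ sketch that
only encodes the readMore/mayUseConnection table from the description above.
The names ConnFlags, RequestKind, and expectedFlags are invented for this
illustration; the real fields live in ConnStateData and ClientSocketContext.

    #include <cstdio>

    // The two flags this change leaves in charge of request reading:
    //   ConnStateData::flags.readMore           - should client_side.cc call comm_read again?
    //   ClientSocketContext::mayUseConnection() - is the next client-sent byte reserved
    //                                             for the current request (body or tunnel)?
    struct ConnFlags {
        bool readMore;
        bool mayUseConnection;
    };

    enum class RequestKind { Regular, WithBody, Error, ConnectTunnel };

    // Encodes the table from the commit message.
    static ConnFlags expectedFlags(RequestKind kind)
    {
        switch (kind) {
        case RequestKind::Regular:       return { true,  false };
        case RequestKind::WithBody:      return { true,  true  };
        case RequestKind::Error:         return { false, false };
        case RequestKind::ConnectTunnel: return { false, true  };
        }
        return { false, false }; // not reached
    }

    int main()
    {
        const ConnFlags tunnel = expectedFlags(RequestKind::ConnectTunnel);
        // A CONNECT tunnel owns the connection (mayUseConnection) but client_side.cc
        // must not schedule its own reads (readMore is false); the double read behind
        // the comm.cc:216 assertion came from violating this split.
        std::printf("tunnel: readMore=%d mayUseConnection=%d\n",
                    tunnel.readMore, tunnel.mayUseConnection);
        return 0;
    }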
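
Also illustration only, assuming the description above is the full contract:
a stand-in model of the choice keepaliveNextRequest() now makes once a request
finishes. ConnModel, NextStep, and afterRequestCompletes are invented names;
the real code checks getCurrentContext() and flags.readMore as shown in the
else-if chain added by this patch.

    #include <iostream>

    struct ConnModel {
        bool hasDeferredRequest; // getCurrentContext() returned a context
        bool readMore;           // ConnStateData::flags.readMore
    };

    enum class NextStep { PushDeferred, ReadNextRequest, Abandon };

    static NextStep afterRequestCompletes(const ConnModel &conn)
    {
        if (conn.hasDeferredRequest)
            return NextStep::PushDeferred;    // ClientSocketContextPushDeferredIfNeeded(); covers CONNECT tunnels
        if (conn.readMore)
            return NextStep::ReadNextRequest; // conn->readNextRequest() schedules another comm_read
        return NextStep::Abandon;             // the new "abandoning FD" branch
    }

    int main()
    {
        std::cout << static_cast<int>(afterRequestCompletes({true,  false})) << '\n'; // 0: deferred request or tunnel
        std::cout << static_cast<int>(afterRequestCompletes({false, true}))  << '\n'; // 1: keep-alive, read next request
        std::cout << static_cast<int>(afterRequestCompletes({false, false})) << '\n'; // 2: abandon the descriptor
        return 0;
    }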