Close idle client connections associated with closed idle pinned connections.

Squid was not monitoring idle persistent connections pinned to servers, so it discovered that a pinned server connection had closed only after receiving a new request on the idle client connection and trying to write that request to the server. In such cases, Squid propagates the pinned connection closure to the client (as it should). Chrome and, to a lesser extent, Firefox handle such races by opening a new connection and transparently resending the failed [idempotent] request; IE, however, usually shows the user an error page.

While some pconn races cannot be avoided, without monitoring idle pconns Squid virtually guaranteed such a race in environments where the origin server idle connection timeout is shorter than the client/Squid timeouts and users revisit pages in the window between those two timeouts.

Squid now monitors idle pinned connections the same way it monitors idle connections in the pconn pool, and closes the corresponding idle client connection to keep the two sides in sync (to the extent possible).

=== modified file 'src/FwdState.cc'
--- src/FwdState.cc	2013-07-15 15:47:00 +0000
+++ src/FwdState.cc	2013-08-17 00:38:08 +0000
@@ -1102,40 +1102,41 @@
     if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
         debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parrent proxy are not allowed");
         ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
         fail(anErr);
         self = NULL; // refcounted
         return;
     }
 
     request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
     if (serverDestinations[0]->peerType == PINNED) {
         ConnStateData *pinned_connection = request->pinnedConnection();
         debugs(17,7, "pinned peer connection: " << pinned_connection);
         // pinned_connection may become nil after a pconn race
         if (pinned_connection)
             serverConn = pinned_connection->validatePinnedConnection(request, serverDestinations[0]->getPeer());
         else
             serverConn = NULL;
         if (Comm::IsConnOpen(serverConn)) {
+            pinned_connection->stopPinnedConnectionMonitoring();
             flags.connected_okay = true;
             ++n_tries;
             request->flags.pinned = true;
             if (pinned_connection->pinnedAuth())
                 request->flags.auth = true;
             comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
             // the server may close the pinned connection before this request
             pconnRace = racePossible;
             dispatch();
             return;
         }
         // Pinned connection failure.
         debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
         ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
         fail(anErr);
         self = NULL; // refcounted
         return;
     }
 
     // Use pconn to avoid opening a new connection.
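The core observation behind the change can be shown with a small, Squid-independent sketch: a server's FIN on an idle connection is only noticed if something actually reads from the socket, which is why an unmonitored idle pinned connection hides the closure until the next request write fails. The helper name and the synchronous poll()/recv(MSG_PEEK) approach below are illustrative only; they are not part of this patch or of Squid's Comm API.

    // Illustrative POSIX-only sketch (not Squid code): detect that the peer
    // closed an otherwise idle TCP connection by peeking at the socket.
    #include <poll.h>
    #include <sys/socket.h>

    // Hypothetical helper: returns true if the peer closed the idle connection
    // on serverFd; never consumes application data and never blocks.
    bool idleConnectionClosedByPeer(int serverFd)
    {
        struct pollfd pfd = { serverFd, POLLIN, 0 };
        if (poll(&pfd, 1, 0) <= 0)
            return false; // nothing readable; the connection still looks idle

        char buf[1];
        const ssize_t n = recv(serverFd, buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
        return n == 0; // recv() returning 0 means the peer sent FIN
    }

The client_side.cc changes below implement the asynchronous equivalent inside Squid: startPinnedConnectionMonitoring() schedules a comm_read() on the idle pinned server connection, so clientPinnedConnectionRead() fires as soon as the server closes (or unexpectedly sends data), and Squid can close the matching idle client connection instead of discovering the closure on the next request.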
=== modified file 'src/client_side.cc'
--- src/client_side.cc	2013-07-26 11:26:04 +0000
+++ src/client_side.cc	2013-08-16 23:44:56 +0000
@@ -4466,90 +4466,141 @@
     ClientSocketContext::Pointer context = getCurrentContext();
     if (context != NULL) {
         context->writeControlMsg(msg); // will call msg.cbSuccess
         return;
     }
 
     debugs(33, 3, HERE << " closing due to missing context for 1xx");
     clientConnection->close();
 }
 
 /// Our close handler called by Comm when the pinned connection is closed
 void
 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
 {
     // FwdState might repin a failed connection sooner than this close
     // callback is called for the failed connection.
     assert(pinning.serverConnection == io.conn);
     pinning.closeHandler = NULL; // Comm unregisters handlers before calling
     const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
     unpinConnection();
-    if (sawZeroReply) {
+    if (sawZeroReply && clientConnection != NULL) {
         debugs(33, 3, "Closing client connection on pinned zero reply.");
         clientConnection->close();
     }
 }
 
 void
 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, HttpRequest *request, CachePeer *aPeer, bool auth)
 {
     char desc[FD_DESC_SZ];
 
     if (Comm::IsConnOpen(pinning.serverConnection)) {
-        if (pinning.serverConnection->fd == pinServer->fd)
+        if (pinning.serverConnection->fd == pinServer->fd) {
+            startPinnedConnectionMonitoring();
             return;
+        }
     }
 
     unpinConnection(); // closes pinned connection, if any, and resets fields
 
     pinning.serverConnection = pinServer;
     debugs(33, 3, HERE << pinning.serverConnection);
 
     // when pinning an SSL bumped connection, the request may be NULL
     const char *pinnedHost = "[unknown]";
     if (request) {
         pinning.host = xstrdup(request->GetHost());
         pinning.port = request->port;
         pinnedHost = pinning.host;
     } else {
         pinning.port = pinServer->remote.port();
     }
     pinning.pinned = true;
     if (aPeer)
         pinning.peer = cbdataReference(aPeer);
     pinning.auth = auth;
     char stmp[MAX_IPSTRLEN];
     snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
              (auth || !aPeer) ? pinnedHost : aPeer->name,
              clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
              clientConnection->fd);
     fd_note(pinning.serverConnection->fd, desc);
 
     typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
     pinning.closeHandler = JobCallback(33, 5,
                                        Dialer, this, ConnStateData::clientPinnedConnectionClosed);
     // remember the pinned connection so that cb does not unpin a fresher one
     typedef CommCloseCbParams Params;
     Params &params = GetCommParams<Params>(pinning.closeHandler);
     params.conn = pinning.serverConnection;
     comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
+
+    startPinnedConnectionMonitoring();
+}
+
+/// Assign a read handler to an idle pinned connection so that we can detect connection closures.
+void
+ConnStateData::startPinnedConnectionMonitoring()
+{
+    if (pinning.readHandler != NULL)
+        return; // already monitoring
+
+    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
+    pinning.readHandler = JobCallback(33, 3,
+                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
+    static char unusedBuf[8];
+    comm_read(pinning.serverConnection, unusedBuf, sizeof(unusedBuf), pinning.readHandler);
+}
+
+void
+ConnStateData::stopPinnedConnectionMonitoring()
+{
+    if (pinning.readHandler != NULL) {
+        comm_read_cancel(pinning.serverConnection->fd, pinning.readHandler);
+        pinning.readHandler = NULL;
+    }
+}
+
+/// Our read handler called by Comm when the server either closes an idle pinned connection or
+/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
+void
+ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
+{
+    pinning.readHandler = NULL; // Comm unregisters handlers before calling
+
+    if (io.flag == COMM_ERR_CLOSING)
+        return; // close handler will clean up
+
+    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " << io.size <<
+           (!currentobject ? " with idle client" : ""));
+
+    assert(pinning.serverConnection == io.conn);
+    pinning.serverConnection->close();
+
+    // If we are still sending data to the client, do not close now. When we are done sending,
+    // ClientSocketContext::keepaliveNextRequest() checks pinning.serverConnection and will close.
+    // However, if we are idle, then we must close to inform the idle client and minimize races.
+    // We could use getConcurrentRequestCount() instead of currentobject, but the latter is faster.
+    if (!currentobject && clientConnection != NULL)
+        clientConnection->close();
+}
 
 const Comm::ConnectionPointer
 ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *aPeer)
 {
     debugs(33, 7, HERE << pinning.serverConnection);
 
     bool valid = true;
     if (!Comm::IsConnOpen(pinning.serverConnection))
         valid = false;
     else if (pinning.auth && pinning.host && request && strcasecmp(pinning.host, request->GetHost()) != 0)
         valid = false;
     else if (request && pinning.port != request->port)
         valid = false;
     else if (pinning.peer && !cbdataReferenceValid(pinning.peer))
         valid = false;
     else if (aPeer != pinning.peer)
         valid = false;
 
     if (!valid) {

=== modified file 'src/client_side.h'
--- src/client_side.h	2013-06-27 15:58:46 +0000
+++ src/client_side.h	2013-08-16 23:37:39 +0000
@@ -250,40 +250,41 @@
      * used by the owner of the connection, opaque otherwise
      * TODO: generalise the connection owner concept.
      */
     ClientSocketContext::Pointer currentobject;
 
     Ip::Address log_addr;
     int nrequests;
 
     struct {
         bool readMore; ///< needs comm_read (for this request or new requests)
         bool swanSang; // XXX: temporary flag to check proper cleanup
     } flags;
     struct {
         Comm::ConnectionPointer serverConnection; /* pinned server side connection */
         char *host;             /* host name of pinned connection */
         int port;               /* port of pinned connection */
         bool pinned;            /* this connection was pinned */
         bool auth;              /* pinned for www authentication */
         bool zeroReply; ///< server closed w/o response (ERR_ZERO_SIZE_OBJECT)
         CachePeer *peer;        /* CachePeer the connection goes via */
+        AsyncCall::Pointer readHandler; ///< detects serverConnection closure
         AsyncCall::Pointer closeHandler; /*The close handler for pinned server side connection*/
     } pinning;
 
     /// Squid listening port details where this connection arrived.
     AnyP::PortCfg *port;
 
     bool transparent() const;
     bool reading() const;
     void stopReading(); ///< cancels comm_read if it is scheduled
 
     /// true if we stopped receiving the request
     const char *stoppedReceiving() const { return stoppedReceiving_; }
     /// true if we stopped sending the response
     const char *stoppedSending() const { return stoppedSending_; }
     /// note request receiving error and close as soon as we write the response
     void stopReceiving(const char *error);
     /// note response sending error and close as soon as we read the request
     void stopSending(const char *error);
 
     void expectNoForwarding(); ///< cleans up virgin request [body] forwarding state
 
@@ -316,40 +317,43 @@
      */
     CachePeer *pinnedPeer() const {return pinning.peer;}
     bool pinnedAuth() const {return pinning.auth;}
 
     // pining related comm callbacks
     void clientPinnedConnectionClosed(const CommCloseCbParams &io);
 
     // comm callbacks
     void clientReadRequest(const CommIoCbParams &io);
     void connStateClosed(const CommCloseCbParams &io);
     void requestTimeout(const CommTimeoutCbParams &params);
 
     // AsyncJob API
     virtual bool doneAll() const { return BodyProducer::doneAll() && false;}
     virtual void swanSong();
 
     /// Changes state so that we close the connection and quit after serving
     /// the client-side-detected error response instead of getting stuck.
     void quitAfterError(HttpRequest *request); // meant to be private
 
+    /// The caller assumes responsibility for connection closure detection.
+    void stopPinnedConnectionMonitoring();
+
 #if USE_SSL
     /// called by FwdState when it is done bumping the server
     void httpsPeeked(Comm::ConnectionPointer serverConnection);
 
     /// Start to create dynamic SSL_CTX for host or uses static port SSL context.
     void getSslContextStart();
     /**
      * Done create dynamic ssl certificate.
      *
      * \param[in] isNew if generated certificate is new, so we need to add this certificate to storage.
      */
     void getSslContextDone(SSL_CTX * sslContext, bool isNew = false);
     /// Callback function. It is called when squid receive message from ssl_crtd.
     static void sslCrtdHandleReplyWrapper(void *data, const HelperReply &reply);
     /// Proccess response from ssl_crtd.
     void sslCrtdHandleReply(const HelperReply &reply);
     void switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode);
     bool switchedToHttps() const { return switchedToHttps_; }
     Ssl::ServerBump *serverBump() {return sslServerBump;}
 
@@ -363,40 +367,43 @@
     /// and create the key for storing/retrieve the certificate to/from the cache
     void buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties);
 
     /// Called when the client sends the first request on a bumped connection.
     /// Returns false if no [delayed] error should be written to the client.
     /// Otherwise, writes the error to the client and returns true. Also checks
     /// for SQUID_X509_V_ERR_DOMAIN_MISMATCH on bumped requests.
     bool serveDelayedError(ClientSocketContext *context);
 
     Ssl::BumpMode sslBumpMode; ///< ssl_bump decision (Ssl::bumpEnd if n/a).
 #else
     bool switchedToHttps() const { return false; }
 #endif
 
 protected:
     void startDechunkingRequest();
     void finishDechunkingRequest(bool withSuccess);
     void abortChunkedRequestBody(const err_type error);
     err_type handleChunkedRequestBody(size_t &putSize);
 
+    void startPinnedConnectionMonitoring();
+    void clientPinnedConnectionRead(const CommIoCbParams &io);
+
 private:
     int connReadWasError(comm_err_t flag, int size, int xerrno);
     int connFinishedWithConn(int size);
     void clientAfterReadingRequests();
     bool concurrentRequestQueueFilled() const;
 
 #if USE_AUTH
     /// some user details that can be used to perform authentication on this connection
     Auth::UserRequest::Pointer auth_;
 #endif
 
     HttpParser parser_;
 
     // XXX: CBDATA plays with public/private and leaves the following 'private' fields all public... :(
 
 #if USE_SSL
     bool switchedToHttps_;
     /// The SSL server host name appears in CONNECT request or the server ip address for the intercepted requests
     String sslConnectHostOrIp; ///< The SSL server host name as passed in the CONNECT request
     String sslCommonName; ///< CN name for SSL certificate generation
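As a reading aid, here is a toy model of the monitoring hand-off that the patch establishes (plain C++ with invented names; none of this is Squid code): ConnStateData watches the pinned connection only while it is idle from Squid's point of view, and the forwarding code takes over closure detection for the duration of a forwarded request.

    // Toy model (not Squid code) of when pinned-connection monitoring is active.
    #include <cassert>

    struct PinnedMonitorModel {
        bool monitoring = false;

        void pinIdleConnection() { monitoring = true; }   // pinConnection(): comm_read() scheduled
        void forwardNewRequest() { monitoring = false; }  // FwdState calls stopPinnedConnectionMonitoring()
        void responseCompleted() { monitoring = true; }   // same-fd re-pin branch restarts monitoring
    };

    int main()
    {
        PinnedMonitorModel m;
        m.pinIdleConnection();   assert(m.monitoring);  // idle pinned connection is watched
        m.forwardNewRequest();   assert(!m.monitoring); // in-use connection is the caller's responsibility
        m.responseCompleted();   assert(m.monitoring);  // idle again, watched again
        return 0;
    }

In the real code, these transitions correspond to pinConnection() (and its same-fd re-pin branch) calling startPinnedConnectionMonitoring(), and to the FwdState change shown above calling stopPinnedConnectionMonitoring() before reusing the pinned connection.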