comm.cc
1/*
2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/* DEBUG: section 05 Socket Functions */
10
11#include "squid.h"
12#include "base/AsyncFunCalls.h"
13#include "ClientInfo.h"
14#include "comm/AcceptLimiter.h"
15#include "comm/comm_internal.h"
16#include "comm/Connection.h"
17#include "comm/IoCallback.h"
18#include "comm/Loops.h"
19#include "comm/Read.h"
20#include "comm/TcpAcceptor.h"
21#include "comm/Write.h"
22#include "compat/cmsg.h"
23#include "DescriptorSet.h"
24#include "event.h"
25#include "fd.h"
26#include "fde.h"
27#include "globals.h"
28#include "icmp/net_db.h"
29#include "ip/Intercept.h"
30#include "ip/QosConfig.h"
31#include "ip/tools.h"
32#include "pconn.h"
33#include "sbuf/SBuf.h"
34#include "sbuf/Stream.h"
35#include "SquidConfig.h"
36#include "StatCounters.h"
37#include "StoreIOBuffer.h"
38#include "tools.h"
39
40#if USE_OPENSSL
41#include "ssl/support.h"
42#endif
43
44#include <cerrno>
45#include <cmath>
46#if _SQUID_CYGWIN_
47#include <sys/ioctl.h>
48#endif
49#ifdef HAVE_NETINET_TCP_H
50#include <netinet/tcp.h>
51#endif
52#if HAVE_SYS_UN_H
53#include <sys/un.h>
54#endif
55
56/*
57 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
58 */
59
60static IOCB commHalfClosedReader;
61static int comm_openex(int sock_type, int proto, Ip::Address &, int flags, const char *note);
62static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
63static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
64
65#if USE_DELAY_POOLS
66CBDATA_CLASS_INIT(CommQuotaQueue);
67
68static void commHandleWriteHelper(void * data);
69#endif
70
71/* STATIC */
72
73static DescriptorSet *TheHalfClosed = nullptr;
74static bool WillCheckHalfClosed = false;
75static EVH commHalfClosedCheck;
76static void commPlanHalfClosedCheck();
77
78static Comm::Flag commBind(int s, struct addrinfo &);
79static void commSetBindAddressNoPort(int);
80static void commSetReuseAddr(int);
81static void commSetNoLinger(int);
82#ifdef TCP_NODELAY
83static void commSetTcpNoDelay(int);
84#endif
85static void commSetTcpRcvbuf(int, int);
86
87bool
88isOpen(const int fd)
89{
90 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
91}
92
101static void
102comm_empty_os_read_buffers(int fd)
103{
104#if _SQUID_LINUX_
105#if USE_OPENSSL
106 // Bug 4146: SSL-Bump BIO does not release sockets on close.
107 if (fd_table[fd].ssl)
108 return;
109#endif
110
111 /* prevent those nasty RST packets */
112 char buf[SQUID_TCP_SO_RCVBUF];
113 if (fd_table[fd].flags.nonblocking && fd_table[fd].type != FD_MSGHDR) {
114 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
115 }
116#else
117 (void)fd;
118#endif
119}
120
124int
125comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
126{
127 ++ statCounter.syscalls.sock.recvfroms;
128 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
129 struct addrinfo *AI = nullptr;
130 Ip::Address::InitAddr(AI);
131 int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
132 from = *AI;
133 Ip::Address::FreeAddr(AI);
134 return x;
135}
136
137int
138comm_udp_recv(int fd, void *buf, size_t len, int flags)
139{
140 Ip::Address nul;
141 return comm_udp_recvfrom(fd, buf, len, flags, nul);
142}
143
144ssize_t
145comm_udp_send(int s, const void *buf, size_t len, int flags)
146{
147 return send(s, buf, len, flags);
148}
149
150bool
151comm_has_incomplete_write(int fd)
152{
153 assert(isOpen(fd) && COMMIO_FD_WRITECB(fd) != nullptr);
154 return COMMIO_FD_WRITECB(fd)->active();
155}
156
162/* Return the local port associated with fd. */
163unsigned short
164comm_local_port(int fd)
165{
166 Ip::Address temp;
167 struct addrinfo *addr = nullptr;
168 fde *F = &fd_table[fd];
169
170 /* If the fd is closed already, just return */
171
172 if (!F->flags.open) {
173 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
174 return 0;
175 }
176
177 if (F->local_addr.port())
178 return F->local_addr.port();
179
180 if (F->sock_family == AF_INET)
181 temp.setIPv4();
182
183 Ip::Address::InitAddr(addr);
184
185 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
186 int xerrno = errno;
187 debugs(50, DBG_IMPORTANT, "ERROR: " << MYNAME << "Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerr(xerrno));
188 Ip::Address::FreeAddr(addr);
189 return 0;
190 }
191 temp = *addr;
192
193 Ip::Address::FreeAddr(addr);
194
195 if (F->local_addr.isAnyAddr()) {
196 /* save the whole local address, not just the port. */
197 F->local_addr = temp;
198 } else {
199 F->local_addr.port(temp.port());
200 }
201
202 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
203 return F->local_addr.port();
204}
205
208static void
209commSetBindAddressNoPort(int fd)
210{
211#if defined(IP_BIND_ADDRESS_NO_PORT)
212 int flag = 1;
213 if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, reinterpret_cast<char*>(&flag), sizeof(flag)) < 0) {
214 const auto savedErrno = errno;
215 debugs(50, DBG_IMPORTANT, "ERROR: setsockopt(IP_BIND_ADDRESS_NO_PORT) failure: " << xstrerr(savedErrno));
216 }
217#else
218 (void)fd;
219#endif
220}
221
222static Comm::Flag
223commBind(int s, struct addrinfo &inaddr)
224{
225 ++ statCounter.syscalls.sock.binds;
226
227 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
228 debugs(50, 6, "bind socket FD " << s << " to " << fd_table[s].local_addr);
229 return Comm::OK;
230 }
231 int xerrno = errno;
232 debugs(50, DBG_CRITICAL, "ERROR: " << MYNAME << "Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerr(xerrno));
233
234 return Comm::COMM_ERROR;
235}
236
241int
242comm_open(int sock_type,
243 int proto,
244 Ip::Address &addr,
245 int flags,
246 const char *note)
247{
248 // assume zero-port callers do not need to know the assigned port right away
249 if (sock_type == SOCK_STREAM && addr.port() == 0 && ((flags & COMM_DOBIND) || !addr.isAnyAddr()))
250 flags |= COMM_DOBIND_PORT_LATER;
251
252 return comm_openex(sock_type, proto, addr, flags, note);
253}
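/* Illustrative usage sketch (not part of the original comm.cc): opening an
 * outgoing, non-blocking TCP socket via comm_open(). The names myLocalAddr
 * and myFd are hypothetical.
 *
 *   Ip::Address myLocalAddr;            // unspecified address: let the OS pick
 *   const int myFd = comm_open(SOCK_STREAM, IPPROTO_TCP, myLocalAddr,
 *                              COMM_NONBLOCKING, "illustrative outgoing socket");
 *   if (myFd < 0)
 *       debugs(5, DBG_IMPORTANT, "comm_open failed: " << xstrerr(errno));
 */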
254
255void
256comm_open_listener(int sock_type,
257 int proto,
258 Comm::ConnectionPointer &conn,
259 const char *note)
260{
261 /* all listener sockets require bind() */
262 conn->flags |= COMM_DOBIND;
263
264 /* attempt native enabled port. */
265 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
266}
267
268int
269comm_open_listener(int sock_type,
270 int proto,
271 Ip::Address &addr,
272 int flags,
273 const char *note)
274{
275 int sock = -1;
276
277 /* all listener sockets require bind() */
278 flags |= COMM_DOBIND;
279
280 /* attempt native enabled port. */
281 sock = comm_openex(sock_type, proto, addr, flags, note);
282
283 return sock;
284}
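/* Illustrative usage sketch (not part of the original comm.cc): opening a
 * listening TCP socket. comm_open_listener() forces COMM_DOBIND, so callers
 * only supply the address/port and any extra flags. The port is hypothetical.
 *
 *   Ip::Address listenAddr;                      // any-address
 *   listenAddr.port(3128);
 *   const int listenFd = comm_open_listener(SOCK_STREAM, IPPROTO_TCP, listenAddr,
 *                                           COMM_NONBLOCKING, "illustrative listener");
 *   if (listenFd < 0)
 *       debugs(5, DBG_CRITICAL, "cannot open listener on " << listenAddr);
 */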
285
286static bool
287limitError(int const anErrno)
288{
289 return anErrno == ENFILE || anErrno == EMFILE;
290}
291
292static void
293comm_set_v6only(int fd, int tos)
294{
295#ifdef IPV6_V6ONLY
296 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
297 int xerrno = errno;
298 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerr(xerrno));
299 }
300#else
301 debugs(50, DBG_CRITICAL, MYNAME << "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
302#endif /* sockopt */
303}
304
311static void
312comm_set_transparent(int fd)
313{
314#if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
315# define soLevel SOL_IP
316# define soFlag IP_TRANSPARENT
317 bool doneSuid = false;
318
319#elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
320# define soLevel SOL_SOCKET
321# define soFlag SO_BINDANY
322 enter_suid();
323 bool doneSuid = true;
324
325#elif defined(IP_BINDANY) // FreeBSD with IPFW
326# define soLevel IPPROTO_IP
327# define soFlag IP_BINDANY
328 enter_suid();
329 bool doneSuid = true;
330
331#else
332 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
333 (void)fd;
334#endif /* sockopt */
335
336#if defined(soLevel) && defined(soFlag)
337 int tos = 1;
338 if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
339 int xerrno = errno;
340 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(TPROXY) on FD " << fd << ": " << xstrerr(xerrno));
341 } else {
342 /* mark the socket as having transparent options */
343 fd_table[fd].flags.transparent = true;
344 }
345 if (doneSuid)
346 leave_suid();
347#endif
348}
349
354static int
355comm_openex(int sock_type,
356 int proto,
357 Ip::Address &addr,
358 int flags,
359 const char *note)
360{
361 int new_socket;
362 struct addrinfo *AI = nullptr;
363
364 /* Create socket for accepting new connections. */
365 ++ statCounter.syscalls.sock.sockets;
366
367 /* Setup the socket addrinfo details for use */
368 addr.getAddrInfo(AI);
369 AI->ai_socktype = sock_type;
370 AI->ai_protocol = proto;
371
372 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
373
374 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
375 int xerrno = errno;
376
377 /* under IPv6 there is the possibility IPv6 is present but disabled. */
378 /* try again as IPv4-native if possible */
379 if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
380 /* attempt to open this IPv4-only. */
381 Ip::Address::FreeAddr(AI);
382 /* Setup the socket addrinfo details for use */
383 addr.getAddrInfo(AI);
384 AI->ai_socktype = sock_type;
385 AI->ai_protocol = proto;
386 debugs(50, 3, "Attempt fallback open socket for: " << addr );
387 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
388 debugs(50, 2, "attempt open " << note << " socket on: " << addr);
389 }
390
391 if (new_socket < 0) {
392 /* Increase the number of reserved fd's if calls to socket()
393 * are failing because the open file table is full. This
394 * limits the number of simultaneous clients */
395
396 if (limitError(errno)) {
397 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
398 fdAdjustReserved();
399 } else {
400 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
401 }
402
403 Ip::Address::FreeAddr(AI);
404
405 errno = xerrno; // restore for caller
406 return -1;
407 }
408
409 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
410 Comm::ConnectionPointer conn = new Comm::Connection;
411 conn->local = addr;
412 conn->fd = new_socket;
413
414 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
415
416 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
417 comm_set_v6only(conn->fd, 1);
418
419 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
420 /* Other OS may have this administratively disabled for general use. Same deal. */
421 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
422 comm_set_v6only(conn->fd, 0);
423
424 comm_init_opened(conn, note, AI);
425 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
426
428
429 // XXX transition only. prevent conn from closing the new FD on function exit.
430 conn->fd = -1;
431 errno = xerrno; // restore for caller
432 return new_socket;
433}
434
435/// update FD tables after a local or remote (IPC) comm_openex();
436void
437comm_init_opened(const Comm::ConnectionPointer &conn,
438 const char *note,
439 struct addrinfo *AI)
440{
441 assert(Comm::IsConnOpen(conn));
442 assert(AI);
443
444 /* update fdstat */
445 debugs(5, 5, conn << " is a new socket");
446
447 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
448 fd_open(conn->fd, FD_SOCKET, note);
449
450 fde *F = &fd_table[conn->fd];
451 F->local_addr = conn->local;
452
453 F->sock_family = AI->ai_family;
454}
455
458static int
459comm_apply_flags(int new_socket,
460 Ip::Address &addr,
461 int flags,
462 struct addrinfo *AI)
463{
464 assert(new_socket >= 0);
465 assert(AI);
466 const int sock_type = AI->ai_socktype;
467
468 if (!(flags & COMM_NOCLOEXEC))
469 commSetCloseOnExec(new_socket);
470
471 if ((flags & COMM_REUSEADDR))
472 commSetReuseAddr(new_socket);
473
474 if (addr.port() > (unsigned short) 0) {
475#if _SQUID_WINDOWS_
476 if (sock_type != SOCK_DGRAM)
477#endif
478 commSetNoLinger(new_socket);
479
480 if (opt_reuseaddr)
481 commSetReuseAddr(new_socket);
482 }
483
484 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
485 if ((flags & COMM_TRANSPARENT)) {
486 comm_set_transparent(new_socket);
487 }
488
489 if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
490 if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
491 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
492 if ( addr.isNoAddr() )
493 debugs(5, DBG_CRITICAL, "ERROR: Squid is attempting to bind() port " << addr << "!!");
494
495#if defined(SO_REUSEPORT)
496 if (flags & COMM_REUSEPORT) {
497 int on = 1;
498 if (setsockopt(new_socket, SOL_SOCKET, SO_REUSEPORT, reinterpret_cast<char*>(&on), sizeof(on)) < 0) {
499 const auto savedErrno = errno;
500 const auto errorMessage = ToSBuf("cannot enable SO_REUSEPORT socket option when binding to ",
501 addr, ": ", xstrerr(savedErrno));
502 if (reconfiguring)
503 debugs(5, DBG_IMPORTANT, "ERROR: " << errorMessage);
504 else
505 throw TexcHere(errorMessage);
506 }
507 }
508#endif
509
510 if ((flags & COMM_DOBIND_PORT_LATER))
511 commSetBindAddressNoPort(new_socket);
512
513 if (commBind(new_socket, *AI) != Comm::OK) {
514 comm_close(new_socket);
515 return -1;
516 }
517 }
518
519 if (flags & COMM_NONBLOCKING)
520 if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
521 comm_close(new_socket);
522 return -1;
523 }
524
525#ifdef TCP_NODELAY
526 if (sock_type == SOCK_STREAM)
527 commSetTcpNoDelay(new_socket);
528
529#endif
530
531 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
532 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
533
534 return new_socket;
535}
536
537void
538comm_import_opened(const Comm::ConnectionPointer &conn,
539 const char *note,
540 struct addrinfo *AI)
541{
542 debugs(5, 2, conn);
543 assert(Comm::IsConnOpen(conn));
544 assert(AI);
545
546 comm_init_opened(conn, note, AI);
547
548 if (!(conn->flags & COMM_NOCLOEXEC))
549 fd_table[conn->fd].flags.close_on_exec = true;
550
551 if (conn->local.port() > (unsigned short) 0) {
552#if _SQUID_WINDOWS_
553 if (AI->ai_socktype != SOCK_DGRAM)
554#endif
555 fd_table[conn->fd].flags.nolinger = true;
556 }
557
558 if ((conn->flags & COMM_TRANSPARENT))
559 fd_table[conn->fd].flags.transparent = true;
560
561 if (conn->flags & COMM_NONBLOCKING)
562 fd_table[conn->fd].flags.nonblocking = true;
563
564#ifdef TCP_NODELAY
565 if (AI->ai_socktype == SOCK_STREAM)
566 fd_table[conn->fd].flags.nodelay = true;
567#endif
568
569 /* no fd_table[fd].flags. updates needed for these conditions:
570 * if ((flags & COMM_REUSEADDR)) ...
571 * if ((flags & COMM_DOBIND) ...) ...
572 */
573}
574
575// XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
576// With handler already unset. Leaving this present until that can be verified for all code paths.
577void
578commUnsetFdTimeout(int fd)
579{
580 debugs(5, 3, "Remove timeout for FD " << fd);
581 assert(fd >= 0);
582 assert(fd < Squid_MaxFD);
583 fde *F = &fd_table[fd];
584 assert(F->flags.open);
585
586 F->timeoutHandler = nullptr;
587 F->timeout = 0;
588}
589
590int
591commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
592{
593 debugs(5, 3, conn << " timeout " << timeout);
594 assert(Comm::IsConnOpen(conn));
595 assert(conn->fd < Squid_MaxFD);
596 fde *F = &fd_table[conn->fd];
597 assert(F->flags.open);
598
599 if (timeout < 0) {
600 F->timeoutHandler = nullptr;
601 F->timeout = 0;
602 } else {
603 if (callback != nullptr) {
604 typedef CommTimeoutCbParams Params;
605 Params &params = GetCommParams<Params>(callback);
606 params.conn = conn;
607 F->timeoutHandler = callback;
608 }
609
610 F->timeout = squid_curtime + (time_t) timeout;
611 }
612
613 return F->timeout;
614}
615
616int
617commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
618{
619 debugs(5, 3, "Remove timeout for " << conn);
620 AsyncCall::Pointer nil;
621 return commSetConnTimeout(conn, -1, nil);
622}
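/* Illustrative usage sketch (not part of the original comm.cc): arming and later
 * disarming a connection timeout. The handler, data, and timeout value are
 * hypothetical; CommTimeoutCbPtrFun is assumed to be available from CommCalls.h.
 *
 *   static void myTimeoutHandler(const CommTimeoutCbParams &params);
 *   ...
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "myTimeoutHandler",
 *       CommTimeoutCbPtrFun(&myTimeoutHandler, nullptr));
 *   commSetConnTimeout(conn, 30, timeoutCall);   // arm: 30 seconds from now
 *   ...
 *   commUnsetConnTimeout(conn);                  // disarm once the I/O completed
 */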
623
629int
630comm_connect_addr(int sock, const Ip::Address &address)
631{
632 Comm::Flag status = Comm::OK;
633 fde *F = &fd_table[sock];
634 int x = 0;
635 int err = 0;
636 socklen_t errlen;
637 struct addrinfo *AI = nullptr;
638
639 assert(address.port() != 0);
640
641 debugs(5, 9, "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
642
643 /* Handle IPv6 over IPv4-only socket case.
644 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
645 * NP: because commResetFD is private to ConnStateData we have to return an error and
646 * trust it's handled properly.
647 */
648 if (F->sock_family == AF_INET && !address.isIPv4()) {
649 errno = ENETUNREACH;
650 return Comm::ERR_PROTOCOL;
651 }
652
653 /* Handle IPv4 over IPv6-only socket case.
654 * This case is presently handled here as it's both a known case and it's
655 * uncertain what error will be returned by the IPv6 stack in such case. It's
656 * possible this will also be handled by the errno checks below after connect()
657 * but needs careful cross-platform verification, and verifying the address
658 * condition here is simple.
659 */
660 if (!F->local_addr.isIPv4() && address.isIPv4()) {
661 errno = ENETUNREACH;
662 return Comm::ERR_PROTOCOL;
663 }
664
665 address.getAddrInfo(AI, F->sock_family);
666
667 /* Establish connection. */
668 int xerrno = 0;
669
670 if (!F->flags.called_connect) {
671 F->flags.called_connect = true;
672 ++ statCounter.syscalls.sock.connects;
673
674 errno = 0;
675 if ((x = connect(sock, AI->ai_addr, AI->ai_addrlen)) < 0) {
676 xerrno = errno;
677 debugs(5,5, "sock=" << sock << ", addrinfo(" <<
678 " flags=" << AI->ai_flags <<
679 ", family=" << AI->ai_family <<
680 ", socktype=" << AI->ai_socktype <<
681 ", protocol=" << AI->ai_protocol <<
682 ", &addr=" << AI->ai_addr <<
683 ", addrlen=" << AI->ai_addrlen << " )");
684 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerr(xerrno));
685 debugs(14,9, "connecting to: " << address);
686
687 } else if (x == 0) {
688 // XXX: ICAP code refuses callbacks during a pending comm_ call
689 // Async calls development will fix this.
690 x = -1;
691 xerrno = EINPROGRESS;
692 }
693
694 } else {
695 errno = 0;
696#if _SQUID_NEWSOS6_
697 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
698 if (connect(sock, AI->ai_addr, AI->ai_addrlen) < 0)
699 xerrno = errno;
700
701 if (xerrno == EINVAL) {
702 errlen = sizeof(err);
703 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
704 if (x >= 0)
705 xerrno = x;
706 }
707#else
708 errlen = sizeof(err);
709 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
710 if (x == 0)
711 xerrno = err;
712
713#if _SQUID_SOLARIS_
714 /*
715 * Solaris 2.4's socket emulation doesn't allow you
716 * to determine the error from a failed non-blocking
717 * connect and just returns EPIPE. Create a fake
718 * error message for connect. -- fenner@parc.xerox.com
719 */
720 if (x < 0 && xerrno == EPIPE)
721 xerrno = ENOTCONN;
722 else
723 xerrno = errno;
724#endif
725#endif
726 }
727
728 Ip::Address::FreeAddr(AI);
729
730 errno = xerrno;
731 if (xerrno == 0 || xerrno == EISCONN)
732 status = Comm::OK;
733 else if (ignoreErrno(xerrno))
734 status = Comm::INPROGRESS;
735 else if (xerrno == EAFNOSUPPORT || xerrno == EINVAL)
736 return Comm::ERR_PROTOCOL;
737 else
738 return Comm::COMM_ERROR;
739
740 address.toStr(F->ipaddr, MAX_IPSTRLEN);
741
742 F->remote_port = address.port(); /* remote_port is HS */
743
744 if (status == Comm::OK) {
745 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
746 } else if (status == Comm::INPROGRESS) {
747 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
748 }
749
750 errno = xerrno;
751 return status;
752}
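/* Illustrative sketch (not part of the original comm.cc) of the non-blocking
 * connect pattern built on comm_connect_addr(): the first call usually yields
 * Comm::INPROGRESS, and the caller retries once the socket becomes writable.
 * serverAddr is hypothetical.
 *
 *   switch (comm_connect_addr(sock, serverAddr)) {
 *   case Comm::OK:
 *       break;                          // connected immediately
 *   case Comm::INPROGRESS:
 *       // wait for write readiness, then call comm_connect_addr() again
 *       break;
 *   default:
 *       comm_close(sock);               // Comm::ERR_PROTOCOL or Comm::COMM_ERROR
 *   }
 */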
753
754void
755commCallCloseHandlers(int fd)
756{
757 fde *F = &fd_table[fd];
758 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
759
760 while (F->closeHandler != nullptr) {
761 AsyncCall::Pointer call = F->closeHandler;
762 F->closeHandler = call->Next();
763 call->setNext(nullptr);
764 // If call is not canceled schedule it for execution else ignore it
765 if (!call->canceled()) {
766 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
767 // XXX: Without the following code, callback fd may be -1.
768 // typedef CommCloseCbParams Params;
769 // auto &params = GetCommParams<Params>(call);
770 // params.fd = fd;
771 ScheduleCallHere(call);
772 }
773 }
774}
775
780void
781comm_reset_close(const Comm::ConnectionPointer &conn)
782{
783 struct linger L;
784 L.l_onoff = 1;
785 L.l_linger = 0;
786
787 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
788 int xerrno = errno;
789 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerr(xerrno));
790 }
791 conn->close();
792}
793
794// Legacy close function.
795void
796old_comm_reset_close(int fd)
797{
798 struct linger L;
799 L.l_onoff = 1;
800 L.l_linger = 0;
801
802 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
803 int xerrno = errno;
804 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerr(xerrno));
805 }
806 comm_close(fd);
807}
808
809static void
810commStartTlsClose(const int fd)
811{
812 Security::SessionSendGoodbye(fd_table[fd].ssl);
813}
814
815static void
816comm_close_complete(const int fd)
817{
818 auto F = &fd_table[fd];
819 F->ssl.reset();
820 F->dynamicTlsContext.reset();
821 fd_close(fd); /* update fdstat */
822 close(fd);
823
825
826 /* When one connection closes, give accept() a chance, if need be */
827 CodeContext::Reset(); // exit FD-specific context
829}
830
831/*
832 * Close the socket fd.
833 *
834 * + call write handlers with ERR_CLOSING
835 * + call read handlers with ERR_CLOSING
836 * + call closing handlers
837 *
838 * A deferred reader has no Comm read handler mentioned above. To stay in sync,
839 * such a reader must register a Comm closing handler.
840 */
841void
842_comm_close(int fd, char const *file, int line)
843{
844 debugs(5, 3, "start closing FD " << fd << " by " << file << ":" << line);
845 assert(fd >= 0);
846 assert(fd < Squid_MaxFD);
847
848 fde *F = &fd_table[fd];
849
850 if (F->closing())
851 return;
852
853 /* XXX: is this obsolete behind F->closing() ? */
854 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
855 return;
856
857 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
858 if (!isOpen(fd)) {
859 debugs(50, DBG_IMPORTANT, "ERROR: Squid BUG #3556: FD " << fd << " is not an open socket.");
860 // XXX: do we need to run close(fd) or fd_close(fd) here?
861 return;
862 }
863
864 assert(F->type != FD_FILE);
865
866 F->flags.close_request = true;
867
868 // We have caller's context and fde::codeContext. In the unlikely event they
869 // differ, it is not clear which context is more applicable to this closure.
870 // For simplicity sake, we remain in the caller's context while still
871 // allowing individual advanced callbacks to overwrite it.
872
873 if (F->ssl) {
874 const auto startCall = asyncCall(5, 4, "commStartTlsClose",
875 callDialer(commStartTlsClose, fd));
876 ScheduleCallHere(startCall);
877 }
878
879 // a half-closed fd may lack a reader, so we stop monitoring explicitly
880 if (commHasHalfClosedMonitor(fd))
881 commStopHalfClosedMonitor(fd);
883
884 // notify read/write handlers after canceling select reservations, if any
885 if (COMMIO_FD_WRITECB(fd)->active()) {
886 Comm::SetSelect(fd, COMM_SELECT_WRITE, nullptr, nullptr, 0);
887 COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
888 }
889 if (COMMIO_FD_READCB(fd)->active()) {
890 Comm::SetSelect(fd, COMM_SELECT_READ, nullptr, nullptr, 0);
891 COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
892 }
893
894#if USE_DELAY_POOLS
895 if (BandwidthBucket *bucket = BandwidthBucket::SelectBucket(F)) {
896 if (bucket->selectWaiting)
897 bucket->onFdClosed();
898 }
899#endif
900
901 commCallCloseHandlers(fd);
902
903 comm_empty_os_read_buffers(fd);
904
905 // must use async call to wait for all callbacks
906 // scheduled before comm_close() to finish
907 const auto completeCall = asyncCall(5, 4, "comm_close_complete",
908 callDialer(comm_close_complete, fd));
909 ScheduleCallHere(completeCall);
910}
911
912/* Send a udp datagram to specified TO_ADDR. */
913int
914comm_udp_sendto(int fd,
915 const Ip::Address &to_addr,
916 const void *buf,
917 int len)
918{
919 ++ statCounter.syscalls.sock.sendtos;
920
921 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
922 " using FD " << fd << " using Port " << comm_local_port(fd) );
923
924 struct addrinfo *AI = nullptr;
925 to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
926 int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
927 int xerrno = errno;
928 Ip::Address::FreeAddr(AI);
929
930 if (x >= 0) {
931 errno = xerrno; // restore for caller to use
932 return x;
933 }
934
935#if _SQUID_LINUX_
936 if (ECONNREFUSED != xerrno)
937#endif
938 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerr(xerrno));
939
940 errno = xerrno; // restore for caller to use
941 return Comm::COMM_ERROR;
942}
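/* Illustrative sketch (not part of the original comm.cc): sending a datagram on
 * an already-open UDP socket. The destination and payload are hypothetical.
 *
 *   extern Ip::Address peerAddr;        // filled in elsewhere
 *   const char payload[] = "ping";
 *   if (comm_udp_sendto(udpFd, peerAddr, payload, sizeof(payload)) == Comm::COMM_ERROR)
 *       debugs(5, DBG_IMPORTANT, "UDP send to " << peerAddr << " failed");
 */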
943
944AsyncCall::Pointer
945comm_add_close_handler(int fd, CLCB * handler, void *data)
946{
947 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
948 handler << ", data=" << data);
949
950 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
951 CommCloseCbPtrFun(handler, data));
952 comm_add_close_handler(fd, call);
953 return call;
954}
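/* Illustrative sketch (not part of the original comm.cc): pairing the legacy
 * CLCB-based close handler registration with its removal. myCloseHandler and
 * myState are hypothetical.
 *
 *   static void myCloseHandler(const CommCloseCbParams &params);
 *   ...
 *   comm_add_close_handler(fd, &myCloseHandler, myState);     // runs on comm_close(fd)
 *   ...
 *   comm_remove_close_handler(fd, &myCloseHandler, myState);  // no longer interested
 */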
955
956void
957comm_add_close_handler(int fd, AsyncCall::Pointer &call)
958{
959 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
960
961 /*TODO:Check for a similar scheduled AsyncCall*/
962// for (c = fd_table[fd].closeHandler; c; c = c->next)
963// assert(c->handler != handler || c->data != data);
964
965 // TODO: Consider enhancing AsyncCallList to support random-access close
966 // handlers, perhaps after upgrading the remaining legacy CLCB handlers.
967 call->setNext(fd_table[fd].closeHandler);
968
969 fd_table[fd].closeHandler = call;
970}
971
972// remove function-based close handler
973void
974comm_remove_close_handler(int fd, CLCB * handler, void *data)
975{
976 assert(isOpen(fd));
977 /* Find handler in list */
978 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
979 handler << ", data=" << data);
980
981 AsyncCall::Pointer p, prev = nullptr;
982 for (p = fd_table[fd].closeHandler; p != nullptr; prev = p, p = p->Next()) {
983 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
984 const Call *call = dynamic_cast<const Call*>(p.getRaw());
985 if (!call) // method callbacks have their own comm_remove_close_handler
986 continue;
987
988 typedef CommCloseCbParams Params;
989 const Params &params = GetCommParams<Params>(p);
990 if (call->dialer.handler == handler && params.data == data)
991 break; /* This is our handler */
992 }
993
994 // comm_close removes all close handlers so our handler may be gone
995 if (p != nullptr) {
996 p->dequeue(fd_table[fd].closeHandler, prev);
997 p->cancel("comm_remove_close_handler");
998 }
999}
1000
1001// remove method-based close handler
1002void
1003comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1004{
1005 assert(isOpen(fd));
1006 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1007
1008 // comm_close removes all close handlers so our handler may be gone
1009 AsyncCall::Pointer p, prev = nullptr;
1010 for (p = fd_table[fd].closeHandler; p != nullptr && p != call; prev = p, p = p->Next());
1011
1012 if (p != nullptr)
1013 p->dequeue(fd_table[fd].closeHandler, prev);
1014 call->cancel("comm_remove_close_handler");
1015}
1016
1017static void
1018commSetNoLinger(int fd)
1019{
1020
1021 struct linger L;
1022 L.l_onoff = 0; /* off */
1023 L.l_linger = 0;
1024
1025 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
1026 int xerrno = errno;
1027 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1028 }
1029 fd_table[fd].flags.nolinger = true;
1030}
1031
1032static void
1033commSetReuseAddr(int fd)
1034{
1035 int on = 1;
1036 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0) {
1037 int xerrno = errno;
1038 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1039 }
1040}
1041
1042static void
1043commSetTcpRcvbuf(int fd, int size)
1044{
1045 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0) {
1046 int xerrno = errno;
1047 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1048 }
1049 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0) {
1050 int xerrno = errno;
1051 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1052 }
1053#ifdef TCP_WINDOW_CLAMP
1054 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0) {
1055 int xerrno = errno;
1056 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1057 }
1058#endif
1059}
1060
1061int
1062commSetNonBlocking(int fd)
1063{
1064#if _SQUID_WINDOWS_
1065 int nonblocking = TRUE;
1066
1067 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1068 int xerrno = errno;
1069 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno) << " " << fd_table[fd].type);
1070 return Comm::COMM_ERROR;
1071 }
1072
1073#else
1074 int flags;
1075 int dummy = 0;
1076
1077 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1078 int xerrno = errno;
1079 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1080 return Comm::COMM_ERROR;
1081 }
1082
1083 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1084 int xerrno = errno;
1085 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1086 return Comm::COMM_ERROR;
1087 }
1088#endif
1089
1090 fd_table[fd].flags.nonblocking = true;
1091 return 0;
1092}
1093
1094int
1095commUnsetNonBlocking(int fd)
1096{
1097#if _SQUID_WINDOWS_
1098 int nonblocking = FALSE;
1099
1100 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1101#else
1102 int flags;
1103 int dummy = 0;
1104
1105 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1106 int xerrno = errno;
1107 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1108 return Comm::COMM_ERROR;
1109 }
1110
1111 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1112#endif
1113 int xerrno = errno;
1114 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1115 return Comm::COMM_ERROR;
1116 }
1117
1118 fd_table[fd].flags.nonblocking = false;
1119 return 0;
1120}
1121
1122void
1123commSetCloseOnExec(int fd)
1124{
1125#ifdef FD_CLOEXEC
1126 int flags;
1127 int dummy = 0;
1128
1129 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1130 int xerrno = errno;
1131 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFD: " << xstrerr(xerrno));
1132 return;
1133 }
1134
1135 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
1136 int xerrno = errno;
1137 debugs(50, DBG_CRITICAL, "ERROR: " << MYNAME << "FD " << fd << ": set close-on-exec failed: " << xstrerr(xerrno));
1138 }
1139
1140 fd_table[fd].flags.close_on_exec = true;
1141
1142#endif
1143}
1144
1145#ifdef TCP_NODELAY
1146static void
1147commSetTcpNoDelay(int fd)
1148{
1149 int on = 1;
1150
1151 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0) {
1152 int xerrno = errno;
1153 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1154 }
1155
1156 fd_table[fd].flags.nodelay = true;
1157}
1158
1159#endif
1160
1161void
1162comm_init(void)
1163{
1165
1166 /* make sure the accept() socket FIFO delay queue exists */
1167 Comm::AcceptLimiter::Instance();
1168
1169 // make sure the IO pending callback table exists
1170 Comm::CallbackTableInit();
1171
1172 /* XXX account fd_table */
1173 /* Keep a few file descriptors free so that we don't run out of FD's
1174 * after accepting a client but before it opens a socket or a file.
1175 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1176 RESERVED_FD = min(100, Squid_MaxFD / 4);
1177
1178 TheHalfClosed = new DescriptorSet;
1179
1180 /* setup the select loop module */
1181 Comm::SelectLoopInit();
1182}
1183
1184void
1185comm_exit(void)
1186{
1187 delete TheHalfClosed;
1188 TheHalfClosed = nullptr;
1189
1190 Comm::CallbackTableDestruct();
1191}
1192
1193#if USE_DELAY_POOLS
1194// called when the queue is done waiting for the client bucket to fill
1195void
1196commHandleWriteHelper(void * data)
1197{
1198 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1199 assert(queue);
1200
1201 ClientInfo *clientInfo = queue->clientInfo;
1202 // ClientInfo invalidates queue if freed, so if we got here through
1203 // eventAdd cbdata protections, everything should be valid and consistent
1204 assert(clientInfo);
1205 assert(clientInfo->hasQueue());
1206 assert(clientInfo->hasQueue(queue));
1207 assert(clientInfo->eventWaiting);
1208 clientInfo->eventWaiting = false;
1209
1210 do {
1211 clientInfo->writeOrDequeue();
1212 if (clientInfo->selectWaiting)
1213 return;
1214 } while (clientInfo->hasQueue());
1215
1216 debugs(77, 3, "emptied queue");
1217}
1218
1219void
1220ClientInfo::writeOrDequeue()
1221{
1222 assert(!selectWaiting);
1223 const auto head = quotaPeekFd();
1224 const auto &headFde = fd_table[head];
1225 CallBack(headFde.codeContext, [&] {
1226 const auto ccb = COMMIO_FD_WRITECB(head);
1227 // check that the head descriptor is still relevant
1228 if (headFde.clientInfo == this &&
1229 quotaPeekReserv() == ccb->quotaQueueReserv &&
1230 !headFde.closing()) {
1231
1232 // wait for the head descriptor to become ready for writing
1233 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1234 selectWaiting = true;
1235 } else {
1236 quotaDequeue(); // remove the no longer relevant descriptor
1237 }
1238 });
1239}
1240
1241bool
1242ClientInfo::hasQueue() const
1243{
1244 assert(quotaQueue);
1245 return !quotaQueue->empty();
1246}
1247
1248bool
1249ClientInfo::hasQueue(const CommQuotaQueue *q) const
1250{
1251 assert(quotaQueue);
1252 return quotaQueue == q;
1253}
1254
1256int
1257ClientInfo::quotaPeekFd() const
1258{
1259 assert(quotaQueue);
1260 return quotaQueue->front();
1261}
1262
1264unsigned int
1265ClientInfo::quotaPeekReserv() const
1266{
1267 assert(quotaQueue);
1268 return quotaQueue->outs + 1;
1269}
1270
1272unsigned int
1273ClientInfo::quotaEnqueue(int fd)
1274{
1275 assert(quotaQueue);
1276 return quotaQueue->enqueue(fd);
1277}
1278
1280void
1281ClientInfo::quotaDequeue()
1282{
1283 assert(quotaQueue);
1284 quotaQueue->dequeue();
1285}
1286
1287void
1288ClientInfo::kickQuotaQueue()
1289{
1290 if (!eventWaiting && !selectWaiting && hasQueue()) {
1291 // wait at least a second if the bucket is empty
1292 const double delay = (bucketLevel < 1.0) ? 1.0 : 0.0;
1293 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1294 quotaQueue, delay, 0, true);
1295 eventWaiting = true;
1296 }
1297}
1298
1300int
1301ClientInfo::quota()
1302{
1303 /* If we have multiple clients and give full bucketSize to each client then
1304 * clt1 may often get a lot more because clt1->clt2 time distance in the
1305 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1306 * We divide quota evenly to be more fair. */
1307
1308 if (!rationedCount) {
1309 rationedCount = quotaQueue->size() + 1;
1310
1311 // The delay in ration recalculation _temporary_ deprives clients from
1312 // bytes that should have trickled in while rationedCount was positive.
1313 refillBucket();
1314
1315 // Rounding errors do not accumulate here, but we round down to avoid
1316 // negative bucket sizes after write with rationedCount=1.
1317 rationedQuota = static_cast<int>(floor(bucketLevel/rationedCount));
1318 debugs(77,5, "new rationedQuota: " << rationedQuota <<
1319 '*' << rationedCount);
1320 }
1321
1322 --rationedCount;
1323 debugs(77,7, "rationedQuota: " << rationedQuota <<
1324 " rations remaining: " << rationedCount);
1325
1326 // update 'last seen' time to prevent clientdb GC from dropping us
1327 last_seen = squid_curtime;
1328 return rationedQuota;
1329}
1330
1331bool
1332ClientInfo::applyQuota(int &nleft, Comm::IoCallback *state)
1333{
1334 assert(hasQueue());
1335 assert(quotaPeekFd() == state->conn->fd);
1336 quotaDequeue(); // we will write or requeue below
1337 if (nleft > 0 && !BandwidthBucket::applyQuota(nleft, state)) {
1338 state->quotaQueueReserv = quotaEnqueue(state->conn->fd);
1339 kickQuotaQueue();
1340 return false;
1341 }
1342 return true;
1343}
1344
1345void
1346ClientInfo::scheduleWrite(Comm::IoCallback *state)
1347{
1348 if (writeLimitingActive) {
1349 state->quotaQueueReserv = quotaEnqueue(state->conn->fd);
1350 kickQuotaQueue();
1351 }
1352}
1353
1354void
1355ClientInfo::onFdClosed()
1356{
1357 BandwidthBucket::onFdClosed();
1358 // kick queue or it will get stuck as commWriteHandle is not called
1359 kickQuotaQueue();
1360}
1361
1362void
1363ClientInfo::reduceBucket(const int len)
1364{
1365 if (len > 0)
1366 BandwidthBucket::reduceBucket(len);
1367 // even if we wrote nothing, we were served; give others a chance
1368 kickQuotaQueue();
1369}
1370
1371void
1372ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1373{
1374 debugs(77,5, "Write limits for " << (const char*)key <<
1375 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1376 " highwatermark=" << aHighWatermark);
1377
1378 // set or possibly update traffic shaping parameters
1379 writeLimitingActive = true;
1380 writeSpeedLimit = aWriteSpeedLimit;
1381 bucketSizeLimit = aHighWatermark;
1382
1383 // but some members should only be set once for a newly activated bucket
1384 if (firstTimeConnection) {
1385 firstTimeConnection = false;
1386
1387 assert(!selectWaiting);
1388 assert(!quotaQueue);
1389 quotaQueue = new CommQuotaQueue(this);
1390
1391 bucketLevel = anInitialBurst;
1392 prevTime = current_dtime;
1393 }
1394}
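/* Illustrative sketch (not part of the original comm.cc): activating per-client
 * write shaping on a ClientInfo entry. The numbers are hypothetical and would
 * normally come from client_delay_parameters-style configuration.
 *
 *   clientInfo->setWriteLimiter(64000,      // refill rate, bytes/second
 *                               32000.0,    // initial burst allowance
 *                               128000.0);  // bucket ceiling (high watermark)
 */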
1395
1396CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1397 ins(0), outs(0)
1398{
1399 assert(clientInfo);
1400}
1401
1402CommQuotaQueue::~CommQuotaQueue()
1403{
1404 assert(!clientInfo); // ClientInfo should clear this before destroying us
1405}
1406
1408unsigned int
1409CommQuotaQueue::enqueue(int fd)
1410{
1411 debugs(77,5, "clt" << (const char*)clientInfo->key <<
1412 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1413 fds.push_back(fd);
1414 fd_table[fd].codeContext = CodeContext::Current();
1415 return ++ins;
1416}
1417
1419void
1420CommQuotaQueue::dequeue()
1421{
1422 assert(!fds.empty());
1423 debugs(77,5, "clt" << (const char*)clientInfo->key <<
1424 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1425 fds.size());
1426 fds.pop_front();
1427 ++outs;
1428}
1429#endif /* USE_DELAY_POOLS */
1430
1431/*
1432 * hm, this might be too general-purpose for all the places we'd
1433 * like to use it.
1434 */
1435int
1436ignoreErrno(int ierrno)
1437{
1438 switch (ierrno) {
1439
1440 case EINPROGRESS:
1441
1442 case EWOULDBLOCK:
1443#if EAGAIN != EWOULDBLOCK
1444
1445 case EAGAIN:
1446#endif
1447
1448 case EALREADY:
1449
1450 case EINTR:
1451#ifdef ERESTART
1452
1453 case ERESTART:
1454#endif
1455
1456 return 1;
1457
1458 default:
1459 return 0;
1460 }
1461
1462 /* NOTREACHED */
1463}
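/* Illustrative sketch (not part of the original comm.cc): the typical retry
 * decision around a failed syscall on a non-blocking socket.
 *
 *   const ssize_t n = read(fd, buf, sizeof(buf));
 *   if (n < 0) {
 *       if (ignoreErrno(errno))
 *           return;      // transient (EAGAIN, EINTR, ...): keep the FD, retry later
 *       comm_close(fd);  // hard error: give up on this socket
 *   }
 */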
1464
1465void
1466commCloseAllSockets(void)
1467{
1468 int fd;
1469 fde *F = nullptr;
1470
1471 for (fd = 0; fd <= Biggest_FD; ++fd) {
1472 F = &fd_table[fd];
1473
1474 if (!F->flags.open)
1475 continue;
1476
1477 if (F->type != FD_SOCKET)
1478 continue;
1479
1480 if (F->flags.ipc) /* don't close inter-process sockets */
1481 continue;
1482
1483 if (F->timeoutHandler != nullptr) {
1484 AsyncCall::Pointer callback = F->timeoutHandler;
1485 F->timeoutHandler = nullptr;
1486 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1487 ScheduleCallHere(callback);
1488 } else {
1489 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1490 old_comm_reset_close(fd);
1491 }
1492 }
1493}
1494
1495static bool
1496AlreadyTimedOut(fde *F)
1497{
1498 if (!F->flags.open)
1499 return true;
1500
1501 if (F->timeout == 0)
1502 return true;
1503
1504 if (F->timeout > squid_curtime)
1505 return true;
1506
1507 return false;
1508}
1509
1510static bool
1511writeTimedOut(int fd)
1512{
1513 if (!COMMIO_FD_WRITECB(fd)->active())
1514 return false;
1515
1516 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1517 return false;
1518
1519 return true;
1520}
1521
1522void
1523checkTimeouts(void)
1524{
1525 int fd;
1526 fde *F = nullptr;
1527 AsyncCall::Pointer callback;
1528
1529 for (fd = 0; fd <= Biggest_FD; ++fd) {
1530 F = &fd_table[fd];
1531
1532 if (writeTimedOut(fd)) {
1533 // We have an active write callback and we are timed out
1534 CodeContext::Reset(F->codeContext);
1535 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1536 Comm::SetSelect(fd, COMM_SELECT_WRITE, nullptr, nullptr, 0);
1537 COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
1538 CodeContext::Reset();
1539 continue;
1540#if USE_DELAY_POOLS
1541 } else if (F->writeQuotaHandler != nullptr && COMMIO_FD_WRITECB(fd)->conn != nullptr) {
1542 // TODO: Move and extract quota() call to place it inside F->codeContext.
1543 if (!F->writeQuotaHandler->selectWaiting && F->writeQuotaHandler->quota() && !F->closing()) {
1544 CodeContext::Reset(F->codeContext);
1545 F->writeQuotaHandler->selectWaiting = true;
1546 Comm::SetSelect(fd, COMM_SELECT_WRITE, Comm::HandleWrite, COMMIO_FD_WRITECB(fd), 0);
1547 CodeContext::Reset();
1548 }
1549 continue;
1550#endif
1551 }
1552 else if (AlreadyTimedOut(F))
1553 continue;
1554
1555 CodeContext::Reset(F->codeContext);
1556 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1557
1558 if (F->timeoutHandler != nullptr) {
1559 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1560 callback = F->timeoutHandler;
1561 F->timeoutHandler = nullptr;
1562 ScheduleCallHere(callback);
1563 } else {
1564 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1565 comm_close(fd);
1566 }
1567
1568 CodeContext::Reset();
1569 }
1570}
1571
1572/// Start waiting for a possibly half-closed connection to close
1573// by scheduling a read callback to a monitoring handler that
1574// will close the connection on read errors.
1575void
1576commStartHalfClosedMonitor(int fd)
1577{
1578 debugs(5, 5, "adding FD " << fd << " to " << *TheHalfClosed);
1579 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1580 (void)TheHalfClosed->add(fd); // could also assert the result
1581 fd_table[fd].codeContext = CodeContext::Current();
1582 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1583}
1584
1585static
1586void
1587commPlanHalfClosedCheck()
1588{
1589 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1590 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, nullptr, 1.0, 1);
1591 WillCheckHalfClosed = true;
1592 }
1593}
1594
1597static
1598void
1599commHalfClosedCheck(void *)
1600{
1601 debugs(5, 5, "checking " << *TheHalfClosed);
1602
1603 typedef DescriptorSet::const_iterator DSCI;
1604 const DSCI end = TheHalfClosed->end();
1605 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1606 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1607 c->fd = *i;
1608 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1609 CallBack(fd_table[c->fd].codeContext, [&c] {
1610 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1611 CommIoCbPtrFun(&commHalfClosedReader, nullptr));
1612 Comm::Read(c, call);
1613 fd_table[c->fd].halfClosedReader = call;
1614 });
1615 } else
1616 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1617 }
1618
1619 WillCheckHalfClosed = false; // as far as we know
1620 commPlanHalfClosedCheck(); // may need to check again
1621}
1622
1623/// checks whether we are waiting for possibly half-closed connection to close
1624// We are monitoring if the read handler for the fd is the monitoring handler.
1625bool
1626commHasHalfClosedMonitor(int fd)
1627{
1628 return TheHalfClosed->has(fd);
1629}
1630
1631/// stop waiting for possibly half-closed connection to close
1632void
1633commStopHalfClosedMonitor(int const fd)
1634{
1635 debugs(5, 5, "removing FD " << fd << " from " << *TheHalfClosed);
1636
1637 // cancel the read if one was scheduled
1638 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1639 if (reader != nullptr)
1640 Comm::ReadCancel(fd, reader);
1641 fd_table[fd].halfClosedReader = nullptr;
1642
1643 TheHalfClosed->del(fd);
1644}
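/* Illustrative sketch (not part of the original comm.cc): how callers use the
 * half-closed monitoring API while a descriptor is parked without a reader.
 *
 *   commStartHalfClosedMonitor(fd);       // watch for EOF/errors while idle
 *   ...
 *   if (commHasHalfClosedMonitor(fd))
 *       commStopHalfClosedMonitor(fd);    // resume normal reading elsewhere
 */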
1645
1646/// I/O handler for the possibly half-closed connection monitoring code
1647static void
1648commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
1649{
1650 // there cannot be more data coming in on half-closed connections
1651 assert(size == 0);
1652 assert(conn != nullptr);
1653 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1654
1655 fd_table[conn->fd].halfClosedReader = nullptr; // done reading, for now
1656
1657 // nothing to do if fd is being closed
1658 if (flag == Comm::ERR_CLOSING)
1659 return;
1660
1661 // if read failed, close the connection
1662 if (flag != Comm::OK) {
1663 debugs(5, 3, "closing " << conn);
1664 conn->close();
1665 return;
1666 }
1667
1668 // continue waiting for close or error
1669 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1670}
1671
1672int
1673CommSelectEngine::checkEvents(int timeout)
1674{
1675 static time_t last_timeout = 0;
1676
1677 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1678 if (squid_curtime > last_timeout) {
1679 last_timeout = squid_curtime;
1680 checkTimeouts();
1681 }
1682
1683 switch (Comm::DoSelect(timeout)) {
1684
1685 case Comm::OK:
1686
1687 case Comm::TIMEOUT:
1688 return 0;
1689
1690 case Comm::IDLE:
1691
1692 case Comm::SHUTDOWN:
1693 return EVENT_IDLE;
1694
1695 case Comm::COMM_ERROR:
1696 return EVENT_ERROR;
1697
1698 default:
1699 fatal_dump("comm.cc: Internal error -- this should never happen.");
1700 return EVENT_ERROR;
1701 };
1702}
1703
1704/// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1705int
1706comm_open_uds(int sock_type,
1707 int proto,
1708 struct sockaddr_un* addr,
1709 int flags)
1710{
1711 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1712
1713 int new_socket;
1714
1715 /* Create socket for accepting new connections. */
1716 ++ statCounter.syscalls.sock.sockets;
1717
1718 /* Setup the socket addrinfo details for use */
1719 struct addrinfo AI;
1720 AI.ai_flags = 0;
1721 AI.ai_family = PF_UNIX;
1722 AI.ai_socktype = sock_type;
1723 AI.ai_protocol = proto;
1724 AI.ai_addrlen = SUN_LEN(addr);
1725 AI.ai_addr = (sockaddr*)addr;
1726 AI.ai_canonname = nullptr;
1727 AI.ai_next = nullptr;
1728
1729 debugs(50, 3, "Attempt open socket for: " << addr->sun_path);
1730
1731 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1732 int xerrno = errno;
1733 /* Increase the number of reserved fd's if calls to socket()
1734 * are failing because the open file table is full. This
1735 * limits the number of simultaneous clients */
1736
1737 if (limitError(xerrno)) {
1738 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
1739 fdAdjustReserved();
1740 } else {
1741 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
1742 }
1743 return -1;
1744 }
1745
1746 debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1747
1748 /* update fdstat */
1749 debugs(50, 5, "FD " << new_socket << " is a new socket");
1750
1751 assert(!isOpen(new_socket));
1752 fd_open(new_socket, FD_MSGHDR, addr->sun_path);
1753
1754 fd_table[new_socket].sock_family = AI.ai_family;
1755
1756 if (!(flags & COMM_NOCLOEXEC))
1757 commSetCloseOnExec(new_socket);
1758
1759 if (flags & COMM_REUSEADDR)
1760 commSetReuseAddr(new_socket);
1761
1762 if (flags & COMM_NONBLOCKING) {
1763 if (commSetNonBlocking(new_socket) != Comm::OK) {
1764 comm_close(new_socket);
1765 return -1;
1766 }
1767 }
1768
1769 if (flags & COMM_DOBIND) {
1770 if (commBind(new_socket, AI) != Comm::OK) {
1771 comm_close(new_socket);
1772 return -1;
1773 }
1774 }
1775
1776#ifdef TCP_NODELAY
1777 if (sock_type == SOCK_STREAM)
1778 commSetTcpNoDelay(new_socket);
1779
1780#endif
1781
1782 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1783 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1784
1785 return new_socket;
1786}
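/* Illustrative sketch (not part of the original comm.cc): opening a datagram
 * unix-domain socket for FD_MSGHDR I/O. The socket path is hypothetical.
 *
 *   struct sockaddr_un myAddr;
 *   memset(&myAddr, 0, sizeof(myAddr));
 *   myAddr.sun_family = AF_UNIX;
 *   xstrncpy(myAddr.sun_path, "/var/run/squid/example.sock", sizeof(myAddr.sun_path));
 *   const int udsFd = comm_open_uds(SOCK_DGRAM, 0, &myAddr, COMM_NONBLOCKING | COMM_DOBIND);
 *   if (udsFd < 0)
 *       debugs(5, DBG_CRITICAL, "cannot open UDS socket at " << myAddr.sun_path);
 */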
1787