| 1 | // SPDX-License-Identifier: GPL-2.0-only | 
|---|
| 2 | /* | 
|---|
| 3 | * INET		An implementation of the TCP/IP protocol suite for the LINUX | 
|---|
| 4 | *		operating system.  INET is implemented using the  BSD Socket | 
|---|
| 5 | *		interface as the means of communication with the user level. | 
|---|
| 6 | * | 
|---|
| 7 | *		Implementation of the Transmission Control Protocol(TCP). | 
|---|
| 8 | * | 
|---|
| 9 | * Authors:	Ross Biro | 
|---|
| 10 | *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 
|---|
| 11 | *		Mark Evans, <evansmp@uhura.aston.ac.uk> | 
|---|
| 12 | *		Corey Minyard <wf-rch!minyard@relay.EU.net> | 
|---|
| 13 | *		Florian La Roche, <flla@stud.uni-sb.de> | 
|---|
| 14 | *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu> | 
|---|
| 15 | *		Linus Torvalds, <torvalds@cs.helsinki.fi> | 
|---|
| 16 | *		Alan Cox, <gw4pts@gw4pts.ampr.org> | 
|---|
| 17 | *		Matthew Dillon, <dillon@apollo.west.oic.com> | 
|---|
| 18 | *		Arnt Gulbrandsen, <agulbra@nvg.unit.no> | 
|---|
| 19 | *		Jorge Cwik, <jorge@laser.satlink.net> | 
|---|
| 20 | */ | 
|---|
| 21 |  | 
|---|
| 22 | /* | 
|---|
| 23 | * Changes:	Pedro Roque	:	Retransmit queue handled by TCP. | 
|---|
| 24 | *				:	Fragmentation on mtu decrease | 
|---|
| 25 | *				:	Segment collapse on retransmit | 
|---|
| 26 | *				:	AF independence | 
|---|
| 27 | * | 
|---|
| 28 | *		Linus Torvalds	:	send_delayed_ack | 
|---|
| 29 | *		David S. Miller	:	Charge memory using the right skb | 
|---|
| 30 | *					during syn/ack processing. | 
|---|
| 31 | *		David S. Miller :	Output engine completely rewritten. | 
|---|
| 32 | *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr. | 
|---|
| 33 | *		Cacophonix Gaul :	draft-minshall-nagle-01 | 
|---|
| 34 | *		J Hadi Salim	:	ECN support | 
|---|
| 35 | * | 
|---|
| 36 | */ | 
|---|
| 37 |  | 
|---|
| 38 | #define pr_fmt(fmt) "TCP: " fmt | 
|---|
| 39 |  | 
|---|
| 40 | #include <net/tcp.h> | 
|---|
| 41 | #include <net/tcp_ecn.h> | 
|---|
| 42 | #include <net/mptcp.h> | 
|---|
| 43 | #include <net/proto_memory.h> | 
|---|
| 44 | #include <net/psp.h> | 
|---|
| 45 |  | 
|---|
| 46 | #include <linux/compiler.h> | 
|---|
| 47 | #include <linux/gfp.h> | 
|---|
| 48 | #include <linux/module.h> | 
|---|
| 49 | #include <linux/static_key.h> | 
|---|
| 50 | #include <linux/skbuff_ref.h> | 
|---|
| 51 |  | 
|---|
| 52 | #include <trace/events/tcp.h> | 
|---|
| 53 |  | 
|---|
| 54 | /* Refresh clocks of a TCP socket, | 
|---|
| 55 | * ensuring monotonically increasing values. | 
|---|
| 56 | */ | 
|---|
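|  | /* tcp_clock_cache caches the raw nanosecond clock; tcp_mstamp is the same instant converted to microseconds (val / NSEC_PER_USEC). */ | 
|---|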
| 57 | void tcp_mstamp_refresh(struct tcp_sock *tp) | 
|---|
| 58 | { | 
|---|
| 59 | u64 val = tcp_clock_ns(); | 
|---|
| 60 |  | 
|---|
| 61 | tp->tcp_clock_cache = val; | 
|---|
| 62 | tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); | 
|---|
| 63 | } | 
|---|
| 64 |  | 
|---|
| 65 | static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | 
|---|
| 66 | int push_one, gfp_t gfp); | 
|---|
| 67 |  | 
|---|
| 68 | /* Account for new data that has been sent to the network. */ | 
|---|
| 69 | static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) | 
|---|
| 70 | { | 
|---|
| 71 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 72 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 73 | unsigned int prior_packets = tp->packets_out; | 
|---|
| 74 |  | 
|---|
| 75 | WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); | 
|---|
| 76 |  | 
|---|
| 77 | __skb_unlink(skb, &sk->sk_write_queue); | 
|---|
| 78 | tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); | 
|---|
| 79 |  | 
|---|
| 80 | if (tp->highest_sack == NULL) | 
|---|
| 81 | tp->highest_sack = skb; | 
|---|
| 82 |  | 
|---|
| 83 | tp->packets_out += tcp_skb_pcount(skb); | 
|---|
| 84 | if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) | 
|---|
| 85 | tcp_rearm_rto(sk); | 
|---|
| 86 |  | 
|---|
| 87 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, | 
|---|
| 88 | tcp_skb_pcount(skb)); | 
|---|
| 89 | tcp_check_space(sk); | 
|---|
| 90 | } | 
|---|
| 91 |  | 
|---|
| 92 | /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one | 
|---|
| 93 | * window scaling factor due to loss of precision. | 
|---|
| 94 | * If window has been shrunk, what should we make? It is not clear at all. | 
|---|
| 95 | * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( | 
|---|
| 96 | * Anything in between SND.UNA...SND.UNA+SND.WND also can be already | 
|---|
| 97 | * invalid. OK, let's make this for now: | 
|---|
| 98 | */ | 
|---|
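|  | /* Example: with rcv_wscale = 10 the peer cannot observe a shrink smaller than 1024 bytes, so SND.NXT may sit up to (1 << rcv_wscale) - 1 bytes past the advertised right edge and still be acceptable. */ | 
|---|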
| 99 | static inline __u32 tcp_acceptable_seq(const struct sock *sk) | 
|---|
| 100 | { | 
|---|
| 101 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 102 |  | 
|---|
| 103 | if (!before(tcp_wnd_end(tp), tp->snd_nxt) || | 
|---|
| 104 | (tp->rx_opt.wscale_ok && | 
|---|
| 105 | ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) | 
|---|
| 106 | return tp->snd_nxt; | 
|---|
| 107 | else | 
|---|
| 108 | return tcp_wnd_end(tp); | 
|---|
| 109 | } | 
|---|
| 110 |  | 
|---|
| 111 | /* Calculate mss to advertise in SYN segment. | 
|---|
| 112 | * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: | 
|---|
| 113 | * | 
|---|
| 114 | * 1. It is independent of path mtu. | 
|---|
| 115 | * 2. Ideally, it is maximal possible segment size i.e. 65535-40. | 
|---|
| 116 | * 3. For IPv4 it is reasonable to calculate it from maximal MTU of | 
|---|
| 117 | *    attached devices, because some buggy hosts are confused by | 
|---|
| 118 | *    large MSS. | 
|---|
| 119 | * 4. We do not do 3; we advertise an MSS calculated from the first | 
|---|
| 120 | *    hop device MTU, but allow it to be raised to ip_rt_min_advmss. | 
|---|
| 121 | *    This may be overridden via information stored in routing table. | 
|---|
| 122 | * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, | 
|---|
| 123 | *    probably even Jumbo". | 
|---|
| 124 | */ | 
|---|
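|  | /* For example, a 1500-byte first-hop MTU typically yields an advertised MSS of 1460 (IPv4) or 1440 (IPv6), unless a route metric lowers it further. */ | 
|---|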
| 125 | static __u16 tcp_advertise_mss(struct sock *sk) | 
|---|
| 126 | { | 
|---|
| 127 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 128 | const struct dst_entry *dst = __sk_dst_get(sk); | 
|---|
| 129 | int mss = tp->advmss; | 
|---|
| 130 |  | 
|---|
| 131 | if (dst) { | 
|---|
| 132 | unsigned int metric = dst_metric_advmss(dst); | 
|---|
| 133 |  | 
|---|
| 134 | if (metric < mss) { | 
|---|
| 135 | mss = metric; | 
|---|
| 136 | tp->advmss = mss; | 
|---|
| 137 | } | 
|---|
| 138 | } | 
|---|
| 139 |  | 
|---|
| 140 | return (__u16)mss; | 
|---|
| 141 | } | 
|---|
| 142 |  | 
|---|
| 143 | /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window". | 
|---|
| 144 | * This is the first part of cwnd validation mechanism. | 
|---|
| 145 | */ | 
|---|
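|  | /* Example: an idle period just over 3 * RTO halves cwnd three times, but never below the restart window returned by tcp_init_cwnd(). */ | 
|---|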
| 146 | void tcp_cwnd_restart(struct sock *sk, s32 delta) | 
|---|
| 147 | { | 
|---|
| 148 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 149 | u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); | 
|---|
| 150 | u32 cwnd = tcp_snd_cwnd(tp); | 
|---|
| 151 |  | 
|---|
| 152 | tcp_ca_event(sk, CA_EVENT_CWND_RESTART); | 
|---|
| 153 |  | 
|---|
| 154 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | 
|---|
| 155 | restart_cwnd = min(restart_cwnd, cwnd); | 
|---|
| 156 |  | 
|---|
| 157 | while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) | 
|---|
| 158 | cwnd >>= 1; | 
|---|
| 159 | tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd)); | 
|---|
| 160 | tp->snd_cwnd_stamp = tcp_jiffies32; | 
|---|
| 161 | tp->snd_cwnd_used = 0; | 
|---|
| 162 | } | 
|---|
| 163 |  | 
|---|
| 164 | /* Congestion state accounting after a packet has been sent. */ | 
|---|
| 165 | static void tcp_event_data_sent(struct tcp_sock *tp, | 
|---|
| 166 | struct sock *sk) | 
|---|
| 167 | { | 
|---|
| 168 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 169 | const u32 now = tcp_jiffies32; | 
|---|
| 170 |  | 
|---|
| 171 | if (tcp_packets_in_flight(tp) == 0) | 
|---|
| 172 | tcp_ca_event(sk, CA_EVENT_TX_START); | 
|---|
| 173 |  | 
|---|
| 174 | tp->lsndtime = now; | 
|---|
| 175 |  | 
|---|
| 176 | /* If this packet is sent within the ATO of the last received | 
|---|
| 177 | * packet, treat it as a reply and increase the pingpong count. | 
|---|
| 178 | */ | 
|---|
| 179 | if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) | 
|---|
| 180 | inet_csk_inc_pingpong_cnt(sk); | 
|---|
| 181 | } | 
|---|
| 182 |  | 
|---|
| 183 | /* Account for an ACK we sent. */ | 
|---|
| 184 | static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt) | 
|---|
| 185 | { | 
|---|
| 186 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 187 |  | 
|---|
| 188 | if (unlikely(tp->compressed_ack)) { | 
|---|
| 189 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, | 
|---|
| 190 | tp->compressed_ack); | 
|---|
| 191 | tp->compressed_ack = 0; | 
|---|
| 192 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) | 
|---|
| 193 | __sock_put(sk); | 
|---|
| 194 | } | 
|---|
| 195 |  | 
|---|
| 196 | if (unlikely(rcv_nxt != tp->rcv_nxt)) | 
|---|
| 197 | return;  /* Special ACK sent by DCTCP to reflect ECN */ | 
|---|
| 198 | tcp_dec_quickack_mode(sk); | 
|---|
| 199 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); | 
|---|
| 200 | } | 
|---|
| 201 |  | 
|---|
| 202 | /* Determine a window scaling and initial window to offer. | 
|---|
| 203 | * Based on the assumption that the given amount of space | 
|---|
| 204 | * will be offered. Store the results in the tp structure. | 
|---|
| 205 | * NOTE: for smooth operation initial space offering should | 
|---|
| 206 | * be a multiple of mss if possible. We assume here that mss >= 1. | 
|---|
| 207 | * This MUST be enforced by all callers. | 
|---|
| 208 | */ | 
|---|
| 209 | void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, | 
|---|
| 210 | __u32 *rcv_wnd, __u32 *__window_clamp, | 
|---|
| 211 | int wscale_ok, __u8 *rcv_wscale, | 
|---|
| 212 | __u32 init_rcv_wnd) | 
|---|
| 213 | { | 
|---|
| 214 | unsigned int space = (__space < 0 ? 0 : __space); | 
|---|
| 215 | u32 window_clamp = READ_ONCE(*__window_clamp); | 
|---|
| 216 |  | 
|---|
| 217 | /* If no clamp set the clamp to the max possible scaled window */ | 
|---|
| 218 | if (window_clamp == 0) | 
|---|
| 219 | window_clamp = (U16_MAX << TCP_MAX_WSCALE); | 
|---|
| 220 | space = min(window_clamp, space); | 
|---|
| 221 |  | 
|---|
| 222 | /* Quantize space offering to a multiple of mss if possible. */ | 
|---|
| 223 | if (space > mss) | 
|---|
| 224 | space = rounddown(space, mss); | 
|---|
| 225 |  | 
|---|
| 226 | /* NOTE: offering an initial window larger than 32767 | 
|---|
| 227 | * will break some buggy TCP stacks. If the admin tells us | 
|---|
| 228 | * it is likely we could be speaking with such a buggy stack | 
|---|
| 229 | * we will truncate our initial window offering to 32K-1 | 
|---|
| 230 | * unless the remote has sent us a window scaling option, | 
|---|
| 231 | * which we interpret as a sign the remote TCP is not | 
|---|
| 232 | * misinterpreting the window field as a signed quantity. | 
|---|
| 233 | */ | 
|---|
| 234 | if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) | 
|---|
| 235 | (*rcv_wnd) = min(space, MAX_TCP_WINDOW); | 
|---|
| 236 | else | 
|---|
| 237 | (*rcv_wnd) = space; | 
|---|
| 238 |  | 
|---|
| 239 | if (init_rcv_wnd) | 
|---|
| 240 | *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); | 
|---|
| 241 |  | 
|---|
| 242 | *rcv_wscale = 0; | 
|---|
| 243 | if (wscale_ok) { | 
|---|
| 244 | /* Set window scaling on max possible window */ | 
|---|
| 245 | space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); | 
|---|
| 246 | space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); | 
|---|
| 247 | space = min_t(u32, space, window_clamp); | 
|---|
| 248 | *rcv_wscale = clamp_t(int, ilog2(space) - 15, | 
|---|
| 249 | 0, TCP_MAX_WSCALE); | 
|---|
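|  | /* Example: space = 1 MB gives ilog2(space) = 20, so rcv_wscale = 5 (65535 << 5 is roughly 2 MB, the smallest scaled window that covers the space). */ | 
|---|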
| 250 | } | 
|---|
| 251 | /* Set the clamp no higher than max representable value */ | 
|---|
| 252 | WRITE_ONCE(*__window_clamp, | 
|---|
| 253 | min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp)); | 
|---|
| 254 | } | 
|---|
| 255 | EXPORT_IPV6_MOD(tcp_select_initial_window); | 
|---|
| 256 |  | 
|---|
| 257 | /* Choose a new window to advertise, update state in tcp_sock for the | 
|---|
| 258 | * socket, and return result with RFC1323 scaling applied.  The return | 
|---|
| 259 | * value can be stuffed directly into th->window for an outgoing | 
|---|
| 260 | * frame. | 
|---|
| 261 | */ | 
|---|
| 262 | static u16 tcp_select_window(struct sock *sk) | 
|---|
| 263 | { | 
|---|
| 264 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 265 | struct net *net = sock_net(sk); | 
|---|
| 266 | u32 old_win = tp->rcv_wnd; | 
|---|
| 267 | u32 cur_win, new_win; | 
|---|
| 268 |  | 
|---|
| 269 | /* Make the window 0 if we failed to queue the data because we | 
|---|
| 270 | * are out of memory. | 
|---|
| 271 | */ | 
|---|
| 272 | if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) { | 
|---|
| 273 | tp->pred_flags = 0; | 
|---|
| 274 | tp->rcv_wnd = 0; | 
|---|
| 275 | tp->rcv_wup = tp->rcv_nxt; | 
|---|
| 276 | return 0; | 
|---|
| 277 | } | 
|---|
| 278 |  | 
|---|
| 279 | cur_win = tcp_receive_window(tp); | 
|---|
| 280 | new_win = __tcp_select_window(sk); | 
|---|
| 281 | if (new_win < cur_win) { | 
|---|
| 282 | /* Danger Will Robinson! | 
|---|
| 283 | * Don't update rcv_wup/rcv_wnd here or else | 
|---|
| 284 | * we will not be able to advertise a zero | 
|---|
| 285 | * window in time.  --DaveM | 
|---|
| 286 | * | 
|---|
| 287 | * Relax Will Robinson. | 
|---|
| 288 | */ | 
|---|
| 289 | if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { | 
|---|
| 290 | /* Never shrink the offered window */ | 
|---|
| 291 | if (new_win == 0) | 
|---|
| 292 | NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV); | 
|---|
| 293 | new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); | 
|---|
| 294 | } | 
|---|
| 295 | } | 
|---|
| 296 |  | 
|---|
| 297 | tp->rcv_wnd = new_win; | 
|---|
| 298 | tp->rcv_wup = tp->rcv_nxt; | 
|---|
| 299 |  | 
|---|
| 300 | /* Make sure we do not exceed the maximum possible | 
|---|
| 301 | * scaled window. | 
|---|
| 302 | */ | 
|---|
| 303 | if (!tp->rx_opt.rcv_wscale && | 
|---|
| 304 | READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) | 
|---|
| 305 | new_win = min(new_win, MAX_TCP_WINDOW); | 
|---|
| 306 | else | 
|---|
| 307 | new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); | 
|---|
| 308 |  | 
|---|
| 309 | /* RFC1323 scaling applied */ | 
|---|
| 310 | new_win >>= tp->rx_opt.rcv_wscale; | 
|---|
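|  | /* Example: with rcv_wscale = 7 a 1 MB window goes on the wire as 1048576 >> 7 = 8192. */ | 
|---|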
| 311 |  | 
|---|
| 312 | /* If we advertise zero window, disable fast path. */ | 
|---|
| 313 | if (new_win == 0) { | 
|---|
| 314 | tp->pred_flags = 0; | 
|---|
| 315 | if (old_win) | 
|---|
| 316 | NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV); | 
|---|
| 317 | } else if (old_win == 0) { | 
|---|
| 318 | NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV); | 
|---|
| 319 | } | 
|---|
| 320 |  | 
|---|
| 321 | return new_win; | 
|---|
| 322 | } | 
|---|
| 323 |  | 
|---|
| 324 | /* Set up ECN state for a packet on a ESTABLISHED socket that is about to | 
|---|
| 325 | * be sent. | 
|---|
| 326 | */ | 
|---|
| 327 | static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, | 
|---|
| 328 | struct tcphdr *th, int tcp_header_len) | 
|---|
| 329 | { | 
|---|
| 330 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 331 |  | 
|---|
| 332 | if (!tcp_ecn_mode_any(tp)) | 
|---|
| 333 | return; | 
|---|
| 334 |  | 
|---|
| 335 | if (tcp_ecn_mode_accecn(tp)) { | 
|---|
| 336 | if (!tcp_accecn_ace_fail_recv(tp)) | 
|---|
| 337 | INET_ECN_xmit(sk); | 
|---|
| 338 | tcp_accecn_set_ace(tp, skb, th); | 
|---|
| 339 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ACCECN; | 
|---|
| 340 | } else { | 
|---|
| 341 | /* Not-retransmitted data segment: set ECT and inject CWR. */ | 
|---|
| 342 | if (skb->len != tcp_header_len && | 
|---|
| 343 | !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { | 
|---|
| 344 | INET_ECN_xmit(sk); | 
|---|
| 345 | if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { | 
|---|
| 346 | tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; | 
|---|
| 347 | th->cwr = 1; | 
|---|
| 348 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | 
|---|
| 349 | } | 
|---|
| 350 | } else if (!tcp_ca_needs_ecn(sk)) { | 
|---|
| 351 | /* ACK or retransmitted segment: clear ECT|CE */ | 
|---|
| 352 | INET_ECN_dontxmit(sk); | 
|---|
| 353 | } | 
|---|
| 354 | if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) | 
|---|
| 355 | th->ece = 1; | 
|---|
| 356 | } | 
|---|
| 357 | } | 
|---|
| 358 |  | 
|---|
| 359 | /* Constructs common control bits of non-data skb. If SYN/FIN is present, | 
|---|
| 360 | * auto increment end seqno. | 
|---|
| 361 | */ | 
|---|
| 362 | static void tcp_init_nondata_skb(struct sk_buff *skb, struct sock *sk, | 
|---|
| 363 | u32 seq, u16 flags) | 
|---|
| 364 | { | 
|---|
| 365 | skb->ip_summed = CHECKSUM_PARTIAL; | 
|---|
| 366 |  | 
|---|
| 367 | TCP_SKB_CB(skb)->tcp_flags = flags; | 
|---|
| 368 |  | 
|---|
| 369 | tcp_skb_pcount_set(skb, 1); | 
|---|
| 370 | psp_enqueue_set_decrypted(sk, skb); | 
|---|
| 371 |  | 
|---|
| 372 | TCP_SKB_CB(skb)->seq = seq; | 
|---|
| 373 | if (flags & (TCPHDR_SYN | TCPHDR_FIN)) | 
|---|
| 374 | seq++; | 
|---|
| 375 | TCP_SKB_CB(skb)->end_seq = seq; | 
|---|
| 376 | } | 
|---|
| 377 |  | 
|---|
| 378 | static inline bool tcp_urg_mode(const struct tcp_sock *tp) | 
|---|
| 379 | { | 
|---|
| 380 | return tp->snd_una != tp->snd_up; | 
|---|
| 381 | } | 
|---|
| 382 |  | 
|---|
| 383 | #define OPTION_SACK_ADVERTISE	BIT(0) | 
|---|
| 384 | #define OPTION_TS		BIT(1) | 
|---|
| 385 | #define OPTION_MD5		BIT(2) | 
|---|
| 386 | #define OPTION_WSCALE		BIT(3) | 
|---|
| 387 | #define OPTION_FAST_OPEN_COOKIE	BIT(8) | 
|---|
| 388 | #define OPTION_SMC		BIT(9) | 
|---|
| 389 | #define OPTION_MPTCP		BIT(10) | 
|---|
| 390 | #define OPTION_AO		BIT(11) | 
|---|
| 391 | #define OPTION_ACCECN		BIT(12) | 
|---|
| 392 |  | 
|---|
| 393 | static void smc_options_write(__be32 *ptr, u16 *options) | 
|---|
| 394 | { | 
|---|
| 395 | #if IS_ENABLED(CONFIG_SMC) | 
|---|
| 396 | if (static_branch_unlikely(&tcp_have_smc)) { | 
|---|
| 397 | if (unlikely(OPTION_SMC & *options)) { | 
|---|
| 398 | *ptr++ = htonl((TCPOPT_NOP  << 24) | | 
|---|
| 399 | (TCPOPT_NOP  << 16) | | 
|---|
| 400 | (TCPOPT_EXP <<  8) | | 
|---|
| 401 | (TCPOLEN_EXP_SMC_BASE)); | 
|---|
| 402 | *ptr++ = htonl(TCPOPT_SMC_MAGIC); | 
|---|
| 403 | } | 
|---|
| 404 | } | 
|---|
| 405 | #endif | 
|---|
| 406 | } | 
|---|
| 407 |  | 
|---|
| 408 | struct tcp_out_options { | 
|---|
| 409 | u16 options;		/* bit field of OPTION_* */ | 
|---|
| 410 | u16 mss;		/* 0 to disable */ | 
|---|
| 411 | u8 ws;			/* window scale, 0 to disable */ | 
|---|
| 412 | u8 num_sack_blocks;	/* number of SACK blocks to include */ | 
|---|
| 413 | u8 num_accecn_fields:7,	/* number of AccECN fields needed */ | 
|---|
| 414 | use_synack_ecn_bytes:1; /* Use synack_ecn_bytes or not */ | 
|---|
| 415 | u8 hash_size;		/* bytes in hash_location */ | 
|---|
| 416 | u8 bpf_opt_len;		/* length of BPF hdr option */ | 
|---|
| 417 | __u8 *hash_location;	/* temporary pointer, overloaded */ | 
|---|
| 418 | __u32 tsval, tsecr;	/* need to include OPTION_TS */ | 
|---|
| 419 | struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */ | 
|---|
| 420 | struct mptcp_out_options mptcp; | 
|---|
| 421 | }; | 
|---|
| 422 |  | 
|---|
| 423 | static void mptcp_options_write(struct tcphdr *th, __be32 *ptr, | 
|---|
| 424 | struct tcp_sock *tp, | 
|---|
| 425 | struct tcp_out_options *opts) | 
|---|
| 426 | { | 
|---|
| 427 | #if IS_ENABLED(CONFIG_MPTCP) | 
|---|
| 428 | if (unlikely(OPTION_MPTCP & opts->options)) | 
|---|
| 429 | mptcp_write_options(th, ptr, tp, &opts->mptcp); | 
|---|
| 430 | #endif | 
|---|
| 431 | } | 
|---|
| 432 |  | 
|---|
| 433 | #ifdef CONFIG_CGROUP_BPF | 
|---|
| 434 | static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb, | 
|---|
| 435 | enum tcp_synack_type synack_type) | 
|---|
| 436 | { | 
|---|
| 437 | if (unlikely(!skb)) | 
|---|
| 438 | return BPF_WRITE_HDR_TCP_CURRENT_MSS; | 
|---|
| 439 |  | 
|---|
| 440 | if (unlikely(synack_type == TCP_SYNACK_COOKIE)) | 
|---|
| 441 | return BPF_WRITE_HDR_TCP_SYNACK_COOKIE; | 
|---|
| 442 |  | 
|---|
| 443 | return 0; | 
|---|
| 444 | } | 
|---|
| 445 |  | 
|---|
| 446 | /* req, syn_skb and synack_type are used when writing synack */ | 
|---|
| 447 | static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, | 
|---|
| 448 | struct request_sock *req, | 
|---|
| 449 | struct sk_buff *syn_skb, | 
|---|
| 450 | enum tcp_synack_type synack_type, | 
|---|
| 451 | struct tcp_out_options *opts, | 
|---|
| 452 | unsigned int *remaining) | 
|---|
| 453 | { | 
|---|
| 454 | struct bpf_sock_ops_kern sock_ops; | 
|---|
| 455 | int err; | 
|---|
| 456 |  | 
|---|
| 457 | if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), | 
|---|
| 458 | BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) || | 
|---|
| 459 | !*remaining) | 
|---|
| 460 | return; | 
|---|
| 461 |  | 
|---|
| 462 | /* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */ | 
|---|
| 463 |  | 
|---|
| 464 | /* init sock_ops */ | 
|---|
| 465 | memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); | 
|---|
| 466 |  | 
|---|
| 467 | sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB; | 
|---|
| 468 |  | 
|---|
| 469 | if (req) { | 
|---|
| 470 | /* The listen "sk" cannot be passed here because | 
|---|
| 471 | * it is not locked.  It would not make too much | 
|---|
| 472 | * sense to do bpf_setsockopt(listen_sk) based | 
|---|
| 473 | * on individual connection request also. | 
|---|
| 474 | * | 
|---|
| 475 | * Thus, "req" is passed here and the cgroup-bpf-progs | 
|---|
| 476 | * of the listen "sk" will be run. | 
|---|
| 477 | * | 
|---|
| 478 | * "req" is also used here for fastopen even the "sk" here is | 
|---|
| 479 | * a fullsock "child" sk.  It is to keep the behavior | 
|---|
| 480 | * consistent between fastopen and non-fastopen on | 
|---|
| 481 | * the bpf programming side. | 
|---|
| 482 | */ | 
|---|
| 483 | sock_ops.sk = (struct sock *)req; | 
|---|
| 484 | sock_ops.syn_skb = syn_skb; | 
|---|
| 485 | } else { | 
|---|
| 486 | sock_owned_by_me(sk); | 
|---|
| 487 |  | 
|---|
| 488 | sock_ops.is_fullsock = 1; | 
|---|
| 489 | sock_ops.is_locked_tcp_sock = 1; | 
|---|
| 490 | sock_ops.sk = sk; | 
|---|
| 491 | } | 
|---|
| 492 |  | 
|---|
| 493 | sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type); | 
|---|
| 494 | sock_ops.remaining_opt_len = *remaining; | 
|---|
| 495 | /* tcp_current_mss() does not pass a skb */ | 
|---|
| 496 | if (skb) | 
|---|
| 497 | bpf_skops_init_skb(&sock_ops, skb, 0); | 
|---|
| 498 |  | 
|---|
| 499 | err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); | 
|---|
| 500 |  | 
|---|
| 501 | if (err || sock_ops.remaining_opt_len == *remaining) | 
|---|
| 502 | return; | 
|---|
| 503 |  | 
|---|
| 504 | opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; | 
|---|
| 505 | /* round up to 4 bytes */ | 
|---|
| 506 | opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; | 
|---|
| 507 |  | 
|---|
| 508 | *remaining -= opts->bpf_opt_len; | 
|---|
| 509 | } | 
|---|
| 510 |  | 
|---|
| 511 | static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, | 
|---|
| 512 | struct request_sock *req, | 
|---|
| 513 | struct sk_buff *syn_skb, | 
|---|
| 514 | enum tcp_synack_type synack_type, | 
|---|
| 515 | struct tcp_out_options *opts) | 
|---|
| 516 | { | 
|---|
| 517 | u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; | 
|---|
| 518 | struct bpf_sock_ops_kern sock_ops; | 
|---|
| 519 | int err; | 
|---|
| 520 |  | 
|---|
| 521 | if (likely(!max_opt_len)) | 
|---|
| 522 | return; | 
|---|
| 523 |  | 
|---|
| 524 | memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); | 
|---|
| 525 |  | 
|---|
| 526 | sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB; | 
|---|
| 527 |  | 
|---|
| 528 | if (req) { | 
|---|
| 529 | sock_ops.sk = (struct sock *)req; | 
|---|
| 530 | sock_ops.syn_skb = syn_skb; | 
|---|
| 531 | } else { | 
|---|
| 532 | sock_owned_by_me(sk); | 
|---|
| 533 |  | 
|---|
| 534 | sock_ops.is_fullsock = 1; | 
|---|
| 535 | sock_ops.is_locked_tcp_sock = 1; | 
|---|
| 536 | sock_ops.sk = sk; | 
|---|
| 537 | } | 
|---|
| 538 |  | 
|---|
| 539 | sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type); | 
|---|
| 540 | sock_ops.remaining_opt_len = max_opt_len; | 
|---|
| 541 | first_opt_off = tcp_hdrlen(skb) - max_opt_len; | 
|---|
| 542 | bpf_skops_init_skb(&sock_ops, skb, first_opt_off); | 
|---|
| 543 |  | 
|---|
| 544 | err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); | 
|---|
| 545 |  | 
|---|
| 546 | if (err) | 
|---|
| 547 | nr_written = 0; | 
|---|
| 548 | else | 
|---|
| 549 | nr_written = max_opt_len - sock_ops.remaining_opt_len; | 
|---|
| 550 |  | 
|---|
| 551 | if (nr_written < max_opt_len) | 
|---|
| 552 | memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, | 
|---|
| 553 | max_opt_len - nr_written); | 
|---|
| 554 | } | 
|---|
| 555 | #else | 
|---|
| 556 | static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, | 
|---|
| 557 | struct request_sock *req, | 
|---|
| 558 | struct sk_buff *syn_skb, | 
|---|
| 559 | enum tcp_synack_type synack_type, | 
|---|
| 560 | struct tcp_out_options *opts, | 
|---|
| 561 | unsigned int *remaining) | 
|---|
| 562 | { | 
|---|
| 563 | } | 
|---|
| 564 |  | 
|---|
| 565 | static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, | 
|---|
| 566 | struct request_sock *req, | 
|---|
| 567 | struct sk_buff *syn_skb, | 
|---|
| 568 | enum tcp_synack_type synack_type, | 
|---|
| 569 | struct tcp_out_options *opts) | 
|---|
| 570 | { | 
|---|
| 571 | } | 
|---|
| 572 | #endif | 
|---|
| 573 |  | 
|---|
| 574 | static __be32 *process_tcp_ao_options(struct tcp_sock *tp, | 
|---|
| 575 | const struct tcp_request_sock *tcprsk, | 
|---|
| 576 | struct tcp_out_options *opts, | 
|---|
| 577 | struct tcp_key *key, __be32 *ptr) | 
|---|
| 578 | { | 
|---|
| 579 | #ifdef CONFIG_TCP_AO | 
|---|
| 580 | u8 maclen = tcp_ao_maclen(key->ao_key); | 
|---|
| 581 |  | 
|---|
| 582 | if (tcprsk) { | 
|---|
| 583 | u8 aolen = maclen + sizeof(struct tcp_ao_hdr); | 
|---|
| 584 |  | 
|---|
| 585 | *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) | | 
|---|
| 586 | (tcprsk->ao_keyid << 8) | | 
|---|
| 587 | (tcprsk->ao_rcv_next)); | 
|---|
| 588 | } else { | 
|---|
| 589 | struct tcp_ao_key *rnext_key; | 
|---|
| 590 | struct tcp_ao_info *ao_info; | 
|---|
| 591 |  | 
|---|
| 592 | ao_info = rcu_dereference_check(tp->ao_info, | 
|---|
| 593 | lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); | 
|---|
| 594 | rnext_key = READ_ONCE(ao_info->rnext_key); | 
|---|
| 595 | if (WARN_ON_ONCE(!rnext_key)) | 
|---|
| 596 | return ptr; | 
|---|
| 597 | *ptr++ = htonl((TCPOPT_AO << 24) | | 
|---|
| 598 | (tcp_ao_len(key->ao_key) << 16) | | 
|---|
| 599 | (key->ao_key->sndid << 8) | | 
|---|
| 600 | (rnext_key->rcvid)); | 
|---|
| 601 | } | 
|---|
| 602 | opts->hash_location = (__u8 *)ptr; | 
|---|
| 603 | ptr += maclen / sizeof(*ptr); | 
|---|
| 604 | if (unlikely(maclen % sizeof(*ptr))) { | 
|---|
| 605 | memset(ptr, TCPOPT_NOP, sizeof(*ptr)); | 
|---|
| 606 | ptr++; | 
|---|
| 607 | } | 
|---|
| 608 | #endif | 
|---|
| 609 | return ptr; | 
|---|
| 610 | } | 
|---|
| 611 |  | 
|---|
| 612 | /* Initial values for AccECN option; ordering is based on ECN field bits | 
|---|
| 613 | * similar to received_ecn_bytes. Used for SYN/ACK AccECN option. | 
|---|
| 614 | */ | 
|---|
| 615 | static const u32 synack_ecn_bytes[3] = { 0, 0, 0 }; | 
|---|
| 616 |  | 
|---|
| 617 | /* Write previously computed TCP options to the packet. | 
|---|
| 618 | * | 
|---|
| 619 | * Beware: Something in the Internet is very sensitive to the ordering of | 
|---|
| 620 | * TCP options, we learned this the hard way, so be careful here. | 
|---|
| 621 | * Luckily we can at least blame others for their non-compliance, but from | 
|---|
| 622 | * an interoperability perspective it seems that we're somewhat stuck with | 
|---|
| 623 | * the ordering we have been using if we want to keep working with | 
|---|
| 624 | * those broken things (not that it currently hurts anybody, as there isn't | 
|---|
| 625 | * a particular reason why the ordering would need to be changed). | 
|---|
| 626 | * | 
|---|
| 627 | * At least SACK_PERM as the first option is known to lead to a disaster | 
|---|
| 628 | * (but it may well be that other scenarios fail similarly). | 
|---|
| 629 | */ | 
|---|
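|  | /* The resulting order within the 40 bytes of option space is: MD5/AO, MSS, TS (carrying SACK_PERM when both are present), AccECN, standalone SACK_PERM, window scale, SACK blocks, Fast Open, SMC, MPTCP. */ | 
|---|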
| 630 | static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp, | 
|---|
| 631 | const struct tcp_request_sock *tcprsk, | 
|---|
| 632 | struct tcp_out_options *opts, | 
|---|
| 633 | struct tcp_key *key) | 
|---|
| 634 | { | 
|---|
| 635 | u8 leftover_highbyte = TCPOPT_NOP; /* replace 1st NOP if avail */ | 
|---|
| 636 | u8 leftover_lowbyte = TCPOPT_NOP;  /* replace 2nd NOP in succession */ | 
|---|
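|  | /* These carry a partially filled dword (e.g. left behind by the AccECN option) so it can be merged into the next option's padding instead of being flushed as NOPs right away. */ | 
|---|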
| 637 | __be32 *ptr = (__be32 *)(th + 1); | 
|---|
| 638 | u16 options = opts->options;	/* mungable copy */ | 
|---|
| 639 |  | 
|---|
| 640 | if (tcp_key_is_md5(key)) { | 
|---|
| 641 | *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | 
|---|
| 642 | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); | 
|---|
| 643 | /* overload cookie hash location */ | 
|---|
| 644 | opts->hash_location = (__u8 *)ptr; | 
|---|
| 645 | ptr += 4; | 
|---|
| 646 | } else if (tcp_key_is_ao(key)) { | 
|---|
| 647 | ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr); | 
|---|
| 648 | } | 
|---|
| 649 | if (unlikely(opts->mss)) { | 
|---|
| 650 | *ptr++ = htonl((TCPOPT_MSS << 24) | | 
|---|
| 651 | (TCPOLEN_MSS << 16) | | 
|---|
| 652 | opts->mss); | 
|---|
| 653 | } | 
|---|
| 654 |  | 
|---|
| 655 | if (likely(OPTION_TS & options)) { | 
|---|
| 656 | if (unlikely(OPTION_SACK_ADVERTISE & options)) { | 
|---|
| 657 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | | 
|---|
| 658 | (TCPOLEN_SACK_PERM << 16) | | 
|---|
| 659 | (TCPOPT_TIMESTAMP << 8) | | 
|---|
| 660 | TCPOLEN_TIMESTAMP); | 
|---|
| 661 | options &= ~OPTION_SACK_ADVERTISE; | 
|---|
| 662 | } else { | 
|---|
| 663 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 
|---|
| 664 | (TCPOPT_NOP << 16) | | 
|---|
| 665 | (TCPOPT_TIMESTAMP << 8) | | 
|---|
| 666 | TCPOLEN_TIMESTAMP); | 
|---|
| 667 | } | 
|---|
| 668 | *ptr++ = htonl(opts->tsval); | 
|---|
| 669 | *ptr++ = htonl(opts->tsecr); | 
|---|
| 670 | } | 
|---|
| 671 |  | 
|---|
| 672 | if (OPTION_ACCECN & options) { | 
|---|
| 673 | const u32 *ecn_bytes = opts->use_synack_ecn_bytes ? | 
|---|
| 674 | synack_ecn_bytes : | 
|---|
| 675 | tp->received_ecn_bytes; | 
|---|
| 676 | const u8 ect0_idx = INET_ECN_ECT_0 - 1; | 
|---|
| 677 | const u8 ect1_idx = INET_ECN_ECT_1 - 1; | 
|---|
| 678 | const u8 ce_idx = INET_ECN_CE - 1; | 
|---|
| 679 | u32 e0b; | 
|---|
| 680 | u32 e1b; | 
|---|
| 681 | u32 ceb; | 
|---|
| 682 | u8 len; | 
|---|
| 683 |  | 
|---|
| 684 | e0b = ecn_bytes[ect0_idx] + TCP_ACCECN_E0B_INIT_OFFSET; | 
|---|
| 685 | e1b = ecn_bytes[ect1_idx] + TCP_ACCECN_E1B_INIT_OFFSET; | 
|---|
| 686 | ceb = ecn_bytes[ce_idx] + TCP_ACCECN_CEB_INIT_OFFSET; | 
|---|
| 687 | len = TCPOLEN_ACCECN_BASE + | 
|---|
| 688 | opts->num_accecn_fields * TCPOLEN_ACCECN_PERFIELD; | 
|---|
| 689 |  | 
|---|
| 690 | if (opts->num_accecn_fields == 2) { | 
|---|
| 691 | *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | | 
|---|
| 692 | ((e1b >> 8) & 0xffff)); | 
|---|
| 693 | *ptr++ = htonl(((e1b & 0xff) << 24) | | 
|---|
| 694 | (ceb & 0xffffff)); | 
|---|
| 695 | } else if (opts->num_accecn_fields == 1) { | 
|---|
| 696 | *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | | 
|---|
| 697 | ((e1b >> 8) & 0xffff)); | 
|---|
| 698 | leftover_highbyte = e1b & 0xff; | 
|---|
| 699 | leftover_lowbyte = TCPOPT_NOP; | 
|---|
| 700 | } else if (opts->num_accecn_fields == 0) { | 
|---|
| 701 | leftover_highbyte = TCPOPT_ACCECN1; | 
|---|
| 702 | leftover_lowbyte = len; | 
|---|
| 703 | } else if (opts->num_accecn_fields == 3) { | 
|---|
| 704 | *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | | 
|---|
| 705 | ((e1b >> 8) & 0xffff)); | 
|---|
| 706 | *ptr++ = htonl(((e1b & 0xff) << 24) | | 
|---|
| 707 | (ceb & 0xffffff)); | 
|---|
| 708 | *ptr++ = htonl(((e0b & 0xffffff) << 8) | | 
|---|
| 709 | TCPOPT_NOP); | 
|---|
| 710 | } | 
|---|
| 711 | if (tp) { | 
|---|
| 712 | tp->accecn_minlen = 0; | 
|---|
| 713 | tp->accecn_opt_tstamp = tp->tcp_mstamp; | 
|---|
| 714 | if (tp->accecn_opt_demand) | 
|---|
| 715 | tp->accecn_opt_demand--; | 
|---|
| 716 | } | 
|---|
| 717 | } | 
|---|
| 718 |  | 
|---|
| 719 | if (unlikely(OPTION_SACK_ADVERTISE & options)) { | 
|---|
| 720 | *ptr++ = htonl((leftover_highbyte << 24) | | 
|---|
| 721 | (leftover_lowbyte << 16) | | 
|---|
| 722 | (TCPOPT_SACK_PERM << 8) | | 
|---|
| 723 | TCPOLEN_SACK_PERM); | 
|---|
| 724 | leftover_highbyte = TCPOPT_NOP; | 
|---|
| 725 | leftover_lowbyte = TCPOPT_NOP; | 
|---|
| 726 | } | 
|---|
| 727 |  | 
|---|
| 728 | if (unlikely(OPTION_WSCALE & options)) { | 
|---|
| 729 | u8 highbyte = TCPOPT_NOP; | 
|---|
| 730 |  | 
|---|
| 731 | /* Do not split a 2-byte leftover to fit into a single | 
|---|
| 732 | * NOP, i.e., replace this NOP only when 1 byte is left over | 
|---|
| 733 | * in leftover_highbyte. | 
|---|
| 734 | */ | 
|---|
| 735 | if (unlikely(leftover_highbyte != TCPOPT_NOP && | 
|---|
| 736 | leftover_lowbyte == TCPOPT_NOP)) { | 
|---|
| 737 | highbyte = leftover_highbyte; | 
|---|
| 738 | leftover_highbyte = TCPOPT_NOP; | 
|---|
| 739 | } | 
|---|
| 740 | *ptr++ = htonl((highbyte << 24) | | 
|---|
| 741 | (TCPOPT_WINDOW << 16) | | 
|---|
| 742 | (TCPOLEN_WINDOW << 8) | | 
|---|
| 743 | opts->ws); | 
|---|
| 744 | } | 
|---|
| 745 |  | 
|---|
| 746 | if (unlikely(opts->num_sack_blocks)) { | 
|---|
| 747 | struct tcp_sack_block *sp = tp->rx_opt.dsack ? | 
|---|
| 748 | tp->duplicate_sack : tp->selective_acks; | 
|---|
| 749 | int this_sack; | 
|---|
| 750 |  | 
|---|
| 751 | *ptr++ = htonl((leftover_highbyte << 24) | | 
|---|
| 752 | (leftover_lowbyte << 16) | | 
|---|
| 753 | (TCPOPT_SACK <<  8) | | 
|---|
| 754 | (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * | 
|---|
| 755 | TCPOLEN_SACK_PERBLOCK))); | 
|---|
| 756 | leftover_highbyte = TCPOPT_NOP; | 
|---|
| 757 | leftover_lowbyte = TCPOPT_NOP; | 
|---|
| 758 |  | 
|---|
| 759 | for (this_sack = 0; this_sack < opts->num_sack_blocks; | 
|---|
| 760 | ++this_sack) { | 
|---|
| 761 | *ptr++ = htonl(sp[this_sack].start_seq); | 
|---|
| 762 | *ptr++ = htonl(sp[this_sack].end_seq); | 
|---|
| 763 | } | 
|---|
| 764 |  | 
|---|
| 765 | tp->rx_opt.dsack = 0; | 
|---|
| 766 | } else if (unlikely(leftover_highbyte != TCPOPT_NOP || | 
|---|
| 767 | leftover_lowbyte != TCPOPT_NOP)) { | 
|---|
| 768 | *ptr++ = htonl((leftover_highbyte << 24) | | 
|---|
| 769 | (leftover_lowbyte << 16) | | 
|---|
| 770 | (TCPOPT_NOP << 8) | | 
|---|
| 771 | TCPOPT_NOP); | 
|---|
| 772 | leftover_highbyte = TCPOPT_NOP; | 
|---|
| 773 | leftover_lowbyte = TCPOPT_NOP; | 
|---|
| 774 | } | 
|---|
| 775 |  | 
|---|
| 776 | if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { | 
|---|
| 777 | struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; | 
|---|
| 778 | u8 *p = (u8 *)ptr; | 
|---|
| 779 | u32 len; /* Fast Open option length */ | 
|---|
| 780 |  | 
|---|
| 781 | if (foc->exp) { | 
|---|
| 782 | len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; | 
|---|
| 783 | *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) | | 
|---|
| 784 | TCPOPT_FASTOPEN_MAGIC); | 
|---|
| 785 | p += TCPOLEN_EXP_FASTOPEN_BASE; | 
|---|
| 786 | } else { | 
|---|
| 787 | len = TCPOLEN_FASTOPEN_BASE + foc->len; | 
|---|
| 788 | *p++ = TCPOPT_FASTOPEN; | 
|---|
| 789 | *p++ = len; | 
|---|
| 790 | } | 
|---|
| 791 |  | 
|---|
| 792 | memcpy(p, foc->val, foc->len); | 
|---|
| 793 | if ((len & 3) == 2) { | 
|---|
| 794 | p[foc->len] = TCPOPT_NOP; | 
|---|
| 795 | p[foc->len + 1] = TCPOPT_NOP; | 
|---|
| 796 | } | 
|---|
| 797 | ptr += (len + 3) >> 2; | 
|---|
| 798 | } | 
|---|
| 799 |  | 
|---|
| 800 | smc_options_write(ptr, &options); | 
|---|
| 801 |  | 
|---|
| 802 | mptcp_options_write(th, ptr, tp, opts); | 
|---|
| 803 | } | 
|---|
| 804 |  | 
|---|
| 805 | static void smc_set_option(const struct tcp_sock *tp, | 
|---|
| 806 | struct tcp_out_options *opts, | 
|---|
| 807 | unsigned int *remaining) | 
|---|
| 808 | { | 
|---|
| 809 | #if IS_ENABLED(CONFIG_SMC) | 
|---|
| 810 | if (static_branch_unlikely(&tcp_have_smc)) { | 
|---|
| 811 | if (tp->syn_smc) { | 
|---|
| 812 | if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { | 
|---|
| 813 | opts->options |= OPTION_SMC; | 
|---|
| 814 | *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; | 
|---|
| 815 | } | 
|---|
| 816 | } | 
|---|
| 817 | } | 
|---|
| 818 | #endif | 
|---|
| 819 | } | 
|---|
| 820 |  | 
|---|
| 821 | static void smc_set_option_cond(const struct tcp_sock *tp, | 
|---|
| 822 | const struct inet_request_sock *ireq, | 
|---|
| 823 | struct tcp_out_options *opts, | 
|---|
| 824 | unsigned int *remaining) | 
|---|
| 825 | { | 
|---|
| 826 | #if IS_ENABLED(CONFIG_SMC) | 
|---|
| 827 | if (static_branch_unlikely(&tcp_have_smc)) { | 
|---|
| 828 | if (tp->syn_smc && ireq->smc_ok) { | 
|---|
| 829 | if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { | 
|---|
| 830 | opts->options |= OPTION_SMC; | 
|---|
| 831 | *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; | 
|---|
| 832 | } | 
|---|
| 833 | } | 
|---|
| 834 | } | 
|---|
| 835 | #endif | 
|---|
| 836 | } | 
|---|
| 837 |  | 
|---|
| 838 | static void mptcp_set_option_cond(const struct request_sock *req, | 
|---|
| 839 | struct tcp_out_options *opts, | 
|---|
| 840 | unsigned int *remaining) | 
|---|
| 841 | { | 
|---|
| 842 | if (rsk_is_mptcp(req)) { | 
|---|
| 843 | unsigned int size; | 
|---|
| 844 |  | 
|---|
| 845 | if (mptcp_synack_options(req, &size, &opts->mptcp)) { | 
|---|
| 846 | if (*remaining >= size) { | 
|---|
| 847 | opts->options |= OPTION_MPTCP; | 
|---|
| 848 | *remaining -= size; | 
|---|
| 849 | } | 
|---|
| 850 | } | 
|---|
| 851 | } | 
|---|
| 852 | } | 
|---|
| 853 |  | 
|---|
| 854 | static u32 tcp_synack_options_combine_saving(struct tcp_out_options *opts) | 
|---|
| 855 | { | 
|---|
| 856 | /* How much room is there for combining with the alignment padding? */ | 
|---|
| 857 | if ((opts->options & (OPTION_SACK_ADVERTISE | OPTION_TS)) == | 
|---|
| 858 | OPTION_SACK_ADVERTISE) | 
|---|
| 859 | return 2; | 
|---|
| 860 | else if (opts->options & OPTION_WSCALE) | 
|---|
| 861 | return 1; | 
|---|
| 862 | return 0; | 
|---|
| 863 | } | 
|---|
| 864 |  | 
|---|
| 865 | /* Calculates how large an AccECN option will fit into @remaining option space. | 
|---|
| 866 | * | 
|---|
| 867 | * AccECN option can sometimes replace NOPs used for alignment of other | 
|---|
| 868 | * TCP options (up to @max_combine_saving available). | 
|---|
| 869 | * | 
|---|
| 870 | * Only solutions with at least @required AccECN fields are accepted. | 
|---|
| 871 | * | 
|---|
| 872 | * Returns: The size of the AccECN option excluding space repurposed from | 
|---|
| 873 | * the alignment of the other options. | 
|---|
| 874 | */ | 
|---|
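|  | /* Example: with nothing to combine and 8 bytes remaining, the full three-field option (2 + 3 * 3 = 11 bytes, padded to 12) does not fit, so one field is dropped and the 8-byte two-field option fits exactly. */ | 
|---|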
| 875 | static int tcp_options_fit_accecn(struct tcp_out_options *opts, int required, | 
|---|
| 876 | int remaining) | 
|---|
| 877 | { | 
|---|
| 878 | int size = TCP_ACCECN_MAXSIZE; | 
|---|
| 879 | int sack_blocks_reduce = 0; | 
|---|
| 880 | int max_combine_saving; | 
|---|
| 881 | int rem = remaining; | 
|---|
| 882 | int align_size; | 
|---|
| 883 |  | 
|---|
| 884 | if (opts->use_synack_ecn_bytes) | 
|---|
| 885 | max_combine_saving = tcp_synack_options_combine_saving(opts); | 
|---|
| 886 | else | 
|---|
| 887 | max_combine_saving = opts->num_sack_blocks > 0 ? 2 : 0; | 
|---|
| 888 | opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; | 
|---|
| 889 | while (opts->num_accecn_fields >= required) { | 
|---|
| 890 | /* Pad to dword if cannot combine */ | 
|---|
| 891 | if ((size & 0x3) > max_combine_saving) | 
|---|
| 892 | align_size = ALIGN(size, 4); | 
|---|
| 893 | else | 
|---|
| 894 | align_size = ALIGN_DOWN(size, 4); | 
|---|
| 895 |  | 
|---|
| 896 | if (rem >= align_size) { | 
|---|
| 897 | size = align_size; | 
|---|
| 898 | break; | 
|---|
| 899 | } else if (opts->num_accecn_fields == required && | 
|---|
| 900 | opts->num_sack_blocks > 2 && | 
|---|
| 901 | required > 0) { | 
|---|
| 902 | /* Try to fit the option by removing one SACK block */ | 
|---|
| 903 | opts->num_sack_blocks--; | 
|---|
| 904 | sack_blocks_reduce++; | 
|---|
| 905 | rem = rem + TCPOLEN_SACK_PERBLOCK; | 
|---|
| 906 |  | 
|---|
| 907 | opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; | 
|---|
| 908 | size = TCP_ACCECN_MAXSIZE; | 
|---|
| 909 | continue; | 
|---|
| 910 | } | 
|---|
| 911 |  | 
|---|
| 912 | opts->num_accecn_fields--; | 
|---|
| 913 | size -= TCPOLEN_ACCECN_PERFIELD; | 
|---|
| 914 | } | 
|---|
| 915 | if (sack_blocks_reduce > 0) { | 
|---|
| 916 | if (opts->num_accecn_fields >= required) | 
|---|
| 917 | size -= sack_blocks_reduce * TCPOLEN_SACK_PERBLOCK; | 
|---|
| 918 | else | 
|---|
| 919 | opts->num_sack_blocks += sack_blocks_reduce; | 
|---|
| 920 | } | 
|---|
| 921 | if (opts->num_accecn_fields < required) | 
|---|
| 922 | return 0; | 
|---|
| 923 |  | 
|---|
| 924 | opts->options |= OPTION_ACCECN; | 
|---|
| 925 | return size; | 
|---|
| 926 | } | 
|---|
| 927 |  | 
|---|
| 928 | /* Compute TCP options for SYN packets. This is not the final | 
|---|
| 929 | * network wire format yet. | 
|---|
| 930 | */ | 
|---|
| 931 | static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, | 
|---|
| 932 | struct tcp_out_options *opts, | 
|---|
| 933 | struct tcp_key *key) | 
|---|
| 934 | { | 
|---|
| 935 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 936 | unsigned int remaining = MAX_TCP_OPTION_SPACE; | 
|---|
| 937 | struct tcp_fastopen_request *fastopen = tp->fastopen_req; | 
|---|
| 938 | bool timestamps; | 
|---|
| 939 |  | 
|---|
| 940 | /* Better than switch (key.type) as it has static branches */ | 
|---|
| 941 | if (tcp_key_is_md5(key)) { | 
|---|
| 942 | timestamps = false; | 
|---|
| 943 | opts->options |= OPTION_MD5; | 
|---|
| 944 | remaining -= TCPOLEN_MD5SIG_ALIGNED; | 
|---|
| 945 | } else { | 
|---|
| 946 | timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps); | 
|---|
| 947 | if (tcp_key_is_ao(key)) { | 
|---|
| 948 | opts->options |= OPTION_AO; | 
|---|
| 949 | remaining -= tcp_ao_len_aligned(key->ao_key); | 
|---|
| 950 | } | 
|---|
| 951 | } | 
|---|
| 952 |  | 
|---|
| 953 | /* We always send an MSS option.  If timestamps are used, the option | 
|---|
| 954 | * bytes that will appear in normal data packets must be counted in the | 
|---|
| 955 | * MSS we advertise.  But we subtract them from tp->mss_cache so that | 
|---|
| 956 | * calculations in tcp_sendmsg are simpler etc.  So account for this | 
|---|
| 957 | * fact here if necessary.  If we don't do this correctly, as a | 
|---|
| 958 | * receiver we won't recognize data packets as being full sized when we | 
|---|
| 959 | * should, and thus we won't abide by the delayed ACK rules correctly. | 
|---|
| 960 | * SACKs don't matter, we never delay an ACK when we have any of those | 
|---|
| 961 | * going out.  */ | 
|---|
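|  | /* For example, with timestamps in use an advertised MSS of 1460 still leaves only 1448 bytes of payload per segment once the 12-byte timestamp option is counted. */ | 
|---|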
| 962 | opts->mss = tcp_advertise_mss(sk); | 
|---|
| 963 | remaining -= TCPOLEN_MSS_ALIGNED; | 
|---|
| 964 |  | 
|---|
| 965 | if (likely(timestamps)) { | 
|---|
| 966 | opts->options |= OPTION_TS; | 
|---|
| 967 | opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; | 
|---|
| 968 | opts->tsecr = tp->rx_opt.ts_recent; | 
|---|
| 969 | remaining -= TCPOLEN_TSTAMP_ALIGNED; | 
|---|
| 970 | } | 
|---|
| 971 | if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { | 
|---|
| 972 | opts->ws = tp->rx_opt.rcv_wscale; | 
|---|
| 973 | opts->options |= OPTION_WSCALE; | 
|---|
| 974 | remaining -= TCPOLEN_WSCALE_ALIGNED; | 
|---|
| 975 | } | 
|---|
| 976 | if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { | 
|---|
| 977 | opts->options |= OPTION_SACK_ADVERTISE; | 
|---|
| 978 | if (unlikely(!(OPTION_TS & opts->options))) | 
|---|
| 979 | remaining -= TCPOLEN_SACKPERM_ALIGNED; | 
|---|
| 980 | } | 
|---|
| 981 |  | 
|---|
| 982 | if (fastopen && fastopen->cookie.len >= 0) { | 
|---|
| 983 | u32 need = fastopen->cookie.len; | 
|---|
| 984 |  | 
|---|
| 985 | need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : | 
|---|
| 986 | TCPOLEN_FASTOPEN_BASE; | 
|---|
| 987 | need = (need + 3) & ~3U;  /* Align to 32 bits */ | 
|---|
| 988 | if (remaining >= need) { | 
|---|
| 989 | opts->options |= OPTION_FAST_OPEN_COOKIE; | 
|---|
| 990 | opts->fastopen_cookie = &fastopen->cookie; | 
|---|
| 991 | remaining -= need; | 
|---|
| 992 | tp->syn_fastopen = 1; | 
|---|
| 993 | tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; | 
|---|
| 994 | } | 
|---|
| 995 | } | 
|---|
| 996 |  | 
|---|
| 997 | smc_set_option(tp, opts, &remaining); | 
|---|
| 998 |  | 
|---|
| 999 | if (sk_is_mptcp(sk)) { | 
|---|
| 1000 | unsigned int size; | 
|---|
| 1001 |  | 
|---|
| 1002 | if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { | 
|---|
| 1003 | if (remaining >= size) { | 
|---|
| 1004 | opts->options |= OPTION_MPTCP; | 
|---|
| 1005 | remaining -= size; | 
|---|
| 1006 | } | 
|---|
| 1007 | } | 
|---|
| 1008 | } | 
|---|
| 1009 |  | 
|---|
| 1010 | /* A SYN/ACK for a simultaneous open needs the AccECN option, but a plain | 
|---|
| 1011 | * SYN does not.  We also attempt to negotiate AccECN on the first | 
|---|
| 1012 | * retransmitted SYN, as described in "3.1.4.1. Retransmitted SYNs" | 
|---|
| 1013 | * of the AccECN draft. | 
|---|
| 1014 | */ | 
|---|
| 1015 | if (unlikely((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) && | 
|---|
| 1016 | tcp_ecn_mode_accecn(tp) && | 
|---|
| 1017 | inet_csk(sk)->icsk_retransmits < 2 && | 
|---|
| 1018 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && | 
|---|
| 1019 | remaining >= TCPOLEN_ACCECN_BASE)) { | 
|---|
| 1020 | opts->use_synack_ecn_bytes = 1; | 
|---|
| 1021 | remaining -= tcp_options_fit_accecn(opts, 0, remaining); | 
|---|
| 1022 | } | 
|---|
| 1023 |  | 
|---|
| 1024 | bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); | 
|---|
| 1025 |  | 
|---|
| 1026 | return MAX_TCP_OPTION_SPACE - remaining; | 
|---|
| 1027 | } | 
|---|
| 1028 |  | 
|---|
| 1029 | /* Set up TCP options for SYN-ACKs. */ | 
|---|
| 1030 | static unsigned int tcp_synack_options(const struct sock *sk, | 
|---|
| 1031 | struct request_sock *req, | 
|---|
| 1032 | unsigned int mss, struct sk_buff *skb, | 
|---|
| 1033 | struct tcp_out_options *opts, | 
|---|
| 1034 | const struct tcp_key *key, | 
|---|
| 1035 | struct tcp_fastopen_cookie *foc, | 
|---|
| 1036 | enum tcp_synack_type synack_type, | 
|---|
| 1037 | struct sk_buff *syn_skb) | 
|---|
| 1038 | { | 
|---|
| 1039 | struct inet_request_sock *ireq = inet_rsk(req); | 
|---|
| 1040 | unsigned int remaining = MAX_TCP_OPTION_SPACE; | 
|---|
| 1041 | struct tcp_request_sock *treq = tcp_rsk(req); | 
|---|
| 1042 |  | 
|---|
| 1043 | if (tcp_key_is_md5(key)) { | 
|---|
| 1044 | opts->options |= OPTION_MD5; | 
|---|
| 1045 | remaining -= TCPOLEN_MD5SIG_ALIGNED; | 
|---|
| 1046 |  | 
|---|
| 1047 | /* We can't fit any SACK blocks in a packet with MD5 + TS | 
|---|
| 1048 | * options. There was discussion about disabling SACK | 
|---|
| 1049 | * rather than TS in order to fit in better with old, | 
|---|
| 1050 | * buggy kernels, but that was deemed to be unnecessary. | 
|---|
| 1051 | */ | 
|---|
| 1052 | if (synack_type != TCP_SYNACK_COOKIE) | 
|---|
| 1053 | ireq->tstamp_ok &= !ireq->sack_ok; | 
|---|
| 1054 | } else if (tcp_key_is_ao(key)) { | 
|---|
| 1055 | opts->options |= OPTION_AO; | 
|---|
| 1056 | remaining -= tcp_ao_len_aligned(key->ao_key); | 
|---|
| 1057 | ireq->tstamp_ok &= !ireq->sack_ok; | 
|---|
| 1058 | } | 
|---|
| 1059 |  | 
|---|
| 1060 | /* We always send an MSS option. */ | 
|---|
| 1061 | opts->mss = mss; | 
|---|
| 1062 | remaining -= TCPOLEN_MSS_ALIGNED; | 
|---|
| 1063 |  | 
|---|
| 1064 | if (likely(ireq->wscale_ok)) { | 
|---|
| 1065 | opts->ws = ireq->rcv_wscale; | 
|---|
| 1066 | opts->options |= OPTION_WSCALE; | 
|---|
| 1067 | remaining -= TCPOLEN_WSCALE_ALIGNED; | 
|---|
| 1068 | } | 
|---|
| 1069 | if (likely(ireq->tstamp_ok)) { | 
|---|
| 1070 | opts->options |= OPTION_TS; | 
|---|
| 1071 | opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + | 
|---|
| 1072 | tcp_rsk(req)->ts_off; | 
|---|
| 1073 | if (!tcp_rsk(req)->snt_tsval_first) { | 
|---|
| 1074 | if (!opts->tsval) | 
|---|
| 1075 | opts->tsval = ~0U; | 
|---|
| 1076 | tcp_rsk(req)->snt_tsval_first = opts->tsval; | 
|---|
| 1077 | } | 
|---|
| 1078 | WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval); | 
|---|
| 1079 | opts->tsecr = req->ts_recent; | 
|---|
| 1080 | remaining -= TCPOLEN_TSTAMP_ALIGNED; | 
|---|
| 1081 | } | 
|---|
| 1082 | if (likely(ireq->sack_ok)) { | 
|---|
| 1083 | opts->options |= OPTION_SACK_ADVERTISE; | 
|---|
| 1084 | if (unlikely(!ireq->tstamp_ok)) | 
|---|
| 1085 | remaining -= TCPOLEN_SACKPERM_ALIGNED; | 
|---|
| 1086 | } | 
|---|
| 1087 | if (foc != NULL && foc->len >= 0) { | 
|---|
| 1088 | u32 need = foc->len; | 
|---|
| 1089 |  | 
|---|
| 1090 | need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : | 
|---|
| 1091 | TCPOLEN_FASTOPEN_BASE; | 
|---|
| 1092 | need = (need + 3) & ~3U;  /* Align to 32 bits */ | 
|---|
| 1093 | if (remaining >= need) { | 
|---|
| 1094 | opts->options |= OPTION_FAST_OPEN_COOKIE; | 
|---|
| 1095 | opts->fastopen_cookie = foc; | 
|---|
| 1096 | remaining -= need; | 
|---|
| 1097 | } | 
|---|
| 1098 | } | 
|---|
| 1099 |  | 
|---|
| 1100 | mptcp_set_option_cond(req, opts, &remaining); | 
|---|
| 1101 |  | 
|---|
| 1102 | smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); | 
|---|
| 1103 |  | 
|---|
| 1104 | if (treq->accecn_ok && | 
|---|
| 1105 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && | 
|---|
| 1106 | req->num_timeout < 1 && remaining >= TCPOLEN_ACCECN_BASE) { | 
|---|
| 1107 | opts->use_synack_ecn_bytes = 1; | 
|---|
| 1108 | remaining -= tcp_options_fit_accecn(opts, 0, remaining); | 
|---|
| 1109 | } | 
|---|
| 1110 |  | 
|---|
| 1111 | bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb, | 
|---|
| 1112 | synack_type, opts, &remaining); | 
|---|
| 1113 |  | 
|---|
| 1114 | return MAX_TCP_OPTION_SPACE - remaining; | 
|---|
| 1115 | } | 
|---|
| 1116 |  | 
|---|
| 1117 | /* Compute TCP options for ESTABLISHED sockets. This is not the | 
|---|
| 1118 | * final wire format yet. | 
|---|
| 1119 | */ | 
|---|
| 1120 | static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, | 
|---|
| 1121 | struct tcp_out_options *opts, | 
|---|
| 1122 | struct tcp_key *key) | 
|---|
| 1123 | { | 
|---|
| 1124 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1125 | unsigned int size = 0; | 
|---|
| 1126 | unsigned int eff_sacks; | 
|---|
| 1127 |  | 
|---|
| 1128 | opts->options = 0; | 
|---|
| 1129 |  | 
|---|
| 1130 | /* Better than switch (key.type) as it has static branches */ | 
|---|
| 1131 | if (tcp_key_is_md5(key)) { | 
|---|
| 1132 | opts->options |= OPTION_MD5; | 
|---|
| 1133 | size += TCPOLEN_MD5SIG_ALIGNED; | 
|---|
| 1134 | } else if (tcp_key_is_ao(key)) { | 
|---|
| 1135 | opts->options |= OPTION_AO; | 
|---|
| 1136 | size += tcp_ao_len_aligned(key->ao_key); | 
|---|
| 1137 | } | 
|---|
| 1138 |  | 
|---|
| 1139 | if (likely(tp->rx_opt.tstamp_ok)) { | 
|---|
| 1140 | opts->options |= OPTION_TS; | 
|---|
| 1141 | opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + | 
|---|
| 1142 | tp->tsoffset : 0; | 
|---|
| 1143 | opts->tsecr = tp->rx_opt.ts_recent; | 
|---|
| 1144 | size += TCPOLEN_TSTAMP_ALIGNED; | 
|---|
| 1145 | } | 
|---|
| 1146 |  | 
|---|
| 1147 | /* MPTCP options have precedence over SACK for the limited TCP | 
|---|
| 1148 | * option space because a MPTCP connection would be forced to | 
|---|
| 1149 | * fall back to regular TCP if a required multipath option is | 
|---|
| 1150 | * missing. SACK still gets a chance to use whatever space is | 
|---|
| 1151 | * left. | 
|---|
| 1152 | */ | 
|---|
| 1153 | if (sk_is_mptcp(sk)) { | 
|---|
| 1154 | unsigned int remaining = MAX_TCP_OPTION_SPACE - size; | 
|---|
| 1155 | unsigned int opt_size = 0; | 
|---|
| 1156 |  | 
|---|
| 1157 | if (mptcp_established_options(sk, skb, &opt_size, remaining, | 
|---|
| 1158 | &opts->mptcp)) { | 
|---|
| 1159 | opts->options |= OPTION_MPTCP; | 
|---|
| 1160 | size += opt_size; | 
|---|
| 1161 | } | 
|---|
| 1162 | } | 
|---|
| 1163 |  | 
|---|
| 1164 | eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; | 
|---|
| 1165 | if (unlikely(eff_sacks)) { | 
|---|
| 1166 | const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; | 
|---|
| 1167 | if (likely(remaining >= TCPOLEN_SACK_BASE_ALIGNED + | 
|---|
| 1168 | TCPOLEN_SACK_PERBLOCK)) { | 
|---|
| 1169 | opts->num_sack_blocks = | 
|---|
| 1170 | min_t(unsigned int, eff_sacks, | 
|---|
| 1171 | (remaining - TCPOLEN_SACK_BASE_ALIGNED) / | 
|---|
| 1172 | TCPOLEN_SACK_PERBLOCK); | 
|---|
| 1173 |  | 
|---|
| 1174 | size += TCPOLEN_SACK_BASE_ALIGNED + | 
|---|
| 1175 | opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; | 
|---|
| 1176 | } else { | 
|---|
| 1177 | opts->num_sack_blocks = 0; | 
|---|
| 1178 | } | 
|---|
| 1179 | } else { | 
|---|
| 1180 | opts->num_sack_blocks = 0; | 
|---|
| 1181 | } | 
|---|
| 1182 |  | 
|---|
| 1183 | if (tcp_ecn_mode_accecn(tp)) { | 
|---|
| 1184 | int ecn_opt = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option); | 
|---|
| 1185 |  | 
|---|
| 1186 | if (ecn_opt && tp->saw_accecn_opt && !tcp_accecn_opt_fail_send(tp) && | 
|---|
| 1187 | (ecn_opt >= TCP_ACCECN_OPTION_FULL || tp->accecn_opt_demand || | 
|---|
| 1188 | tcp_accecn_option_beacon_check(sk))) { | 
|---|
| 1189 | opts->use_synack_ecn_bytes = 0; | 
|---|
| 1190 | size += tcp_options_fit_accecn(opts, tp->accecn_minlen, | 
|---|
| 1191 | MAX_TCP_OPTION_SPACE - size); | 
|---|
| 1192 | } | 
|---|
| 1193 | } | 
|---|
| 1194 |  | 
|---|
| 1195 | if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp, | 
|---|
| 1196 | BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) { | 
|---|
| 1197 | unsigned int remaining = MAX_TCP_OPTION_SPACE - size; | 
|---|
| 1198 |  | 
|---|
| 1199 | bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); | 
|---|
| 1200 |  | 
|---|
| 1201 | size = MAX_TCP_OPTION_SPACE - remaining; | 
|---|
| 1202 | } | 
|---|
| 1203 |  | 
|---|
| 1204 | return size; | 
|---|
| 1205 | } | 
|---|
| 1206 |  | 
|---|
| 1207 |  | 
|---|
| 1208 | /* TCP SMALL QUEUES (TSQ) | 
|---|
| 1209 | * | 
|---|
| 1210 | * The TSQ goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev) | 
|---|
| 1211 | * to reduce RTT and bufferbloat. | 
|---|
| 1212 | * We do this using a special skb destructor (tcp_wfree). | 
|---|
| 1213 | * | 
|---|
| 1214 | * It's important that tcp_wfree() can be replaced by sock_wfree() in the event an skb | 
|---|
| 1215 | * needs to be reallocated in a driver. | 
|---|
| 1216 | * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc | 
|---|
| 1217 | * | 
|---|
| 1218 | * Since transmit from skb destructor is forbidden, we use a BH work item | 
|---|
| 1219 | * to process all sockets that eventually need to send more skbs. | 
|---|
| 1220 | * We use one work item per cpu, with its own queue of sockets. | 
|---|
| 1221 | */ | 
|---|
| 1222 | struct tsq_work { | 
|---|
| 1223 | struct work_struct	work; | 
|---|
| 1224 | struct list_head	head; /* queue of tcp sockets */ | 
|---|
| 1225 | }; | 
|---|
| 1226 | static DEFINE_PER_CPU(struct tsq_work, tsq_work); | 
|---|
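|  | /* tcp_wfree() (the skb destructor mentioned above) adds a throttled socket to this per-CPU list; tcp_tsq_workfn() below then drains the list in BH context. */ | 
|---|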
| 1227 |  | 
|---|
| 1228 | static void tcp_tsq_write(struct sock *sk) | 
|---|
| 1229 | { | 
|---|
| 1230 | if ((1 << sk->sk_state) & | 
|---|
| 1231 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | | 
|---|
| 1232 | TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) { | 
|---|
| 1233 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1234 |  | 
|---|
| 1235 | if (tp->lost_out > tp->retrans_out && | 
|---|
| 1236 | tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) { | 
|---|
| 1237 | tcp_mstamp_refresh(tp); | 
|---|
| 1238 | tcp_xmit_retransmit_queue(sk); | 
|---|
| 1239 | } | 
|---|
| 1240 |  | 
|---|
| 1241 | tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, | 
|---|
| 1242 | 0, GFP_ATOMIC); | 
|---|
| 1243 | } | 
|---|
| 1244 | } | 
|---|
| 1245 |  | 
|---|
| 1246 | static void tcp_tsq_handler(struct sock *sk) | 
|---|
| 1247 | { | 
|---|
| 1248 | bh_lock_sock(sk); | 
|---|
| 1249 | if (!sock_owned_by_user(sk)) | 
|---|
| 1250 | tcp_tsq_write(sk); | 
|---|
| 1251 | else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) | 
|---|
| 1252 | sock_hold(sk); | 
|---|
| 1253 | bh_unlock_sock(sk); | 
|---|
| 1254 | } | 
|---|
| 1255 | /* | 
|---|
| 1256 | * One work item per cpu tries to send more skbs. | 
|---|
| 1257 | * We run in BH context but need to disable irqs when | 
|---|
| 1258 | * transferring tsq->head because tcp_wfree() might | 
|---|
| 1259 | * interrupt us (non NAPI drivers) | 
|---|
| 1260 | */ | 
|---|
| 1261 | static void tcp_tsq_workfn(struct work_struct *work) | 
|---|
| 1262 | { | 
|---|
| 1263 | struct tsq_work *tsq = container_of(work, struct tsq_work, work); | 
|---|
| 1264 | LIST_HEAD(list); | 
|---|
| 1265 | unsigned long flags; | 
|---|
| 1266 | struct list_head *q, *n; | 
|---|
| 1267 | struct tcp_sock *tp; | 
|---|
| 1268 | struct sock *sk; | 
|---|
| 1269 |  | 
|---|
| 1270 | local_irq_save(flags); | 
|---|
| 1271 | list_splice_init(list: &tsq->head, head: &list); | 
|---|
| 1272 | local_irq_restore(flags); | 
|---|
| 1273 |  | 
|---|
| 1274 | list_for_each_safe(q, n, &list) { | 
|---|
| 1275 | tp = list_entry(q, struct tcp_sock, tsq_node); | 
|---|
| 1276 | list_del(entry: &tp->tsq_node); | 
|---|
| 1277 |  | 
|---|
| 1278 | sk = (struct sock *)tp; | 
|---|
| 1279 | smp_mb__before_atomic(); | 
|---|
| 1280 | clear_bit(nr: TSQ_QUEUED, addr: &sk->sk_tsq_flags); | 
|---|
| 1281 |  | 
|---|
| 1282 | tcp_tsq_handler(sk); | 
|---|
| 1283 | sk_free(sk); | 
|---|
| 1284 | } | 
|---|
| 1285 | } | 
|---|
| 1286 |  | 
|---|
| 1287 | #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\ | 
|---|
| 1288 | TCPF_WRITE_TIMER_DEFERRED |	\ | 
|---|
| 1289 | TCPF_DELACK_TIMER_DEFERRED |	\ | 
|---|
| 1290 | TCPF_MTU_REDUCED_DEFERRED |	\ | 
|---|
| 1291 | TCPF_ACK_DEFERRED) | 
|---|
| 1292 | /** | 
|---|
| 1293 | * tcp_release_cb - tcp release_sock() callback | 
|---|
| 1294 | * @sk: socket | 
|---|
| 1295 | * | 
|---|
| 1296 | * called from release_sock() to perform protocol dependent | 
|---|
| 1297 | * actions before socket release. | 
|---|
| 1298 | */ | 
|---|
| 1299 | void tcp_release_cb(struct sock *sk) | 
|---|
| 1300 | { | 
|---|
| 1301 | unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); | 
|---|
| 1302 | unsigned long nflags; | 
|---|
| 1303 |  | 
|---|
| 1304 | /* perform an atomic operation only if at least one flag is set */ | 
|---|
| 1305 | do { | 
|---|
| 1306 | if (!(flags & TCP_DEFERRED_ALL)) | 
|---|
| 1307 | return; | 
|---|
| 1308 | nflags = flags & ~TCP_DEFERRED_ALL; | 
|---|
| 1309 | } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); | 
|---|
| 1310 |  | 
|---|
| 1311 | if (flags & TCPF_TSQ_DEFERRED) { | 
|---|
| 1312 | tcp_tsq_write(sk); | 
|---|
| 1313 | __sock_put(sk); | 
|---|
| 1314 | } | 
|---|
| 1315 |  | 
|---|
| 1316 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { | 
|---|
| 1317 | tcp_write_timer_handler(sk); | 
|---|
| 1318 | __sock_put(sk); | 
|---|
| 1319 | } | 
|---|
| 1320 | if (flags & TCPF_DELACK_TIMER_DEFERRED) { | 
|---|
| 1321 | tcp_delack_timer_handler(sk); | 
|---|
| 1322 | __sock_put(sk); | 
|---|
| 1323 | } | 
|---|
| 1324 | if (flags & TCPF_MTU_REDUCED_DEFERRED) { | 
|---|
| 1325 | inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); | 
|---|
| 1326 | __sock_put(sk); | 
|---|
| 1327 | } | 
|---|
| 1328 | if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk)) | 
|---|
| 1329 | tcp_send_ack(sk); | 
|---|
| 1330 | } | 
|---|
| 1331 | EXPORT_IPV6_MOD(tcp_release_cb); | 
|---|
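/* Worked example for the flag-consuming loop in tcp_release_cb() (editor's
 * note, illustrative values): if sk_tsq_flags holds
 * TCPF_TSQ_DEFERRED | TCPF_ACK_DEFERRED | TSQF_THROTTLED, the loop computes
 * nflags = TSQF_THROTTLED (deferred bits cleared, unrelated bits kept) and
 * installs it with a single try_cmpxchg().  On a race, try_cmpxchg() reloads
 * 'flags' and the loop retries, so no deferred bit is lost or consumed
 * twice.  The snapshot then drives the handlers: tcp_tsq_write() runs and
 * drops its deferred reference, and tcp_send_ack() runs if an ACK is still
 * scheduled.
 */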
| 1332 |  | 
|---|
| 1333 | void __init tcp_tsq_work_init(void) | 
|---|
| 1334 | { | 
|---|
| 1335 | int i; | 
|---|
| 1336 |  | 
|---|
| 1337 | for_each_possible_cpu(i) { | 
|---|
| 1338 | struct tsq_work *tsq = &per_cpu(tsq_work, i); | 
|---|
| 1339 |  | 
|---|
| 1340 | INIT_LIST_HEAD(list: &tsq->head); | 
|---|
| 1341 | INIT_WORK(&tsq->work, tcp_tsq_workfn); | 
|---|
| 1342 | } | 
|---|
| 1343 | } | 
|---|
| 1344 |  | 
|---|
| 1345 | /* | 
|---|
| 1346 | * Write buffer destructor automatically called from kfree_skb. | 
|---|
| 1347 | * We can't xmit new skbs from this context, as we might already | 
|---|
| 1348 | * hold qdisc lock. | 
|---|
| 1349 | */ | 
|---|
| 1350 | void tcp_wfree(struct sk_buff *skb) | 
|---|
| 1351 | { | 
|---|
| 1352 | struct sock *sk = skb->sk; | 
|---|
| 1353 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1354 | unsigned long flags, nval, oval; | 
|---|
| 1355 | struct tsq_work *tsq; | 
|---|
| 1356 | bool empty; | 
|---|
| 1357 |  | 
|---|
| 1358 | /* Keep one reference on sk_wmem_alloc. | 
|---|
| 1359 | * Will be released by sk_free() from here or tcp_tsq_workfn() | 
|---|
| 1360 | */ | 
|---|
| 1361 | WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); | 
|---|
| 1362 |  | 
|---|
| 1363 | /* If this softirq is serviced by ksoftirqd, we are likely under stress. | 
|---|
| 1364 | * Wait until our queues (qdisc + devices) are drained. | 
|---|
| 1365 | * This gives : | 
|---|
| 1366 | * - less callbacks to tcp_write_xmit(), reducing stress (batches) | 
|---|
| 1367 | * - chance for incoming ACK (processed by another cpu maybe) | 
|---|
| 1368 | *   to migrate this flow (skb->ooo_okay will be eventually set) | 
|---|
| 1369 | */ | 
|---|
| 1370 | if (refcount_read(r: &sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) | 
|---|
| 1371 | goto out; | 
|---|
| 1372 |  | 
|---|
| 1373 | oval = smp_load_acquire(&sk->sk_tsq_flags); | 
|---|
| 1374 | do { | 
|---|
| 1375 | if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) | 
|---|
| 1376 | goto out; | 
|---|
| 1377 |  | 
|---|
| 1378 | nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; | 
|---|
| 1379 | } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); | 
|---|
| 1380 |  | 
|---|
| 1381 | /* queue this socket to BH workqueue */ | 
|---|
| 1382 | local_irq_save(flags); | 
|---|
| 1383 | tsq = this_cpu_ptr(&tsq_work); | 
|---|
| 1384 | empty = list_empty(head: &tsq->head); | 
|---|
| 1385 | list_add(new: &tp->tsq_node, head: &tsq->head); | 
|---|
| 1386 | if (empty) | 
|---|
| 1387 | queue_work(wq: system_bh_wq, work: &tsq->work); | 
|---|
| 1388 | local_irq_restore(flags); | 
|---|
| 1389 | return; | 
|---|
| 1390 | out: | 
|---|
| 1391 | sk_free(sk); | 
|---|
| 1392 | } | 
|---|
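/* Worked example for tcp_wfree() (editor's note): assume the flow was
 * throttled by TSQ (TSQF_THROTTLED set) and is not yet queued.  The cmpxchg
 * loop turns {THROTTLED} into {QUEUED} in one atomic step, the socket is
 * added to this cpu's tsq list and, if the list was empty, the BH work item
 * is scheduled.  If instead the skb is freed while the flow is not
 * throttled (or is already queued), we fall through to sk_free(), which
 * simply drops the extra sk_wmem_alloc reference kept at the top.
 */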
| 1393 |  | 
|---|
| 1394 | /* Note: Called under soft irq. | 
|---|
| 1395 | * We can call TCP stack right away, unless socket is owned by user. | 
|---|
| 1396 | */ | 
|---|
| 1397 | enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) | 
|---|
| 1398 | { | 
|---|
| 1399 | struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); | 
|---|
| 1400 | struct sock *sk = (struct sock *)tp; | 
|---|
| 1401 |  | 
|---|
| 1402 | tcp_tsq_handler(sk); | 
|---|
| 1403 | sock_put(sk); | 
|---|
| 1404 |  | 
|---|
| 1405 | return HRTIMER_NORESTART; | 
|---|
| 1406 | } | 
|---|
| 1407 |  | 
|---|
| 1408 | static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, | 
|---|
| 1409 | u64 prior_wstamp) | 
|---|
| 1410 | { | 
|---|
| 1411 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1412 |  | 
|---|
| 1413 | if (sk->sk_pacing_status != SK_PACING_NONE) { | 
|---|
| 1414 | unsigned long rate = READ_ONCE(sk->sk_pacing_rate); | 
|---|
| 1415 |  | 
|---|
| 1416 | /* Original sch_fq does not pace first 10 MSS | 
|---|
| 1417 | * Note that tp->data_segs_out overflows after 2^32 packets, | 
|---|
| 1418 | * this is a minor annoyance. | 
|---|
| 1419 | */ | 
|---|
| 1420 | if (rate != ~0UL && rate && tp->data_segs_out >= 10) { | 
|---|
| 1421 | u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); | 
|---|
| 1422 | u64 credit = tp->tcp_wstamp_ns - prior_wstamp; | 
|---|
| 1423 |  | 
|---|
| 1424 | /* take into account OS jitter */ | 
|---|
| 1425 | len_ns -= min_t(u64, len_ns / 2, credit); | 
|---|
| 1426 | tp->tcp_wstamp_ns += len_ns; | 
|---|
| 1427 | } | 
|---|
| 1428 | } | 
|---|
| 1429 | list_move_tail(list: &skb->tcp_tsorted_anchor, head: &tp->tsorted_sent_queue); | 
|---|
| 1430 | } | 
|---|
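/* Worked example of the pacing credit above (editor's note, illustrative
 * numbers): with sk_pacing_rate = 12,500,000 bytes/sec (~100 Mbit/s), a
 * 1500-byte skb costs len_ns = 1500 * NSEC_PER_SEC / rate = 120,000 ns.
 * If the pacing clock had fallen 40,000 ns behind real time (OS jitter),
 * credit = 40,000 and we forgive min(len_ns / 2, credit) = 40,000 ns,
 * advancing tcp_wstamp_ns by only 80,000 ns so jitter does not permanently
 * depress the achieved rate.
 */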
| 1431 |  | 
|---|
| 1432 | INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); | 
|---|
| 1433 | INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); | 
|---|
| 1434 | INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); | 
|---|
| 1435 |  | 
|---|
| 1436 | /* This routine actually transmits TCP packets queued in by | 
|---|
| 1437 | * tcp_do_sendmsg().  This is used by both the initial | 
|---|
| 1438 | * transmission and possible later retransmissions. | 
|---|
| 1439 | * All SKB's seen here are completely headerless.  It is our | 
|---|
| 1440 | * job to build the TCP header, and pass the packet down to | 
|---|
| 1441 | * IP so it can do the same plus pass the packet off to the | 
|---|
| 1442 | * device. | 
|---|
| 1443 | * | 
|---|
| 1444 | * We are working here with either a clone of the original | 
|---|
| 1445 | * SKB, or a fresh unique copy made by the retransmit engine. | 
|---|
| 1446 | */ | 
|---|
| 1447 | static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, | 
|---|
| 1448 | int clone_it, gfp_t gfp_mask, u32 rcv_nxt) | 
|---|
| 1449 | { | 
|---|
| 1450 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 1451 | struct inet_sock *inet; | 
|---|
| 1452 | struct tcp_sock *tp; | 
|---|
| 1453 | struct tcp_skb_cb *tcb; | 
|---|
| 1454 | struct tcp_out_options opts; | 
|---|
| 1455 | unsigned int tcp_options_size, tcp_header_size; | 
|---|
| 1456 | struct sk_buff *oskb = NULL; | 
|---|
| 1457 | struct tcp_key key; | 
|---|
| 1458 | struct tcphdr *th; | 
|---|
| 1459 | u64 prior_wstamp; | 
|---|
| 1460 | int err; | 
|---|
| 1461 |  | 
|---|
| 1462 | BUG_ON(!skb || !tcp_skb_pcount(skb)); | 
|---|
| 1463 | tp = tcp_sk(sk); | 
|---|
| 1464 | prior_wstamp = tp->tcp_wstamp_ns; | 
|---|
| 1465 | tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); | 
|---|
| 1466 | skb_set_delivery_time(skb, kt: tp->tcp_wstamp_ns, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 1467 | if (clone_it) { | 
|---|
| 1468 | oskb = skb; | 
|---|
| 1469 |  | 
|---|
| 1470 | tcp_skb_tsorted_save(oskb) { | 
|---|
| 1471 | if (unlikely(skb_cloned(oskb))) | 
|---|
| 1472 | skb = pskb_copy(skb: oskb, gfp_mask); | 
|---|
| 1473 | else | 
|---|
| 1474 | skb = skb_clone(skb: oskb, priority: gfp_mask); | 
|---|
| 1475 | } tcp_skb_tsorted_restore(oskb); | 
|---|
| 1476 |  | 
|---|
| 1477 | if (unlikely(!skb)) | 
|---|
| 1478 | return -ENOBUFS; | 
|---|
| 1479 | /* retransmit skbs might have a non zero value in skb->dev | 
|---|
| 1480 | * because skb->dev is aliased with skb->rbnode.rb_left | 
|---|
| 1481 | */ | 
|---|
| 1482 | skb->dev = NULL; | 
|---|
| 1483 | } | 
|---|
| 1484 |  | 
|---|
| 1485 | inet = inet_sk(sk); | 
|---|
| 1486 | tcb = TCP_SKB_CB(skb); | 
|---|
| 1487 | memset(s: &opts, c: 0, n: sizeof(opts)); | 
|---|
| 1488 |  | 
|---|
| 1489 | tcp_get_current_key(sk, out: &key); | 
|---|
| 1490 | if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { | 
|---|
| 1491 | tcp_options_size = tcp_syn_options(sk, skb, opts: &opts, key: &key); | 
|---|
| 1492 | } else { | 
|---|
| 1493 | tcp_options_size = tcp_established_options(sk, skb, opts: &opts, key: &key); | 
|---|
| 1494 | /* Force a PSH flag on all (GSO) packets to expedite GRO flush | 
|---|
| 1495 | * at the receiver: this slightly improves GRO performance. | 
|---|
| 1496 | * Note that we do not force the PSH flag for non GSO packets, | 
|---|
| 1497 | * because they might be sent under high congestion events, | 
|---|
| 1498 | * and in this case it is better to delay the delivery of 1-MSS | 
|---|
| 1499 | * packets and thus the corresponding ACK packet that would | 
|---|
| 1500 | * release the following packet. | 
|---|
| 1501 | */ | 
|---|
| 1502 | if (tcp_skb_pcount(skb) > 1) | 
|---|
| 1503 | tcb->tcp_flags |= TCPHDR_PSH; | 
|---|
| 1504 | } | 
|---|
| 1505 | tcp_header_size = tcp_options_size + sizeof(struct tcphdr); | 
|---|
| 1506 |  | 
|---|
| 1507 | /* We set skb->ooo_okay to one if this packet can select | 
|---|
| 1508 | * a different TX queue than prior packets of this flow, | 
|---|
| 1509 | * to avoid self inflicted reorders. | 
|---|
| 1510 | * The 'other' queue decision is based on current cpu number | 
|---|
| 1511 | * if XPS is enabled, or sk->sk_txhash otherwise. | 
|---|
| 1512 | * We can switch to another (and better) queue if: | 
|---|
| 1513 | * 1) No packet with payload is in qdisc/device queues. | 
|---|
| 1514 | *    Delays in TX completion can defeat the test | 
|---|
| 1515 | *    even if packets were already sent. | 
|---|
| 1516 | * 2) Or rtx queue is empty. | 
|---|
| 1517 | *    This mitigates above case if ACK packets for | 
|---|
| 1518 | *    all prior packets were already processed. | 
|---|
| 1519 | */ | 
|---|
| 1520 | skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || | 
|---|
| 1521 | tcp_rtx_queue_empty(sk); | 
|---|
| 1522 |  | 
|---|
| 1523 | /* If we had to use memory reserve to allocate this skb, | 
|---|
| 1524 | * this might cause drops if packet is looped back : | 
|---|
| 1525 | * Other socket might not have SOCK_MEMALLOC. | 
|---|
| 1526 | * Packets not looped back do not care about pfmemalloc. | 
|---|
| 1527 | */ | 
|---|
| 1528 | skb->pfmemalloc = 0; | 
|---|
| 1529 |  | 
|---|
| 1530 | skb_push(skb, len: tcp_header_size); | 
|---|
| 1531 | skb_reset_transport_header(skb); | 
|---|
| 1532 |  | 
|---|
| 1533 | skb_orphan(skb); | 
|---|
| 1534 | skb->sk = sk; | 
|---|
| 1535 | skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; | 
|---|
| 1536 | refcount_add(i: skb->truesize, r: &sk->sk_wmem_alloc); | 
|---|
| 1537 |  | 
|---|
| 1538 | skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); | 
|---|
| 1539 |  | 
|---|
| 1540 | /* Build TCP header and checksum it. */ | 
|---|
| 1541 | th = (struct tcphdr *)skb->data; | 
|---|
| 1542 | th->source		= inet->inet_sport; | 
|---|
| 1543 | th->dest		= inet->inet_dport; | 
|---|
| 1544 | th->seq			= htonl(tcb->seq); | 
|---|
| 1545 | th->ack_seq		= htonl(rcv_nxt); | 
|---|
| 1546 | *(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) | | 
|---|
| 1547 | (tcb->tcp_flags & TCPHDR_FLAGS_MASK)); | 
|---|
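/* Editor's note: the store above fills the 16-bit word at byte offset 12 of
 * the TCP header (data offset + reserved + flag bits) in a single write.
 * Example: a 32-byte header gives doff = 32 >> 2 = 8, so (8 << 12) = 0x8000;
 * OR-ed with ACK|PSH (0x10 | 0x08 = 0x18) the stored value is htons(0x8018).
 */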
| 1548 |  | 
|---|
| 1549 | th->check		= 0; | 
|---|
| 1550 | th->urg_ptr		= 0; | 
|---|
| 1551 |  | 
|---|
| 1552 | /* The urg_mode check is necessary during a below snd_una win probe */ | 
|---|
| 1553 | if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { | 
|---|
| 1554 | if (before(seq1: tp->snd_up, seq2: tcb->seq + 0x10000)) { | 
|---|
| 1555 | th->urg_ptr = htons(tp->snd_up - tcb->seq); | 
|---|
| 1556 | th->urg = 1; | 
|---|
| 1557 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { | 
|---|
| 1558 | th->urg_ptr = htons(0xFFFF); | 
|---|
| 1559 | th->urg = 1; | 
|---|
| 1560 | } | 
|---|
| 1561 | } | 
|---|
| 1562 |  | 
|---|
| 1563 | skb_shinfo(skb)->gso_type = sk->sk_gso_type; | 
|---|
| 1564 | if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { | 
|---|
| 1565 | th->window      = htons(tcp_select_window(sk)); | 
|---|
| 1566 | tcp_ecn_send(sk, skb, th, tcp_header_len: tcp_header_size); | 
|---|
| 1567 | } else { | 
|---|
| 1568 | /* RFC1323: The window in SYN & SYN/ACK segments | 
|---|
| 1569 | * is never scaled. | 
|---|
| 1570 | */ | 
|---|
| 1571 | th->window	= htons(min(tp->rcv_wnd, 65535U)); | 
|---|
| 1572 | } | 
|---|
| 1573 |  | 
|---|
| 1574 | tcp_options_write(th, tp, NULL, opts: &opts, key: &key); | 
|---|
| 1575 |  | 
|---|
| 1576 | if (tcp_key_is_md5(key: &key)) { | 
|---|
| 1577 | #ifdef CONFIG_TCP_MD5SIG | 
|---|
| 1578 | /* Calculate the MD5 hash, as we have all we need now */ | 
|---|
| 1579 | sk_gso_disable(sk); | 
|---|
| 1580 | tp->af_specific->calc_md5_hash(opts.hash_location, | 
|---|
| 1581 | key.md5_key, sk, skb); | 
|---|
| 1582 | #endif | 
|---|
| 1583 | } else if (tcp_key_is_ao(key: &key)) { | 
|---|
| 1584 | int err; | 
|---|
| 1585 |  | 
|---|
| 1586 | err = tcp_ao_transmit_skb(sk, skb, key: key.ao_key, th, | 
|---|
| 1587 | hash_location: opts.hash_location); | 
|---|
| 1588 | if (err) { | 
|---|
| 1589 | sk_skb_reason_drop(sk, skb, reason: SKB_DROP_REASON_NOT_SPECIFIED); | 
|---|
| 1590 | return -ENOMEM; | 
|---|
| 1591 | } | 
|---|
| 1592 | } | 
|---|
| 1593 |  | 
|---|
| 1594 | /* BPF prog is the last one writing header option */ | 
|---|
| 1595 | bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, synack_type: 0, opts: &opts); | 
|---|
| 1596 |  | 
|---|
| 1597 | INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, | 
|---|
| 1598 | tcp_v6_send_check, tcp_v4_send_check, | 
|---|
| 1599 | sk, skb); | 
|---|
| 1600 |  | 
|---|
| 1601 | if (likely(tcb->tcp_flags & TCPHDR_ACK)) | 
|---|
| 1602 | tcp_event_ack_sent(sk, rcv_nxt); | 
|---|
| 1603 |  | 
|---|
| 1604 | if (skb->len != tcp_header_size) { | 
|---|
| 1605 | tcp_event_data_sent(tp, sk); | 
|---|
| 1606 | tp->data_segs_out += tcp_skb_pcount(skb); | 
|---|
| 1607 | tp->bytes_sent += skb->len - tcp_header_size; | 
|---|
| 1608 | } | 
|---|
| 1609 |  | 
|---|
| 1610 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 
|---|
| 1611 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, | 
|---|
| 1612 | tcp_skb_pcount(skb)); | 
|---|
| 1613 |  | 
|---|
| 1614 | tp->segs_out += tcp_skb_pcount(skb); | 
|---|
| 1615 | skb_set_hash_from_sk(skb, sk); | 
|---|
| 1616 | /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ | 
|---|
| 1617 | skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); | 
|---|
| 1618 | skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); | 
|---|
| 1619 |  | 
|---|
| 1620 | /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ | 
|---|
| 1621 |  | 
|---|
| 1622 | /* Cleanup our debris for IP stacks */ | 
|---|
| 1623 | memset(s: skb->cb, c: 0, max(sizeof(struct inet_skb_parm), | 
|---|
| 1624 | sizeof(struct inet6_skb_parm))); | 
|---|
| 1625 |  | 
|---|
| 1626 | tcp_add_tx_delay(skb, tp); | 
|---|
| 1627 |  | 
|---|
| 1628 | err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, | 
|---|
| 1629 | inet6_csk_xmit, ip_queue_xmit, | 
|---|
| 1630 | sk, skb, &inet->cork.fl); | 
|---|
| 1631 |  | 
|---|
| 1632 | if (unlikely(err > 0)) { | 
|---|
| 1633 | tcp_enter_cwr(sk); | 
|---|
| 1634 | err = net_xmit_eval(err); | 
|---|
| 1635 | } | 
|---|
| 1636 | if (!err && oskb) { | 
|---|
| 1637 | tcp_update_skb_after_send(sk, skb: oskb, prior_wstamp); | 
|---|
| 1638 | tcp_rate_skb_sent(sk, skb: oskb); | 
|---|
| 1639 | } | 
|---|
| 1640 | return err; | 
|---|
| 1641 | } | 
|---|
| 1642 |  | 
|---|
| 1643 | static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | 
|---|
| 1644 | gfp_t gfp_mask) | 
|---|
| 1645 | { | 
|---|
| 1646 | return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, | 
|---|
| 1647 | tcp_sk(sk)->rcv_nxt); | 
|---|
| 1648 | } | 
|---|
| 1649 |  | 
|---|
| 1650 | /* This routine just queues the buffer for sending. | 
|---|
| 1651 | * | 
|---|
| 1652 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, | 
|---|
| 1653 | * otherwise socket can stall. | 
|---|
| 1654 | */ | 
|---|
| 1655 | static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | 
|---|
| 1656 | { | 
|---|
| 1657 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1658 |  | 
|---|
| 1659 | /* Advance write_seq and place onto the write_queue. */ | 
|---|
| 1660 | WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); | 
|---|
| 1661 | __skb_header_release(skb); | 
|---|
| 1662 | psp_enqueue_set_decrypted(sk, skb); | 
|---|
| 1663 | tcp_add_write_queue_tail(sk, skb); | 
|---|
| 1664 | sk_wmem_queued_add(sk, val: skb->truesize); | 
|---|
| 1665 | sk_mem_charge(sk, size: skb->truesize); | 
|---|
| 1666 | } | 
|---|
| 1667 |  | 
|---|
| 1668 | /* Initialize TSO segments for a packet. */ | 
|---|
| 1669 | static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) | 
|---|
| 1670 | { | 
|---|
| 1671 | int tso_segs; | 
|---|
| 1672 |  | 
|---|
| 1673 | if (skb->len <= mss_now) { | 
|---|
| 1674 | /* Avoid the costly divide in the normal | 
|---|
| 1675 | * non-TSO case. | 
|---|
| 1676 | */ | 
|---|
| 1677 | TCP_SKB_CB(skb)->tcp_gso_size = 0; | 
|---|
| 1678 | tcp_skb_pcount_set(skb, segs: 1); | 
|---|
| 1679 | return 1; | 
|---|
| 1680 | } | 
|---|
| 1681 | TCP_SKB_CB(skb)->tcp_gso_size = mss_now; | 
|---|
| 1682 | tso_segs = DIV_ROUND_UP(skb->len, mss_now); | 
|---|
| 1683 | tcp_skb_pcount_set(skb, segs: tso_segs); | 
|---|
| 1684 | return tso_segs; | 
|---|
| 1685 | } | 
|---|
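/* Worked example for tcp_set_skb_tso_segs() (editor's note): with
 * skb->len = 4000 and mss_now = 1448, the skb is larger than one MSS, so
 * tcp_gso_size is set to 1448 and pcount = DIV_ROUND_UP(4000, 1448) = 3,
 * i.e. two full segments plus one 1104-byte tail segment.
 */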
| 1686 |  | 
|---|
| 1687 | /* Pcount in the middle of the write queue got changed, we need to do various | 
|---|
| 1688 | * tweaks to fix counters | 
|---|
| 1689 | */ | 
|---|
| 1690 | static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) | 
|---|
| 1691 | { | 
|---|
| 1692 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1693 |  | 
|---|
| 1694 | tp->packets_out -= decr; | 
|---|
| 1695 |  | 
|---|
| 1696 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) | 
|---|
| 1697 | tp->sacked_out -= decr; | 
|---|
| 1698 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) | 
|---|
| 1699 | tp->retrans_out -= decr; | 
|---|
| 1700 | if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) | 
|---|
| 1701 | tp->lost_out -= decr; | 
|---|
| 1702 |  | 
|---|
| 1703 | /* Reno case is special. Sigh... */ | 
|---|
| 1704 | if (tcp_is_reno(tp) && decr > 0) | 
|---|
| 1705 | tp->sacked_out -= min_t(u32, tp->sacked_out, decr); | 
|---|
| 1706 |  | 
|---|
| 1707 | tcp_verify_left_out(tp); | 
|---|
| 1708 | } | 
|---|
| 1709 |  | 
|---|
| 1710 | static bool tcp_has_tx_tstamp(const struct sk_buff *skb) | 
|---|
| 1711 | { | 
|---|
| 1712 | return TCP_SKB_CB(skb)->txstamp_ack || | 
|---|
| 1713 | (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); | 
|---|
| 1714 | } | 
|---|
| 1715 |  | 
|---|
| 1716 | static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) | 
|---|
| 1717 | { | 
|---|
| 1718 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 
|---|
| 1719 |  | 
|---|
| 1720 | if (unlikely(tcp_has_tx_tstamp(skb)) && | 
|---|
| 1721 | !before(seq1: shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { | 
|---|
| 1722 | struct skb_shared_info *shinfo2 = skb_shinfo(skb2); | 
|---|
| 1723 | u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; | 
|---|
| 1724 |  | 
|---|
| 1725 | shinfo->tx_flags &= ~tsflags; | 
|---|
| 1726 | shinfo2->tx_flags |= tsflags; | 
|---|
| 1727 | swap(shinfo->tskey, shinfo2->tskey); | 
|---|
| 1728 | TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; | 
|---|
| 1729 | TCP_SKB_CB(skb)->txstamp_ack = 0; | 
|---|
| 1730 | } | 
|---|
| 1731 | } | 
|---|
| 1732 |  | 
|---|
| 1733 | static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) | 
|---|
| 1734 | { | 
|---|
| 1735 | TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; | 
|---|
| 1736 | TCP_SKB_CB(skb)->eor = 0; | 
|---|
| 1737 | } | 
|---|
| 1738 |  | 
|---|
| 1739 | /* Insert buff after skb on the write or rtx queue of sk.  */ | 
|---|
| 1740 | static void tcp_insert_write_queue_after(struct sk_buff *skb, | 
|---|
| 1741 | struct sk_buff *buff, | 
|---|
| 1742 | struct sock *sk, | 
|---|
| 1743 | enum tcp_queue tcp_queue) | 
|---|
| 1744 | { | 
|---|
| 1745 | if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE) | 
|---|
| 1746 | __skb_queue_after(list: &sk->sk_write_queue, prev: skb, newsk: buff); | 
|---|
| 1747 | else | 
|---|
| 1748 | tcp_rbtree_insert(root: &sk->tcp_rtx_queue, skb: buff); | 
|---|
| 1749 | } | 
|---|
| 1750 |  | 
|---|
| 1751 | /* Function to create two new TCP segments.  Shrinks the given segment | 
|---|
| 1752 | * to the specified size and appends a new segment with the rest of the | 
|---|
| 1753 | * packet to the list.  This won't be called frequently, I hope. | 
|---|
| 1754 | * Remember, these are still headerless SKBs at this point. | 
|---|
| 1755 | */ | 
|---|
| 1756 | int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, | 
|---|
| 1757 | struct sk_buff *skb, u32 len, | 
|---|
| 1758 | unsigned int mss_now, gfp_t gfp) | 
|---|
| 1759 | { | 
|---|
| 1760 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1761 | struct sk_buff *buff; | 
|---|
| 1762 | int old_factor; | 
|---|
| 1763 | long limit; | 
|---|
| 1764 | u16 flags; | 
|---|
| 1765 | int nlen; | 
|---|
| 1766 |  | 
|---|
| 1767 | if (WARN_ON(len > skb->len)) | 
|---|
| 1768 | return -EINVAL; | 
|---|
| 1769 |  | 
|---|
| 1770 | DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); | 
|---|
| 1771 |  | 
|---|
| 1772 | /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. | 
|---|
| 1773 | * We need some allowance to not penalize applications setting small | 
|---|
| 1774 | * SO_SNDBUF values. | 
|---|
| 1775 | * Also allow first and last skb in retransmit queue to be split. | 
|---|
| 1776 | */ | 
|---|
| 1777 | limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); | 
|---|
| 1778 | if (unlikely((sk->sk_wmem_queued >> 1) > limit && | 
|---|
| 1779 | tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && | 
|---|
| 1780 | skb != tcp_rtx_queue_head(sk) && | 
|---|
| 1781 | skb != tcp_rtx_queue_tail(sk))) { | 
|---|
| 1782 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); | 
|---|
| 1783 | return -ENOMEM; | 
|---|
| 1784 | } | 
|---|
| 1785 |  | 
|---|
| 1786 | if (skb_unclone_keeptruesize(skb, pri: gfp)) | 
|---|
| 1787 | return -ENOMEM; | 
|---|
| 1788 |  | 
|---|
| 1789 | /* Get a new skb... force flag on. */ | 
|---|
| 1790 | buff = tcp_stream_alloc_skb(sk, gfp, force_schedule: true); | 
|---|
| 1791 | if (!buff) | 
|---|
| 1792 | return -ENOMEM; /* We'll just try again later. */ | 
|---|
| 1793 | skb_copy_decrypted(to: buff, from: skb); | 
|---|
| 1794 | mptcp_skb_ext_copy(to: buff, from: skb); | 
|---|
| 1795 |  | 
|---|
| 1796 | sk_wmem_queued_add(sk, val: buff->truesize); | 
|---|
| 1797 | sk_mem_charge(sk, size: buff->truesize); | 
|---|
| 1798 | nlen = skb->len - len; | 
|---|
| 1799 | buff->truesize += nlen; | 
|---|
| 1800 | skb->truesize -= nlen; | 
|---|
| 1801 |  | 
|---|
| 1802 | /* Correct the sequence numbers. */ | 
|---|
| 1803 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; | 
|---|
| 1804 | TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 1805 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; | 
|---|
| 1806 |  | 
|---|
| 1807 | /* PSH and FIN should only be set in the second packet. */ | 
|---|
| 1808 | flags = TCP_SKB_CB(skb)->tcp_flags; | 
|---|
| 1809 | TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); | 
|---|
| 1810 | TCP_SKB_CB(buff)->tcp_flags = flags; | 
|---|
| 1811 | TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; | 
|---|
| 1812 | tcp_skb_fragment_eor(skb, skb2: buff); | 
|---|
| 1813 |  | 
|---|
| 1814 | skb_split(skb, skb1: buff, len); | 
|---|
| 1815 |  | 
|---|
| 1816 | skb_set_delivery_time(skb: buff, kt: skb->tstamp, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 1817 | tcp_fragment_tstamp(skb, skb2: buff); | 
|---|
| 1818 |  | 
|---|
| 1819 | old_factor = tcp_skb_pcount(skb); | 
|---|
| 1820 |  | 
|---|
| 1821 | /* Fix up tso_factor for both original and new SKB.  */ | 
|---|
| 1822 | tcp_set_skb_tso_segs(skb, mss_now); | 
|---|
| 1823 | tcp_set_skb_tso_segs(skb: buff, mss_now); | 
|---|
| 1824 |  | 
|---|
| 1825 | /* Update delivered info for the new segment */ | 
|---|
| 1826 | TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; | 
|---|
| 1827 |  | 
|---|
| 1828 | /* If this packet has been sent out already, we must | 
|---|
| 1829 | * adjust the various packet counters. | 
|---|
| 1830 | */ | 
|---|
| 1831 | if (!before(seq1: tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { | 
|---|
| 1832 | int diff = old_factor - tcp_skb_pcount(skb) - | 
|---|
| 1833 | tcp_skb_pcount(skb: buff); | 
|---|
| 1834 |  | 
|---|
| 1835 | if (diff) | 
|---|
| 1836 | tcp_adjust_pcount(sk, skb, decr: diff); | 
|---|
| 1837 | } | 
|---|
| 1838 |  | 
|---|
| 1839 | /* Link BUFF into the send queue. */ | 
|---|
| 1840 | __skb_header_release(skb: buff); | 
|---|
| 1841 | tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); | 
|---|
| 1842 | if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE) | 
|---|
| 1843 | list_add(new: &buff->tcp_tsorted_anchor, head: &skb->tcp_tsorted_anchor); | 
|---|
| 1844 |  | 
|---|
| 1845 | return 0; | 
|---|
| 1846 | } | 
|---|
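/* Worked example for tcp_fragment() (editor's note, illustrative numbers):
 * splitting an skb covering [1000, 4000) at len = 1448 leaves the original
 * skb as [1000, 2448) and the new 'buff' as [2448, 4000); PSH/FIN (and EOR)
 * travel with 'buff'.  If the data was already sent (snd_nxt >= 4000), the
 * pcount went from 3 to 1 + 2, so diff = 0 and no counter adjustment is
 * needed; a non-zero diff would be fed to tcp_adjust_pcount().
 */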
| 1847 |  | 
|---|
| 1848 | /* This is similar to __pskb_pull_tail(). The difference is that pulled | 
|---|
| 1849 | * data is not copied, but immediately discarded. | 
|---|
| 1850 | */ | 
|---|
| 1851 | static int __pskb_trim_head(struct sk_buff *skb, int len) | 
|---|
| 1852 | { | 
|---|
| 1853 | struct skb_shared_info *shinfo; | 
|---|
| 1854 | int i, k, eat; | 
|---|
| 1855 |  | 
|---|
| 1856 | DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); | 
|---|
| 1857 | eat = len; | 
|---|
| 1858 | k = 0; | 
|---|
| 1859 | shinfo = skb_shinfo(skb); | 
|---|
| 1860 | for (i = 0; i < shinfo->nr_frags; i++) { | 
|---|
| 1861 | int size = skb_frag_size(frag: &shinfo->frags[i]); | 
|---|
| 1862 |  | 
|---|
| 1863 | if (size <= eat) { | 
|---|
| 1864 | skb_frag_unref(skb, f: i); | 
|---|
| 1865 | eat -= size; | 
|---|
| 1866 | } else { | 
|---|
| 1867 | shinfo->frags[k] = shinfo->frags[i]; | 
|---|
| 1868 | if (eat) { | 
|---|
| 1869 | skb_frag_off_add(frag: &shinfo->frags[k], delta: eat); | 
|---|
| 1870 | skb_frag_size_sub(frag: &shinfo->frags[k], delta: eat); | 
|---|
| 1871 | eat = 0; | 
|---|
| 1872 | } | 
|---|
| 1873 | k++; | 
|---|
| 1874 | } | 
|---|
| 1875 | } | 
|---|
| 1876 | shinfo->nr_frags = k; | 
|---|
| 1877 |  | 
|---|
| 1878 | skb->data_len -= len; | 
|---|
| 1879 | skb->len = skb->data_len; | 
|---|
| 1880 | return len; | 
|---|
| 1881 | } | 
|---|
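/* Worked example for __pskb_trim_head() (editor's note): an skb made of
 * three 1000-byte frags trimmed by len = 1500 drops the first frag entirely
 * (eat 1000), keeps the second frag with its offset advanced by 500 and its
 * size reduced to 500, keeps the third frag untouched, and ends up with
 * nr_frags = 2 and skb->len reduced by 1500.
 */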
| 1882 |  | 
|---|
| 1883 | /* Remove acked data from a packet in the transmit queue. */ | 
|---|
| 1884 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | 
|---|
| 1885 | { | 
|---|
| 1886 | u32 delta_truesize; | 
|---|
| 1887 |  | 
|---|
| 1888 | if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) | 
|---|
| 1889 | return -ENOMEM; | 
|---|
| 1890 |  | 
|---|
| 1891 | delta_truesize = __pskb_trim_head(skb, len); | 
|---|
| 1892 |  | 
|---|
| 1893 | TCP_SKB_CB(skb)->seq += len; | 
|---|
| 1894 |  | 
|---|
| 1895 | skb->truesize	   -= delta_truesize; | 
|---|
| 1896 | sk_wmem_queued_add(sk, val: -delta_truesize); | 
|---|
| 1897 | if (!skb_zcopy_pure(skb)) | 
|---|
| 1898 | sk_mem_uncharge(sk, size: delta_truesize); | 
|---|
| 1899 |  | 
|---|
| 1900 | /* Any change of skb->len requires recalculation of tso factor. */ | 
|---|
| 1901 | if (tcp_skb_pcount(skb) > 1) | 
|---|
| 1902 | tcp_set_skb_tso_segs(skb, mss_now: tcp_skb_mss(skb)); | 
|---|
| 1903 |  | 
|---|
| 1904 | return 0; | 
|---|
| 1905 | } | 
|---|
| 1906 |  | 
|---|
| 1907 | /* Calculate MSS not accounting any TCP options.  */ | 
|---|
| 1908 | static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) | 
|---|
| 1909 | { | 
|---|
| 1910 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1911 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 1912 | int mss_now; | 
|---|
| 1913 |  | 
|---|
| 1914 | /* Calculate base mss without TCP options: | 
|---|
| 1915 | It is MMS_S - sizeof(tcphdr) of rfc1122 | 
|---|
| 1916 | */ | 
|---|
| 1917 | mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); | 
|---|
| 1918 |  | 
|---|
| 1919 | /* Clamp it (mss_clamp does not include tcp options) */ | 
|---|
| 1920 | if (mss_now > tp->rx_opt.mss_clamp) | 
|---|
| 1921 | mss_now = tp->rx_opt.mss_clamp; | 
|---|
| 1922 |  | 
|---|
| 1923 | /* Now subtract optional transport overhead */ | 
|---|
| 1924 | mss_now -= icsk->icsk_ext_hdr_len; | 
|---|
| 1925 |  | 
|---|
| 1926 | /* Then reserve room for full set of TCP options and 8 bytes of data */ | 
|---|
| 1927 | mss_now = max(mss_now, | 
|---|
| 1928 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); | 
|---|
| 1929 | return mss_now; | 
|---|
| 1930 | } | 
|---|
| 1931 |  | 
|---|
| 1932 | /* Calculate MSS. Not accounting for SACKs here.  */ | 
|---|
| 1933 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) | 
|---|
| 1934 | { | 
|---|
| 1935 | /* Subtract TCP options size, not including SACKs */ | 
|---|
| 1936 | return __tcp_mtu_to_mss(sk, pmtu) - | 
|---|
| 1937 | (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); | 
|---|
| 1938 | } | 
|---|
| 1939 | EXPORT_IPV6_MOD(tcp_mtu_to_mss); | 
|---|
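/* Worked example (editor's note): for IPv4 with pmtu = 1500 and no extra
 * headers, __tcp_mtu_to_mss() yields 1500 - 20 (IP) - 20 (TCP) = 1460,
 * clamped by rx_opt.mss_clamp and floored at sysctl_tcp_min_snd_mss.
 * tcp_mtu_to_mss() then removes the fixed option overhead: with timestamps
 * enabled tcp_header_len = 20 + 12, so the usable MSS becomes
 * 1460 - 12 = 1448 bytes.
 */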
| 1940 |  | 
|---|
| 1941 | /* Inverse of above */ | 
|---|
| 1942 | int tcp_mss_to_mtu(struct sock *sk, int mss) | 
|---|
| 1943 | { | 
|---|
| 1944 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1945 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 1946 |  | 
|---|
| 1947 | return mss + | 
|---|
| 1948 | tp->tcp_header_len + | 
|---|
| 1949 | icsk->icsk_ext_hdr_len + | 
|---|
| 1950 | icsk->icsk_af_ops->net_header_len; | 
|---|
| 1951 | } | 
|---|
| 1952 | EXPORT_SYMBOL(tcp_mss_to_mtu); | 
|---|
| 1953 |  | 
|---|
| 1954 | /* MTU probing init per socket */ | 
|---|
| 1955 | void tcp_mtup_init(struct sock *sk) | 
|---|
| 1956 | { | 
|---|
| 1957 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1958 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 1959 | struct net *net = sock_net(sk); | 
|---|
| 1960 |  | 
|---|
| 1961 | icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; | 
|---|
| 1962 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + | 
|---|
| 1963 | icsk->icsk_af_ops->net_header_len; | 
|---|
| 1964 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); | 
|---|
| 1965 | icsk->icsk_mtup.probe_size = 0; | 
|---|
| 1966 | if (icsk->icsk_mtup.enabled) | 
|---|
| 1967 | icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; | 
|---|
| 1968 | } | 
|---|
| 1969 |  | 
|---|
| 1970 | /* This function synchronizes snd mss to the current pmtu/exthdr set. | 
|---|
| 1971 |  | 
|---|
| 1972 | tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT account | 
|---|
| 1973 | for TCP options; it covers only the bare TCP header. | 
|---|
| 1974 |  | 
|---|
| 1975 | tp->rx_opt.mss_clamp is mss negotiated at connection setup. | 
|---|
| 1976 | It is minimum of user_mss and mss received with SYN. | 
|---|
| 1977 | It also does not include TCP options. | 
|---|
| 1978 |  | 
|---|
| 1979 | inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. | 
|---|
| 1980 |  | 
|---|
| 1981 | tp->mss_cache is current effective sending mss, including | 
|---|
| 1982 | all tcp options except for SACKs. It is evaluated, | 
|---|
| 1983 | taking into account current pmtu, but never exceeds | 
|---|
| 1984 | tp->rx_opt.mss_clamp. | 
|---|
| 1985 |  | 
|---|
| 1986 | NOTE1. rfc1122 clearly states that advertised MSS | 
|---|
| 1987 | DOES NOT include either tcp or ip options. | 
|---|
| 1988 |  | 
|---|
| 1989 | NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache | 
|---|
| 1990 | are READ ONLY outside this function.		--ANK (980731) | 
|---|
| 1991 | */ | 
|---|
| 1992 | unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | 
|---|
| 1993 | { | 
|---|
| 1994 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 1995 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 1996 | int mss_now; | 
|---|
| 1997 |  | 
|---|
| 1998 | if (icsk->icsk_mtup.search_high > pmtu) | 
|---|
| 1999 | icsk->icsk_mtup.search_high = pmtu; | 
|---|
| 2000 |  | 
|---|
| 2001 | mss_now = tcp_mtu_to_mss(sk, pmtu); | 
|---|
| 2002 | mss_now = tcp_bound_to_half_wnd(tp, pktsize: mss_now); | 
|---|
| 2003 |  | 
|---|
| 2004 | /* And store cached results */ | 
|---|
| 2005 | icsk->icsk_pmtu_cookie = pmtu; | 
|---|
| 2006 | if (icsk->icsk_mtup.enabled) | 
|---|
| 2007 | mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); | 
|---|
| 2008 | tp->mss_cache = mss_now; | 
|---|
| 2009 |  | 
|---|
| 2010 | return mss_now; | 
|---|
| 2011 | } | 
|---|
| 2012 | EXPORT_IPV6_MOD(tcp_sync_mss); | 
|---|
| 2013 |  | 
|---|
| 2014 | /* Compute the current effective MSS, taking SACKs and IP options, | 
|---|
| 2015 | * and even PMTU discovery events into account. | 
|---|
| 2016 | */ | 
|---|
| 2017 | unsigned int tcp_current_mss(struct sock *sk) | 
|---|
| 2018 | { | 
|---|
| 2019 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2020 | const struct dst_entry *dst = __sk_dst_get(sk); | 
|---|
| 2021 | u32 mss_now; | 
|---|
| 2022 | unsigned int header_len; | 
|---|
| 2023 | struct tcp_out_options opts; | 
|---|
| 2024 | struct tcp_key key; | 
|---|
| 2025 |  | 
|---|
| 2026 | mss_now = tp->mss_cache; | 
|---|
| 2027 |  | 
|---|
| 2028 | if (dst) { | 
|---|
| 2029 | u32 mtu = dst_mtu(dst); | 
|---|
| 2030 | if (mtu != inet_csk(sk)->icsk_pmtu_cookie) | 
|---|
| 2031 | mss_now = tcp_sync_mss(sk, pmtu: mtu); | 
|---|
| 2032 | } | 
|---|
| 2033 | tcp_get_current_key(sk, out: &key); | 
|---|
| 2034 | header_len = tcp_established_options(sk, NULL, opts: &opts, key: &key) + | 
|---|
| 2035 | sizeof(struct tcphdr); | 
|---|
| 2036 | /* The mss_cache is sized based on tp->tcp_header_len, which assumes | 
|---|
| 2037 | * some common options. If this is an odd packet (because we have SACK | 
|---|
| 2038 | * blocks etc) then our calculated header_len will be different, and | 
|---|
| 2039 | * we have to adjust mss_now correspondingly */ | 
|---|
| 2040 | if (header_len != tp->tcp_header_len) { | 
|---|
| 2041 | int delta = (int) header_len - tp->tcp_header_len; | 
|---|
| 2042 | mss_now -= delta; | 
|---|
| 2043 | } | 
|---|
| 2044 |  | 
|---|
| 2045 | return mss_now; | 
|---|
| 2046 | } | 
|---|
| 2047 |  | 
|---|
| 2048 | /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. | 
|---|
| 2049 | * As additional protections, we do not touch cwnd in retransmission phases, | 
|---|
| 2050 | * and if application hit its sndbuf limit recently. | 
|---|
| 2051 | */ | 
|---|
| 2052 | static void tcp_cwnd_application_limited(struct sock *sk) | 
|---|
| 2053 | { | 
|---|
| 2054 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2055 |  | 
|---|
| 2056 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && | 
|---|
| 2057 | sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 
|---|
| 2058 | /* Limited by application or receiver window. */ | 
|---|
| 2059 | u32 init_win = tcp_init_cwnd(tp, dst: __sk_dst_get(sk)); | 
|---|
| 2060 | u32 win_used = max(tp->snd_cwnd_used, init_win); | 
|---|
| 2061 | if (win_used < tcp_snd_cwnd(tp)) { | 
|---|
| 2062 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | 
|---|
| 2063 | tcp_snd_cwnd_set(tp, val: (tcp_snd_cwnd(tp) + win_used) >> 1); | 
|---|
| 2064 | } | 
|---|
| 2065 | tp->snd_cwnd_used = 0; | 
|---|
| 2066 | } | 
|---|
| 2067 | tp->snd_cwnd_stamp = tcp_jiffies32; | 
|---|
| 2068 | } | 
|---|
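/* Worked example for tcp_cwnd_application_limited() (editor's note,
 * illustrative numbers): with tcp_snd_cwnd() = 100, snd_cwnd_used = 38 and
 * an initial window of 10, win_used = max(38, 10) = 38 < 100, so ssthresh
 * is refreshed and cwnd decays halfway toward actual usage:
 * (100 + 38) >> 1 = 69 segments.
 */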
| 2069 |  | 
|---|
| 2070 | static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) | 
|---|
| 2071 | { | 
|---|
| 2072 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; | 
|---|
| 2073 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2074 |  | 
|---|
| 2075 | /* Track the strongest available signal of the degree to which the cwnd | 
|---|
| 2076 | * is fully utilized. If cwnd-limited then remember that fact for the | 
|---|
| 2077 | * current window. If not cwnd-limited then track the maximum number of | 
|---|
| 2078 | * outstanding packets in the current window. (If cwnd-limited then we | 
|---|
| 2079 | * chose to not update tp->max_packets_out to avoid an extra else | 
|---|
| 2080 | * clause with no functional impact.) | 
|---|
| 2081 | */ | 
|---|
| 2082 | if (!before(seq1: tp->snd_una, seq2: tp->cwnd_usage_seq) || | 
|---|
| 2083 | is_cwnd_limited || | 
|---|
| 2084 | (!tp->is_cwnd_limited && | 
|---|
| 2085 | tp->packets_out > tp->max_packets_out)) { | 
|---|
| 2086 | tp->is_cwnd_limited = is_cwnd_limited; | 
|---|
| 2087 | tp->max_packets_out = tp->packets_out; | 
|---|
| 2088 | tp->cwnd_usage_seq = tp->snd_nxt; | 
|---|
| 2089 | } | 
|---|
| 2090 |  | 
|---|
| 2091 | if (tcp_is_cwnd_limited(sk)) { | 
|---|
| 2092 | /* Network is fed fully. */ | 
|---|
| 2093 | tp->snd_cwnd_used = 0; | 
|---|
| 2094 | tp->snd_cwnd_stamp = tcp_jiffies32; | 
|---|
| 2095 | } else { | 
|---|
| 2096 | /* Network starves. */ | 
|---|
| 2097 | if (tp->packets_out > tp->snd_cwnd_used) | 
|---|
| 2098 | tp->snd_cwnd_used = tp->packets_out; | 
|---|
| 2099 |  | 
|---|
| 2100 | if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && | 
|---|
| 2101 | (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && | 
|---|
| 2102 | !ca_ops->cong_control) | 
|---|
| 2103 | tcp_cwnd_application_limited(sk); | 
|---|
| 2104 |  | 
|---|
| 2105 | /* The following conditions together indicate the starvation | 
|---|
| 2106 | * is caused by insufficient sender buffer: | 
|---|
| 2107 | * 1) just sent some data (see tcp_write_xmit) | 
|---|
| 2108 | * 2) not cwnd limited (this else condition) | 
|---|
| 2109 | * 3) no more data to send (tcp_write_queue_empty()) | 
|---|
| 2110 | * 4) application is hitting buffer limit (SOCK_NOSPACE) | 
|---|
| 2111 | */ | 
|---|
| 2112 | if (tcp_write_queue_empty(sk) && sk->sk_socket && | 
|---|
| 2113 | test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && | 
|---|
| 2114 | (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) | 
|---|
| 2115 | tcp_chrono_start(sk, type: TCP_CHRONO_SNDBUF_LIMITED); | 
|---|
| 2116 | } | 
|---|
| 2117 | } | 
|---|
| 2118 |  | 
|---|
| 2119 | /* Minshall's variant of the Nagle send check. */ | 
|---|
| 2120 | static bool tcp_minshall_check(const struct tcp_sock *tp) | 
|---|
| 2121 | { | 
|---|
| 2122 | return after(tp->snd_sml, tp->snd_una) && | 
|---|
| 2123 | !after(tp->snd_sml, tp->snd_nxt); | 
|---|
| 2124 | } | 
|---|
| 2125 |  | 
|---|
| 2126 | /* Update snd_sml if this skb is under mss | 
|---|
| 2127 | * Note that a TSO packet might end with a sub-mss segment | 
|---|
| 2128 | * The test is really : | 
|---|
| 2129 | * if ((skb->len % mss) != 0) | 
|---|
| 2130 | *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 2131 | * But we can avoid doing the divide again given we already have | 
|---|
| 2132 | *  skb_pcount = skb->len / mss_now | 
|---|
| 2133 | */ | 
|---|
| 2134 | static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, | 
|---|
| 2135 | const struct sk_buff *skb) | 
|---|
| 2136 | { | 
|---|
| 2137 | if (skb->len < tcp_skb_pcount(skb) * mss_now) | 
|---|
| 2138 | tp->snd_sml = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 2139 | } | 
|---|
| 2140 |  | 
|---|
| 2141 | /* Return false if the packet can be sent now without violating Nagle's rules: | 
|---|
| 2142 | * 1. It is full sized. (provided by caller in %partial bool) | 
|---|
| 2143 | * 2. Or it contains FIN. (already checked by caller) | 
|---|
| 2144 | * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. | 
|---|
| 2145 | * 4. Or TCP_CORK is not set, and all sent packets are ACKed. | 
|---|
| 2146 | *    With Minshall's modification: all sent small packets are ACKed. | 
|---|
| 2147 | */ | 
|---|
| 2148 | static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, | 
|---|
| 2149 | int nonagle) | 
|---|
| 2150 | { | 
|---|
| 2151 | return partial && | 
|---|
| 2152 | ((nonagle & TCP_NAGLE_CORK) || | 
|---|
| 2153 | (!nonagle && tp->packets_out && tcp_minshall_check(tp))); | 
|---|
| 2154 | } | 
|---|
| 2155 |  | 
|---|
| 2156 | /* Return how many segs we'd like on a TSO packet, | 
|---|
| 2157 | * depending on current pacing rate, and how close the peer is. | 
|---|
| 2158 | * | 
|---|
| 2159 | * Rationale is: | 
|---|
| 2160 | * - For close peers, we rather send bigger packets to reduce | 
|---|
| 2161 | *   cpu costs, because occasional losses will be repaired fast. | 
|---|
| 2162 | * - For long distance/rtt flows, we would like to get ACK clocking | 
|---|
| 2163 | *   with 1 ACK per ms. | 
|---|
| 2164 | * | 
|---|
| 2165 | * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting | 
|---|
| 2166 | * in bigger TSO bursts. We cut the RTT-based allowance in half | 
|---|
| 2167 | * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance | 
|---|
| 2168 | * is below 1500 bytes after 6 * ~500 usec = 3ms. | 
|---|
| 2169 | */ | 
|---|
| 2170 | static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, | 
|---|
| 2171 | int min_tso_segs) | 
|---|
| 2172 | { | 
|---|
| 2173 | unsigned long bytes; | 
|---|
| 2174 | u32 r; | 
|---|
| 2175 |  | 
|---|
| 2176 | bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); | 
|---|
| 2177 |  | 
|---|
| 2178 | r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); | 
|---|
| 2179 | if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) | 
|---|
| 2180 | bytes += sk->sk_gso_max_size >> r; | 
|---|
| 2181 |  | 
|---|
| 2182 | bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); | 
|---|
| 2183 |  | 
|---|
| 2184 | return max_t(u32, bytes / mss_now, min_tso_segs); | 
|---|
| 2185 | } | 
|---|
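/* Worked example for tcp_tso_autosize() (editor's note; assumes default
 * sysctls tcp_tso_rtt_log = 9 and tcp_min_tso_segs = 2, and the default
 * sk_pacing_shift = 10): with sk_pacing_rate = 2,500,000 B/s the rate-based
 * budget is 2,500,000 >> 10 = 2441 bytes, roughly 1 ms worth of data.
 * A close peer with min_rtt = 100 us gives r = 100 >> 9 = 0, so
 * sk_gso_max_size >> 0 is added and the burst is capped at gso_max_size.
 * A distant peer with min_rtt = 10 ms gives r = 19, which adds nothing for
 * a 64 KB gso_max_size; 2441 / 1448 = 1 is then clamped up to
 * min_tso_segs = 2.
 */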
| 2186 |  | 
|---|
| 2187 | /* Return the number of segments we want in the skb we are transmitting. | 
|---|
| 2188 | * See if congestion control module wants to decide; otherwise, autosize. | 
|---|
| 2189 | */ | 
|---|
| 2190 | static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) | 
|---|
| 2191 | { | 
|---|
| 2192 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; | 
|---|
| 2193 | u32 min_tso, tso_segs; | 
|---|
| 2194 |  | 
|---|
| 2195 | min_tso = ca_ops->min_tso_segs ? | 
|---|
| 2196 | ca_ops->min_tso_segs(sk) : | 
|---|
| 2197 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); | 
|---|
| 2198 |  | 
|---|
| 2199 | tso_segs = tcp_tso_autosize(sk, mss_now, min_tso_segs: min_tso); | 
|---|
| 2200 | return min_t(u32, tso_segs, sk->sk_gso_max_segs); | 
|---|
| 2201 | } | 
|---|
| 2202 |  | 
|---|
| 2203 | /* Returns the portion of skb which can be sent right away */ | 
|---|
| 2204 | static unsigned int tcp_mss_split_point(const struct sock *sk, | 
|---|
| 2205 | const struct sk_buff *skb, | 
|---|
| 2206 | unsigned int mss_now, | 
|---|
| 2207 | unsigned int max_segs, | 
|---|
| 2208 | int nonagle) | 
|---|
| 2209 | { | 
|---|
| 2210 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2211 | u32 partial, needed, window, max_len; | 
|---|
| 2212 |  | 
|---|
| 2213 | window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 
|---|
| 2214 | max_len = mss_now * max_segs; | 
|---|
| 2215 |  | 
|---|
| 2216 | if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) | 
|---|
| 2217 | return max_len; | 
|---|
| 2218 |  | 
|---|
| 2219 | needed = min(skb->len, window); | 
|---|
| 2220 |  | 
|---|
| 2221 | if (max_len <= needed) | 
|---|
| 2222 | return max_len; | 
|---|
| 2223 |  | 
|---|
| 2224 | partial = needed % mss_now; | 
|---|
| 2225 | /* If last segment is not a full MSS, check if Nagle rules allow us | 
|---|
| 2226 | * to include this last segment in this skb. | 
|---|
| 2227 | * Otherwise, we'll split the skb at last MSS boundary | 
|---|
| 2228 | */ | 
|---|
| 2229 | if (tcp_nagle_check(partial: partial != 0, tp, nonagle)) | 
|---|
| 2230 | return needed - partial; | 
|---|
| 2231 |  | 
|---|
| 2232 | return needed; | 
|---|
| 2233 | } | 
|---|
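/* Worked example for tcp_mss_split_point() (editor's note): with
 * mss_now = 1448, max_segs = 10 and only window = 5000 bytes left before
 * tcp_wnd_end(), max_len = 14480 exceeds the window, so for an 8000-byte
 * tail skb we get needed = 5000 and partial = 5000 % 1448 = 656.  If the
 * Nagle/Minshall test asks us to hold back the sub-MSS tail, 4344 bytes
 * (three full segments) are sent now; otherwise all 5000 are.
 */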
| 2234 |  | 
|---|
| 2235 | /* Can at least one segment of SKB be sent right now, according to the | 
|---|
| 2236 | * congestion window rules?  If so, return how many segments are allowed. | 
|---|
| 2237 | */ | 
|---|
| 2238 | static u32 tcp_cwnd_test(const struct tcp_sock *tp) | 
|---|
| 2239 | { | 
|---|
| 2240 | u32 in_flight, cwnd, halfcwnd; | 
|---|
| 2241 |  | 
|---|
| 2242 | in_flight = tcp_packets_in_flight(tp); | 
|---|
| 2243 | cwnd = tcp_snd_cwnd(tp); | 
|---|
| 2244 | if (in_flight >= cwnd) | 
|---|
| 2245 | return 0; | 
|---|
| 2246 |  | 
|---|
| 2247 | /* For better scheduling, ensure we have at least | 
|---|
| 2248 | * 2 GSO packets in flight. | 
|---|
| 2249 | */ | 
|---|
| 2250 | halfcwnd = max(cwnd >> 1, 1U); | 
|---|
| 2251 | return min(halfcwnd, cwnd - in_flight); | 
|---|
| 2252 | } | 
|---|
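/* Worked example for tcp_cwnd_test() (editor's note): with cwnd = 20 and
 * in_flight = 5 there is room for 15 more segments, but the result is
 * capped at halfcwnd = 10, so the remaining window is spread over at least
 * two GSO packets rather than one oversized burst.
 */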
| 2253 |  | 
|---|
| 2254 | /* Initialize TSO state of a skb. | 
|---|
| 2255 | * This must be invoked the first time we consider transmitting | 
|---|
| 2256 | * SKB onto the wire. | 
|---|
| 2257 | */ | 
|---|
| 2258 | static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) | 
|---|
| 2259 | { | 
|---|
| 2260 | int tso_segs = tcp_skb_pcount(skb); | 
|---|
| 2261 |  | 
|---|
| 2262 | if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) | 
|---|
| 2263 | return tcp_set_skb_tso_segs(skb, mss_now); | 
|---|
| 2264 |  | 
|---|
| 2265 | return tso_segs; | 
|---|
| 2266 | } | 
|---|
| 2267 |  | 
|---|
| 2268 |  | 
|---|
| 2269 | /* Return true if the Nagle test allows this packet to be | 
|---|
| 2270 | * sent now. | 
|---|
| 2271 | */ | 
|---|
| 2272 | static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, | 
|---|
| 2273 | unsigned int cur_mss, int nonagle) | 
|---|
| 2274 | { | 
|---|
| 2275 | /* Nagle rule does not apply to frames, which sit in the middle of the | 
|---|
| 2276 | * write_queue (they have no chance to get new data). | 
|---|
| 2277 | * | 
|---|
| 2278 | * This is implemented in the callers, where they modify the 'nonagle' | 
|---|
| 2279 | * argument based upon the location of SKB in the send queue. | 
|---|
| 2280 | */ | 
|---|
| 2281 | if (nonagle & TCP_NAGLE_PUSH) | 
|---|
| 2282 | return true; | 
|---|
| 2283 |  | 
|---|
| 2284 | /* Don't use the nagle rule for urgent data (or for the final FIN). */ | 
|---|
| 2285 | if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) | 
|---|
| 2286 | return true; | 
|---|
| 2287 |  | 
|---|
| 2288 | if (!tcp_nagle_check(partial: skb->len < cur_mss, tp, nonagle)) | 
|---|
| 2289 | return true; | 
|---|
| 2290 |  | 
|---|
| 2291 | return false; | 
|---|
| 2292 | } | 
|---|
| 2293 |  | 
|---|
| 2294 | /* Does at least the first segment of SKB fit into the send window? */ | 
|---|
| 2295 | static bool tcp_snd_wnd_test(const struct tcp_sock *tp, | 
|---|
| 2296 | const struct sk_buff *skb, | 
|---|
| 2297 | unsigned int cur_mss) | 
|---|
| 2298 | { | 
|---|
| 2299 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 2300 |  | 
|---|
| 2301 | if (skb->len > cur_mss) | 
|---|
| 2302 | end_seq = TCP_SKB_CB(skb)->seq + cur_mss; | 
|---|
| 2303 |  | 
|---|
| 2304 | return !after(end_seq, tcp_wnd_end(tp)); | 
|---|
| 2305 | } | 
|---|
| 2306 |  | 
|---|
| 2307 | /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet | 
|---|
| 2308 | * which is put after SKB on the list.  It is very much like | 
|---|
| 2309 | * tcp_fragment() except that it may make several kinds of assumptions | 
|---|
| 2310 | * in order to speed up the splitting operation.  In particular, we | 
|---|
| 2311 | * know that all the data is in scatter-gather pages, and that the | 
|---|
| 2312 | * packet has never been sent out before (and thus is not cloned). | 
|---|
| 2313 | */ | 
|---|
| 2314 | static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | 
|---|
| 2315 | unsigned int mss_now, gfp_t gfp) | 
|---|
| 2316 | { | 
|---|
| 2317 | int nlen = skb->len - len; | 
|---|
| 2318 | struct sk_buff *buff; | 
|---|
| 2319 | u16 flags; | 
|---|
| 2320 |  | 
|---|
| 2321 | /* All of a TSO frame must be composed of paged data.  */ | 
|---|
| 2322 | DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); | 
|---|
| 2323 |  | 
|---|
| 2324 | buff = tcp_stream_alloc_skb(sk, gfp, force_schedule: true); | 
|---|
| 2325 | if (unlikely(!buff)) | 
|---|
| 2326 | return -ENOMEM; | 
|---|
| 2327 | skb_copy_decrypted(to: buff, from: skb); | 
|---|
| 2328 | mptcp_skb_ext_copy(to: buff, from: skb); | 
|---|
| 2329 |  | 
|---|
| 2330 | sk_wmem_queued_add(sk, val: buff->truesize); | 
|---|
| 2331 | sk_mem_charge(sk, size: buff->truesize); | 
|---|
| 2332 | buff->truesize += nlen; | 
|---|
| 2333 | skb->truesize -= nlen; | 
|---|
| 2334 |  | 
|---|
| 2335 | /* Correct the sequence numbers. */ | 
|---|
| 2336 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; | 
|---|
| 2337 | TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 2338 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; | 
|---|
| 2339 |  | 
|---|
| 2340 | /* PSH and FIN should only be set in the second packet. */ | 
|---|
| 2341 | flags = TCP_SKB_CB(skb)->tcp_flags; | 
|---|
| 2342 | TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); | 
|---|
| 2343 | TCP_SKB_CB(buff)->tcp_flags = flags; | 
|---|
| 2344 |  | 
|---|
| 2345 | tcp_skb_fragment_eor(skb, skb2: buff); | 
|---|
| 2346 |  | 
|---|
| 2347 | skb_split(skb, skb1: buff, len); | 
|---|
| 2348 | tcp_fragment_tstamp(skb, skb2: buff); | 
|---|
| 2349 |  | 
|---|
| 2350 | /* Fix up tso_factor for both original and new SKB.  */ | 
|---|
| 2351 | tcp_set_skb_tso_segs(skb, mss_now); | 
|---|
| 2352 | tcp_set_skb_tso_segs(skb: buff, mss_now); | 
|---|
| 2353 |  | 
|---|
| 2354 | /* Link BUFF into the send queue. */ | 
|---|
| 2355 | __skb_header_release(skb: buff); | 
|---|
| 2356 | tcp_insert_write_queue_after(skb, buff, sk, tcp_queue: TCP_FRAG_IN_WRITE_QUEUE); | 
|---|
| 2357 |  | 
|---|
| 2358 | return 0; | 
|---|
| 2359 | } | 
|---|
| 2360 |  | 
|---|
| 2361 | /* Try to defer sending, if possible, in order to minimize the amount | 
|---|
| 2362 | * of TSO splitting we do.  View it as a kind of TSO Nagle test. | 
|---|
| 2363 | * | 
|---|
| 2364 | * This algorithm is from John Heffner. | 
|---|
| 2365 | */ | 
|---|
| 2366 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | 
|---|
| 2367 | bool *is_cwnd_limited, | 
|---|
| 2368 | bool *is_rwnd_limited, | 
|---|
| 2369 | u32 max_segs) | 
|---|
| 2370 | { | 
|---|
| 2371 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 2372 | u32 send_win, cong_win, limit, in_flight; | 
|---|
| 2373 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2374 | struct sk_buff *head; | 
|---|
| 2375 | int win_divisor; | 
|---|
| 2376 | s64 delta; | 
|---|
| 2377 |  | 
|---|
| 2378 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) | 
|---|
| 2379 | goto send_now; | 
|---|
| 2380 |  | 
|---|
| 2381 | /* Avoid bursty behavior by allowing defer | 
|---|
| 2382 | * only if the last write was recent (1 ms). | 
|---|
| 2383 | * Note that tp->tcp_wstamp_ns can be in the future if we have | 
|---|
| 2384 | * packets waiting in a qdisc or device for EDT delivery. | 
|---|
| 2385 | */ | 
|---|
| 2386 | delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; | 
|---|
| 2387 | if (delta > 0) | 
|---|
| 2388 | goto send_now; | 
|---|
| 2389 |  | 
|---|
| 2390 | in_flight = tcp_packets_in_flight(tp); | 
|---|
| 2391 |  | 
|---|
| 2392 | BUG_ON(tcp_skb_pcount(skb) <= 1); | 
|---|
| 2393 | BUG_ON(tcp_snd_cwnd(tp) <= in_flight); | 
|---|
| 2394 |  | 
|---|
| 2395 | send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 
|---|
| 2396 |  | 
|---|
| 2397 | /* From in_flight test above, we know that cwnd > in_flight.  */ | 
|---|
| 2398 | cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; | 
|---|
| 2399 |  | 
|---|
| 2400 | limit = min(send_win, cong_win); | 
|---|
| 2401 |  | 
|---|
| 2402 | /* If a full-sized TSO skb can be sent, do it. */ | 
|---|
| 2403 | if (limit >= max_segs * tp->mss_cache) | 
|---|
| 2404 | goto send_now; | 
|---|
| 2405 |  | 
|---|
| 2406 | /* Middle in queue won't get any more data, full sendable already? */ | 
|---|
| 2407 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) | 
|---|
| 2408 | goto send_now; | 
|---|
| 2409 |  | 
|---|
| 2410 | win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); | 
|---|
| 2411 | if (win_divisor) { | 
|---|
| 2412 | u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); | 
|---|
| 2413 |  | 
|---|
| 2414 | /* If at least some fraction of a window is available, | 
|---|
| 2415 | * just use it. | 
|---|
| 2416 | */ | 
|---|
| 2417 | chunk /= win_divisor; | 
|---|
| 2418 | if (limit >= chunk) | 
|---|
| 2419 | goto send_now; | 
|---|
| 2420 | } else { | 
|---|
| 2421 | /* Different approach, try not to defer past a single | 
|---|
| 2422 | * ACK.  Receiver should ACK every other full sized | 
|---|
| 2423 | * frame, so if we have space for more than 3 frames | 
|---|
| 2424 | * then send now. | 
|---|
| 2425 | */ | 
|---|
| 2426 | if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) | 
|---|
| 2427 | goto send_now; | 
|---|
| 2428 | } | 
|---|
| 2429 |  | 
|---|
| 2430 | /* TODO : use tsorted_sent_queue ? */ | 
|---|
| 2431 | head = tcp_rtx_queue_head(sk); | 
|---|
| 2432 | if (!head) | 
|---|
| 2433 | goto send_now; | 
|---|
| 2434 | delta = tp->tcp_clock_cache - head->tstamp; | 
|---|
| 2435 | /* If next ACK is likely to come too late (half srtt), do not defer */ | 
|---|
| 2436 | if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) | 
|---|
| 2437 | goto send_now; | 
|---|
| 2438 |  | 
|---|
| 2439 | /* Ok, it looks like it is advisable to defer. | 
|---|
| 2440 | * Three cases are tracked : | 
|---|
| 2441 | * 1) We are cwnd-limited | 
|---|
| 2442 | * 2) We are rwnd-limited | 
|---|
| 2443 | * 3) We are application limited. | 
|---|
| 2444 | */ | 
|---|
| 2445 | if (cong_win < send_win) { | 
|---|
| 2446 | if (cong_win <= skb->len) { | 
|---|
| 2447 | *is_cwnd_limited = true; | 
|---|
| 2448 | return true; | 
|---|
| 2449 | } | 
|---|
| 2450 | } else { | 
|---|
| 2451 | if (send_win <= skb->len) { | 
|---|
| 2452 | *is_rwnd_limited = true; | 
|---|
| 2453 | return true; | 
|---|
| 2454 | } | 
|---|
| 2455 | } | 
|---|
| 2456 |  | 
|---|
| 2457 | /* If this packet won't get more data, do not wait. */ | 
|---|
| 2458 | if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || | 
|---|
| 2459 | TCP_SKB_CB(skb)->eor) | 
|---|
| 2460 | goto send_now; | 
|---|
| 2461 |  | 
|---|
| 2462 | return true; | 
|---|
| 2463 |  | 
|---|
| 2464 | send_now: | 
|---|
| 2465 | return false; | 
|---|
| 2466 | } | 
|---|
| 2467 |  | 
|---|
| 2468 | static inline void tcp_mtu_check_reprobe(struct sock *sk) | 
|---|
| 2469 | { | 
|---|
| 2470 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 2471 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2472 | struct net *net = sock_net(sk); | 
|---|
| 2473 | u32 interval; | 
|---|
| 2474 | s32 delta; | 
|---|
| 2475 |  | 
|---|
| 2476 | interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); | 
|---|
| 2477 | delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; | 
|---|
| 2478 | if (unlikely(delta >= interval * HZ)) { | 
|---|
| 2479 | int mss = tcp_current_mss(sk); | 
|---|
| 2480 |  | 
|---|
| 2481 | /* Update current search range */ | 
|---|
| 2482 | icsk->icsk_mtup.probe_size = 0; | 
|---|
| 2483 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + | 
|---|
| 2484 | sizeof(struct tcphdr) + | 
|---|
| 2485 | icsk->icsk_af_ops->net_header_len; | 
|---|
| 2486 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); | 
|---|
| 2487 |  | 
|---|
| 2488 | /* Update probe time stamp */ | 
|---|
| 2489 | icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; | 
|---|
| 2490 | } | 
|---|
| 2491 | } | 
|---|
| 2492 |  | 
|---|
| 2493 | static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) | 
|---|
| 2494 | { | 
|---|
| 2495 | struct sk_buff *skb, *next; | 
|---|
| 2496 |  | 
|---|
| 2497 | skb = tcp_send_head(sk); | 
|---|
| 2498 | tcp_for_write_queue_from_safe(skb, next, sk) { | 
|---|
| 2499 | if (len <= skb->len) | 
|---|
| 2500 | break; | 
|---|
| 2501 |  | 
|---|
| 2502 | if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(to: skb, from: next)) | 
|---|
| 2503 | return false; | 
|---|
| 2504 |  | 
|---|
| 2505 | len -= skb->len; | 
|---|
| 2506 | } | 
|---|
| 2507 |  | 
|---|
| 2508 | return true; | 
|---|
| 2509 | } | 
|---|
| 2510 |  | 
|---|
| 2511 | static int tcp_clone_payload(struct sock *sk, struct sk_buff *to, | 
|---|
| 2512 | int probe_size) | 
|---|
| 2513 | { | 
|---|
| 2514 | skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; | 
|---|
| 2515 | int i, todo, len = 0, nr_frags = 0; | 
|---|
| 2516 | const struct sk_buff *skb; | 
|---|
| 2517 |  | 
|---|
| 2518 | if (!sk_wmem_schedule(sk, size: to->truesize + probe_size)) | 
|---|
| 2519 | return -ENOMEM; | 
|---|
| 2520 |  | 
|---|
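|  | /* Copy page fragment references (no data copy) from the write queue until probe_size bytes are gathered, merging frags that are physically contiguous. */ | 
|---|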
| 2521 | skb_queue_walk(&sk->sk_write_queue, skb) { | 
|---|
| 2522 | const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; | 
|---|
| 2523 |  | 
|---|
| 2524 | if (skb_headlen(skb)) | 
|---|
| 2525 | return -EINVAL; | 
|---|
| 2526 |  | 
|---|
| 2527 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { | 
|---|
| 2528 | if (len >= probe_size) | 
|---|
| 2529 | goto commit; | 
|---|
| 2530 | todo = min_t(int, skb_frag_size(fragfrom), | 
|---|
| 2531 | probe_size - len); | 
|---|
| 2532 | len += todo; | 
|---|
| 2533 | if (lastfrag && | 
|---|
| 2534 | skb_frag_page(frag: fragfrom) == skb_frag_page(frag: lastfrag) && | 
|---|
| 2535 | skb_frag_off(frag: fragfrom) == skb_frag_off(frag: lastfrag) + | 
|---|
| 2536 | skb_frag_size(frag: lastfrag)) { | 
|---|
| 2537 | skb_frag_size_add(frag: lastfrag, delta: todo); | 
|---|
| 2538 | continue; | 
|---|
| 2539 | } | 
|---|
| 2540 | if (unlikely(nr_frags == MAX_SKB_FRAGS)) | 
|---|
| 2541 | return -E2BIG; | 
|---|
| 2542 | skb_frag_page_copy(fragto, fragfrom); | 
|---|
| 2543 | skb_frag_off_copy(fragto, fragfrom); | 
|---|
| 2544 | skb_frag_size_set(frag: fragto, size: todo); | 
|---|
| 2545 | nr_frags++; | 
|---|
| 2546 | lastfrag = fragto++; | 
|---|
| 2547 | } | 
|---|
| 2548 | } | 
|---|
| 2549 | commit: | 
|---|
| 2550 | WARN_ON_ONCE(len != probe_size); | 
|---|
| 2551 | for (i = 0; i < nr_frags; i++) | 
|---|
| 2552 | skb_frag_ref(skb: to, f: i); | 
|---|
| 2553 |  | 
|---|
| 2554 | skb_shinfo(to)->nr_frags = nr_frags; | 
|---|
| 2555 | to->truesize += probe_size; | 
|---|
| 2556 | to->len += probe_size; | 
|---|
| 2557 | to->data_len += probe_size; | 
|---|
| 2558 | __skb_header_release(skb: to); | 
|---|
| 2559 | return 0; | 
|---|
| 2560 | } | 
|---|
| 2561 |  | 
|---|
| 2562 | /* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if | 
|---|
| 2563 | * all its payload was moved to another one (dst). | 
|---|
| 2564 | * Make sure to transfer tcp_flags, eor, and tstamp. | 
|---|
| 2565 | */ | 
|---|
| 2566 | static void tcp_eat_one_skb(struct sock *sk, | 
|---|
| 2567 | struct sk_buff *dst, | 
|---|
| 2568 | struct sk_buff *src) | 
|---|
| 2569 | { | 
|---|
| 2570 | TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags; | 
|---|
| 2571 | TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor; | 
|---|
| 2572 | tcp_skb_collapse_tstamp(skb: dst, next_skb: src); | 
|---|
| 2573 | tcp_unlink_write_queue(skb: src, sk); | 
|---|
| 2574 | tcp_wmem_free_skb(sk, skb: src); | 
|---|
| 2575 | } | 
|---|
| 2576 |  | 
|---|
| 2577 | /* Create a new MTU probe if we are ready. | 
|---|
| 2578 | * MTU probe is regularly attempting to increase the path MTU by | 
|---|
| 2579 | * deliberately sending larger packets.  This discovers routing | 
|---|
| 2580 | * changes resulting in larger path MTUs. | 
|---|
| 2581 | * | 
|---|
| 2582 | * Returns 0 if we should wait to probe (no cwnd available), | 
|---|
| 2583 | *         1 if a probe was sent, | 
|---|
| 2584 | *         -1 otherwise | 
|---|
| 2585 | */ | 
|---|
| 2586 | static int tcp_mtu_probe(struct sock *sk) | 
|---|
| 2587 | { | 
|---|
| 2588 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 2589 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2590 | struct sk_buff *skb, *nskb, *next; | 
|---|
| 2591 | struct net *net = sock_net(sk); | 
|---|
| 2592 | int probe_size; | 
|---|
| 2593 | int size_needed; | 
|---|
| 2594 | int copy, len; | 
|---|
| 2595 | int mss_now; | 
|---|
| 2596 | int interval; | 
|---|
| 2597 |  | 
|---|
| 2598 | /* Not currently probing/verifying, | 
|---|
| 2599 | * not in recovery, | 
|---|
| 2600 | * have enough cwnd, and | 
|---|
| 2601 | * not SACKing (the variable headers throw things off) | 
|---|
| 2602 | */ | 
|---|
| 2603 | if (likely(!icsk->icsk_mtup.enabled || | 
|---|
| 2604 | icsk->icsk_mtup.probe_size || | 
|---|
| 2605 | inet_csk(sk)->icsk_ca_state != TCP_CA_Open || | 
|---|
| 2606 | tcp_snd_cwnd(tp) < 11 || | 
|---|
| 2607 | tp->rx_opt.num_sacks || tp->rx_opt.dsack)) | 
|---|
| 2608 | return -1; | 
|---|
| 2609 |  | 
|---|
| 2610 | /* Use binary search for probe_size between tcp_mss_base | 
|---|
| 2611 | * and current mss_clamp. If (search_high - search_low) is | 
|---|
| 2612 | * smaller than a threshold, back off from probing. | 
|---|
| 2613 | */ | 
|---|
| 2614 | mss_now = tcp_current_mss(sk); | 
|---|
| 2615 | probe_size = tcp_mtu_to_mss(sk, pmtu: (icsk->icsk_mtup.search_high + | 
|---|
| 2616 | icsk->icsk_mtup.search_low) >> 1); | 
|---|
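|  | /* Need the probe plus (reordering + 1) trailing full-sized segments queued, so a lost probe can still be detected and repaired by fast retransmit. */ | 
|---|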
| 2617 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; | 
|---|
| 2618 | interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; | 
|---|
| 2619 | /* When misfortune happens, we are reprobing actively, | 
|---|
| 2620 | * and the reprobe timer has expired. We stick with the current | 
|---|
| 2621 | * probing process by not resetting the search range to its original value. | 
|---|
| 2622 | */ | 
|---|
| 2623 | if (probe_size > tcp_mtu_to_mss(sk, pmtu: icsk->icsk_mtup.search_high) || | 
|---|
| 2624 | interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { | 
|---|
| 2625 | /* Check whether enough time has elapsed for | 
|---|
| 2626 | * another round of probing. | 
|---|
| 2627 | */ | 
|---|
| 2628 | tcp_mtu_check_reprobe(sk); | 
|---|
| 2629 | return -1; | 
|---|
| 2630 | } | 
|---|
| 2631 |  | 
|---|
| 2632 | /* Have enough data in the send queue to probe? */ | 
|---|
| 2633 | if (tp->write_seq - tp->snd_nxt < size_needed) | 
|---|
| 2634 | return -1; | 
|---|
| 2635 |  | 
|---|
| 2636 | if (tp->snd_wnd < size_needed) | 
|---|
| 2637 | return -1; | 
|---|
| 2638 | if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) | 
|---|
| 2639 | return 0; | 
|---|
| 2640 |  | 
|---|
| 2641 | /* Do we need to wait to drain cwnd? With none in flight, don't stall */ | 
|---|
| 2642 | if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) { | 
|---|
| 2643 | if (!tcp_packets_in_flight(tp)) | 
|---|
| 2644 | return -1; | 
|---|
| 2645 | else | 
|---|
| 2646 | return 0; | 
|---|
| 2647 | } | 
|---|
| 2648 |  | 
|---|
| 2649 | if (!tcp_can_coalesce_send_queue_head(sk, len: probe_size)) | 
|---|
| 2650 | return -1; | 
|---|
| 2651 |  | 
|---|
| 2652 | /* We're allowed to probe.  Build it now. */ | 
|---|
| 2653 | nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, force_schedule: false); | 
|---|
| 2654 | if (!nskb) | 
|---|
| 2655 | return -1; | 
|---|
| 2656 |  | 
|---|
| 2657 | /* build the payload, and be prepared to abort if this fails. */ | 
|---|
| 2658 | if (tcp_clone_payload(sk, to: nskb, probe_size)) { | 
|---|
| 2659 | tcp_skb_tsorted_anchor_cleanup(skb: nskb); | 
|---|
| 2660 | consume_skb(skb: nskb); | 
|---|
| 2661 | return -1; | 
|---|
| 2662 | } | 
|---|
| 2663 | sk_wmem_queued_add(sk, val: nskb->truesize); | 
|---|
| 2664 | sk_mem_charge(sk, size: nskb->truesize); | 
|---|
| 2665 |  | 
|---|
| 2666 | skb = tcp_send_head(sk); | 
|---|
| 2667 | skb_copy_decrypted(to: nskb, from: skb); | 
|---|
| 2668 | mptcp_skb_ext_copy(to: nskb, from: skb); | 
|---|
| 2669 |  | 
|---|
| 2670 | TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; | 
|---|
| 2671 | TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; | 
|---|
| 2672 | TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; | 
|---|
| 2673 |  | 
|---|
| 2674 | tcp_insert_write_queue_before(new: nskb, skb, sk); | 
|---|
| 2675 | tcp_highest_sack_replace(sk, old: skb, new: nskb); | 
|---|
| 2676 |  | 
|---|
| 2677 | len = 0; | 
|---|
| 2678 | tcp_for_write_queue_from_safe(skb, next, sk) { | 
|---|
| 2679 | copy = min_t(int, skb->len, probe_size - len); | 
|---|
| 2680 |  | 
|---|
| 2681 | if (skb->len <= copy) { | 
|---|
| 2682 | tcp_eat_one_skb(sk, dst: nskb, src: skb); | 
|---|
| 2683 | } else { | 
|---|
| 2684 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & | 
|---|
| 2685 | ~(TCPHDR_FIN|TCPHDR_PSH); | 
|---|
| 2686 | __pskb_trim_head(skb, len: copy); | 
|---|
| 2687 | tcp_set_skb_tso_segs(skb, mss_now); | 
|---|
| 2688 | TCP_SKB_CB(skb)->seq += copy; | 
|---|
| 2689 | } | 
|---|
| 2690 |  | 
|---|
| 2691 | len += copy; | 
|---|
| 2692 |  | 
|---|
| 2693 | if (len >= probe_size) | 
|---|
| 2694 | break; | 
|---|
| 2695 | } | 
|---|
| 2696 | tcp_init_tso_segs(skb: nskb, mss_now: nskb->len); | 
|---|
| 2697 |  | 
|---|
| 2698 | /* We're ready to send.  If this fails, the probe will | 
|---|
| 2699 | * be resegmented into mss-sized pieces by tcp_write_xmit(). | 
|---|
| 2700 | */ | 
|---|
| 2701 | if (!tcp_transmit_skb(sk, skb: nskb, clone_it: 1, GFP_ATOMIC)) { | 
|---|
| 2702 | /* Decrement cwnd here because we are sending | 
|---|
| 2703 | * effectively two packets. */ | 
|---|
| 2704 | tcp_snd_cwnd_set(tp, val: tcp_snd_cwnd(tp) - 1); | 
|---|
| 2705 | tcp_event_new_data_sent(sk, skb: nskb); | 
|---|
| 2706 |  | 
|---|
| 2707 | icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); | 
|---|
| 2708 | tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; | 
|---|
| 2709 | tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; | 
|---|
| 2710 |  | 
|---|
| 2711 | return 1; | 
|---|
| 2712 | } | 
|---|
| 2713 |  | 
|---|
| 2714 | return -1; | 
|---|
| 2715 | } | 
|---|
| 2716 |  | 
|---|
| 2717 | static bool tcp_pacing_check(struct sock *sk) | 
|---|
| 2718 | { | 
|---|
| 2719 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2720 |  | 
|---|
| 2721 | if (!tcp_needs_internal_pacing(sk)) | 
|---|
| 2722 | return false; | 
|---|
| 2723 |  | 
|---|
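|  | /* Earliest departure time already reached: no need to delay this transmit. */ | 
|---|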
| 2724 | if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) | 
|---|
| 2725 | return false; | 
|---|
| 2726 |  | 
|---|
| 2727 | if (!hrtimer_is_queued(timer: &tp->pacing_timer)) { | 
|---|
| 2728 | hrtimer_start(timer: &tp->pacing_timer, | 
|---|
| 2729 | tim: ns_to_ktime(ns: tp->tcp_wstamp_ns), | 
|---|
| 2730 | mode: HRTIMER_MODE_ABS_PINNED_SOFT); | 
|---|
| 2731 | sock_hold(sk); | 
|---|
| 2732 | } | 
|---|
| 2733 | return true; | 
|---|
| 2734 | } | 
|---|
| 2735 |  | 
|---|
| 2736 | static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk) | 
|---|
| 2737 | { | 
|---|
| 2738 | const struct rb_node *node = sk->tcp_rtx_queue.rb_node; | 
|---|
| 2739 |  | 
|---|
| 2740 | /* No skb in the rtx queue. */ | 
|---|
| 2741 | if (!node) | 
|---|
| 2742 | return true; | 
|---|
| 2743 |  | 
|---|
| 2744 | /* Only one skb in rtx queue. */ | 
|---|
| 2745 | return !node->rb_left && !node->rb_right; | 
|---|
| 2746 | } | 
|---|
| 2747 |  | 
|---|
| 2748 | /* TCP Small Queues : | 
|---|
| 2749 | * Control the number of packets in qdisc/devices to two packets or ~1 ms worth. | 
|---|
| 2750 | * (These limits are doubled for retransmits) | 
|---|
| 2751 | * This allows for : | 
|---|
| 2752 | *  - better RTT estimation and ACK scheduling | 
|---|
| 2753 | *  - faster recovery | 
|---|
| 2754 | *  - high rates | 
|---|
| 2755 | * Alas, some drivers / subsystems require a fair amount | 
|---|
| 2756 | * of queued bytes to ensure line rate. | 
|---|
| 2757 | * One example is wifi aggregation (802.11 AMPDU) | 
|---|
| 2758 | */ | 
|---|
| 2759 | static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, | 
|---|
| 2760 | unsigned int factor) | 
|---|
| 2761 | { | 
|---|
| 2762 | unsigned long limit; | 
|---|
| 2763 |  | 
|---|
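|  | /* Allow at least two skbs worth of truesize, or ~1 ms of bytes at the current pacing rate (sk_pacing_shift defaults to 10), whichever is larger. */ | 
|---|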
| 2764 | limit = max_t(unsigned long, | 
|---|
| 2765 | 2 * skb->truesize, | 
|---|
| 2766 | READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); | 
|---|
| 2767 | limit = min_t(unsigned long, limit, | 
|---|
| 2768 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); | 
|---|
| 2769 | limit <<= factor; | 
|---|
| 2770 |  | 
|---|
| 2771 | if (static_branch_unlikely(&tcp_tx_delay_enabled) && | 
|---|
| 2772 | tcp_sk(sk)->tcp_tx_delay) { | 
|---|
| 2773 | u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * | 
|---|
| 2774 | tcp_sk(sk)->tcp_tx_delay; | 
|---|
| 2775 |  | 
|---|
| 2776 | /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we | 
|---|
| 2777 | * approximate our needs assuming an ~100% skb->truesize overhead. | 
|---|
| 2778 | * USEC_PER_SEC is approximated by 2^20. | 
|---|
| 2779 | * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. | 
|---|
| 2780 | */ | 
|---|
| 2781 | extra_bytes >>= (20 - 1); | 
|---|
| 2782 | limit += extra_bytes; | 
|---|
| 2783 | } | 
|---|
| 2784 | if (refcount_read(r: &sk->sk_wmem_alloc) > limit) { | 
|---|
| 2785 | /* Always send skb if rtx queue is empty or has one skb. | 
|---|
| 2786 | * No need to wait for TX completion to call us back, | 
|---|
| 2787 | * after softirq schedule. | 
|---|
| 2788 | * This helps when TX completions are delayed too much. | 
|---|
| 2789 | */ | 
|---|
| 2790 | if (tcp_rtx_queue_empty_or_single_skb(sk)) | 
|---|
| 2791 | return false; | 
|---|
| 2792 |  | 
|---|
| 2793 | set_bit(nr: TSQ_THROTTLED, addr: &sk->sk_tsq_flags); | 
|---|
| 2794 | /* It is possible TX completion already happened | 
|---|
| 2795 | * before we set TSQ_THROTTLED, so we must | 
|---|
| 2796 | * test again the condition. | 
|---|
| 2797 | */ | 
|---|
| 2798 | smp_mb__after_atomic(); | 
|---|
| 2799 | if (refcount_read(r: &sk->sk_wmem_alloc) > limit) | 
|---|
| 2800 | return true; | 
|---|
| 2801 | } | 
|---|
| 2802 | return false; | 
|---|
| 2803 | } | 
|---|
| 2804 |  | 
|---|
| 2805 | static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) | 
|---|
| 2806 | { | 
|---|
| 2807 | const u32 now = tcp_jiffies32; | 
|---|
| 2808 | enum tcp_chrono old = tp->chrono_type; | 
|---|
| 2809 |  | 
|---|
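|  | /* Charge the time spent in the previous chrono state to its stat bucket before switching to the new type. */ | 
|---|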
| 2810 | if (old > TCP_CHRONO_UNSPEC) | 
|---|
| 2811 | tp->chrono_stat[old - 1] += now - tp->chrono_start; | 
|---|
| 2812 | tp->chrono_start = now; | 
|---|
| 2813 | tp->chrono_type = new; | 
|---|
| 2814 | } | 
|---|
| 2815 |  | 
|---|
| 2816 | void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) | 
|---|
| 2817 | { | 
|---|
| 2818 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2819 |  | 
|---|
| 2820 | /* If there are multiple conditions worthy of tracking in a | 
|---|
| 2821 | * chronograph then the highest priority enum takes precedence | 
|---|
| 2822 | * over the other conditions. So that if something "more interesting" | 
|---|
| 2823 | * starts happening, stop the previous chrono and start a new one. | 
|---|
| 2824 | */ | 
|---|
| 2825 | if (type > tp->chrono_type) | 
|---|
| 2826 | tcp_chrono_set(tp, new: type); | 
|---|
| 2827 | } | 
|---|
| 2828 |  | 
|---|
| 2829 | void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) | 
|---|
| 2830 | { | 
|---|
| 2831 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2832 |  | 
|---|
| 2833 |  | 
|---|
| 2834 | /* There are multiple conditions worthy of tracking in a | 
|---|
| 2835 | * chronograph, so that the highest priority enum takes | 
|---|
| 2836 | * precedence over the other conditions (see tcp_chrono_start). | 
|---|
| 2837 | * If a condition stops, we only stop chrono tracking if | 
|---|
| 2838 | * it's the "most interesting" or current chrono we are | 
|---|
| 2839 | * tracking, and start the busy chrono if we have pending data. | 
|---|
| 2840 | */ | 
|---|
| 2841 | if (tcp_rtx_and_write_queues_empty(sk)) | 
|---|
| 2842 | tcp_chrono_set(tp, new: TCP_CHRONO_UNSPEC); | 
|---|
| 2843 | else if (type == tp->chrono_type) | 
|---|
| 2844 | tcp_chrono_set(tp, new: TCP_CHRONO_BUSY); | 
|---|
| 2845 | } | 
|---|
| 2846 |  | 
|---|
| 2847 | /* First skb in the write queue is smaller than ideal packet size. | 
|---|
| 2848 | * Check if we can move payload from the second skb in the queue. | 
|---|
| 2849 | */ | 
|---|
| 2850 | static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount) | 
|---|
| 2851 | { | 
|---|
| 2852 | struct sk_buff *next_skb = skb->next; | 
|---|
| 2853 | unsigned int nlen; | 
|---|
| 2854 |  | 
|---|
| 2855 | if (tcp_skb_is_last(sk, skb)) | 
|---|
| 2856 | return; | 
|---|
| 2857 |  | 
|---|
| 2858 | if (!tcp_skb_can_collapse(to: skb, from: next_skb)) | 
|---|
| 2859 | return; | 
|---|
| 2860 |  | 
|---|
| 2861 | nlen = min_t(u32, amount, next_skb->len); | 
|---|
| 2862 | if (!nlen || !skb_shift(tgt: skb, skb: next_skb, shiftlen: nlen)) | 
|---|
| 2863 | return; | 
|---|
| 2864 |  | 
|---|
| 2865 | TCP_SKB_CB(skb)->end_seq += nlen; | 
|---|
| 2866 | TCP_SKB_CB(next_skb)->seq += nlen; | 
|---|
| 2867 |  | 
|---|
| 2868 | if (!next_skb->len) { | 
|---|
| 2869 | /* In case FIN is set, we need to update end_seq */ | 
|---|
| 2870 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; | 
|---|
| 2871 |  | 
|---|
| 2872 | tcp_eat_one_skb(sk, dst: skb, src: next_skb); | 
|---|
| 2873 | } | 
|---|
| 2874 | } | 
|---|
| 2875 |  | 
|---|
| 2876 | /* This routine writes packets to the network.  It advances the | 
|---|
| 2877 | * send_head.  This happens as incoming acks open up the remote | 
|---|
| 2878 | * window for us. | 
|---|
| 2879 | * | 
|---|
| 2880 | * LARGESEND note: !tcp_urg_mode is overkill, only frames between | 
|---|
| 2881 | * snd_up-64k-mss .. snd_up cannot be large. However, taking into | 
|---|
| 2882 | * account rare use of URG, this is not a big flaw. | 
|---|
| 2883 | * | 
|---|
| 2884 | * Send at most one packet when push_one > 0. Temporarily ignore | 
|---|
| 2885 | * cwnd limit to force at most one packet out when push_one == 2. | 
|---|
| 2886 | * | 
|---|
| 2887 | * Returns true if no segments are in flight and we have queued segments, | 
|---|
| 2888 | * but cannot send anything now because of SWS or another problem. | 
|---|
| 2889 | */ | 
|---|
| 2890 | static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | 
|---|
| 2891 | int push_one, gfp_t gfp) | 
|---|
| 2892 | { | 
|---|
| 2893 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 2894 | struct sk_buff *skb; | 
|---|
| 2895 | unsigned int tso_segs, sent_pkts; | 
|---|
| 2896 | u32 cwnd_quota, max_segs; | 
|---|
| 2897 | int result; | 
|---|
| 2898 | bool is_cwnd_limited = false, is_rwnd_limited = false; | 
|---|
| 2899 |  | 
|---|
| 2900 | sent_pkts = 0; | 
|---|
| 2901 |  | 
|---|
| 2902 | tcp_mstamp_refresh(tp); | 
|---|
| 2903 |  | 
|---|
| 2904 | /* AccECN option beacon depends on mstamp, it may change mss */ | 
|---|
| 2905 | if (tcp_ecn_mode_accecn(tp) && tcp_accecn_option_beacon_check(sk)) | 
|---|
| 2906 | mss_now = tcp_current_mss(sk); | 
|---|
| 2907 |  | 
|---|
| 2908 | if (!push_one) { | 
|---|
| 2909 | /* Do MTU probing. */ | 
|---|
| 2910 | result = tcp_mtu_probe(sk); | 
|---|
| 2911 | if (!result) { | 
|---|
| 2912 | return false; | 
|---|
| 2913 | } else if (result > 0) { | 
|---|
| 2914 | sent_pkts = 1; | 
|---|
| 2915 | } | 
|---|
| 2916 | } | 
|---|
| 2917 |  | 
|---|
| 2918 | max_segs = tcp_tso_segs(sk, mss_now); | 
|---|
| 2919 | while ((skb = tcp_send_head(sk))) { | 
|---|
| 2920 | unsigned int limit; | 
|---|
| 2921 | int missing_bytes; | 
|---|
| 2922 |  | 
|---|
| 2923 | if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { | 
|---|
| 2924 | /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ | 
|---|
| 2925 | tp->tcp_wstamp_ns = tp->tcp_clock_cache; | 
|---|
| 2926 | skb_set_delivery_time(skb, kt: tp->tcp_wstamp_ns, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 2927 | list_move_tail(list: &skb->tcp_tsorted_anchor, head: &tp->tsorted_sent_queue); | 
|---|
| 2928 | tcp_init_tso_segs(skb, mss_now); | 
|---|
| 2929 | goto repair; /* Skip network transmission */ | 
|---|
| 2930 | } | 
|---|
| 2931 |  | 
|---|
| 2932 | if (tcp_pacing_check(sk)) | 
|---|
| 2933 | break; | 
|---|
| 2934 |  | 
|---|
| 2935 | cwnd_quota = tcp_cwnd_test(tp); | 
|---|
| 2936 | if (!cwnd_quota) { | 
|---|
| 2937 | if (push_one == 2) | 
|---|
| 2938 | /* Force out a loss probe pkt. */ | 
|---|
| 2939 | cwnd_quota = 1; | 
|---|
| 2940 | else | 
|---|
| 2941 | break; | 
|---|
| 2942 | } | 
|---|
| 2943 | cwnd_quota = min(cwnd_quota, max_segs); | 
|---|
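|  | /* If the head skb is smaller than the cwnd/TSO budget for this burst, try to pull payload from the next skb (see tcp_grow_skb()). */ | 
|---|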
| 2944 | missing_bytes = cwnd_quota * mss_now - skb->len; | 
|---|
| 2945 | if (missing_bytes > 0) | 
|---|
| 2946 | tcp_grow_skb(sk, skb, amount: missing_bytes); | 
|---|
| 2947 |  | 
|---|
| 2948 | tso_segs = tcp_set_skb_tso_segs(skb, mss_now); | 
|---|
| 2949 |  | 
|---|
| 2950 | if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { | 
|---|
| 2951 | is_rwnd_limited = true; | 
|---|
| 2952 | break; | 
|---|
| 2953 | } | 
|---|
| 2954 |  | 
|---|
| 2955 | if (tso_segs == 1) { | 
|---|
| 2956 | if (unlikely(!tcp_nagle_test(tp, skb, mss_now, | 
|---|
| 2957 | (tcp_skb_is_last(sk, skb) ? | 
|---|
| 2958 | nonagle : TCP_NAGLE_PUSH)))) | 
|---|
| 2959 | break; | 
|---|
| 2960 | } else { | 
|---|
| 2961 | if (!push_one && | 
|---|
| 2962 | tcp_tso_should_defer(sk, skb, is_cwnd_limited: &is_cwnd_limited, | 
|---|
| 2963 | is_rwnd_limited: &is_rwnd_limited, max_segs)) | 
|---|
| 2964 | break; | 
|---|
| 2965 | } | 
|---|
| 2966 |  | 
|---|
| 2967 | limit = mss_now; | 
|---|
| 2968 | if (tso_segs > 1 && !tcp_urg_mode(tp)) | 
|---|
| 2969 | limit = tcp_mss_split_point(sk, skb, mss_now, | 
|---|
| 2970 | max_segs: cwnd_quota, | 
|---|
| 2971 | nonagle); | 
|---|
| 2972 |  | 
|---|
| 2973 | if (skb->len > limit && | 
|---|
| 2974 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) | 
|---|
| 2975 | break; | 
|---|
| 2976 |  | 
|---|
| 2977 | if (tcp_small_queue_check(sk, skb, factor: 0)) | 
|---|
| 2978 | break; | 
|---|
| 2979 |  | 
|---|
| 2980 | /* Argh, we hit an empty skb(), presumably a thread | 
|---|
| 2981 | * is sleeping in sendmsg()/sk_stream_wait_memory(). | 
|---|
| 2982 | * We do not want to send a pure-ack packet and have | 
|---|
| 2983 | * a strange looking rtx queue with empty packet(s). | 
|---|
| 2984 | */ | 
|---|
| 2985 | if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) | 
|---|
| 2986 | break; | 
|---|
| 2987 |  | 
|---|
| 2988 | if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) | 
|---|
| 2989 | break; | 
|---|
| 2990 |  | 
|---|
| 2991 | repair: | 
|---|
| 2992 | /* Advance the send_head.  This one is sent out. | 
|---|
| 2993 | * This call will increment packets_out. | 
|---|
| 2994 | */ | 
|---|
| 2995 | tcp_event_new_data_sent(sk, skb); | 
|---|
| 2996 |  | 
|---|
| 2997 | tcp_minshall_update(tp, mss_now, skb); | 
|---|
| 2998 | sent_pkts += tcp_skb_pcount(skb); | 
|---|
| 2999 |  | 
|---|
| 3000 | if (push_one) | 
|---|
| 3001 | break; | 
|---|
| 3002 | } | 
|---|
| 3003 |  | 
|---|
| 3004 | if (is_rwnd_limited) | 
|---|
| 3005 | tcp_chrono_start(sk, type: TCP_CHRONO_RWND_LIMITED); | 
|---|
| 3006 | else | 
|---|
| 3007 | tcp_chrono_stop(sk, type: TCP_CHRONO_RWND_LIMITED); | 
|---|
| 3008 |  | 
|---|
| 3009 | is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); | 
|---|
| 3010 | if (likely(sent_pkts || is_cwnd_limited)) | 
|---|
| 3011 | tcp_cwnd_validate(sk, is_cwnd_limited); | 
|---|
| 3012 |  | 
|---|
| 3013 | if (likely(sent_pkts)) { | 
|---|
| 3014 | if (tcp_in_cwnd_reduction(sk)) | 
|---|
| 3015 | tp->prr_out += sent_pkts; | 
|---|
| 3016 |  | 
|---|
| 3017 | /* Send one loss probe per tail loss episode. */ | 
|---|
| 3018 | if (push_one != 2) | 
|---|
| 3019 | tcp_schedule_loss_probe(sk, advancing_rto: false); | 
|---|
| 3020 | return false; | 
|---|
| 3021 | } | 
|---|
| 3022 | return !tp->packets_out && !tcp_write_queue_empty(sk); | 
|---|
| 3023 | } | 
|---|
| 3024 |  | 
|---|
| 3025 | bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) | 
|---|
| 3026 | { | 
|---|
| 3027 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 3028 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3029 | u32 timeout, timeout_us, rto_delta_us; | 
|---|
| 3030 | int early_retrans; | 
|---|
| 3031 |  | 
|---|
| 3032 | /* Don't do any loss probe on a Fast Open connection before 3WHS | 
|---|
| 3033 | * finishes. | 
|---|
| 3034 | */ | 
|---|
| 3035 | if (rcu_access_pointer(tp->fastopen_rsk)) | 
|---|
| 3036 | return false; | 
|---|
| 3037 |  | 
|---|
| 3038 | early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); | 
|---|
| 3039 | /* Schedule a loss probe in 2*RTT for SACK capable connections | 
|---|
| 3040 | * not in loss recovery, that are either cwnd-limited or application-limited. | 
|---|
| 3041 | */ | 
|---|
| 3042 | if ((early_retrans != 3 && early_retrans != 4) || | 
|---|
| 3043 | !tp->packets_out || !tcp_is_sack(tp) || | 
|---|
| 3044 | (icsk->icsk_ca_state != TCP_CA_Open && | 
|---|
| 3045 | icsk->icsk_ca_state != TCP_CA_CWR)) | 
|---|
| 3046 | return false; | 
|---|
| 3047 |  | 
|---|
| 3048 | /* Probe timeout is 2*rtt. Add minimum RTO to account | 
|---|
| 3049 | * for delayed ack when there's one outstanding packet. If no RTT | 
|---|
| 3050 | * sample is available then probe after TCP_TIMEOUT_INIT. | 
|---|
| 3051 | */ | 
|---|
| 3052 | if (tp->srtt_us) { | 
|---|
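|  | /* tp->srtt_us is the smoothed RTT << 3 (in usec), so >> 2 gives 2 * srtt. */ | 
|---|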
| 3053 | timeout_us = tp->srtt_us >> 2; | 
|---|
| 3054 | if (tp->packets_out == 1) | 
|---|
| 3055 | timeout_us += tcp_rto_min_us(sk); | 
|---|
| 3056 | else | 
|---|
| 3057 | timeout_us += TCP_TIMEOUT_MIN_US; | 
|---|
| 3058 | timeout = usecs_to_jiffies(u: timeout_us); | 
|---|
| 3059 | } else { | 
|---|
| 3060 | timeout = TCP_TIMEOUT_INIT; | 
|---|
| 3061 | } | 
|---|
| 3062 |  | 
|---|
| 3063 | /* If the RTO formula yields an earlier time, then use that time. */ | 
|---|
| 3064 | rto_delta_us = advancing_rto ? | 
|---|
| 3065 | jiffies_to_usecs(inet_csk(sk)->icsk_rto) : | 
|---|
| 3066 | tcp_rto_delta_us(sk);  /* How far in future is RTO? */ | 
|---|
| 3067 | if (rto_delta_us > 0) | 
|---|
| 3068 | timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); | 
|---|
| 3069 |  | 
|---|
| 3070 | tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, when: timeout, pace_delay: true); | 
|---|
| 3071 | return true; | 
|---|
| 3072 | } | 
|---|
| 3073 |  | 
|---|
| 3074 | /* Thanks to skb fast clones, we can detect if a prior transmit of | 
|---|
| 3075 | * a packet is still in a qdisc or driver queue. | 
|---|
| 3076 | * In this case, there is very little point doing a retransmit ! | 
|---|
| 3077 | */ | 
|---|
| 3078 | static bool skb_still_in_host_queue(struct sock *sk, | 
|---|
| 3079 | const struct sk_buff *skb) | 
|---|
| 3080 | { | 
|---|
| 3081 | if (unlikely(skb_fclone_busy(sk, skb))) { | 
|---|
| 3082 | set_bit(nr: TSQ_THROTTLED, addr: &sk->sk_tsq_flags); | 
|---|
| 3083 | smp_mb__after_atomic(); | 
|---|
| 3084 | if (skb_fclone_busy(sk, skb)) { | 
|---|
| 3085 | NET_INC_STATS(sock_net(sk), | 
|---|
| 3086 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); | 
|---|
| 3087 | return true; | 
|---|
| 3088 | } | 
|---|
| 3089 | } | 
|---|
| 3090 | return false; | 
|---|
| 3091 | } | 
|---|
| 3092 |  | 
|---|
| 3093 | /* When probe timeout (PTO) fires, try send a new segment if possible, else | 
|---|
| 3094 | * retransmit the last segment. | 
|---|
| 3095 | */ | 
|---|
| 3096 | void tcp_send_loss_probe(struct sock *sk) | 
|---|
| 3097 | { | 
|---|
| 3098 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3099 | struct sk_buff *skb; | 
|---|
| 3100 | int pcount; | 
|---|
| 3101 | int mss = tcp_current_mss(sk); | 
|---|
| 3102 |  | 
|---|
| 3103 | /* At most one outstanding TLP */ | 
|---|
| 3104 | if (tp->tlp_high_seq) | 
|---|
| 3105 | goto rearm_timer; | 
|---|
| 3106 |  | 
|---|
| 3107 | tp->tlp_retrans = 0; | 
|---|
| 3108 | skb = tcp_send_head(sk); | 
|---|
| 3109 | if (skb && tcp_snd_wnd_test(tp, skb, cur_mss: mss)) { | 
|---|
| 3110 | pcount = tp->packets_out; | 
|---|
| 3111 | tcp_write_xmit(sk, mss_now: mss, TCP_NAGLE_OFF, push_one: 2, GFP_ATOMIC); | 
|---|
| 3112 | if (tp->packets_out > pcount) | 
|---|
| 3113 | goto probe_sent; | 
|---|
| 3114 | goto rearm_timer; | 
|---|
| 3115 | } | 
|---|
| 3116 | skb = skb_rb_last(&sk->tcp_rtx_queue); | 
|---|
| 3117 | if (unlikely(!skb)) { | 
|---|
| 3118 | tcp_warn_once(sk, cond: tp->packets_out, str: "invalid inflight: "); | 
|---|
| 3119 | smp_store_release(&inet_csk(sk)->icsk_pending, 0); | 
|---|
| 3120 | return; | 
|---|
| 3121 | } | 
|---|
| 3122 |  | 
|---|
| 3123 | if (skb_still_in_host_queue(sk, skb)) | 
|---|
| 3124 | goto rearm_timer; | 
|---|
| 3125 |  | 
|---|
| 3126 | pcount = tcp_skb_pcount(skb); | 
|---|
| 3127 | if (WARN_ON(!pcount)) | 
|---|
| 3128 | goto rearm_timer; | 
|---|
| 3129 |  | 
|---|
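|  | /* TLP retransmits a single MSS: if the last skb spans several segments, split off the final one and probe with just that. */ | 
|---|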
| 3130 | if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { | 
|---|
| 3131 | if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, | 
|---|
| 3132 | (pcount - 1) * mss, mss, | 
|---|
| 3133 | GFP_ATOMIC))) | 
|---|
| 3134 | goto rearm_timer; | 
|---|
| 3135 | skb = skb_rb_next(skb); | 
|---|
| 3136 | } | 
|---|
| 3137 |  | 
|---|
| 3138 | if (WARN_ON(!skb || !tcp_skb_pcount(skb))) | 
|---|
| 3139 | goto rearm_timer; | 
|---|
| 3140 |  | 
|---|
| 3141 | if (__tcp_retransmit_skb(sk, skb, segs: 1)) | 
|---|
| 3142 | goto rearm_timer; | 
|---|
| 3143 |  | 
|---|
| 3144 | tp->tlp_retrans = 1; | 
|---|
| 3145 |  | 
|---|
| 3146 | probe_sent: | 
|---|
| 3147 | /* Record snd_nxt for loss detection. */ | 
|---|
| 3148 | tp->tlp_high_seq = tp->snd_nxt; | 
|---|
| 3149 |  | 
|---|
| 3150 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); | 
|---|
| 3151 | /* Reset s.t. tcp_rearm_rto will restart timer from now */ | 
|---|
| 3152 | smp_store_release(&inet_csk(sk)->icsk_pending, 0); | 
|---|
| 3153 | rearm_timer: | 
|---|
| 3154 | tcp_rearm_rto(sk); | 
|---|
| 3155 | } | 
|---|
| 3156 |  | 
|---|
| 3157 | /* Push out any pending frames which were held back due to | 
|---|
| 3158 | * TCP_CORK or attempt at coalescing tiny packets. | 
|---|
| 3159 | * The socket must be locked by the caller. | 
|---|
| 3160 | */ | 
|---|
| 3161 | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, | 
|---|
| 3162 | int nonagle) | 
|---|
| 3163 | { | 
|---|
| 3164 | /* If we are closed, the bytes will have to remain here. | 
|---|
| 3165 | * In time closedown will finish, we empty the write queue and | 
|---|
| 3166 | * all will be happy. | 
|---|
| 3167 | */ | 
|---|
| 3168 | if (unlikely(sk->sk_state == TCP_CLOSE)) | 
|---|
| 3169 | return; | 
|---|
| 3170 |  | 
|---|
| 3171 | if (tcp_write_xmit(sk, mss_now: cur_mss, nonagle, push_one: 0, | 
|---|
| 3172 | gfp: sk_gfp_mask(sk, GFP_ATOMIC))) | 
|---|
| 3173 | tcp_check_probe_timer(sk); | 
|---|
| 3174 | } | 
|---|
| 3175 |  | 
|---|
| 3176 | /* Send the _single_ skb sitting at the send head. Unlike a full | 
|---|
| 3177 | * __tcp_push_pending_frames(), this does not set up the probe timer etc. | 
|---|
| 3178 | */ | 
|---|
| 3179 | void tcp_push_one(struct sock *sk, unsigned int mss_now) | 
|---|
| 3180 | { | 
|---|
| 3181 | struct sk_buff *skb = tcp_send_head(sk); | 
|---|
| 3182 |  | 
|---|
| 3183 | BUG_ON(!skb || skb->len < mss_now); | 
|---|
| 3184 |  | 
|---|
| 3185 | tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, push_one: 1, gfp: sk->sk_allocation); | 
|---|
| 3186 | } | 
|---|
| 3187 |  | 
|---|
| 3188 | /* This function returns the amount that we can raise the | 
|---|
| 3189 | * usable window based on the following constraints | 
|---|
| 3190 | * | 
|---|
| 3191 | * 1. The window can never be shrunk once it is offered (RFC 793) | 
|---|
| 3192 | * 2. We limit memory per socket | 
|---|
| 3193 | * | 
|---|
| 3194 | * RFC 1122: | 
|---|
| 3195 | * "the suggested [SWS] avoidance algorithm for the receiver is to keep | 
|---|
| 3196 | *  RECV.NEXT + RCV.WIN fixed until: | 
|---|
| 3197 | *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" | 
|---|
| 3198 | * | 
|---|
| 3199 | * i.e. don't raise the right edge of the window until you can raise | 
|---|
| 3200 | * it at least MSS bytes. | 
|---|
| 3201 | * | 
|---|
| 3202 | * Unfortunately, the recommended algorithm breaks header prediction, | 
|---|
| 3203 | * since header prediction assumes th->window stays fixed. | 
|---|
| 3204 | * | 
|---|
| 3205 | * Strictly speaking, keeping th->window fixed violates the receiver | 
|---|
| 3206 | * side SWS prevention criteria. The problem is that under this rule | 
|---|
| 3207 | * a stream of single byte packets will cause the right side of the | 
|---|
| 3208 | * window to always advance by a single byte. | 
|---|
| 3209 | * | 
|---|
| 3210 | * Of course, if the sender implements sender side SWS prevention | 
|---|
| 3211 | * then this will not be a problem. | 
|---|
| 3212 | * | 
|---|
| 3213 | * BSD seems to make the following compromise: | 
|---|
| 3214 | * | 
|---|
| 3215 | *	If the free space is less than the 1/4 of the maximum | 
|---|
| 3216 | *	space available and the free space is less than 1/2 mss, | 
|---|
| 3217 | *	then set the window to 0. | 
|---|
| 3218 | *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ] | 
|---|
| 3219 | *	Otherwise, just prevent the window from shrinking | 
|---|
| 3220 | *	and from being larger than the largest representable value. | 
|---|
| 3221 | * | 
|---|
| 3222 | * This prevents incremental opening of the window in the regime | 
|---|
| 3223 | * where TCP is limited by the speed of the reader side taking | 
|---|
| 3224 | * data out of the TCP receive queue. It does nothing about | 
|---|
| 3225 | * those cases where the window is constrained on the sender side | 
|---|
| 3226 | * because the pipeline is full. | 
|---|
| 3227 | * | 
|---|
| 3228 | * BSD also seems to "accidentally" limit itself to windows that are a | 
|---|
| 3229 | * multiple of MSS, at least until the free space gets quite small. | 
|---|
| 3230 | * This would appear to be a side effect of the mbuf implementation. | 
|---|
| 3231 | * Combining these two algorithms results in the observed behavior | 
|---|
| 3232 | * of having a fixed window size at almost all times. | 
|---|
| 3233 | * | 
|---|
| 3234 | * Below we obtain similar behavior by forcing the offered window to | 
|---|
| 3235 | * a multiple of the mss when it is feasible to do so. | 
|---|
| 3236 | * | 
|---|
| 3237 | * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. | 
|---|
| 3238 | * Regular options like TIMESTAMP are taken into account. | 
|---|
| 3239 | */ | 
|---|
| 3240 | u32 __tcp_select_window(struct sock *sk) | 
|---|
| 3241 | { | 
|---|
| 3242 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 3243 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3244 | struct net *net = sock_net(sk); | 
|---|
| 3245 | /* MSS for the peer's data.  Previous versions used mss_clamp | 
|---|
| 3246 | * here.  I don't know if the value based on our guesses | 
|---|
| 3247 | * of peer's MSS is better for the performance.  It's more correct | 
|---|
| 3248 | * but may be worse for the performance because of rcv_mss | 
|---|
| 3249 | * fluctuations.  --SAW  1998/11/1 | 
|---|
| 3250 | */ | 
|---|
| 3251 | int mss = icsk->icsk_ack.rcv_mss; | 
|---|
| 3252 | int free_space = tcp_space(sk); | 
|---|
| 3253 | int allowed_space = tcp_full_space(sk); | 
|---|
| 3254 | int full_space, window; | 
|---|
| 3255 |  | 
|---|
| 3256 | if (sk_is_mptcp(sk)) | 
|---|
| 3257 | mptcp_space(ssk: sk, s: &free_space, fs: &allowed_space); | 
|---|
| 3258 |  | 
|---|
| 3259 | full_space = min_t(int, tp->window_clamp, allowed_space); | 
|---|
| 3260 |  | 
|---|
| 3261 | if (unlikely(mss > full_space)) { | 
|---|
| 3262 | mss = full_space; | 
|---|
| 3263 | if (mss <= 0) | 
|---|
| 3264 | return 0; | 
|---|
| 3265 | } | 
|---|
| 3266 |  | 
|---|
| 3267 | /* Only allow window shrink if the sysctl is enabled and we have | 
|---|
| 3268 | * a non-zero scaling factor in effect. | 
|---|
| 3269 | */ | 
|---|
| 3270 | if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) | 
|---|
| 3271 | goto shrink_window_allowed; | 
|---|
| 3272 |  | 
|---|
| 3273 | /* do not allow window to shrink */ | 
|---|
| 3274 |  | 
|---|
| 3275 | if (free_space < (full_space >> 1)) { | 
|---|
| 3276 | icsk->icsk_ack.quick = 0; | 
|---|
| 3277 |  | 
|---|
| 3278 | if (tcp_under_memory_pressure(sk)) | 
|---|
| 3279 | tcp_adjust_rcv_ssthresh(sk); | 
|---|
| 3280 |  | 
|---|
| 3281 | /* free_space might become our new window, make sure we don't | 
|---|
| 3282 | * increase it due to wscale. | 
|---|
| 3283 | */ | 
|---|
| 3284 | free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); | 
|---|
| 3285 |  | 
|---|
| 3286 | /* if free space is less than mss estimate, or is below 1/16th | 
|---|
| 3287 | * of the maximum allowed, try to move to zero-window, else | 
|---|
| 3288 | * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and | 
|---|
| 3289 | * new incoming data is dropped due to memory limits. | 
|---|
| 3290 | * With large window, mss test triggers way too late in order | 
|---|
| 3291 | * to announce zero window in time before rmem limit kicks in. | 
|---|
| 3292 | */ | 
|---|
| 3293 | if (free_space < (allowed_space >> 4) || free_space < mss) | 
|---|
| 3294 | return 0; | 
|---|
| 3295 | } | 
|---|
| 3296 |  | 
|---|
| 3297 | if (free_space > tp->rcv_ssthresh) | 
|---|
| 3298 | free_space = tp->rcv_ssthresh; | 
|---|
| 3299 |  | 
|---|
| 3300 | /* Don't do rounding if we are using window scaling, since the | 
|---|
| 3301 | * scaled window will not line up with the MSS boundary anyway. | 
|---|
| 3302 | */ | 
|---|
| 3303 | if (tp->rx_opt.rcv_wscale) { | 
|---|
| 3304 | window = free_space; | 
|---|
| 3305 |  | 
|---|
| 3306 | /* Advertise enough space so that it won't get scaled away. | 
|---|
| 3307 | * Important case: prevent a zero window announcement if | 
|---|
| 3308 | * 1<<rcv_wscale > mss. | 
|---|
| 3309 | */ | 
|---|
| 3310 | window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); | 
|---|
| 3311 | } else { | 
|---|
| 3312 | window = tp->rcv_wnd; | 
|---|
| 3313 | /* Get the largest window that is a nice multiple of mss. | 
|---|
| 3314 | * Window clamp already applied above. | 
|---|
| 3315 | * If our current window offering is within 1 mss of the | 
|---|
| 3316 | * free space we just keep it. This prevents the divide | 
|---|
| 3317 | * and multiply from happening most of the time. | 
|---|
| 3318 | * We also don't do any window rounding when the free space | 
|---|
| 3319 | * is too small. | 
|---|
| 3320 | */ | 
|---|
| 3321 | if (window <= free_space - mss || window > free_space) | 
|---|
| 3322 | window = rounddown(free_space, mss); | 
|---|
| 3323 | else if (mss == full_space && | 
|---|
| 3324 | free_space > window + (full_space >> 1)) | 
|---|
| 3325 | window = free_space; | 
|---|
| 3326 | } | 
|---|
| 3327 |  | 
|---|
| 3328 | return window; | 
|---|
| 3329 |  | 
|---|
| 3330 | shrink_window_allowed: | 
|---|
| 3331 | /* new window should always be an exact multiple of scaling factor */ | 
|---|
| 3332 | free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); | 
|---|
| 3333 |  | 
|---|
| 3334 | if (free_space < (full_space >> 1)) { | 
|---|
| 3335 | icsk->icsk_ack.quick = 0; | 
|---|
| 3336 |  | 
|---|
| 3337 | if (tcp_under_memory_pressure(sk)) | 
|---|
| 3338 | tcp_adjust_rcv_ssthresh(sk); | 
|---|
| 3339 |  | 
|---|
| 3340 | /* if free space is too low, return a zero window */ | 
|---|
| 3341 | if (free_space < (allowed_space >> 4) || free_space < mss || | 
|---|
| 3342 | free_space < (1 << tp->rx_opt.rcv_wscale)) | 
|---|
| 3343 | return 0; | 
|---|
| 3344 | } | 
|---|
| 3345 |  | 
|---|
| 3346 | if (free_space > tp->rcv_ssthresh) { | 
|---|
| 3347 | free_space = tp->rcv_ssthresh; | 
|---|
| 3348 | /* new window should always be an exact multiple of scaling factor | 
|---|
| 3349 | * | 
|---|
| 3350 | * For this case, we ALIGN "up" (increase free_space) because | 
|---|
| 3351 | * we know free_space is not zero here, it has been reduced from | 
|---|
| 3352 | * the memory-based limit, and rcv_ssthresh is not a hard limit | 
|---|
| 3353 | * (unlike sk_rcvbuf). | 
|---|
| 3354 | */ | 
|---|
| 3355 | free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); | 
|---|
| 3356 | } | 
|---|
| 3357 |  | 
|---|
| 3358 | return free_space; | 
|---|
| 3359 | } | 
|---|
| 3360 |  | 
|---|
| 3361 | void tcp_skb_collapse_tstamp(struct sk_buff *skb, | 
|---|
| 3362 | const struct sk_buff *next_skb) | 
|---|
| 3363 | { | 
|---|
| 3364 | if (unlikely(tcp_has_tx_tstamp(next_skb))) { | 
|---|
| 3365 | const struct skb_shared_info *next_shinfo = | 
|---|
| 3366 | skb_shinfo(next_skb); | 
|---|
| 3367 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 
|---|
| 3368 |  | 
|---|
| 3369 | shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; | 
|---|
| 3370 | shinfo->tskey = next_shinfo->tskey; | 
|---|
| 3371 | TCP_SKB_CB(skb)->txstamp_ack |= | 
|---|
| 3372 | TCP_SKB_CB(next_skb)->txstamp_ack; | 
|---|
| 3373 | } | 
|---|
| 3374 | } | 
|---|
| 3375 |  | 
|---|
| 3376 | /* Collapses two adjacent SKB's during retransmission. */ | 
|---|
| 3377 | static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) | 
|---|
| 3378 | { | 
|---|
| 3379 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3380 | struct sk_buff *next_skb = skb_rb_next(skb); | 
|---|
| 3381 | int next_skb_size; | 
|---|
| 3382 |  | 
|---|
| 3383 | next_skb_size = next_skb->len; | 
|---|
| 3384 |  | 
|---|
| 3385 | BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); | 
|---|
| 3386 |  | 
|---|
| 3387 | if (next_skb_size && !tcp_skb_shift(to: skb, from: next_skb, pcount: 1, shiftlen: next_skb_size)) | 
|---|
| 3388 | return false; | 
|---|
| 3389 |  | 
|---|
| 3390 | tcp_highest_sack_replace(sk, old: next_skb, new: skb); | 
|---|
| 3391 |  | 
|---|
| 3392 | /* Update sequence range on original skb. */ | 
|---|
| 3393 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; | 
|---|
| 3394 |  | 
|---|
| 3395 | /* Merge over control information. This moves PSH/FIN etc. over */ | 
|---|
| 3396 | TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; | 
|---|
| 3397 |  | 
|---|
| 3398 | /* All done, get rid of second SKB and account for it so | 
|---|
| 3399 | * packet counting does not break. | 
|---|
| 3400 | */ | 
|---|
| 3401 | TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; | 
|---|
| 3402 | TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; | 
|---|
| 3403 |  | 
|---|
| 3404 | /* changed transmit queue under us so clear hints */ | 
|---|
| 3405 | if (next_skb == tp->retransmit_skb_hint) | 
|---|
| 3406 | tp->retransmit_skb_hint = skb; | 
|---|
| 3407 |  | 
|---|
| 3408 | tcp_adjust_pcount(sk, skb: next_skb, decr: tcp_skb_pcount(skb: next_skb)); | 
|---|
| 3409 |  | 
|---|
| 3410 | tcp_skb_collapse_tstamp(skb, next_skb); | 
|---|
| 3411 |  | 
|---|
| 3412 | tcp_rtx_queue_unlink_and_free(skb: next_skb, sk); | 
|---|
| 3413 | return true; | 
|---|
| 3414 | } | 
|---|
| 3415 |  | 
|---|
| 3416 | /* Check if coalescing SKBs is legal. */ | 
|---|
| 3417 | static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) | 
|---|
| 3418 | { | 
|---|
| 3419 | if (tcp_skb_pcount(skb) > 1) | 
|---|
| 3420 | return false; | 
|---|
| 3421 | if (skb_cloned(skb)) | 
|---|
| 3422 | return false; | 
|---|
| 3423 | if (!skb_frags_readable(skb)) | 
|---|
| 3424 | return false; | 
|---|
| 3425 | /* Some heuristics for collapsing over SACK'd could be invented */ | 
|---|
| 3426 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) | 
|---|
| 3427 | return false; | 
|---|
| 3428 |  | 
|---|
| 3429 | return true; | 
|---|
| 3430 | } | 
|---|
| 3431 |  | 
|---|
| 3432 | /* Collapse packets in the retransmit queue to create | 
|---|
| 3433 | * fewer packets on the wire. This is only done on retransmission. | 
|---|
| 3434 | */ | 
|---|
| 3435 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, | 
|---|
| 3436 | int space) | 
|---|
| 3437 | { | 
|---|
| 3438 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3439 | struct sk_buff *skb = to, *tmp; | 
|---|
| 3440 | bool first = true; | 
|---|
| 3441 |  | 
|---|
| 3442 | if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) | 
|---|
| 3443 | return; | 
|---|
| 3444 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) | 
|---|
| 3445 | return; | 
|---|
| 3446 |  | 
|---|
| 3447 | skb_rbtree_walk_from_safe(skb, tmp) { | 
|---|
| 3448 | if (!tcp_can_collapse(sk, skb)) | 
|---|
| 3449 | break; | 
|---|
| 3450 |  | 
|---|
| 3451 | if (!tcp_skb_can_collapse(to, from: skb)) | 
|---|
| 3452 | break; | 
|---|
| 3453 |  | 
|---|
| 3454 | space -= skb->len; | 
|---|
| 3455 |  | 
|---|
| 3456 | if (first) { | 
|---|
| 3457 | first = false; | 
|---|
| 3458 | continue; | 
|---|
| 3459 | } | 
|---|
| 3460 |  | 
|---|
| 3461 | if (space < 0) | 
|---|
| 3462 | break; | 
|---|
| 3463 |  | 
|---|
| 3464 | if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) | 
|---|
| 3465 | break; | 
|---|
| 3466 |  | 
|---|
| 3467 | if (!tcp_collapse_retrans(sk, skb: to)) | 
|---|
| 3468 | break; | 
|---|
| 3469 | } | 
|---|
| 3470 | } | 
|---|
| 3471 |  | 
|---|
| 3472 | /* This retransmits one SKB.  Policy decisions and retransmit queue | 
|---|
| 3473 | * state updates are done by the caller.  Returns non-zero if an | 
|---|
| 3474 | * error occurred which prevented the send. | 
|---|
| 3475 | */ | 
|---|
| 3476 | int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | 
|---|
| 3477 | { | 
|---|
| 3478 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 3479 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3480 | unsigned int cur_mss; | 
|---|
| 3481 | int diff, len, err; | 
|---|
| 3482 | int avail_wnd; | 
|---|
| 3483 |  | 
|---|
| 3484 | /* Inconclusive MTU probe */ | 
|---|
| 3485 | if (icsk->icsk_mtup.probe_size) | 
|---|
| 3486 | icsk->icsk_mtup.probe_size = 0; | 
|---|
| 3487 |  | 
|---|
| 3488 | if (skb_still_in_host_queue(sk, skb)) { | 
|---|
| 3489 | err = -EBUSY; | 
|---|
| 3490 | goto out; | 
|---|
| 3491 | } | 
|---|
| 3492 |  | 
|---|
| 3493 | start: | 
|---|
| 3494 | if (before(TCP_SKB_CB(skb)->seq, seq2: tp->snd_una)) { | 
|---|
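|  | /* Partially acked SYN+data (e.g. Fast Open): the SYN consumed one sequence number and has been acked, so strip it and retry with the remaining data. */ | 
|---|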
| 3495 | if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { | 
|---|
| 3496 | TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; | 
|---|
| 3497 | TCP_SKB_CB(skb)->seq++; | 
|---|
| 3498 | goto start; | 
|---|
| 3499 | } | 
|---|
| 3500 | if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { | 
|---|
| 3501 | WARN_ON_ONCE(1); | 
|---|
| 3502 | err = -EINVAL; | 
|---|
| 3503 | goto out; | 
|---|
| 3504 | } | 
|---|
| 3505 | if (tcp_trim_head(sk, skb, len: tp->snd_una - TCP_SKB_CB(skb)->seq)) { | 
|---|
| 3506 | err = -ENOMEM; | 
|---|
| 3507 | goto out; | 
|---|
| 3508 | } | 
|---|
| 3509 | } | 
|---|
| 3510 |  | 
|---|
| 3511 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { | 
|---|
| 3512 | err = -EHOSTUNREACH; /* Routing failure or similar. */ | 
|---|
| 3513 | goto out; | 
|---|
| 3514 | } | 
|---|
| 3515 |  | 
|---|
| 3516 | cur_mss = tcp_current_mss(sk); | 
|---|
| 3517 | avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 
|---|
| 3518 |  | 
|---|
| 3519 | /* If receiver has shrunk his window, and skb is out of | 
|---|
| 3520 | * new window, do not retransmit it. The exception is the | 
|---|
| 3521 | * case, when window is shrunk to zero. In this case | 
|---|
| 3522 | * our retransmit of one segment serves as a zero window probe. | 
|---|
| 3523 | */ | 
|---|
| 3524 | if (avail_wnd <= 0) { | 
|---|
| 3525 | if (TCP_SKB_CB(skb)->seq != tp->snd_una) { | 
|---|
| 3526 | err = -EAGAIN; | 
|---|
| 3527 | goto out; | 
|---|
| 3528 | } | 
|---|
| 3529 | avail_wnd = cur_mss; | 
|---|
| 3530 | } | 
|---|
| 3531 |  | 
|---|
| 3532 | len = cur_mss * segs; | 
|---|
| 3533 | if (len > avail_wnd) { | 
|---|
| 3534 | len = rounddown(avail_wnd, cur_mss); | 
|---|
| 3535 | if (!len) | 
|---|
| 3536 | len = avail_wnd; | 
|---|
| 3537 | } | 
|---|
| 3538 | if (skb->len > len) { | 
|---|
| 3539 | if (tcp_fragment(sk, tcp_queue: TCP_FRAG_IN_RTX_QUEUE, skb, len, | 
|---|
| 3540 | mss_now: cur_mss, GFP_ATOMIC)) { | 
|---|
| 3541 | err = -ENOMEM;  /* We'll try again later. */ | 
|---|
| 3542 | goto out; | 
|---|
| 3543 | } | 
|---|
| 3544 | } else { | 
|---|
| 3545 | if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) { | 
|---|
| 3546 | err = -ENOMEM; | 
|---|
| 3547 | goto out; | 
|---|
| 3548 | } | 
|---|
| 3549 |  | 
|---|
| 3550 | diff = tcp_skb_pcount(skb); | 
|---|
| 3551 | tcp_set_skb_tso_segs(skb, mss_now: cur_mss); | 
|---|
| 3552 | diff -= tcp_skb_pcount(skb); | 
|---|
| 3553 | if (diff) | 
|---|
| 3554 | tcp_adjust_pcount(sk, skb, decr: diff); | 
|---|
| 3555 | avail_wnd = min_t(int, avail_wnd, cur_mss); | 
|---|
| 3556 | if (skb->len < avail_wnd) | 
|---|
| 3557 | tcp_retrans_try_collapse(sk, to: skb, space: avail_wnd); | 
|---|
| 3558 | } | 
|---|
| 3559 |  | 
|---|
| 3560 | /* RFC3168, section 6.1.1.1. ECN fallback | 
|---|
| 3561 | * As AccECN uses the same SYN flags (+ AE), this check covers both | 
|---|
| 3562 | * cases. | 
|---|
| 3563 | */ | 
|---|
| 3564 | if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) | 
|---|
| 3565 | tcp_ecn_clear_syn(sk, skb); | 
|---|
| 3566 |  | 
|---|
| 3567 | /* Update global and local TCP statistics. */ | 
|---|
| 3568 | segs = tcp_skb_pcount(skb); | 
|---|
| 3569 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); | 
|---|
| 3570 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) | 
|---|
| 3571 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); | 
|---|
| 3572 | tp->total_retrans += segs; | 
|---|
| 3573 | tp->bytes_retrans += skb->len; | 
|---|
| 3574 |  | 
|---|
| 3575 | /* make sure skb->data is aligned on arches that require it | 
|---|
| 3576 | * and check if ack-trimming & collapsing extended the headroom | 
|---|
| 3577 | * beyond what csum_start can cover. | 
|---|
| 3578 | */ | 
|---|
| 3579 | if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || | 
|---|
| 3580 | skb_headroom(skb) >= 0xFFFF)) { | 
|---|
| 3581 | struct sk_buff *nskb; | 
|---|
| 3582 |  | 
|---|
| 3583 | tcp_skb_tsorted_save(skb) { | 
|---|
| 3584 | nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); | 
|---|
| 3585 | if (nskb) { | 
|---|
| 3586 | nskb->dev = NULL; | 
|---|
| 3587 | err = tcp_transmit_skb(sk, skb: nskb, clone_it: 0, GFP_ATOMIC); | 
|---|
| 3588 | } else { | 
|---|
| 3589 | err = -ENOBUFS; | 
|---|
| 3590 | } | 
|---|
| 3591 | } tcp_skb_tsorted_restore(skb); | 
|---|
| 3592 |  | 
|---|
| 3593 | if (!err) { | 
|---|
| 3594 | tcp_update_skb_after_send(sk, skb, prior_wstamp: tp->tcp_wstamp_ns); | 
|---|
| 3595 | tcp_rate_skb_sent(sk, skb); | 
|---|
| 3596 | } | 
|---|
| 3597 | } else { | 
|---|
| 3598 | err = tcp_transmit_skb(sk, skb, clone_it: 1, GFP_ATOMIC); | 
|---|
| 3599 | } | 
|---|
| 3600 |  | 
|---|
| 3601 | if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) | 
|---|
| 3602 | tcp_call_bpf_3arg(sk, op: BPF_SOCK_OPS_RETRANS_CB, | 
|---|
| 3603 | TCP_SKB_CB(skb)->seq, arg2: segs, arg3: err); | 
|---|
| 3604 |  | 
|---|
| 3605 | if (unlikely(err) && err != -EBUSY) | 
|---|
| 3606 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); | 
|---|
| 3607 |  | 
|---|
| 3608 | /* To avoid taking spuriously low RTT samples based on a timestamp | 
|---|
| 3609 | * for a transmit that never happened, always mark EVER_RETRANS | 
|---|
| 3610 | */ | 
|---|
| 3611 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; | 
|---|
| 3612 |  | 
|---|
| 3613 | out: | 
|---|
| 3614 | trace_tcp_retransmit_skb(sk, skb, err); | 
|---|
| 3615 | return err; | 
|---|
| 3616 | } | 
|---|
| 3617 |  | 
|---|
| 3618 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | 
|---|
| 3619 | { | 
|---|
| 3620 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3621 | int err = __tcp_retransmit_skb(sk, skb, segs); | 
|---|
| 3622 |  | 
|---|
| 3623 | if (err == 0) { | 
|---|
| 3624 | #if FASTRETRANS_DEBUG > 0 | 
|---|
| 3625 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { | 
|---|
| 3626 | net_dbg_ratelimited("retrans_out leaked\n"); | 
|---|
| 3627 | } | 
|---|
| 3628 | #endif | 
|---|
| 3629 | TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; | 
|---|
| 3630 | tp->retrans_out += tcp_skb_pcount(skb); | 
|---|
| 3631 | } | 
|---|
| 3632 |  | 
|---|
| 3633 | /* Save stamp of the first (attempted) retransmit. */ | 
|---|
| 3634 | if (!tp->retrans_stamp) | 
|---|
| 3635 | tp->retrans_stamp = tcp_skb_timestamp_ts(usec_ts: tp->tcp_usec_ts, skb); | 
|---|
| 3636 |  | 
|---|
| 3637 | if (tp->undo_retrans < 0) | 
|---|
| 3638 | tp->undo_retrans = 0; | 
|---|
| 3639 | tp->undo_retrans += tcp_skb_pcount(skb); | 
|---|
| 3640 | return err; | 
|---|
| 3641 | } | 
|---|
| 3642 |  | 
|---|
| 3643 | /* This gets called after a retransmit timeout, and the initially | 
|---|
| 3644 | * retransmitted data is acknowledged.  It tries to continue | 
|---|
| 3645 | * resending the rest of the retransmit queue, until either | 
|---|
| 3646 | * we've sent it all or the congestion window limit is reached. | 
|---|
| 3647 | */ | 
|---|
| 3648 | void tcp_xmit_retransmit_queue(struct sock *sk) | 
|---|
| 3649 | { | 
|---|
| 3650 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 3651 | struct sk_buff *skb, *rtx_head, *hole = NULL; | 
|---|
| 3652 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3653 | bool rearm_timer = false; | 
|---|
| 3654 | u32 max_segs; | 
|---|
| 3655 | int mib_idx; | 
|---|
| 3656 |  | 
|---|
| 3657 | if (!tp->packets_out) | 
|---|
| 3658 | return; | 
|---|
| 3659 |  | 
|---|
| 3660 | rtx_head = tcp_rtx_queue_head(sk); | 
|---|
| 3661 | skb = tp->retransmit_skb_hint ?: rtx_head; | 
|---|
| 3662 | max_segs = tcp_tso_segs(sk, mss_now: tcp_current_mss(sk)); | 
|---|
| 3663 | skb_rbtree_walk_from(skb) { | 
|---|
| 3664 | __u8 sacked; | 
|---|
| 3665 | int segs; | 
|---|
| 3666 |  | 
|---|
| 3667 | if (tcp_pacing_check(sk)) | 
|---|
| 3668 | break; | 
|---|
| 3669 |  | 
|---|
| 3670 | /* we could do better than to assign each time */ | 
|---|
| 3671 | if (!hole) | 
|---|
| 3672 | tp->retransmit_skb_hint = skb; | 
|---|
| 3673 |  | 
|---|
| 3674 | segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); | 
|---|
| 3675 | if (segs <= 0) | 
|---|
| 3676 | break; | 
|---|
| 3677 | sacked = TCP_SKB_CB(skb)->sacked; | 
|---|
| 3678 | /* In case tcp_shift_skb_data() has aggregated large skbs, | 
|---|
| 3679 | * we need to make sure we do not send overly large TSO packets | 
|---|
| 3680 | */ | 
|---|
| 3681 | segs = min_t(int, segs, max_segs); | 
|---|
| 3682 |  | 
|---|
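|  | /* Everything marked lost has already been retransmitted; stop. */ | 
|---|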
| 3683 | if (tp->retrans_out >= tp->lost_out) { | 
|---|
| 3684 | break; | 
|---|
| 3685 | } else if (!(sacked & TCPCB_LOST)) { | 
|---|
| 3686 | if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) | 
|---|
| 3687 | hole = skb; | 
|---|
| 3688 | continue; | 
|---|
| 3689 |  | 
|---|
| 3690 | } else { | 
|---|
| 3691 | if (icsk->icsk_ca_state != TCP_CA_Loss) | 
|---|
| 3692 | mib_idx = LINUX_MIB_TCPFASTRETRANS; | 
|---|
| 3693 | else | 
|---|
| 3694 | mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; | 
|---|
| 3695 | } | 
|---|
| 3696 |  | 
|---|
| 3697 | if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) | 
|---|
| 3698 | continue; | 
|---|
| 3699 |  | 
|---|
| 3700 | if (tcp_small_queue_check(sk, skb, factor: 1)) | 
|---|
| 3701 | break; | 
|---|
| 3702 |  | 
|---|
| 3703 | if (tcp_retransmit_skb(sk, skb, segs)) | 
|---|
| 3704 | break; | 
|---|
| 3705 |  | 
|---|
| 3706 | NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); | 
|---|
| 3707 |  | 
|---|
| 3708 | if (tcp_in_cwnd_reduction(sk)) | 
|---|
| 3709 | tp->prr_out += tcp_skb_pcount(skb); | 
|---|
| 3710 |  | 
|---|
| 3711 | if (skb == rtx_head && | 
|---|
| 3712 | icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) | 
|---|
| 3713 | rearm_timer = true; | 
|---|
| 3714 |  | 
|---|
| 3715 | } | 
|---|
| 3716 | if (rearm_timer) | 
|---|
| 3717 | tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 
|---|
| 3718 | inet_csk(sk)->icsk_rto, pace_delay: true); | 
|---|
| 3719 | } | 
|---|
| 3720 |  | 
|---|
| 3721 | /* We allow FIN packets to exceed memory limits, to expedite | 
|---|
| 3722 | * connection tear down and (memory) recovery. | 
|---|
| 3723 | * Otherwise tcp_send_fin() could be tempted to either delay the FIN | 
|---|
| 3724 | * or even be forced to close the flow without any FIN. | 
|---|
| 3725 | * In general, we want to allow one skb per socket to avoid hangs | 
|---|
| 3726 | * with edge-triggered epoll(). | 
|---|
| 3727 | */ | 
|---|
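|  | /* For example (assuming 4 KB pages): with sk_forward_alloc == 0 and an | 
|---|
|  | * skb of truesize 1280, delta is 1280 and sk_mem_pages() rounds that up | 
|---|
|  | * to one page, so 4096 bytes are force-charged to the socket (and to the | 
|---|
|  | * memcg, with __GFP_NOFAIL) even if protocol memory limits are exceeded. | 
|---|
|  | */ | 
|---|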
| 3728 | void sk_forced_mem_schedule(struct sock *sk, int size) | 
|---|
| 3729 | { | 
|---|
| 3730 | int delta, amt; | 
|---|
| 3731 |  | 
|---|
| 3732 | delta = size - sk->sk_forward_alloc; | 
|---|
| 3733 | if (delta <= 0) | 
|---|
| 3734 | return; | 
|---|
| 3735 | amt = sk_mem_pages(amt: delta); | 
|---|
| 3736 | sk_forward_alloc_add(sk, val: amt << PAGE_SHIFT); | 
|---|
| 3737 | sk_memory_allocated_add(sk, val: amt); | 
|---|
| 3738 |  | 
|---|
| 3739 | if (mem_cgroup_sk_enabled(sk)) | 
|---|
| 3740 | mem_cgroup_sk_charge(sk, nr_pages: amt, gfp_mask: gfp_memcg_charge() | __GFP_NOFAIL); | 
|---|
| 3741 | } | 
|---|
| 3742 |  | 
|---|
| 3743 | /* Send a FIN. The caller locks the socket for us. | 
|---|
| 3744 | * We should try to send a FIN packet really hard, but eventually give up. | 
|---|
| 3745 | */ | 
|---|
| 3746 | void tcp_send_fin(struct sock *sk) | 
|---|
| 3747 | { | 
|---|
| 3748 | struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk); | 
|---|
| 3749 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3750 |  | 
|---|
| 3751 | /* Optimization: tack on the FIN if we have one skb in the write queue | 
|---|
| 3752 | * and this skb was not yet sent, or we are under memory pressure. | 
|---|
| 3753 | * Note: in the latter case, the FIN packet will be sent after a timeout, | 
|---|
| 3754 | * as the TCP stack thinks it has already been transmitted. | 
|---|
| 3755 | */ | 
|---|
| 3756 | tskb = tail; | 
|---|
| 3757 | if (!tskb && tcp_under_memory_pressure(sk)) | 
|---|
| 3758 | tskb = skb_rb_last(&sk->tcp_rtx_queue); | 
|---|
| 3759 |  | 
|---|
| 3760 | if (tskb) { | 
|---|
| 3761 | TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; | 
|---|
| 3762 | TCP_SKB_CB(tskb)->end_seq++; | 
|---|
| 3763 | tp->write_seq++; | 
|---|
| 3764 | if (!tail) { | 
|---|
| 3765 | /* This means tskb was already sent. | 
|---|
| 3766 | * Pretend we included the FIN on the previous transmit. | 
|---|
| 3767 | * We need to set tp->snd_nxt to the value it would have | 
|---|
| 3768 | * if the FIN had been sent, because the retransmit path | 
|---|
| 3769 | * does not change tp->snd_nxt. | 
|---|
| 3770 | */ | 
|---|
| 3771 | WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); | 
|---|
| 3772 | return; | 
|---|
| 3773 | } | 
|---|
| 3774 | } else { | 
|---|
| 3775 | skb = alloc_skb_fclone(MAX_TCP_HEADER, | 
|---|
| 3776 | priority: sk_gfp_mask(sk, GFP_ATOMIC | | 
|---|
| 3777 | __GFP_NOWARN)); | 
|---|
| 3778 | if (unlikely(!skb)) | 
|---|
| 3779 | return; | 
|---|
| 3780 |  | 
|---|
| 3781 | INIT_LIST_HEAD(list: &skb->tcp_tsorted_anchor); | 
|---|
| 3782 | skb_reserve(skb, MAX_TCP_HEADER); | 
|---|
| 3783 | sk_forced_mem_schedule(sk, size: skb->truesize); | 
|---|
| 3784 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ | 
|---|
| 3785 | tcp_init_nondata_skb(skb, sk, seq: tp->write_seq, | 
|---|
| 3786 | TCPHDR_ACK | TCPHDR_FIN); | 
|---|
| 3787 | tcp_queue_skb(sk, skb); | 
|---|
| 3788 | } | 
|---|
| 3789 | __tcp_push_pending_frames(sk, cur_mss: tcp_current_mss(sk), TCP_NAGLE_OFF); | 
|---|
| 3790 | } | 
|---|
| 3791 |  | 
|---|
| 3792 | /* We get here when a process closes a file descriptor (either due to | 
|---|
| 3793 | * an explicit close() or as a byproduct of exit()'ing) and there | 
|---|
| 3794 | * was unread data in the receive queue.  This behavior is recommended | 
|---|
| 3795 | * by RFC 2525, section 2.17.  -DaveM | 
|---|
| 3796 | */ | 
|---|
| 3797 | void tcp_send_active_reset(struct sock *sk, gfp_t priority, | 
|---|
| 3798 | enum sk_rst_reason reason) | 
|---|
| 3799 | { | 
|---|
| 3800 | struct sk_buff *skb; | 
|---|
| 3801 |  | 
|---|
| 3802 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); | 
|---|
| 3803 |  | 
|---|
| 3804 | /* NOTE: No TCP options attached and we never retransmit this. */ | 
|---|
| 3805 | skb = alloc_skb(MAX_TCP_HEADER, priority); | 
|---|
| 3806 | if (!skb) { | 
|---|
| 3807 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); | 
|---|
| 3808 | return; | 
|---|
| 3809 | } | 
|---|
| 3810 |  | 
|---|
| 3811 | /* Reserve space for headers and prepare control bits. */ | 
|---|
| 3812 | skb_reserve(skb, MAX_TCP_HEADER); | 
|---|
| 3813 | tcp_init_nondata_skb(skb, sk, seq: tcp_acceptable_seq(sk), | 
|---|
| 3814 | TCPHDR_ACK | TCPHDR_RST); | 
|---|
| 3815 | tcp_mstamp_refresh(tcp_sk(sk)); | 
|---|
| 3816 | /* Send it off. */ | 
|---|
| 3817 | if (tcp_transmit_skb(sk, skb, clone_it: 0, gfp_mask: priority)) | 
|---|
| 3818 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); | 
|---|
| 3819 |  | 
|---|
| 3820 | /* The skb argument of trace_tcp_send_reset() holds the skb that caused | 
|---|
| 3821 | * the RST; the skb here is different from the troublesome one, so pass NULL. | 
|---|
| 3822 | */ | 
|---|
| 3823 | trace_tcp_send_reset(sk, NULL, reason); | 
|---|
| 3824 | } | 
|---|
| 3825 |  | 
|---|
| 3826 | /* Send a crossed SYN-ACK during socket establishment. | 
|---|
| 3827 | * WARNING: This routine must only be called when we have already sent | 
|---|
| 3828 | * a SYN packet that crossed the incoming SYN that caused this routine | 
|---|
| 3829 | * to get called. If this assumption fails then the initial rcv_wnd | 
|---|
| 3830 | * and rcv_wscale values will not be correct. | 
|---|
| 3831 | */ | 
|---|
| 3832 | int tcp_send_synack(struct sock *sk) | 
|---|
| 3833 | { | 
|---|
| 3834 | struct sk_buff *skb; | 
|---|
| 3835 |  | 
|---|
| 3836 | skb = tcp_rtx_queue_head(sk); | 
|---|
| 3837 | if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { | 
|---|
| 3838 | pr_err("%s: wrong queue state\n", __func__); | 
|---|
| 3839 | return -EFAULT; | 
|---|
| 3840 | } | 
|---|
| 3841 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { | 
|---|
| 3842 | if (skb_cloned(skb)) { | 
|---|
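|  | /* The queued SYN may still be cloned (e.g. held by the driver), | 
|---|
|  | * so take a private copy before modifying its flags below. | 
|---|
|  | */ | 
|---|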
| 3843 | struct sk_buff *nskb; | 
|---|
| 3844 |  | 
|---|
| 3845 | tcp_skb_tsorted_save(skb) { | 
|---|
| 3846 | nskb = skb_copy(skb, GFP_ATOMIC); | 
|---|
| 3847 | } tcp_skb_tsorted_restore(skb); | 
|---|
| 3848 | if (!nskb) | 
|---|
| 3849 | return -ENOMEM; | 
|---|
| 3850 | INIT_LIST_HEAD(list: &nskb->tcp_tsorted_anchor); | 
|---|
| 3851 | tcp_highest_sack_replace(sk, old: skb, new: nskb); | 
|---|
| 3852 | tcp_rtx_queue_unlink_and_free(skb, sk); | 
|---|
| 3853 | __skb_header_release(skb: nskb); | 
|---|
| 3854 | tcp_rbtree_insert(root: &sk->tcp_rtx_queue, skb: nskb); | 
|---|
| 3855 | sk_wmem_queued_add(sk, val: nskb->truesize); | 
|---|
| 3856 | sk_mem_charge(sk, size: nskb->truesize); | 
|---|
| 3857 | skb = nskb; | 
|---|
| 3858 | } | 
|---|
| 3859 |  | 
|---|
| 3860 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; | 
|---|
| 3861 | tcp_ecn_send_synack(sk, skb); | 
|---|
| 3862 | } | 
|---|
| 3863 | return tcp_transmit_skb(sk, skb, clone_it: 1, GFP_ATOMIC); | 
|---|
| 3864 | } | 
|---|
| 3865 |  | 
|---|
| 3866 | /** | 
|---|
| 3867 | * tcp_make_synack - Allocate one skb and build a SYNACK packet. | 
|---|
| 3868 | * @sk: listener socket | 
|---|
| 3869 | * @dst: dst entry attached to the SYNACK. It is consumed and caller | 
|---|
| 3870 | *       should not use it again. | 
|---|
| 3871 | * @req: request_sock pointer | 
|---|
| 3872 | * @foc: cookie for tcp fast open | 
|---|
| 3873 | * @synack_type: Type of synack to prepare | 
|---|
| 3874 | * @syn_skb: SYN packet just received.  It could be NULL for rtx case. | 
|---|
| 3875 | */ | 
|---|
| 3876 | struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, | 
|---|
| 3877 | struct request_sock *req, | 
|---|
| 3878 | struct tcp_fastopen_cookie *foc, | 
|---|
| 3879 | enum tcp_synack_type synack_type, | 
|---|
| 3880 | struct sk_buff *syn_skb) | 
|---|
| 3881 | { | 
|---|
| 3882 | struct inet_request_sock *ireq = inet_rsk(sk: req); | 
|---|
| 3883 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 3884 | struct tcp_out_options opts; | 
|---|
| 3885 | struct tcp_key key = {}; | 
|---|
| 3886 | struct sk_buff *skb; | 
|---|
| 3887 | int tcp_header_size; | 
|---|
| 3888 | struct tcphdr *th; | 
|---|
| 3889 | int mss; | 
|---|
| 3890 | u64 now; | 
|---|
| 3891 |  | 
|---|
| 3892 | skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); | 
|---|
| 3893 | if (unlikely(!skb)) { | 
|---|
| 3894 | dst_release(dst); | 
|---|
| 3895 | return NULL; | 
|---|
| 3896 | } | 
|---|
| 3897 | /* Reserve space for headers. */ | 
|---|
| 3898 | skb_reserve(skb, MAX_TCP_HEADER); | 
|---|
| 3899 |  | 
|---|
| 3900 | switch (synack_type) { | 
|---|
| 3901 | case TCP_SYNACK_NORMAL: | 
|---|
| 3902 | skb_set_owner_edemux(skb, sk: req_to_sk(req)); | 
|---|
| 3903 | break; | 
|---|
| 3904 | case TCP_SYNACK_COOKIE: | 
|---|
| 3905 | /* Under synflood, we do not attach skb to a socket, | 
|---|
| 3906 | * to avoid false sharing. | 
|---|
| 3907 | */ | 
|---|
| 3908 | break; | 
|---|
| 3909 | case TCP_SYNACK_FASTOPEN: | 
|---|
| 3910 | /* sk is a const pointer, because we want to express that multiple | 
|---|
| 3911 | * cpus might call us concurrently. | 
|---|
| 3912 | * sk->sk_wmem_alloc is an atomic, so we can promote it to rw. | 
|---|
| 3913 | */ | 
|---|
| 3914 | skb_set_owner_w(skb, sk: (struct sock *)sk); | 
|---|
| 3915 | break; | 
|---|
| 3916 | } | 
|---|
| 3917 | skb_dst_set(skb, dst); | 
|---|
| 3918 |  | 
|---|
| 3919 | mss = tcp_mss_clamp(tp, mss: dst_metric_advmss(dst)); | 
|---|
| 3920 |  | 
|---|
| 3921 | memset(s: &opts, c: 0, n: sizeof(opts)); | 
|---|
| 3922 | now = tcp_clock_ns(); | 
|---|
| 3923 | #ifdef CONFIG_SYN_COOKIES | 
|---|
| 3924 | if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) | 
|---|
| 3925 | skb_set_delivery_time(skb, kt: cookie_init_timestamp(req, now), | 
|---|
| 3926 | tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 3927 | else | 
|---|
| 3928 | #endif | 
|---|
| 3929 | { | 
|---|
| 3930 | skb_set_delivery_time(skb, kt: now, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 3931 | if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ | 
|---|
| 3932 | tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); | 
|---|
| 3933 | } | 
|---|
| 3934 |  | 
|---|
| 3935 | #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) | 
|---|
| 3936 | rcu_read_lock(); | 
|---|
| 3937 | #endif | 
|---|
| 3938 | if (tcp_rsk_used_ao(req)) { | 
|---|
| 3939 | #ifdef CONFIG_TCP_AO | 
|---|
| 3940 | struct tcp_ao_key *ao_key = NULL; | 
|---|
| 3941 | u8 keyid = tcp_rsk(req)->ao_keyid; | 
|---|
| 3942 | u8 rnext = tcp_rsk(req)->ao_rcv_next; | 
|---|
| 3943 |  | 
|---|
| 3944 | ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), | 
|---|
| 3945 | keyid, -1); | 
|---|
| 3946 | /* If there is no matching key - avoid sending anything, | 
|---|
| 3947 | * especially unsigned segments. It could try harder and look up | 
|---|
| 3948 | * another peer-matching key, but the peer has requested | 
|---|
| 3949 | * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here. | 
|---|
| 3950 | */ | 
|---|
| 3951 | if (unlikely(!ao_key)) { | 
|---|
| 3952 | trace_tcp_ao_synack_no_key(sk, keyid, rnext); | 
|---|
| 3953 | rcu_read_unlock(); | 
|---|
| 3954 | kfree_skb(skb); | 
|---|
| 3955 | net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", | 
|---|
| 3956 | keyid); | 
|---|
| 3957 | return NULL; | 
|---|
| 3958 | } | 
|---|
| 3959 | key.ao_key = ao_key; | 
|---|
| 3960 | key.type = TCP_KEY_AO; | 
|---|
| 3961 | #endif | 
|---|
| 3962 | } else { | 
|---|
| 3963 | #ifdef CONFIG_TCP_MD5SIG | 
|---|
| 3964 | key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk, | 
|---|
| 3965 | req_to_sk(req)); | 
|---|
| 3966 | if (key.md5_key) | 
|---|
| 3967 | key.type = TCP_KEY_MD5; | 
|---|
| 3968 | #endif | 
|---|
| 3969 | } | 
|---|
| 3970 | skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), type: PKT_HASH_TYPE_L4); | 
|---|
| 3971 | /* bpf program will be interested in the tcp_flags */ | 
|---|
| 3972 | TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; | 
|---|
| 3973 | tcp_header_size = tcp_synack_options(sk, req, mss, skb, opts: &opts, | 
|---|
| 3974 | key: &key, foc, synack_type, syn_skb) | 
|---|
| 3975 | + sizeof(*th); | 
|---|
| 3976 |  | 
|---|
| 3977 | skb_push(skb, len: tcp_header_size); | 
|---|
| 3978 | skb_reset_transport_header(skb); | 
|---|
| 3979 |  | 
|---|
| 3980 | th = (struct tcphdr *)skb->data; | 
|---|
| 3981 | memset(s: th, c: 0, n: sizeof(struct tcphdr)); | 
|---|
| 3982 | th->syn = 1; | 
|---|
| 3983 | th->ack = 1; | 
|---|
| 3984 | tcp_ecn_make_synack(req, th); | 
|---|
| 3985 | th->source = htons(ireq->ir_num); | 
|---|
| 3986 | th->dest = ireq->ir_rmt_port; | 
|---|
| 3987 | skb->mark = ireq->ir_mark; | 
|---|
| 3988 | skb->ip_summed = CHECKSUM_PARTIAL; | 
|---|
| 3989 | th->seq = htonl(tcp_rsk(req)->snt_isn); | 
|---|
| 3990 | /* XXX data is queued and acked as is. No buffer/window check */ | 
|---|
| 3991 | th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); | 
|---|
| 3992 |  | 
|---|
| 3993 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ | 
|---|
| 3994 | th->window = htons(min(req->rsk_rcv_wnd, 65535U)); | 
|---|
| 3995 | tcp_options_write(th, NULL, tcprsk: tcp_rsk(req), opts: &opts, key: &key); | 
|---|
| 3996 | th->doff = (tcp_header_size >> 2); | 
|---|
| 3997 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); | 
|---|
| 3998 |  | 
|---|
| 3999 | /* Okay, we have all we need - do the md5 hash if needed */ | 
|---|
| 4000 | if (tcp_key_is_md5(key: &key)) { | 
|---|
| 4001 | #ifdef CONFIG_TCP_MD5SIG | 
|---|
| 4002 | tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, | 
|---|
| 4003 | key.md5_key, req_to_sk(req), skb); | 
|---|
| 4004 | #endif | 
|---|
| 4005 | } else if (tcp_key_is_ao(key: &key)) { | 
|---|
| 4006 | #ifdef CONFIG_TCP_AO | 
|---|
| 4007 | tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location, | 
|---|
| 4008 | key.ao_key, req, skb, | 
|---|
| 4009 | opts.hash_location - (u8 *)th, 0); | 
|---|
| 4010 | #endif | 
|---|
| 4011 | } | 
|---|
| 4012 | #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) | 
|---|
| 4013 | rcu_read_unlock(); | 
|---|
| 4014 | #endif | 
|---|
| 4015 |  | 
|---|
| 4016 | bpf_skops_write_hdr_opt(sk: (struct sock *)sk, skb, req, syn_skb, | 
|---|
| 4017 | synack_type, opts: &opts); | 
|---|
| 4018 |  | 
|---|
| 4019 | skb_set_delivery_time(skb, kt: now, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 4020 | tcp_add_tx_delay(skb, tp); | 
|---|
| 4021 |  | 
|---|
| 4022 | return skb; | 
|---|
| 4023 | } | 
|---|
| 4024 | EXPORT_IPV6_MOD(tcp_make_synack); | 
|---|
| 4025 |  | 
|---|
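|  | /* If the route carries a congestion control hint (RTAX_CC_ALGO), switch | 
|---|
|  | * this socket to that algorithm, taking a module reference and recording | 
|---|
|  | * whether the route locks the choice. | 
|---|
|  | */ | 
|---|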
| 4026 | static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) | 
|---|
| 4027 | { | 
|---|
| 4028 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 4029 | const struct tcp_congestion_ops *ca; | 
|---|
| 4030 | u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); | 
|---|
| 4031 |  | 
|---|
| 4032 | if (ca_key == TCP_CA_UNSPEC) | 
|---|
| 4033 | return; | 
|---|
| 4034 |  | 
|---|
| 4035 | rcu_read_lock(); | 
|---|
| 4036 | ca = tcp_ca_find_key(key: ca_key); | 
|---|
| 4037 | if (likely(ca && bpf_try_module_get(ca, ca->owner))) { | 
|---|
| 4038 | bpf_module_put(data: icsk->icsk_ca_ops, owner: icsk->icsk_ca_ops->owner); | 
|---|
| 4039 | icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); | 
|---|
| 4040 | icsk->icsk_ca_ops = ca; | 
|---|
| 4041 | } | 
|---|
| 4042 | rcu_read_unlock(); | 
|---|
| 4043 | } | 
|---|
| 4044 |  | 
|---|
| 4045 | /* Do all connect socket setups that can be done AF independent. */ | 
|---|
| 4046 | static void tcp_connect_init(struct sock *sk) | 
|---|
| 4047 | { | 
|---|
| 4048 | const struct dst_entry *dst = __sk_dst_get(sk); | 
|---|
| 4049 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4050 | __u8 rcv_wscale; | 
|---|
| 4051 | u16 user_mss; | 
|---|
| 4052 | u32 rcv_wnd; | 
|---|
| 4053 |  | 
|---|
| 4054 | /* We'll fix this up when we get a response from the other end. | 
|---|
| 4055 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. | 
|---|
| 4056 | */ | 
|---|
| 4057 | tp->tcp_header_len = sizeof(struct tcphdr); | 
|---|
| 4058 | if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) | 
|---|
| 4059 | tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; | 
|---|
| 4060 |  | 
|---|
| 4061 | tcp_ao_connect_init(sk); | 
|---|
| 4062 |  | 
|---|
| 4063 | /* If the user gave a TCP_MAXSEG, record it as the MSS clamp */ | 
|---|
| 4064 | user_mss = READ_ONCE(tp->rx_opt.user_mss); | 
|---|
| 4065 | if (user_mss) | 
|---|
| 4066 | tp->rx_opt.mss_clamp = user_mss; | 
|---|
| 4067 | tp->max_window = 0; | 
|---|
| 4068 | tcp_mtup_init(sk); | 
|---|
| 4069 | tcp_sync_mss(sk, pmtu: dst_mtu(dst)); | 
|---|
| 4070 |  | 
|---|
| 4071 | tcp_ca_dst_init(sk, dst); | 
|---|
| 4072 |  | 
|---|
| 4073 | if (!tp->window_clamp) | 
|---|
| 4074 | WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); | 
|---|
| 4075 | tp->advmss = tcp_mss_clamp(tp, mss: dst_metric_advmss(dst)); | 
|---|
| 4076 |  | 
|---|
| 4077 | tcp_initialize_rcv_mss(sk); | 
|---|
| 4078 |  | 
|---|
| 4079 | /* limit the window selection if the user enforces a smaller rx buffer */ | 
|---|
| 4080 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && | 
|---|
| 4081 | (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) | 
|---|
| 4082 | WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); | 
|---|
| 4083 |  | 
|---|
| 4084 | rcv_wnd = tcp_rwnd_init_bpf(sk); | 
|---|
| 4085 | if (rcv_wnd == 0) | 
|---|
| 4086 | rcv_wnd = dst_metric(dst, RTAX_INITRWND); | 
|---|
| 4087 |  | 
|---|
| 4088 | tcp_select_initial_window(sk, space: tcp_full_space(sk), | 
|---|
| 4089 | mss: tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), | 
|---|
| 4090 | rcv_wnd: &tp->rcv_wnd, | 
|---|
| 4091 | window_clamp: &tp->window_clamp, | 
|---|
| 4092 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), | 
|---|
| 4093 | rcv_wscale: &rcv_wscale, | 
|---|
| 4094 | init_rcv_wnd: rcv_wnd); | 
|---|
| 4095 |  | 
|---|
| 4096 | tp->rx_opt.rcv_wscale = rcv_wscale; | 
|---|
| 4097 | tp->rcv_ssthresh = tp->rcv_wnd; | 
|---|
| 4098 |  | 
|---|
| 4099 | WRITE_ONCE(sk->sk_err, 0); | 
|---|
| 4100 | sock_reset_flag(sk, flag: SOCK_DONE); | 
|---|
| 4101 | tp->snd_wnd = 0; | 
|---|
| 4102 | tcp_init_wl(tp, seq: 0); | 
|---|
| 4103 | tcp_write_queue_purge(sk); | 
|---|
| 4104 | tp->snd_una = tp->write_seq; | 
|---|
| 4105 | tp->snd_sml = tp->write_seq; | 
|---|
| 4106 | tp->snd_up = tp->write_seq; | 
|---|
| 4107 | WRITE_ONCE(tp->snd_nxt, tp->write_seq); | 
|---|
| 4108 |  | 
|---|
| 4109 | if (likely(!tp->repair)) | 
|---|
| 4110 | tp->rcv_nxt = 0; | 
|---|
| 4111 | else | 
|---|
| 4112 | tp->rcv_tstamp = tcp_jiffies32; | 
|---|
| 4113 | tp->rcv_wup = tp->rcv_nxt; | 
|---|
| 4114 | WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); | 
|---|
| 4115 |  | 
|---|
| 4116 | inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); | 
|---|
| 4117 | WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0); | 
|---|
| 4118 | tcp_clear_retrans(tp); | 
|---|
| 4119 | } | 
|---|
| 4120 |  | 
|---|
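|  | /* Queue the SYN (or SYN-data) skb: charge its memory to the socket, | 
|---|
|  | * advance write_seq past it and count it in packets_out. | 
|---|
|  | */ | 
|---|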
| 4121 | static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) | 
|---|
| 4122 | { | 
|---|
| 4123 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4124 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); | 
|---|
| 4125 |  | 
|---|
| 4126 | tcb->end_seq += skb->len; | 
|---|
| 4127 | __skb_header_release(skb); | 
|---|
| 4128 | sk_wmem_queued_add(sk, val: skb->truesize); | 
|---|
| 4129 | sk_mem_charge(sk, size: skb->truesize); | 
|---|
| 4130 | WRITE_ONCE(tp->write_seq, tcb->end_seq); | 
|---|
| 4131 | tp->packets_out += tcp_skb_pcount(skb); | 
|---|
| 4132 | } | 
|---|
| 4133 |  | 
|---|
| 4134 | /* Build and send a SYN with data and a (cached) Fast Open cookie. However, | 
|---|
| 4135 | * queue a data-only packet after the regular SYN, such that regular SYNs | 
|---|
| 4136 | * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges | 
|---|
| 4137 | * only the SYN sequence, the data is retransmitted in the first ACK. | 
|---|
| 4138 | * If the cookie is not cached or another error occurs, fall back to sending | 
|---|
| 4139 | * a regular SYN with the Fast Open cookie request option. | 
|---|
| 4140 | */ | 
|---|
| 4141 | static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | 
|---|
| 4142 | { | 
|---|
| 4143 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 4144 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4145 | struct tcp_fastopen_request *fo = tp->fastopen_req; | 
|---|
| 4146 | struct page_frag *pfrag = sk_page_frag(sk); | 
|---|
| 4147 | struct sk_buff *syn_data; | 
|---|
| 4148 | int space, err = 0; | 
|---|
| 4149 |  | 
|---|
| 4150 | tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */ | 
|---|
| 4151 | if (!tcp_fastopen_cookie_check(sk, mss: &tp->rx_opt.mss_clamp, cookie: &fo->cookie)) | 
|---|
| 4152 | goto fallback; | 
|---|
| 4153 |  | 
|---|
| 4154 | /* MSS for SYN-data is based on cached MSS and bounded by PMTU and | 
|---|
| 4155 | * user-MSS. Reserve maximum option space for middleboxes that add | 
|---|
| 4156 | * private TCP options. The cost is reduced data space in SYN :( | 
|---|
| 4157 | */ | 
|---|
| 4158 | tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, mss: tp->rx_opt.mss_clamp); | 
|---|
| 4159 | /* Sync mss_cache after updating the mss_clamp */ | 
|---|
| 4160 | tcp_sync_mss(sk, pmtu: icsk->icsk_pmtu_cookie); | 
|---|
| 4161 |  | 
|---|
| 4162 | space = __tcp_mtu_to_mss(sk, pmtu: icsk->icsk_pmtu_cookie) - | 
|---|
| 4163 | MAX_TCP_OPTION_SPACE; | 
|---|
| 4164 |  | 
|---|
| 4165 | space = min_t(size_t, space, fo->size); | 
|---|
| 4166 |  | 
|---|
| 4167 | if (space && | 
|---|
| 4168 | !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE), | 
|---|
| 4169 | pfrag, prio: sk->sk_allocation)) | 
|---|
| 4170 | goto fallback; | 
|---|
| 4171 | syn_data = tcp_stream_alloc_skb(sk, gfp: sk->sk_allocation, force_schedule: false); | 
|---|
| 4172 | if (!syn_data) | 
|---|
| 4173 | goto fallback; | 
|---|
| 4174 | memcpy(to: syn_data->cb, from: syn->cb, len: sizeof(syn->cb)); | 
|---|
| 4175 | if (space) { | 
|---|
| 4176 | space = min_t(size_t, space, pfrag->size - pfrag->offset); | 
|---|
| 4177 | space = tcp_wmem_schedule(sk, copy: space); | 
|---|
| 4178 | } | 
|---|
| 4179 | if (space) { | 
|---|
| 4180 | space = copy_page_from_iter(page: pfrag->page, offset: pfrag->offset, | 
|---|
| 4181 | bytes: space, i: &fo->data->msg_iter); | 
|---|
| 4182 | if (unlikely(!space)) { | 
|---|
| 4183 | tcp_skb_tsorted_anchor_cleanup(skb: syn_data); | 
|---|
| 4184 | kfree_skb(skb: syn_data); | 
|---|
| 4185 | goto fallback; | 
|---|
| 4186 | } | 
|---|
| 4187 | skb_fill_page_desc(skb: syn_data, i: 0, page: pfrag->page, | 
|---|
| 4188 | off: pfrag->offset, size: space); | 
|---|
| 4189 | page_ref_inc(page: pfrag->page); | 
|---|
| 4190 | pfrag->offset += space; | 
|---|
| 4191 | skb_len_add(skb: syn_data, delta: space); | 
|---|
| 4192 | skb_zcopy_set(skb: syn_data, uarg: fo->uarg, NULL); | 
|---|
| 4193 | } | 
|---|
| 4194 | /* No more data pending in inet_wait_for_connect() */ | 
|---|
| 4195 | if (space == fo->size) | 
|---|
| 4196 | fo->data = NULL; | 
|---|
| 4197 | fo->copied = space; | 
|---|
| 4198 |  | 
|---|
| 4199 | tcp_connect_queue_skb(sk, skb: syn_data); | 
|---|
| 4200 | if (syn_data->len) | 
|---|
| 4201 | tcp_chrono_start(sk, type: TCP_CHRONO_BUSY); | 
|---|
| 4202 |  | 
|---|
| 4203 | err = tcp_transmit_skb(sk, skb: syn_data, clone_it: 1, gfp_mask: sk->sk_allocation); | 
|---|
| 4204 |  | 
|---|
| 4205 | skb_set_delivery_time(skb: syn, kt: syn_data->skb_mstamp_ns, tstamp_type: SKB_CLOCK_MONOTONIC); | 
|---|
| 4206 |  | 
|---|
| 4207 | /* Now the full SYN+DATA was cloned and sent (or not); | 
|---|
| 4208 | * remove the SYN from the original skb (syn_data) | 
|---|
| 4209 | * that we keep queued in case of a retransmit, as we | 
|---|
| 4210 | * also have the SYN packet (with no data) in the same queue. | 
|---|
| 4211 | */ | 
|---|
| 4212 | TCP_SKB_CB(syn_data)->seq++; | 
|---|
| 4213 | TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; | 
|---|
| 4214 | if (!err) { | 
|---|
| 4215 | tp->syn_data = (fo->copied > 0); | 
|---|
| 4216 | tcp_rbtree_insert(root: &sk->tcp_rtx_queue, skb: syn_data); | 
|---|
| 4217 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); | 
|---|
| 4218 | goto done; | 
|---|
| 4219 | } | 
|---|
| 4220 |  | 
|---|
| 4221 | /* data was not sent, put it in write_queue */ | 
|---|
| 4222 | __skb_queue_tail(list: &sk->sk_write_queue, newsk: syn_data); | 
|---|
| 4223 | tp->packets_out -= tcp_skb_pcount(skb: syn_data); | 
|---|
| 4224 |  | 
|---|
| 4225 | fallback: | 
|---|
| 4226 | /* Send a regular SYN with Fast Open cookie request option */ | 
|---|
| 4227 | if (fo->cookie.len > 0) | 
|---|
| 4228 | fo->cookie.len = 0; | 
|---|
| 4229 | err = tcp_transmit_skb(sk, skb: syn, clone_it: 1, gfp_mask: sk->sk_allocation); | 
|---|
| 4230 | if (err) | 
|---|
| 4231 | tp->syn_fastopen = 0; | 
|---|
| 4232 | done: | 
|---|
| 4233 | fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */ | 
|---|
| 4234 | return err; | 
|---|
| 4235 | } | 
|---|
| 4236 |  | 
|---|
| 4237 | /* Build a SYN and send it off. */ | 
|---|
| 4238 | int tcp_connect(struct sock *sk) | 
|---|
| 4239 | { | 
|---|
| 4240 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4241 | struct sk_buff *buff; | 
|---|
| 4242 | int err; | 
|---|
| 4243 |  | 
|---|
| 4244 | tcp_call_bpf(sk, op: BPF_SOCK_OPS_TCP_CONNECT_CB, nargs: 0, NULL); | 
|---|
| 4245 |  | 
|---|
| 4246 | #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO) | 
|---|
| 4247 | /* Has to be checked late, after setting daddr/saddr/ops. | 
|---|
| 4248 | * Return an error if the peer has both an md5 and a tcp-ao key | 
|---|
| 4249 | * configured, as this is ambiguous. | 
|---|
| 4250 | */ | 
|---|
| 4251 | if (unlikely(rcu_dereference_protected(tp->md5sig_info, | 
|---|
| 4252 | lockdep_sock_is_held(sk)))) { | 
|---|
| 4253 | bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); | 
|---|
| 4254 | bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); | 
|---|
| 4255 | struct tcp_ao_info *ao_info; | 
|---|
| 4256 |  | 
|---|
| 4257 | ao_info = rcu_dereference_check(tp->ao_info, | 
|---|
| 4258 | lockdep_sock_is_held(sk)); | 
|---|
| 4259 | if (ao_info) { | 
|---|
| 4260 | /* This is an extra check: tcp_ao_required() in | 
|---|
| 4261 | * tcp_v{4,6}_parse_md5_keys() should prevent adding | 
|---|
| 4262 | * md5 keys on ao_required socket. | 
|---|
| 4263 | */ | 
|---|
| 4264 | needs_ao |= ao_info->ao_required; | 
|---|
| 4265 | WARN_ON_ONCE(ao_info->ao_required && needs_md5); | 
|---|
| 4266 | } | 
|---|
| 4267 | if (needs_md5 && needs_ao) | 
|---|
| 4268 | return -EKEYREJECTED; | 
|---|
| 4269 |  | 
|---|
| 4270 | /* If we have a matching md5 key and no matching tcp-ao key | 
|---|
| 4271 | * then free up ao_info if allocated. | 
|---|
| 4272 | */ | 
|---|
| 4273 | if (needs_md5) { | 
|---|
| 4274 | tcp_ao_destroy_sock(sk, false); | 
|---|
| 4275 | } else if (needs_ao) { | 
|---|
| 4276 | tcp_clear_md5_list(sk); | 
|---|
| 4277 | kfree(rcu_replace_pointer(tp->md5sig_info, NULL, | 
|---|
| 4278 | lockdep_sock_is_held(sk))); | 
|---|
| 4279 | } | 
|---|
| 4280 | } | 
|---|
| 4281 | #endif | 
|---|
| 4282 | #ifdef CONFIG_TCP_AO | 
|---|
| 4283 | if (unlikely(rcu_dereference_protected(tp->ao_info, | 
|---|
| 4284 | lockdep_sock_is_held(sk)))) { | 
|---|
| 4285 | /* Don't allow connecting if ao is configured but no | 
|---|
| 4286 | * matching key is found. | 
|---|
| 4287 | */ | 
|---|
| 4288 | if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) | 
|---|
| 4289 | return -EKEYREJECTED; | 
|---|
| 4290 | } | 
|---|
| 4291 | #endif | 
|---|
| 4292 |  | 
|---|
| 4293 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | 
|---|
| 4294 | return -EHOSTUNREACH; /* Routing failure or similar. */ | 
|---|
| 4295 |  | 
|---|
| 4296 | tcp_connect_init(sk); | 
|---|
| 4297 |  | 
|---|
| 4298 | if (unlikely(tp->repair)) { | 
|---|
| 4299 | tcp_finish_connect(sk, NULL); | 
|---|
| 4300 | return 0; | 
|---|
| 4301 | } | 
|---|
| 4302 |  | 
|---|
| 4303 | buff = tcp_stream_alloc_skb(sk, gfp: sk->sk_allocation, force_schedule: true); | 
|---|
| 4304 | if (unlikely(!buff)) | 
|---|
| 4305 | return -ENOBUFS; | 
|---|
| 4306 |  | 
|---|
| 4307 | /* SYN eats a sequence byte, write_seq updated by | 
|---|
| 4308 | * tcp_connect_queue_skb(). | 
|---|
| 4309 | */ | 
|---|
| 4310 | tcp_init_nondata_skb(skb: buff, sk, seq: tp->write_seq, TCPHDR_SYN); | 
|---|
| 4311 | tcp_mstamp_refresh(tp); | 
|---|
| 4312 | tp->retrans_stamp = tcp_time_stamp_ts(tp); | 
|---|
| 4313 | tcp_connect_queue_skb(sk, skb: buff); | 
|---|
| 4314 | tcp_ecn_send_syn(sk, skb: buff); | 
|---|
| 4315 | tcp_rbtree_insert(root: &sk->tcp_rtx_queue, skb: buff); | 
|---|
| 4316 |  | 
|---|
| 4317 | /* Send off SYN; include data in Fast Open. */ | 
|---|
| 4318 | err = tp->fastopen_req ? tcp_send_syn_data(sk, syn: buff) : | 
|---|
| 4319 | tcp_transmit_skb(sk, skb: buff, clone_it: 1, gfp_mask: sk->sk_allocation); | 
|---|
| 4320 | if (err == -ECONNREFUSED) | 
|---|
| 4321 | return err; | 
|---|
| 4322 |  | 
|---|
| 4323 | /* We change tp->snd_nxt after the tcp_transmit_skb() call | 
|---|
| 4324 | * in order to make this packet get counted in tcpOutSegs. | 
|---|
| 4325 | */ | 
|---|
| 4326 | WRITE_ONCE(tp->snd_nxt, tp->write_seq); | 
|---|
| 4327 | tp->pushed_seq = tp->write_seq; | 
|---|
| 4328 | buff = tcp_send_head(sk); | 
|---|
| 4329 | if (unlikely(buff)) { | 
|---|
| 4330 | WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); | 
|---|
| 4331 | tp->pushed_seq	= TCP_SKB_CB(buff)->seq; | 
|---|
| 4332 | } | 
|---|
| 4333 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); | 
|---|
| 4334 |  | 
|---|
| 4335 | /* Timer for repeating the SYN until an answer. */ | 
|---|
| 4336 | tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 
|---|
| 4337 | inet_csk(sk)->icsk_rto, pace_delay: false); | 
|---|
| 4338 | return 0; | 
|---|
| 4339 | } | 
|---|
| 4340 | EXPORT_SYMBOL(tcp_connect); | 
|---|
| 4341 |  | 
|---|
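|  | /* Upper bound for delayed ACKs: never exceed icsk_delack_max, and keep | 
|---|
|  | * the value just below the minimum RTO (hence the "- 1"), presumably so | 
|---|
|  | * the ACK leaves before a peer using a comparable minimum RTO retransmits. | 
|---|
|  | */ | 
|---|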
| 4342 | u32 tcp_delack_max(const struct sock *sk) | 
|---|
| 4343 | { | 
|---|
| 4344 | u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; | 
|---|
| 4345 |  | 
|---|
| 4346 | return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min); | 
|---|
| 4347 | } | 
|---|
| 4348 |  | 
|---|
| 4349 | /* Send out a delayed ack, the caller does the policy checking | 
|---|
| 4350 | * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check() | 
|---|
| 4351 | * for details. | 
|---|
| 4352 | */ | 
|---|
| 4353 | void tcp_send_delayed_ack(struct sock *sk) | 
|---|
| 4354 | { | 
|---|
| 4355 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 4356 | int ato = icsk->icsk_ack.ato; | 
|---|
| 4357 | unsigned long timeout; | 
|---|
| 4358 |  | 
|---|
| 4359 | if (ato > TCP_DELACK_MIN) { | 
|---|
| 4360 | const struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4361 | int max_ato = HZ / 2; | 
|---|
| 4362 |  | 
|---|
| 4363 | if (inet_csk_in_pingpong_mode(sk) || | 
|---|
| 4364 | (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) | 
|---|
| 4365 | max_ato = TCP_DELACK_MAX; | 
|---|
| 4366 |  | 
|---|
| 4367 | /* Slow path, intersegment interval is "high". */ | 
|---|
| 4368 |  | 
|---|
| 4369 | /* If some rtt estimate is known, use it to bound delayed ack. | 
|---|
| 4370 | * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements | 
|---|
| 4371 | * directly. | 
|---|
| 4372 | */ | 
|---|
| 4373 | if (tp->srtt_us) { | 
|---|
| 4374 | int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), | 
|---|
| 4375 | TCP_DELACK_MIN); | 
|---|
| 4376 |  | 
|---|
| 4377 | if (rtt < max_ato) | 
|---|
| 4378 | max_ato = rtt; | 
|---|
| 4379 | } | 
|---|
| 4380 |  | 
|---|
| 4381 | ato = min(ato, max_ato); | 
|---|
| 4382 | } | 
|---|
| 4383 |  | 
|---|
| 4384 | ato = min_t(u32, ato, tcp_delack_max(sk)); | 
|---|
| 4385 |  | 
|---|
| 4386 | /* Stay within the limit we were given */ | 
|---|
| 4387 | timeout = jiffies + ato; | 
|---|
| 4388 |  | 
|---|
| 4389 | /* Use the new timeout only if there wasn't an older one already pending. */ | 
|---|
| 4390 | if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { | 
|---|
| 4391 | /* If delack timer is about to expire, send ACK now. */ | 
|---|
| 4392 | if (time_before_eq(icsk_delack_timeout(icsk), jiffies + (ato >> 2))) { | 
|---|
| 4393 | tcp_send_ack(sk); | 
|---|
| 4394 | return; | 
|---|
| 4395 | } | 
|---|
| 4396 |  | 
|---|
| 4397 | if (!time_before(timeout, icsk_delack_timeout(icsk))) | 
|---|
| 4398 | timeout = icsk_delack_timeout(icsk); | 
|---|
| 4399 | } | 
|---|
| 4400 | smp_store_release(&icsk->icsk_ack.pending, | 
|---|
| 4401 | icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); | 
|---|
| 4402 | sk_reset_timer(sk, timer: &icsk->icsk_delack_timer, expires: timeout); | 
|---|
| 4403 | } | 
|---|
| 4404 |  | 
|---|
| 4405 | /* This routine sends an ack and also updates the window. */ | 
|---|
| 4406 | void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags) | 
|---|
| 4407 | { | 
|---|
| 4408 | struct sk_buff *buff; | 
|---|
| 4409 |  | 
|---|
| 4410 | /* If we have been reset, we may not send again. */ | 
|---|
| 4411 | if (sk->sk_state == TCP_CLOSE) | 
|---|
| 4412 | return; | 
|---|
| 4413 |  | 
|---|
| 4414 | /* We are not putting this on the write queue, so | 
|---|
| 4415 | * tcp_transmit_skb() will set the ownership to this | 
|---|
| 4416 | * sock. | 
|---|
| 4417 | */ | 
|---|
| 4418 | buff = alloc_skb(MAX_TCP_HEADER, | 
|---|
| 4419 | priority: sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); | 
|---|
| 4420 | if (unlikely(!buff)) { | 
|---|
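|  | /* Allocation failed: schedule a delayed ACK retry instead, | 
|---|
|  | * backing the retry delay off up to the RTO maximum. | 
|---|
|  | */ | 
|---|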
| 4421 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 4422 | unsigned long delay; | 
|---|
| 4423 |  | 
|---|
| 4424 | delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; | 
|---|
| 4425 | if (delay < tcp_rto_max(sk)) | 
|---|
| 4426 | icsk->icsk_ack.retry++; | 
|---|
| 4427 | inet_csk_schedule_ack(sk); | 
|---|
| 4428 | icsk->icsk_ack.ato = TCP_ATO_MIN; | 
|---|
| 4429 | tcp_reset_xmit_timer(sk, ICSK_TIME_DACK, when: delay, pace_delay: false); | 
|---|
| 4430 | return; | 
|---|
| 4431 | } | 
|---|
| 4432 |  | 
|---|
| 4433 | /* Reserve space for headers and prepare control bits. */ | 
|---|
| 4434 | skb_reserve(skb: buff, MAX_TCP_HEADER); | 
|---|
| 4435 | tcp_init_nondata_skb(skb: buff, sk, | 
|---|
| 4436 | seq: tcp_acceptable_seq(sk), TCPHDR_ACK | flags); | 
|---|
| 4437 |  | 
|---|
| 4438 | /* We do not want pure acks influencing TCP Small Queues or fq/pacing | 
|---|
| 4439 | * too much. | 
|---|
| 4440 | * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 | 
|---|
| 4441 | */ | 
|---|
| 4442 | skb_set_tcp_pure_ack(skb: buff); | 
|---|
| 4443 |  | 
|---|
| 4444 | /* Send it off, this clears delayed acks for us. */ | 
|---|
| 4445 | __tcp_transmit_skb(sk, skb: buff, clone_it: 0, gfp_mask: (__force gfp_t)0, rcv_nxt); | 
|---|
| 4446 | } | 
|---|
| 4447 | EXPORT_SYMBOL_GPL(__tcp_send_ack); | 
|---|
| 4448 |  | 
|---|
| 4449 | void tcp_send_ack(struct sock *sk) | 
|---|
| 4450 | { | 
|---|
| 4451 | __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt, 0); | 
|---|
| 4452 | } | 
|---|
| 4453 |  | 
|---|
| 4454 | /* This routine sends a packet with an out of date sequence | 
|---|
| 4455 | * number. It assumes the other end will try to ack it. | 
|---|
| 4456 | * | 
|---|
| 4457 | * Question: what should we do in urgent mode? | 
|---|
| 4458 | * 4.4BSD forces sending a single byte of data. We cannot send | 
|---|
| 4459 | * out-of-window data, because we have SND.NXT==SND.MAX... | 
|---|
| 4460 | * | 
|---|
| 4461 | * Current solution: send TWO zero-length segments in urgent mode: | 
|---|
| 4462 | * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another | 
|---|
| 4463 | * out-of-date with SND.UNA-1 to probe the window. | 
|---|
| 4464 | */ | 
|---|
| 4465 | static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) | 
|---|
| 4466 | { | 
|---|
| 4467 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4468 | struct sk_buff *skb; | 
|---|
| 4469 |  | 
|---|
| 4470 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ | 
|---|
| 4471 | skb = alloc_skb(MAX_TCP_HEADER, | 
|---|
| 4472 | priority: sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); | 
|---|
| 4473 | if (!skb) | 
|---|
| 4474 | return -1; | 
|---|
| 4475 |  | 
|---|
| 4476 | /* Reserve space for headers and set control bits. */ | 
|---|
| 4477 | skb_reserve(skb, MAX_TCP_HEADER); | 
|---|
| 4478 | /* Use a previous sequence.  This should cause the other | 
|---|
| 4479 | * end to send an ack.  Don't queue or clone SKB, just | 
|---|
| 4480 | * send it. | 
|---|
| 4481 | */ | 
|---|
| 4482 | tcp_init_nondata_skb(skb, sk, seq: tp->snd_una - !urgent, TCPHDR_ACK); | 
|---|
| 4483 | NET_INC_STATS(sock_net(sk), mib); | 
|---|
| 4484 | return tcp_transmit_skb(sk, skb, clone_it: 0, gfp_mask: (__force gfp_t)0); | 
|---|
| 4485 | } | 
|---|
| 4486 |  | 
|---|
| 4487 | /* Called from setsockopt( ... TCP_REPAIR ) */ | 
|---|
| 4488 | void tcp_send_window_probe(struct sock *sk) | 
|---|
| 4489 | { | 
|---|
| 4490 | if (sk->sk_state == TCP_ESTABLISHED) { | 
|---|
| 4491 | tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; | 
|---|
| 4492 | tcp_mstamp_refresh(tcp_sk(sk)); | 
|---|
| 4493 | tcp_xmit_probe_skb(sk, urgent: 0, mib: LINUX_MIB_TCPWINPROBE); | 
|---|
| 4494 | } | 
|---|
| 4495 | } | 
|---|
| 4496 |  | 
|---|
| 4497 | /* Initiate keepalive or window probe from timer. */ | 
|---|
| 4498 | int tcp_write_wakeup(struct sock *sk, int mib) | 
|---|
| 4499 | { | 
|---|
| 4500 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4501 | struct sk_buff *skb; | 
|---|
| 4502 |  | 
|---|
| 4503 | if (sk->sk_state == TCP_CLOSE) | 
|---|
| 4504 | return -1; | 
|---|
| 4505 |  | 
|---|
| 4506 | skb = tcp_send_head(sk); | 
|---|
| 4507 | if (skb && before(TCP_SKB_CB(skb)->seq, seq2: tcp_wnd_end(tp))) { | 
|---|
| 4508 | int err; | 
|---|
| 4509 | unsigned int mss = tcp_current_mss(sk); | 
|---|
| 4510 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 
|---|
| 4511 |  | 
|---|
| 4512 | if (before(seq1: tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) | 
|---|
| 4513 | tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; | 
|---|
| 4514 |  | 
|---|
| 4515 | /* We are probing the opening of a window | 
|---|
| 4516 | * but the window size is != 0; | 
|---|
| 4517 | * this must have been a result of sender-side SWS avoidance. | 
|---|
| 4518 | */ | 
|---|
| 4519 | if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || | 
|---|
| 4520 | skb->len > mss) { | 
|---|
| 4521 | seg_size = min(seg_size, mss); | 
|---|
| 4522 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; | 
|---|
| 4523 | if (tcp_fragment(sk, tcp_queue: TCP_FRAG_IN_WRITE_QUEUE, | 
|---|
| 4524 | skb, len: seg_size, mss_now: mss, GFP_ATOMIC)) | 
|---|
| 4525 | return -1; | 
|---|
| 4526 | } else if (!tcp_skb_pcount(skb)) | 
|---|
| 4527 | tcp_set_skb_tso_segs(skb, mss_now: mss); | 
|---|
| 4528 |  | 
|---|
| 4529 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; | 
|---|
| 4530 | err = tcp_transmit_skb(sk, skb, clone_it: 1, GFP_ATOMIC); | 
|---|
| 4531 | if (!err) | 
|---|
| 4532 | tcp_event_new_data_sent(sk, skb); | 
|---|
| 4533 | return err; | 
|---|
| 4534 | } else { | 
|---|
| 4535 | if (between(seq1: tp->snd_up, seq2: tp->snd_una + 1, seq3: tp->snd_una + 0xFFFF)) | 
|---|
| 4536 | tcp_xmit_probe_skb(sk, urgent: 1, mib); | 
|---|
| 4537 | return tcp_xmit_probe_skb(sk, urgent: 0, mib); | 
|---|
| 4538 | } | 
|---|
| 4539 | } | 
|---|
| 4540 |  | 
|---|
| 4541 | /* A window probe timeout has occurred.  If window is not closed send | 
|---|
| 4542 | * a partial packet else a zero probe. | 
|---|
| 4543 | */ | 
|---|
| 4544 | void tcp_send_probe0(struct sock *sk) | 
|---|
| 4545 | { | 
|---|
| 4546 | struct inet_connection_sock *icsk = inet_csk(sk); | 
|---|
| 4547 | struct tcp_sock *tp = tcp_sk(sk); | 
|---|
| 4548 | struct net *net = sock_net(sk); | 
|---|
| 4549 | unsigned long timeout; | 
|---|
| 4550 | int err; | 
|---|
| 4551 |  | 
|---|
| 4552 | err = tcp_write_wakeup(sk, mib: LINUX_MIB_TCPWINPROBE); | 
|---|
| 4553 |  | 
|---|
| 4554 | if (tp->packets_out || tcp_write_queue_empty(sk)) { | 
|---|
| 4555 | /* Cancel probe timer, if it is not required. */ | 
|---|
| 4556 | WRITE_ONCE(icsk->icsk_probes_out, 0); | 
|---|
| 4557 | icsk->icsk_backoff = 0; | 
|---|
| 4558 | icsk->icsk_probes_tstamp = 0; | 
|---|
| 4559 | return; | 
|---|
| 4560 | } | 
|---|
| 4561 |  | 
|---|
| 4562 | WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1); | 
|---|
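|  | /* err <= 0: the probe was transmitted (or failed outright), so back the | 
|---|
|  | * probe0 timer off exponentially; a positive err means a local | 
|---|
|  | * (congestion) drop and uses the short fixed interval below. | 
|---|
|  | */ | 
|---|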
| 4563 | if (err <= 0) { | 
|---|
| 4564 | if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) | 
|---|
| 4565 | icsk->icsk_backoff++; | 
|---|
| 4566 | timeout = tcp_probe0_when(sk, max_when: tcp_rto_max(sk)); | 
|---|
| 4567 | } else { | 
|---|
| 4568 | /* If the packet was not sent due to local congestion, | 
|---|
| 4569 | * let senders fight for local resources conservatively. | 
|---|
| 4570 | */ | 
|---|
| 4571 | timeout = TCP_RESOURCE_PROBE_INTERVAL; | 
|---|
| 4572 | } | 
|---|
| 4573 |  | 
|---|
| 4574 | timeout = tcp_clamp_probe0_to_user_timeout(sk, when: timeout); | 
|---|
| 4575 | tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when: timeout, pace_delay: true); | 
|---|
| 4576 | } | 
|---|
| 4577 |  | 
|---|
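|  | /* Retransmit the SYNACK for a pending connection request (request_sock). | 
|---|
|  | * On success, update the retransmit counters and, for passive Fast Open | 
|---|
|  | * sockets, total_retrans as well. | 
|---|
|  | */ | 
|---|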
| 4578 | int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) | 
|---|
| 4579 | { | 
|---|
| 4580 | const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; | 
|---|
| 4581 | struct flowi fl; | 
|---|
| 4582 | int res; | 
|---|
| 4583 |  | 
|---|
| 4584 | /* Paired with WRITE_ONCE() in sock_setsockopt() */ | 
|---|
| 4585 | if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) | 
|---|
| 4586 | WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); | 
|---|
| 4587 | res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, | 
|---|
| 4588 | NULL); | 
|---|
| 4589 | if (!res) { | 
|---|
| 4590 | TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); | 
|---|
| 4591 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); | 
|---|
| 4592 | if (unlikely(tcp_passive_fastopen(sk))) { | 
|---|
| 4593 | /* sk has const attribute because listeners are lockless. | 
|---|
| 4594 | * However in this case, we are dealing with a passive fastopen | 
|---|
| 4595 | * socket thus we can change total_retrans value. | 
|---|
| 4596 | */ | 
|---|
| 4597 | tcp_sk_rw(sk)->total_retrans++; | 
|---|
| 4598 | } | 
|---|
| 4599 | trace_tcp_retransmit_synack(sk, req); | 
|---|
| 4600 | WRITE_ONCE(req->num_retrans, req->num_retrans + 1); | 
|---|
| 4601 | } | 
|---|
| 4602 | return res; | 
|---|
| 4603 | } | 
|---|
| 4604 | EXPORT_IPV6_MOD(tcp_rtx_synack); | 
|---|
| 4605 |  | 
|---|