/*
 * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _IP_VHL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <kern/zalloc.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
//#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cache.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */

#include <libkern/OSAtomic.h>

SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_LOCKED,
	int, tcp_do_sack, 1, "Enable/Disable TCP SACK support");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_maxholes, CTLFLAG_RW | CTLFLAG_LOCKED,
	static int, tcp_sack_maxholes, 128,
	"Maximum number of TCP SACK holes allowed per connection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_globalmaxholes,
	CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536,
	"Global maximum number of TCP SACK holes");

static SInt32 tcp_sack_globalholes = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_globalholes, CTLFLAG_RD | CTLFLAG_LOCKED,
	&tcp_sack_globalholes, 0,
	"Global number of TCP SACK holes currently allocated");

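/*
 * tcp_detect_reordering enables marking a connection as reordered
 * (TF_PKTS_REORDERED) when SACK information indicates reordering;
 * tcp_dsack_ignore_hw_duplicates keeps DSACK processing enabled even when
 * a duplicate notification was not caused by our own retransmission
 * (e.g. duplication by the network or hardware).  Both are tunable only
 * on DEVELOPMENT/DEBUG kernels via the sysctls below.
 */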
static int tcp_detect_reordering = 1;
static int tcp_dsack_ignore_hw_duplicates = 0;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, detect_reordering,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_detect_reordering, 0, "");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_hw_duplicates,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_dsack_ignore_hw_duplicates, 0, "");
#endif /* (DEVELOPMENT || DEBUG) */

extern struct zone *sack_hole_zone;

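/*
 * A SACK block (_sb_) is valid only if it is a non-empty range lying
 * strictly above both SND.UNA and the segment's cumulative ACK, and at or
 * below SND.MAX.  For example, with snd_una = ack = 1000 and
 * snd_max = 5000, the block [2000, 3000) is accepted, while [900, 1500)
 * is rejected because its start does not lie beyond snd_una.
 */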
#define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \
	(SEQ_GT((_sb_)->end, (_sb_)->start) && \
	SEQ_GT((_sb_)->start, (_tp_)->snd_una) && \
	SEQ_GT((_sb_)->start, (_ack_)) && \
	SEQ_LT((_sb_)->start, (_tp_)->snd_max) && \
	SEQ_GT((_sb_)->end, (_tp_)->snd_una) && \
	SEQ_LEQ((_sb_)->end, (_tp_)->snd_max))

/*
 * This function is called upon receipt of new valid data (while not in header
 * prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one.  Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver.  These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	/* SACK block for the received segment. */
	head_blk.start = rcv_start;
	head_blk.end = rcv_end;

	/*
	 * Merge updated SACK blocks into head_blk, and
	 * save unchanged SACK blocks into saved_blks[].
	 * num_saved will have the number of the saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
		    SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk.
			 * This SACK block itself will be discarded.
			 */
			if (SEQ_GT(head_blk.start, start))
				head_blk.start = start;
			if (SEQ_LT(head_blk.end, end))
				head_blk.end = end;
		} else {
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}

	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
		/*
		 * The received data segment is an out-of-order segment.
		 * Put head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS)
			num_saved--;
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head],
		    sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;

	/*
	 * If we are requesting SACK recovery, reset the stretch-ack state
	 * so that the connection will generate more acks after recovery and
	 * the sender's cwnd will open.
	 */
	if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0)
		tcp_reset_stretch_ack(tp);

#if TRAFFIC_MGT
	if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0)
		reset_acc_iaj(tp);
#endif /* TRAFFIC_MGT */
}

/*
 * Delete all receiver-side SACK information.
 */
void
tcp_clean_sackreport(struct tcpcb *tp)
{
	tp->rcv_numsacks = 0;
	bzero(&tp->sackblks[0], sizeof (struct sackblk) * MAX_SACK_BLKS);
}

/*
 * Allocate struct sackhole.
 */
static struct sackhole *
tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct sackhole *hole;

	if (tp->snd_numholes >= tcp_sack_maxholes ||
	    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
		tcpstat.tcps_sack_sboverflow++;
		return NULL;
	}

	hole = (struct sackhole *)zalloc(sack_hole_zone);
	if (hole == NULL)
		return NULL;

	hole->start = start;
	hole->end = end;
	hole->rxmit = start;

	tp->snd_numholes++;
	OSIncrementAtomic(&tcp_sack_globalholes);

	return hole;
}

/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{
	zfree(sack_hole_zone, hole);

	tp->snd_numholes--;
	OSDecrementAtomic(&tcp_sack_globalholes);
}

/*
 * Insert new SACK hole into scoreboard.
 */
static struct sackhole *
tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
    struct sackhole *after)
{
	struct sackhole *hole;

	/* Allocate a new SACK hole. */
	hole = tcp_sackhole_alloc(tp, start, end);
	if (hole == NULL)
		return NULL;
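	/*
	 * Stamp the hole with its creation time; tcp_sack_detect_reordering()
	 * uses this to measure how late the SACKed data arrived and to size
	 * the reorder window.
	 */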
	hole->rxmit_start = tcp_now;
	/* Insert the new SACK hole into scoreboard */
	if (after != NULL)
		TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
	else
		TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);

	/* Update SACK hint. */
	if (tp->sackhint.nexthole == NULL)
		tp->sackhint.nexthole = hole;

	return (hole);
}

/*
 * Remove SACK hole from scoreboard.
 */
static void
tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
{
	/* Update SACK hint. */
	if (tp->sackhint.nexthole == hole)
		tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);

	/* Remove this SACK hole. */
	TAILQ_REMOVE(&tp->snd_holes, hole, scblink);

	/* Free this SACK hole. */
	tcp_sackhole_free(tp, hole);
}

/*
 * When a new ack with SACK is received, check if it indicates packet
 * reordering.  If there is packet reordering, the socket is marked and
 * the time offset by which the packet was reordered with respect to
 * its closest neighboring packets is computed.
 */
static void
tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s,
    tcp_seq sacked_seq, tcp_seq snd_fack)
{
	int32_t rext = 0, reordered = 0;

	/*
	 * If the SACK hole is past snd_fack, this is from new SACK
	 * information, so we can ignore it.
	 */
	if (SEQ_GT(s->end, snd_fack))
		return;
	/*
	 * If there has been a retransmit timeout, then the timestamp on
	 * the SACK segment will be newer.  This might lead to a
	 * false-positive.  Avoid re-ordering detection in this case.
	 */
	if (tp->t_rxtshift > 0)
		return;

	/*
	 * Detect reordering from SACK information by checking
	 * if recently sacked data was never retransmitted from this hole.
	 */
	if (SEQ_LT(s->rxmit, sacked_seq)) {
		reordered = 1;
		tcpstat.tcps_avoid_rxmt++;
	}

	if (reordered) {
		if (tcp_detect_reordering == 1 &&
		    !(tp->t_flagsext & TF_PKTS_REORDERED)) {
			tp->t_flagsext |= TF_PKTS_REORDERED;
			tcpstat.tcps_detect_reordering++;
		}

		tcpstat.tcps_reordered_pkts++;
		tp->t_reordered_pkts++;

		/*
		 * If reordering is seen on a connection with ECN enabled,
		 * increment the heuristic
		 */
		if (TCP_ECN_ENABLED(tp)) {
			INP_INC_IFNET_STAT(tp->t_inpcb, ecn_fallback_reorder);
			tcpstat.tcps_ecn_fallback_reorder++;
			tcp_heuristic_ecn_aggressive(tp);
		}

		VERIFY(SEQ_GEQ(snd_fack, s->rxmit));

		if (s->rxmit_start > 0) {
			rext = timer_diff(tcp_now, 0, s->rxmit_start, 0);
			if (rext < 0)
				return;

			/*
			 * We take the maximum reorder window to schedule
			 * the DELAYFR timer as that will take care of jitter
			 * on the network path.
			 *
			 * Computing average and standard deviation seems
			 * to cause unnecessary retransmissions when there
			 * is high jitter.
			 *
			 * We set a maximum of SRTT/2 and a minimum of
			 * 10 ms on the reorder window.
			 */
			tp->t_reorderwin = max(tp->t_reorderwin, rext);
			tp->t_reorderwin = min(tp->t_reorderwin,
			    (tp->t_srtt >> (TCP_RTT_SHIFT - 1)));
			tp->t_reorderwin = max(tp->t_reorderwin, 10);
		}
	}
}

/*
 * Process cumulative ACK and the TCP SACK option to update the scoreboard.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 */
void
tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
    u_int32_t *newbytes_acked)
{
	struct sackhole *cur, *temp;
	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
	int i, j, num_sack_blks;
	tcp_seq old_snd_fack = 0, th_ack = th->th_ack;

	num_sack_blks = 0;
	/*
	 * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
	 * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
	 */
	if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
		sack_blocks[num_sack_blks].start = tp->snd_una;
		sack_blocks[num_sack_blks++].end = th_ack;
	}
	/*
	 * Append received valid SACK blocks to sack_blocks[].
	 * Check that the SACK block range is valid.
	 */
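	/*
	 * Note: the SACK option bytes in the mbuf may not be suitably
	 * aligned for direct access, so each block is copied out with
	 * bcopy() before being byte-swapped.
	 */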
	for (i = 0; i < to->to_nsacks; i++) {
		bcopy((to->to_sacks + i * TCPOLEN_SACK),
		    &sack, sizeof(sack));
		sack.start = ntohl(sack.start);
		sack.end = ntohl(sack.end);
		if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack))
			sack_blocks[num_sack_blks++] = sack;
	}

	/*
	 * Return if SND.UNA is not advanced and no valid SACK block
	 * is received.
	 */
	if (num_sack_blks == 0)
		return;

	VERIFY(num_sack_blks <= (TCP_MAX_SACK + 1));
	/*
	 * Sort the SACK blocks so we can update the scoreboard
	 * with just one pass.  The overhead of sorting up to 4+1 elements
	 * is less than making up to 4+1 passes over the scoreboard.
	 */
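	/* Simple in-place sort, ascending on each block's end sequence. */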
	for (i = 0; i < num_sack_blks; i++) {
		for (j = i + 1; j < num_sack_blks; j++) {
			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
				sack = sack_blocks[i];
				sack_blocks[i] = sack_blocks[j];
				sack_blocks[j] = sack;
			}
		}
	}
	if (TAILQ_EMPTY(&tp->snd_holes)) {
		/*
		 * Empty scoreboard.  Need to initialize snd_fack (it may be
		 * uninitialized or have a bogus value).  Scoreboard holes
		 * (from the sack blocks received) are created later below (in
		 * the logic that adds holes to the tail of the scoreboard).
		 */
		tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
		*newbytes_acked += (tp->snd_fack - tp->snd_una);
	}

	old_snd_fack = tp->snd_fack;
	/*
	 * In the while-loop below, incoming SACK blocks (sack_blocks[])
	 * and SACK holes (snd_holes) are traversed from their tails with
	 * just one pass in order to reduce the number of compares especially
	 * when the bandwidth-delay product is large.
	 * Note: Typically, in the first RTT of SACK recovery, the highest
	 * three or four SACK blocks with the same ack number are received.
	 * In the second RTT, if retransmitted data segments are not lost,
	 * the highest three or four SACK blocks with ack number advancing
	 * are received.
	 */
	sblkp = &sack_blocks[num_sack_blks - 1];	/* Last SACK block */
	if (SEQ_LT(tp->snd_fack, sblkp->start)) {
		/*
		 * The highest SACK block is beyond fack.
		 * Append new SACK hole at the tail.
		 * If the second or later highest SACK blocks are also
		 * beyond the current fack, they will be inserted by
		 * way of hole splitting in the while-loop below.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL);
		if (temp != NULL) {
			tp->snd_fack = sblkp->end;
			*newbytes_acked += (sblkp->end - sblkp->start);

			/* Go to the previous sack block. */
			sblkp--;
		} else {
			/*
			 * We failed to add a new hole based on the current
			 * sack block.  Skip over all the sack blocks that
			 * fall completely to the right of snd_fack and proceed
			 * to trim the scoreboard based on the remaining sack
			 * blocks.  This also trims the scoreboard for th_ack
			 * (which is sack_blocks[0]).
			 */
			while (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->start))
				sblkp--;
			if (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->end)) {
				*newbytes_acked += (sblkp->end - tp->snd_fack);
				tp->snd_fack = sblkp->end;
			}
		}
	} else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
		/* fack is advanced. */
		*newbytes_acked += (sblkp->end - tp->snd_fack);
		tp->snd_fack = sblkp->end;
	}
	/* We must have at least one SACK hole in scoreboard */
	cur = TAILQ_LAST(&tp->snd_holes, sackhole_head);	/* Last SACK hole */
	/*
	 * Since the incoming sack blocks are sorted, we can process them
	 * making one sweep of the scoreboard.
	 */
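	/*
	 * A sack block that overlaps the current hole falls into one of
	 * four cases: it covers the entire hole (delete the hole), only
	 * its beginning (advance cur->start), only its end (pull back
	 * cur->end), or its middle (split the hole in two).
	 */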
	while (sblkp >= sack_blocks && cur != NULL) {
		if (SEQ_GEQ(sblkp->start, cur->end)) {
			/*
			 * SACKs data beyond the current hole.
			 * Go to the previous sack block.
			 */
			sblkp--;
			continue;
		}
		if (SEQ_LEQ(sblkp->end, cur->start)) {
			/*
			 * SACKs data before the current hole.
			 * Go to the previous hole.
			 */
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
			continue;
		}
		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
		if (SEQ_LEQ(sblkp->start, cur->start)) {
			/* Data acks at least the beginning of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Acks entire hole, so delete hole */
				*newbytes_acked += (cur->end - cur->start);

				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				temp = cur;
				cur = TAILQ_PREV(cur, sackhole_head, scblink);
				tcp_sackhole_remove(tp, temp);
				/*
				 * The sack block may ack all or part of the
				 * next hole too, so continue onto the next
				 * hole.
				 */
				continue;
			} else {
				/* Move start of hole forward */
				*newbytes_acked += (sblkp->end - cur->start);
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				cur->start = sblkp->end;
				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
			}
		} else {
			/* Data acks at least the end of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Move end of hole backward */
				*newbytes_acked += (cur->end - sblkp->start);
				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				cur->end = sblkp->start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
			} else {
				/*
				 * ACKs some data in the middle of a hole;
				 * need to split current hole
				 */
				*newbytes_acked += (sblkp->end - sblkp->start);
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				temp = tcp_sackhole_insert(tp, sblkp->end,
				    cur->end, cur);
				if (temp != NULL) {
					if (SEQ_GT(cur->rxmit, temp->rxmit)) {
						temp->rxmit = cur->rxmit;
						tp->sackhint.sack_bytes_rexmit
						    += (temp->rxmit
						    - temp->start);
					}
					cur->end = sblkp->start;
					cur->rxmit = SEQ_MIN(cur->rxmit,
					    cur->end);
					/*
					 * Reset the rxmit_start to that of
					 * the current hole as that will
					 * help to compute the reorder
					 * window correctly
					 */
					temp->rxmit_start = cur->rxmit_start;
				}
			}
		}
		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
		/*
		 * Testing sblkp->start against cur->start tells us whether
		 * we're done with the sack block or the sack hole.
		 * Accordingly, we advance one or the other.
		 */
		if (SEQ_LEQ(sblkp->start, cur->start))
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
		else
			sblkp--;
	}
}

/*
 * Free all SACK holes to clear the scoreboard.
 */
void
tcp_free_sackholes(struct tcpcb *tp)
{
	struct sackhole *q;

	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL)
		tcp_sackhole_remove(tp, q);
	tp->sackhint.sack_bytes_rexmit = 0;
	tp->sackhint.nexthole = NULL;
	tp->sack_newdata = 0;
}

/*
 * Partial ack handling within a sack recovery episode.  Keeping this very
 * simple for now.  When a partial ack is received, force snd_cwnd to a
 * value that will allow the sender to transmit no more than 2 segments.
 * If necessary, a better scheme can be adopted at a later point, but for
 * now, the goal is to prevent the sender from bursting a large amount of
 * data in the midst of sack recovery.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	int num_segs = 1;

	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_rtttime = 0;
	/* send one or 2 segments based on how much new data was acked */
	if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2)
		num_segs = 2;
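	/*
	 * sack_bytes_rexmit plus (snd_nxt - sack_newdata) approximates the
	 * data outstanding in this recovery episode, so the cwnd computed
	 * below lets tcp_output() send at most num_segs additional segments.
	 */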
	tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
	    (tp->snd_nxt - tp->sack_newdata) +
	    num_segs * tp->t_maxseg);
	if (tp->snd_cwnd > tp->snd_ssthresh)
		tp->snd_cwnd = tp->snd_ssthresh;
	if (SEQ_LT(tp->snd_fack, tp->snd_recover) &&
	    tp->snd_fack == th->th_ack && TAILQ_EMPTY(&tp->snd_holes)) {
		struct sackhole *temp;
		/*
		 * We received a partial ack but there is no sack_hole
		 * that will cover the remaining seq space.  In this case,
		 * create a hole from snd_fack to snd_recover so that
		 * the sack recovery will continue.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack,
		    tp->snd_recover, NULL);
		if (temp != NULL)
			tp->snd_fack = tp->snd_recover;
	}
	(void) tcp_output(tp);
}

/*
 * Debug version of tcp_sack_output() that walks the scoreboard.  Used for
 * now to sanity check the hint.
 */
static struct sackhole *
tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *p;

	*sack_bytes_rexmt = 0;
	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
		if (SEQ_LT(p->rxmit, p->end)) {
			if (SEQ_LT(p->rxmit, tp->snd_una)) { /* old SACK hole */
				continue;
			}
			*sack_bytes_rexmt += (p->rxmit - p->start);
			break;
		}
		*sack_bytes_rexmt += (p->rxmit - p->start);
	}
	return (p);
}

/*
 * Returns the next hole to retransmit and the number of retransmitted bytes
 * from the scoreboard.  We store both the next hole and the number of
 * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
 * reception).  This avoids scoreboard traversals completely.
 *
 * The loop here will traverse *at most* one link.  Here's the argument.
 * For the loop to traverse more than 1 link before finding the next hole to
 * retransmit, we would need to have at least 1 node following the current
 * hint with (rxmit == end).  But, for all holes following the current hint,
 * (start == rxmit), since we have not yet retransmitted from them.
 * Therefore, in order to traverse more than 1 link in the loop below, we
 * need to have at least one node following the current hint with
 * (start == rxmit == end).  But that can't happen: (start == end) means
 * that all the data in that hole has been sacked, in which case, the hole
 * would have been removed from the scoreboard.
 */
struct sackhole *
tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *hole = NULL, *dbg_hole = NULL;
	int dbg_bytes_rexmt;

	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
	hole = tp->sackhint.nexthole;
	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
		goto out;
	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
		if (SEQ_LT(hole->rxmit, hole->end)) {
			tp->sackhint.nexthole = hole;
			break;
		}
	}
out:
	if (dbg_hole != hole) {
		printf("%s: Computed sack hole not the same as cached value\n",
		    __func__);
		hole = dbg_hole;
	}
	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
		printf("%s: Computed sack_bytes_retransmitted (%d) not "
		    "the same as cached value (%d)\n",
		    __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
		*sack_bytes_rexmt = dbg_bytes_rexmt;
	}
	return (hole);
}

/*
 * After a timeout, the SACK list may be rebuilt.  This SACK information
 * should be used to avoid retransmitting SACKed data.  This function
 * traverses the SACK list to see if snd_nxt should be moved forward.
 */
void
tcp_sack_adjust(struct tcpcb *tp)
{
	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);

	if (cur == NULL)
		return;		/* No holes */
	if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack))
		return;		/* We're already beyond any SACKed blocks */
	/*
	 * Two cases for which we want to advance snd_nxt:
	 * i) snd_nxt lies between end of one hole and beginning of another
	 * ii) snd_nxt lies between end of last hole and snd_fack
	 */
	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
		if (SEQ_LT(tp->snd_nxt, cur->end))
			return;
		if (SEQ_GEQ(tp->snd_nxt, p->start))
			cur = p;
		else {
			tp->snd_nxt = p->start;
			return;
		}
	}
	if (SEQ_LT(tp->snd_nxt, cur->end))
		return;
	tp->snd_nxt = tp->snd_fack;
	return;
}

/*
 * This function returns TRUE if more than (tcprexmtthresh - 1) * SMSS
 * bytes with sequence numbers greater than snd_una have been SACKed.
 */
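/*
 * This corresponds to the byte-based IsLost() test of RFC 6675,
 * evaluated at snd_una.
 */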
boolean_t
tcp_sack_byte_islost(struct tcpcb *tp)
{
	u_int32_t unacked_bytes, sndhole_bytes = 0;
	struct sackhole *sndhole;

	if (!SACK_ENABLED(tp) || IN_FASTRECOVERY(tp) ||
	    TAILQ_EMPTY(&tp->snd_holes) ||
	    (tp->t_flagsext & TF_PKTS_REORDERED))
		return (FALSE);

	unacked_bytes = tp->snd_max - tp->snd_una;

	TAILQ_FOREACH(sndhole, &tp->snd_holes, scblink) {
		sndhole_bytes += (sndhole->end - sndhole->start);
	}

	VERIFY(unacked_bytes >= sndhole_bytes);
	return ((unacked_bytes - sndhole_bytes) >
	    ((tcprexmtthresh - 1) * tp->t_maxseg));
}

/*
 * Process any DSACK options that might be present on an input packet.
 */
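/*
 * A DSACK (RFC 2883) reports the duplicate in the first SACK block; that
 * block either lies below the cumulative ACK or is contained within the
 * second SACK block.  The two branches below handle those cases in turn.
 */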
boolean_t
tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th)
{
	struct sackblk first_sack, second_sack;
	struct tcp_rxt_seg *rxseg;

	bcopy(to->to_sacks, &first_sack, sizeof(first_sack));
	first_sack.start = ntohl(first_sack.start);
	first_sack.end = ntohl(first_sack.end);

	if (to->to_nsacks > 1) {
		bcopy((to->to_sacks + TCPOLEN_SACK), &second_sack,
		    sizeof(second_sack));
		second_sack.start = ntohl(second_sack.start);
		second_sack.end = ntohl(second_sack.end);
	}

	if (SEQ_LT(first_sack.start, th->th_ack) &&
	    SEQ_LEQ(first_sack.end, th->th_ack)) {
		/*
		 * There is a dsack option reporting a duplicate segment
		 * also covered by the cumulative acknowledgement.
		 *
		 * Validate the sequence numbers before looking at the dsack
		 * option.  The duplicate notification can come after
		 * snd_una moves forward.  In order to set a window of valid
		 * sequence numbers to look for, we set a maximum send
		 * window within which the DSACK option will be processed.
		 */
		if (!(TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.start, th->th_ack) &&
		    TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.end, th->th_ack))) {
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;

			/*
			 * Returning true here so that the ack will not be
			 * treated as a duplicate ack.
			 */
			return (TRUE);
		}
	} else if (to->to_nsacks > 1 &&
	    SEQ_LEQ(second_sack.start, first_sack.start) &&
	    SEQ_GEQ(second_sack.end, first_sack.end)) {
		/*
		 * There is a dsack option in the first block not
		 * covered by the cumulative acknowledgement but covered
		 * by the second sack block.
		 *
		 * Verify the sequence numbers on the second sack block
		 * before processing the DSACK option.  Returning false
		 * here will treat the ack as a duplicate ack.
		 */
		if (!TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &second_sack,
		    th->th_ack)) {
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;
			return (TRUE);
		}
	} else {
		/* No dsack options, proceed with processing the sack */
		return (FALSE);
	}

	/* Update the tcpopt pointer to exclude the dsack block. */
	to->to_nsacks--;
	to->to_sacks += TCPOLEN_SACK;
	tcpstat.tcps_dsack_recvd++;
	tp->t_dsack_recvd++;

	/* Ignore the DSACK option, if DSACK is disabled. */
	if (tp->t_flagsext & TF_DISABLE_DSACK)
		return (TRUE);

	/* If the DSACK is for TLP mark it as such */
	if ((tp->t_flagsext & TF_SENT_TLPROBE) &&
	    first_sack.end == tp->t_tlphighrxt) {
		if ((rxseg = tcp_rxtseg_find(tp, first_sack.start,
		    (first_sack.end - 1))) != NULL)
			rxseg->rx_flags |= TCP_RXT_DSACK_FOR_TLP;
	}
	/* Update the sender's retransmit segment state */
	if (((tp->t_rxtshift == 1 && first_sack.start == tp->snd_una) ||
	    ((tp->t_flagsext & TF_SENT_TLPROBE) &&
	    first_sack.end == tp->t_tlphighrxt)) &&
	    TAILQ_EMPTY(&tp->snd_holes) &&
	    SEQ_GT(th->th_ack, tp->snd_una)) {
		/*
		 * If the dsack is for a retransmitted packet and one of
		 * the two cases is true, it indicates ack loss:
		 * - retransmit timeout and first_sack.start == snd_una
		 * - TLP probe and first_sack.end == tlphighrxt
		 *
		 * Ignore the dsack and do not update state when there is
		 * ack loss.
		 */
		tcpstat.tcps_dsack_ackloss++;

		return (TRUE);
	} else if ((rxseg = tcp_rxtseg_find(tp, first_sack.start,
	    (first_sack.end - 1))) == NULL) {
		/*
		 * The duplicate notification was not triggered by a
		 * retransmission.  This might be due to network duplication;
		 * disable further DSACK processing.
		 */
		if (!tcp_dsack_ignore_hw_duplicates) {
			tp->t_flagsext |= TF_DISABLE_DSACK;
			tcpstat.tcps_dsack_disable++;
		}
	} else {
		/*
		 * If the segment was retransmitted only once, mark it as
		 * spurious.  Otherwise ignore the duplicate notification.
		 */
		if (rxseg->rx_count == 1)
			rxseg->rx_flags |= TCP_RXT_SPURIOUS;
		else
			rxseg->rx_flags &= ~TCP_RXT_SPURIOUS;
	}
	return (TRUE);
}