/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/mount_internal.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
#include <sys/kpi_mbuf.h>

#include <sys/vm.h>
#include <sys/vmparam.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <sys/buf_internal.h>

#define NFS_BIO_DBG(...) NFS_DBG(NFS_FAC_BIO, 7, ## __VA_ARGS__)

kern_return_t thread_terminate(thread_t); /* XXX */

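/*
 * Hash an (nfsnode, logical block number) pair to a chain head:
 * the nfsnode pointer (scaled down by the node's size) plus the
 * block number, masked with the power-of-2 table mask returned
 * by hashinit().
 */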
#define NFSBUFHASH(np, lbn)	\
	(&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl;
struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
u_long nfsbufhash;
int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
int nfs_nbdwrite;
int nfs_buf_timer_on = 0;
thread_t nfsbufdelwrithd = NULL;

lck_grp_t *nfs_buf_lck_grp;
lck_mtx_t *nfs_buf_mutex;

#define NFSBUF_FREE_PERIOD	30	/* seconds */
#define NFSBUF_LRU_STALE	120	/* seconds */
#define NFSBUF_META_STALE	240	/* seconds */

/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
#define LRU_TO_FREEUP	6
/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
#define META_TO_FREEUP	3
/* total number of nfsbufs nfs_buf_freeup() should attempt to free */
#define TOTAL_TO_FREEUP	(LRU_TO_FREEUP+META_TO_FREEUP)
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */
#define LRU_FREEUP_FRAC_ON_TIMER	8
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */
#define META_FREEUP_FRAC_ON_TIMER	16
/* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */
#define LRU_FREEUP_MIN_FRAC	4
/* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */
#define META_FREEUP_MIN_FRAC	2
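
/*
 * Worked example (hypothetical numbers) for the NFS_BUF_FREEUP()
 * check below: with nfsbufcnt == 1024, nfs_buf_freeup() is only
 * called once more than 1024/4 == 256 regular or 1024/2 == 512
 * meta buffers sit on the free lists, and only if freeing
 * TOTAL_TO_FREEUP (9) buffers would still leave more than
 * nfsbufmin buffers allocated.
 */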

#define NFS_BUF_FREEUP() \
	do { \
		/* only call nfs_buf_freeup() if it has work to do: */ \
		if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
		     (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
		    ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
			nfs_buf_freeup(0); \
	} while (0)

/*
 * Initialize nfsbuf lists
 */
void
nfs_nbinit(void)
{
	nfs_buf_lck_grp = lck_grp_alloc_init("nfs_buf", LCK_GRP_ATTR_NULL);
	nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, LCK_ATTR_NULL);

	nfsbufcnt = nfsbufmetacnt =
	nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
	nfsbufmin = 128;
	/* size nfsbufmax to cover at most half sane_size (w/default buf size) */
	nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT));
	nfsbufmetamax = nfsbufmax / 4;
	nfsneedbuffer = 0;
	nfs_nbdwrite = 0;

	nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash);
	TAILQ_INIT(&nfsbuffree);
	TAILQ_INIT(&nfsbuffreemeta);
	TAILQ_INIT(&nfsbufdelwri);
}

/*
 * Check periodically for stale/unused nfs bufs
 */
void
nfs_buf_timer(__unused void *param0, __unused void *param1)
{
	nfs_buf_freeup(1);

	lck_mtx_lock(nfs_buf_mutex);
	if (nfsbufcnt <= nfsbufmin) {
		nfs_buf_timer_on = 0;
		lck_mtx_unlock(nfs_buf_mutex);
		return;
	}
	lck_mtx_unlock(nfs_buf_mutex);

	nfs_interval_timer_start(nfs_buf_timer_call,
		NFSBUF_FREE_PERIOD * 1000);
}

/*
 * try to free up some excess, unused nfsbufs
 */
void
nfs_buf_freeup(int timer)
{
	struct nfsbuf *fbp;
	struct timeval now;
	int count;
	struct nfsbuffreehead nfsbuffreeup;

	TAILQ_INIT(&nfsbuffreeup);

	lck_mtx_lock(nfs_buf_mutex);

	microuptime(&now);

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

	count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffree);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
	}

	count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
		nfsbufmetacnt--;
	}

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
	NFSBUFCNTCHK();

	lck_mtx_unlock(nfs_buf_mutex);

	while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
		TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
		/* nuke any creds */
		if (IS_VALID_CRED(fbp->nb_rcred))
			kauth_cred_unref(&fbp->nb_rcred);
		if (IS_VALID_CRED(fbp->nb_wcred))
			kauth_cred_unref(&fbp->nb_wcred);
		/* if buf was NB_META, dump buffer */
		if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data)
			kfree(fbp->nb_data, fbp->nb_bufsize);
		FREE(fbp, M_TEMP);
	}
}

/*
 * remove a buffer from the freelist
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_remfree(struct nfsbuf *bp)
{
	if (bp->nb_free.tqe_next == NFSNOLIST)
		panic("nfsbuf not on free list");
	if (ISSET(bp->nb_flags, NB_DELWRI)) {
		nfsbufdelwricnt--;
		TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
	} else if (ISSET(bp->nb_flags, NB_META)) {
		nfsbuffreemetacnt--;
		TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
	} else {
		nfsbuffreecnt--;
		TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
	}
	bp->nb_free.tqe_next = NFSNOLIST;
	NFSBUFCNTCHK();
}

/*
 * check for existence of nfsbuf in cache
 */
boolean_t
nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno)
{
	boolean_t rv;
	lck_mtx_lock(nfs_buf_mutex);
	if (nfs_buf_incore(np, blkno))
		rv = TRUE;
	else
		rv = FALSE;
	lck_mtx_unlock(nfs_buf_mutex);
	return (rv);
}

/*
 * return incore buffer (must be called with nfs_buf_mutex held)
 */
struct nfsbuf *
nfs_buf_incore(nfsnode_t np, daddr64_t blkno)
{
	/* Search hash chain */
	struct nfsbuf * bp = NFSBUFHASH(np, blkno)->lh_first;
	for (; bp != NULL; bp = bp->nb_hash.le_next)
		if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) {
			if (!ISSET(bp->nb_flags, NB_INVAL)) {
				FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np);
				return (bp);
			}
		}
	return (NULL);
}

/*
 * Check if it's OK to drop a page.
 *
 * Called by vnode_pager() on pageout request of non-dirty page.
 * We need to make sure that it's not part of a delayed write.
 * If it is, we can't let the VM drop it because we may need it
 * later when/if we need to write the data (again).
 */
int
nfs_buf_page_inval(vnode_t vp, off_t offset)
{
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	int error = 0;

	if (nfs_mount_gone(nmp))
		return (ENXIO);

	lck_mtx_lock(nfs_buf_mutex);
	bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize));
	if (!bp)
		goto out;
	FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		error = EBUSY;
		goto out;
	}
	/*
	 * If there's a dirty range in the buffer, check to
	 * see if this page intersects with the dirty range.
	 * If it does, we can't let the pager drop the page.
	 */
	if (bp->nb_dirtyend > 0) {
		int start = offset - NBOFF(bp);
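		/* i.e. does [nb_dirtyoff, nb_dirtyend) overlap [start, start + PAGE_SIZE)? */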
		if ((bp->nb_dirtyend > start) &&
		    (bp->nb_dirtyoff < (start + PAGE_SIZE))) {
			/*
			 * Before returning the bad news, move the
			 * buffer to the start of the delwri list and
			 * give the list a push to try to flush the
			 * buffer out.
			 */
			error = EBUSY;
			nfs_buf_remfree(bp);
			TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_delwri_push(1);
		}
	}
out:
	lck_mtx_unlock(nfs_buf_mutex);
	return (error);
}

/*
 * set up the UPL for a buffer
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_upl_setup(struct nfsbuf *bp)
{
	kern_return_t kret;
	upl_t upl;
	int upl_flags;

	if (ISSET(bp->nb_flags, NB_PAGELIST))
		return (0);

	upl_flags = UPL_PRECIOUS;
	if (!ISSET(bp->nb_flags, NB_READ)) {
		/*
		 * We're doing a "write", so we intend to modify
		 * the pages we're gathering.
		 */
		upl_flags |= UPL_WILL_MODIFY;
	}
	kret = ubc_create_upl_kernel(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
		&upl, NULL, upl_flags, VM_KERN_MEMORY_FILE);
	if (kret == KERN_INVALID_ARGUMENT) {
		/* vm object probably doesn't exist any more */
		bp->nb_pagelist = NULL;
		return (EINVAL);
	}
	if (kret != KERN_SUCCESS) {
		printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
		bp->nb_pagelist = NULL;
		return (EIO);
	}

	FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np);

	bp->nb_pagelist = upl;
	SET(bp->nb_flags, NB_PAGELIST);
	return (0);
}

/*
 * update buffer's valid/dirty info from UBC
 * (must NOT be called with nfs_buf_mutex held)
 */
void
nfs_buf_upl_check(struct nfsbuf *bp)
{
	upl_page_info_t *pl;
	off_t filesize, fileoffset;
	int i, npages;

	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return;

	npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
	filesize = ubc_getsize(NFSTOV(bp->nb_np));
	fileoffset = NBOFF(bp);
	if (fileoffset < filesize)
		SET(bp->nb_flags, NB_CACHE);
	else
		CLR(bp->nb_flags, NB_CACHE);

	pl = ubc_upl_pageinfo(bp->nb_pagelist);
	bp->nb_valid = bp->nb_dirty = 0;

	for (i = 0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
		/* anything beyond the end of the file is not valid or dirty */
		if (fileoffset >= filesize)
			break;
		if (!upl_valid_page(pl, i)) {
			CLR(bp->nb_flags, NB_CACHE);
			continue;
		}
		NBPGVALID_SET(bp, i);
		if (upl_dirty_page(pl, i))
			NBPGDIRTY_SET(bp, i);
	}
	fileoffset = NBOFF(bp);
	if (ISSET(bp->nb_flags, NB_CACHE)) {
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_bufsize;
		if (fileoffset + bp->nb_validend > filesize)
			bp->nb_validend = filesize - fileoffset;
	} else {
		bp->nb_validoff = bp->nb_validend = -1;
	}
	FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
	FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
}

/*
 * make sure that a buffer is mapped
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_map(struct nfsbuf *bp)
{
	kern_return_t kret;

	if (bp->nb_data)
		return (0);
	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return (EINVAL);

	kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data));
	if (kret != KERN_SUCCESS)
		panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
	if (bp->nb_data == 0)
		panic("ubc_upl_map mapped 0");
	FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
	return (0);
}

/*
 * normalize an nfsbuf's valid range
 *
 * the read/write code guarantees that we'll always have a valid
 * region that is an integral number of pages.  If either end
 * of the valid range isn't page-aligned, it gets corrected
 * here as we extend the valid range through all of the
 * contiguous valid pages.
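 *
 * For example (hypothetical values, 4K pages): a validoff of 0x1200
 * in a valid page whose predecessor is invalid is pulled back to
 * 0x1000, and a validend of 0x2800 is pushed out to 0x3000 when
 * that page is marked valid.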
 */
void
nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp)
{
	int pg, npg;
	/* pull validoff back to start of contiguous valid page range */
	pg = bp->nb_validoff/PAGE_SIZE;
	while (pg >= 0 && NBPGVALID(bp, pg))
		pg--;
	bp->nb_validoff = (pg+1) * PAGE_SIZE;
	/* push validend forward to end of contiguous valid page range */
	npg = bp->nb_bufsize/PAGE_SIZE;
	pg = bp->nb_validend/PAGE_SIZE;
	while (pg < npg && NBPGVALID(bp, pg))
		pg++;
	bp->nb_validend = pg * PAGE_SIZE;
	/* clip to EOF */
	if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size)
		bp->nb_validend = np->n_size % bp->nb_bufsize;
}

/*
 * process some entries on the delayed write queue
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_delwri_service(void)
{
	struct nfsbuf *bp;
	nfsnode_t np;
	int error, i = 0;

	while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
		np = bp->nb_np;
		nfs_buf_remfree(bp);
		nfs_buf_refget(bp);
		while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN)
			;	/* retry until the buffer is acquired or the attempt fails */
		nfs_buf_refrele(bp);
		if (error)
			break;
		if (!bp->nb_np) {
			/* buffer is no longer valid */
			nfs_buf_drop(bp);
			continue;
		}
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
			nfs_buf_check_write_verifier(np, bp);
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			/* put buffer at end of delwri list */
			TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_drop(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_flushcommits(np, 1);
		} else {
			SET(bp->nb_flags, NB_ASYNC);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_buf_write(bp);
		}
		i++;
		lck_mtx_lock(nfs_buf_mutex);
	}
}

/*
 * thread to service the delayed write queue when asked
 */
void
nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr)
{
	struct timespec ts = { 30, 0 };
	int error = 0;

	lck_mtx_lock(nfs_buf_mutex);
	while (!error) {
		nfs_buf_delwri_service();
		error = msleep(&nfsbufdelwrithd, nfs_buf_mutex, 0, "nfsbufdelwri", &ts);
	}
	nfsbufdelwrithd = NULL;
	lck_mtx_unlock(nfs_buf_mutex);
	/* terminate this service thread (the global was just cleared above) */
	thread_terminate(current_thread());
}

/*
 * try to push out some delayed/uncommitted writes
 * ("locked" indicates whether nfs_buf_mutex is already held)
 */
void
nfs_buf_delwri_push(int locked)
{
	if (TAILQ_EMPTY(&nfsbufdelwri))
		return;
	if (!locked)
		lck_mtx_lock(nfs_buf_mutex);
	/* wake up the delayed write service thread */
	if (nfsbufdelwrithd)
		wakeup(&nfsbufdelwrithd);
	else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS)
		thread_deallocate(nfsbufdelwrithd);
	/* otherwise, try to do some of the work ourselves */
	if (!nfsbufdelwrithd)
		nfs_buf_delwri_service();
	if (!locked)
		lck_mtx_unlock(nfs_buf_mutex);
}

/*
 * Get an nfs buffer.
 *
 * Returns errno on error, 0 otherwise.
 * Any buffer is returned in *bpp.
 *
 * If NBLK_ONLYVALID is set, only return buffer if found in cache.
 * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
 *
 * Check for existence of buffer in cache.
 * Or attempt to reuse a buffer from one of the free lists.
 * Or allocate a new buffer if we haven't already hit max allocation.
 * Or wait for a free buffer.
 *
 * If available buffer found, prepare it, and return it.
 *
 * If the calling process is interrupted by a signal for
 * an interruptible mount point, return EINTR.
 */
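/*
 * Sketch of a typical caller (illustrative only; names are from this
 * file, error handling abbreviated):
 *
 *	struct nfsbuf *bp = NULL;
 *	error = nfs_buf_get(np, lbn, nmp->nm_biosize, thd, NBLK_READ, &bp);
 *	if (!error && bp) {
 *		... access data through bp->nb_data ...
 *		nfs_buf_release(bp, 1);
 *	}
 */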
int
nfs_buf_get(
	nfsnode_t np,
	daddr64_t blkno,
	uint32_t size,
	thread_t thd,
	int flags,
	struct nfsbuf **bpp)
{
	vnode_t vp = NFSTOV(np);
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	uint32_t bufsize;
	int slpflag = PCATCH;
	int operation = (flags & NBLK_OPMASK);
	int error = 0;
	struct timespec ts;

	FSDBG_TOP(541, np, blkno, size, flags);
	*bpp = NULL;

	bufsize = size;
	if (bufsize > NFS_MAXBSIZE)
		panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");

	if (nfs_mount_gone(nmp)) {
		FSDBG_BOT(541, np, blkno, 0, ENXIO);
		return (ENXIO);
	}

	if (!UBCINFOEXISTS(vp)) {
		operation = NBLK_META;
	} else if (bufsize < (uint32_t)nmp->nm_biosize) {
		/* reg files should always have biosize blocks */
		bufsize = nmp->nm_biosize;
	}

	/* if NBLK_WRITE, check for too many delayed/uncommitted writes */
	if ((operation == NBLK_WRITE) && (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES)) {
		FSDBG_TOP(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);

		/* poke the delwri list */
		nfs_buf_delwri_push(0);

		/* sleep to let other threads run... */
		tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
		FSDBG_BOT(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
	}

loop:
	lck_mtx_lock(nfs_buf_mutex);

	/* wait for any buffer invalidation/flushing to complete */
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		ts.tv_sec = 2;
		ts.tv_nsec = 0;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_buf_get_invalwait", &ts);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			FSDBG_BOT(541, np, blkno, 0, error);
			return (error);
		}
		if (np->n_bflag & NBINVALINPROG)
			slpflag = 0;
	}

	/* check for existence of nfsbuf in cache */
	if ((bp = nfs_buf_incore(np, blkno))) {
		/* if busy, set wanted and wait */
		if (ISSET(bp->nb_lflags, NBL_BUSY)) {
			if (flags & NBLK_NOWAIT) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc);
				return (0);
			}
			FSDBG_TOP(543, np, blkno, bp, bp->nb_flags);
			SET(bp->nb_lflags, NBL_WANTED);

			ts.tv_sec = 2;
			ts.tv_nsec = 0;
			msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP,
				"nfsbufget", (slpflag == PCATCH) ? NULL : &ts);
			slpflag = 0;
			FSDBG_BOT(543, np, blkno, bp, bp->nb_flags);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
		if (bp->nb_bufsize != bufsize)
			panic("nfsbuf size mismatch");
		SET(bp->nb_lflags, NBL_BUSY);
		SET(bp->nb_flags, NB_CACHE);
		nfs_buf_remfree(bp);
		/* additional paranoia: */
		if (ISSET(bp->nb_flags, NB_PAGELIST))
			panic("pagelist buffer was not busy");
		goto buffer_setup;
	}

	if (flags & NBLK_ONLYVALID) {
		lck_mtx_unlock(nfs_buf_mutex);
		FSDBG_BOT(541, np, blkno, 0, 0x0000cace);
		return (0);
	}

	/*
	 * where to get a free buffer:
	 * - if meta and maxmeta reached, must reuse meta
	 * - alloc new if we haven't reached min bufs
	 * - if free lists are NOT empty
	 *   - if free list is stale, use it
	 *   - else if freemeta list is stale, use it
	 *   - else if max bufs allocated, use least-time-to-stale
	 * - alloc new if we haven't reached max allowed
	 * - start clearing out delwri list and try again
	 */

	if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
		/* if we've hit max meta buffers, must reuse a meta buffer */
		bp = TAILQ_FIRST(&nfsbuffreemeta);
	} else if ((nfsbufcnt > nfsbufmin) &&
	    (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
		/* try to pull an nfsbuf off a free list */
		struct nfsbuf *lrubp, *metabp;
		struct timeval now;
		microuptime(&now);

		/* if the next LRU or META buffer is invalid or stale, use it */
		lrubp = TAILQ_FIRST(&nfsbuffree);
		if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
		    ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec)))
			bp = lrubp;
		metabp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
		    ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec)))
			bp = metabp;

		if (!bp && (nfsbufcnt >= nfsbufmax)) {
			/* we've already allocated all bufs, so */
			/* choose the buffer that'll go stale first */
			if (!metabp)
				bp = lrubp;
			else if (!lrubp)
				bp = metabp;
			else {
				int32_t lru_stale_time, meta_stale_time;
				lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
				meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
				if (lru_stale_time <= meta_stale_time)
					bp = lrubp;
				else
					bp = metabp;
			}
		}
	}

	if (bp) {
		/* we have a buffer to reuse */
		FSDBG(544, np, blkno, bp, bp->nb_flags);
		nfs_buf_remfree(bp);
		if (ISSET(bp->nb_flags, NB_DELWRI))
			panic("nfs_buf_get: delwri");
		SET(bp->nb_lflags, NBL_BUSY);
		/* disassociate buffer from previous nfsnode */
		if (bp->nb_np) {
			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
			}
			bp->nb_np = NULL;
		}
		LIST_REMOVE(bp, nb_hash);
		/* nuke any creds we're holding */
		if (IS_VALID_CRED(bp->nb_rcred))
			kauth_cred_unref(&bp->nb_rcred);
		if (IS_VALID_CRED(bp->nb_wcred))
			kauth_cred_unref(&bp->nb_wcred);
		/* if buf will no longer be NB_META, dump old buffer */
		if (operation == NBLK_META) {
			if (!ISSET(bp->nb_flags, NB_META))
				nfsbufmetacnt++;
		} else if (ISSET(bp->nb_flags, NB_META)) {
			if (bp->nb_data) {
				kfree(bp->nb_data, bp->nb_bufsize);
				bp->nb_data = NULL;
			}
			nfsbufmetacnt--;
		}
		/* re-init buf fields */
		bp->nb_error = 0;
		bp->nb_validoff = bp->nb_validend = -1;
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		bp->nb_valid = 0;
		bp->nb_dirty = 0;
		bp->nb_verf = 0;
	} else {
		/* no buffer to reuse */
		if ((nfsbufcnt < nfsbufmax) &&
		    ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
			/* just alloc a new one */
			MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK);
			if (!bp) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0, error);
				return (ENOMEM);
			}
			nfsbufcnt++;

			/*
			 * If any excess bufs, make sure the timer
			 * is running to free them up later.
			 */
			if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) {
				nfs_buf_timer_on = 1;
				nfs_interval_timer_start(nfs_buf_timer_call,
					NFSBUF_FREE_PERIOD * 1000);
			}

			if (operation == NBLK_META)
				nfsbufmetacnt++;
			NFSBUFCNTCHK();
			/* init nfsbuf */
			bzero(bp, sizeof(*bp));
			bp->nb_free.tqe_next = NFSNOLIST;
			bp->nb_validoff = bp->nb_validend = -1;
			FSDBG(545, np, blkno, bp, 0);
		} else {
			/* too many bufs... wait for buffers to free up */
			FSDBG_TOP(546, np, blkno, nfsbufcnt, nfsbufmax);

			/* poke the delwri list */
			nfs_buf_delwri_push(1);

			nfsneedbuffer = 1;
			msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH|PDROP, "nfsbufget", NULL);
			FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
	}

	/* set up nfsbuf */
	SET(bp->nb_lflags, NBL_BUSY);
	bp->nb_flags = 0;
	bp->nb_lblkno = blkno;
	/* insert buf in hash */
	LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
	/* associate buffer with new nfsnode */
	bp->nb_np = np;
	LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);

buffer_setup:

	/* unlock hash */
	lck_mtx_unlock(nfs_buf_mutex);

	switch (operation) {
	case NBLK_META:
		SET(bp->nb_flags, NB_META);
		if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
			kfree(bp->nb_data, bp->nb_bufsize);
			bp->nb_data = NULL;
			bp->nb_validoff = bp->nb_validend = -1;
			bp->nb_dirtyoff = bp->nb_dirtyend = 0;
			bp->nb_valid = 0;
			bp->nb_dirty = 0;
			CLR(bp->nb_flags, NB_CACHE);
		}
		if (!bp->nb_data)
			bp->nb_data = kalloc(bufsize);
		if (!bp->nb_data) {
			/* Ack! couldn't allocate the data buffer! */
			/* clean up buffer and return error */
			lck_mtx_lock(nfs_buf_mutex);
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
			bp->nb_np = NULL;
			/* invalidate usage timestamp to allow immediate freeing */
			NBUFSTAMPINVALIDATE(bp);
			if (bp->nb_free.tqe_next != NFSNOLIST)
				panic("nfsbuf on freelist");
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
			lck_mtx_unlock(nfs_buf_mutex);
			FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM);
			return (ENOMEM);
		}
		bp->nb_bufsize = bufsize;
		break;

	case NBLK_READ:
	case NBLK_WRITE:
		/*
		 * Set or clear NB_READ now to let the UPL subsystem know
		 * if we intend to modify the pages or not.
		 */
		if (operation == NBLK_READ) {
			SET(bp->nb_flags, NB_READ);
		} else {
			CLR(bp->nb_flags, NB_READ);
		}
		if (bufsize < PAGE_SIZE)
			bufsize = PAGE_SIZE;
		bp->nb_bufsize = bufsize;
		bp->nb_validoff = bp->nb_validend = -1;

		if (UBCINFOEXISTS(vp)) {
			/* set up upl */
			if (nfs_buf_upl_setup(bp)) {
				/* unable to create upl */
				/* vm object must no longer exist */
				/* clean up buffer and return error */
				lck_mtx_lock(nfs_buf_mutex);
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
				bp->nb_np = NULL;
				/* invalidate usage timestamp to allow immediate freeing */
				NBUFSTAMPINVALIDATE(bp);
				if (bp->nb_free.tqe_next != NFSNOLIST)
					panic("nfsbuf on freelist");
				TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
				nfsbuffreecnt++;
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0x2bc, EIO);
				return (EIO);
			}
			nfs_buf_upl_check(bp);
		}
		break;

	default:
		panic("nfs_buf_get: %d unknown operation", operation);
	}

	*bpp = bp;

	FSDBG_BOT(541, np, blkno, bp, bp->nb_flags);

	return (0);
}

void
nfs_buf_release(struct nfsbuf *bp, int freeup)
{
	nfsnode_t np = bp->nb_np;
	vnode_t vp;
	struct timeval now;
	int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;

	FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
	FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
	FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);

	vp = np ? NFSTOV(np) : NULL;
	if (vp && UBCINFOEXISTS(vp) && bp->nb_bufsize) {
		int upl_flags, rv;
		upl_t upl;
		uint32_t i;

		if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
			rv = nfs_buf_upl_setup(bp);
			if (rv)
				printf("nfs_buf_release: upl create failed %d\n", rv);
			else
				nfs_buf_upl_check(bp);
		}
		upl = bp->nb_pagelist;
		if (!upl)
			goto pagelist_cleanup_done;
		if (bp->nb_data) {
			if (ubc_upl_unmap(upl) != KERN_SUCCESS)
				panic("ubc_upl_unmap failed");
			bp->nb_data = NULL;
		}
		/*
		 * Abort the pages on error, or if this is an invalid or
		 * non-needcommit nocache buffer and no pages are dirty.
		 */
		if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) ||
		    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) {
			if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE)))
				upl_flags = UPL_ABORT_DUMP_PAGES;
			else
				upl_flags = 0;
			ubc_upl_abort(upl, upl_flags);
			goto pagelist_cleanup_done;
		}
		for (i = 0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) {
			if (!NBPGVALID(bp, i))
				ubc_upl_abort_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					UPL_ABORT_DUMP_PAGES |
					UPL_ABORT_FREE_ON_EMPTY);
			else {
				if (NBPGDIRTY(bp, i))
					upl_flags = UPL_COMMIT_SET_DIRTY;
				else
					upl_flags = UPL_COMMIT_CLEAR_DIRTY;

				if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))
					upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS;

				ubc_upl_commit_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					upl_flags |
					UPL_COMMIT_INACTIVATE |
					UPL_COMMIT_FREE_ON_EMPTY);
			}
		}
pagelist_cleanup_done:
		/* invalidate any pages past EOF */
		if (NBOFF(bp) + bp->nb_bufsize > (off_t)(np->n_size)) {
			off_t start, end;
			start = trunc_page_64(np->n_size) + PAGE_SIZE_64;
			end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize);
			if (start < NBOFF(bp))
				start = NBOFF(bp);
			if (end > start) {
				if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE)))
					printf("nfs_buf_release(): ubc_msync failed!, error %d\n", rv);
			}
		}
		CLR(bp->nb_flags, NB_PAGELIST);
		bp->nb_pagelist = NULL;
	}

	lck_mtx_lock(nfs_buf_mutex);

	wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;

	/* Wake up any processes waiting for any buffer to become free. */
	if (nfsneedbuffer) {
		nfsneedbuffer = 0;
		wakeup_needbuffer = 1;
	}
	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		CLR(bp->nb_lflags, NBL_WANTED);
		wakeup_buffer = 1;
	}

	/* If it's non-needcommit nocache, or an error, mark it invalid. */
	if (ISSET(bp->nb_flags, NB_ERROR) ||
	    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))))
		SET(bp->nb_flags, NB_INVAL);

	if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
		/* If it's invalid or empty, dissociate it from its nfsnode */
		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
		}
		bp->nb_np = NULL;
		/* if this was a delayed write, wakeup anyone */
		/* waiting for delayed writes to complete */
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			CLR(bp->nb_flags, NB_DELWRI);
			nfs_nbdwrite--;
			NFSBUFCNTCHK();
			wakeup_nbdwrite = 1;
		}
		/* invalidate usage timestamp to allow immediate freeing */
		NBUFSTAMPINVALIDATE(bp);
		/* put buffer at head of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		SET(bp->nb_flags, NB_INVAL);
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	} else if (ISSET(bp->nb_flags, NB_DELWRI)) {
		/* put buffer at end of delwri list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
		nfsbufdelwricnt++;
		freeup = 0;
	} else {
		/* update usage timestamp */
		microuptime(&now);
		bp->nb_timestamp = now.tv_sec;
		/* put buffer at end of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	}

	NFSBUFCNTCHK();

	/* Unlock the buffer. */
	CLR(bp->nb_flags, (NB_ASYNC | NB_STABLE));
	CLR(bp->nb_lflags, NBL_BUSY);

	FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);

	lck_mtx_unlock(nfs_buf_mutex);

	if (wakeup_needbuffer)
		wakeup(&nfsneedbuffer);
	if (wakeup_buffer)
		wakeup(bp);
	if (wakeup_nbdwrite)
		wakeup(&nfs_nbdwrite);
	if (freeup)
		NFS_BUF_FREEUP();
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
nfs_buf_iowait(struct nfsbuf *bp)
{
	FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	lck_mtx_lock(nfs_buf_mutex);

	while (!ISSET(bp->nb_flags, NB_DONE))
		msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL);

	lck_mtx_unlock(nfs_buf_mutex);

	FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	/* check for interruption of I/O, then errors. */
	if (ISSET(bp->nb_flags, NB_EINTR)) {
		CLR(bp->nb_flags, NB_EINTR);
		return (EINTR);
	} else if (ISSET(bp->nb_flags, NB_ERROR))
		return (bp->nb_error ? bp->nb_error : EIO);
	return (0);
}

/*
 * Mark I/O complete on a buffer.
 */
void
nfs_buf_iodone(struct nfsbuf *bp)
{
	FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	if (ISSET(bp->nb_flags, NB_DONE))
		panic("nfs_buf_iodone already");

	if (!ISSET(bp->nb_flags, NB_READ)) {
		CLR(bp->nb_flags, NB_WRITEINPROG);
		/*
		 * vnode_writedone() takes care of waking up
		 * any throttled write operations
		 */
		vnode_writedone(NFSTOV(bp->nb_np));
		nfs_node_lock_force(bp->nb_np);
		bp->nb_np->n_numoutput--;
		nfs_node_unlock(bp->nb_np);
	}
	if (ISSET(bp->nb_flags, NB_ASYNC)) {	/* if async, release it */
		SET(bp->nb_flags, NB_DONE);	/* note that it's done */
		nfs_buf_release(bp, 1);
	} else {				/* or just wakeup the buffer */
		lck_mtx_lock(nfs_buf_mutex);
		SET(bp->nb_flags, NB_DONE);	/* note that it's done */
		CLR(bp->nb_lflags, NBL_WANTED);
		lck_mtx_unlock(nfs_buf_mutex);
		wakeup(bp);
	}

	FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
}

void
nfs_buf_write_delayed(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;

	FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
	FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen, and
	 *	(2) Make sure it's on its node's correct block list.
	 */
	if (!ISSET(bp->nb_flags, NB_DELWRI)) {
		SET(bp->nb_flags, NB_DELWRI);
		/* move to dirty list */
		lck_mtx_lock(nfs_buf_mutex);
		nfs_nbdwrite++;
		NFSBUFCNTCHK();
		if (bp->nb_vnbufs.le_next != NFSNOLIST)
			LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);
	}

	/*
	 * If the vnode has "too many" write operations in progress
	 * wait for them to finish the IO
	 */
	vnode_waitforwrites(NFSTOV(np), VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");

	/* the file is in a modified state, so make sure the flag's set */
	nfs_node_lock_force(np);
	np->n_flag |= NMODIFIED;
	nfs_node_unlock(np);

	/*
	 * If we have too many delayed write buffers,
	 * just fall back to doing the async write.
	 */
	if (nfs_nbdwrite < 0)
		panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
	if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) {
		/* issue async write */
		SET(bp->nb_flags, NB_ASYNC);
		nfs_buf_write(bp);
		FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	SET(bp->nb_flags, NB_DONE);
	nfs_buf_release(bp, 1);
	FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
	return;
}

/*
 * Check that a "needcommit" buffer can still be committed.
 * If the write verifier has changed, we need to clear the
 * needcommit flag.
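 * (Called, for example, from nfs_buf_delwri_service() before a
 * delayed write is committed.)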
 */
void
nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
{
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT))
		return;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return;
	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
		return;

	/* write verifier changed, clear commit/wverf flags */
	CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF));
	bp->nb_verf = 0;
	nfs_node_lock_force(np);
	np->n_needcommitcnt--;
	CHECK_NEEDCOMMITCNT(np);
	nfs_node_unlock(np);
}

/*
 * add a reference to a buffer so it doesn't disappear while being used
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refget(struct nfsbuf *bp)
{
	bp->nb_refs++;
}
/*
 * release a reference on a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refrele(struct nfsbuf *bp)
{
	bp->nb_refs--;
}

/*
 * mark a particular buffer as BUSY
 * (must be called with nfs_buf_mutex held)
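 *
 * Returns EBUSY for a NOWAIT request on a busy buffer, EAGAIN after
 * waiting (the caller must recheck), or 0 once the buffer is BUSY.
 * Callers typically retry in a loop, e.g. (sketch):
 *	while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN)
 *		;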
 */
errno_t
nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
{
	errno_t error;
	struct timespec ts;

	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		/*
		 * since the lck_mtx_lock may block, the buffer
		 * may become BUSY, so we need to recheck for
		 * a NOWAIT request
		 */
		if (flags & NBAC_NOWAIT)
			return (EBUSY);
		SET(bp->nb_lflags, NBL_WANTED);

		/* slptimeo is in hz ticks; hz is 100, so each tick is 10ms */
		ts.tv_sec = (slptimeo/100);
		ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;

		error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1),
			"nfs_buf_acquire", &ts);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if (flags & NBAC_REMOVE)
		nfs_buf_remfree(bp);
	SET(bp->nb_lflags, NBL_BUSY);

	return (0);
}

/*
 * simply drop the BUSY status of a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_drop(struct nfsbuf *bp)
{
	int need_wakeup = 0;

	if (!ISSET(bp->nb_lflags, NBL_BUSY))
		panic("nfs_buf_drop: buffer not busy!");
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		/* delay the actual wakeup until after we clear NBL_BUSY */
		need_wakeup = 1;
	}
	/* Unlock the buffer. */
	CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));

	if (need_wakeup)
		wakeup(bp);
}

/*
 * prepare for iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
int
nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists *listheadp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
		LIST_INIT(iterheadp);
		return (EWOULDBLOCK);
	}

	while (np->n_bufiterflags & NBI_ITER) {
		np->n_bufiterflags |= NBI_ITERWANT;
		msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL);
	}
	if (LIST_EMPTY(listheadp)) {
		LIST_INIT(iterheadp);
		return (EINVAL);
	}
	np->n_bufiterflags |= NBI_ITER;

	iterheadp->lh_first = listheadp->lh_first;
	listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
	LIST_INIT(listheadp);

	return (0);
}

/*
 * clean up after iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists * listheadp;
	struct nfsbuf *bp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	while (!LIST_EMPTY(iterheadp)) {
		bp = LIST_FIRST(iterheadp);
		LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
	}

	np->n_bufiterflags &= ~NBI_ITER;
	if (np->n_bufiterflags & NBI_ITERWANT) {
		np->n_bufiterflags &= ~NBI_ITERWANT;
		wakeup(&np->n_bufiterflags);
	}
}
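
/*
 * Sketch of the intended iteration pattern (illustrative only;
 * callers hold nfs_buf_mutex and move each buffer back to the
 * node's list as they process it):
 *
 *	struct nfsbuflists blist;
 *	struct nfsbuf *bp;
 *
 *	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
 *		while ((bp = LIST_FIRST(&blist))) {
 *			LIST_REMOVE(bp, nb_vnbufs);
 *			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
 *			... examine/flush bp ...
 *		}
 *		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
 *	}
 */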

/*
 * Read an NFS buffer for a file.
 */
int
nfs_buf_read(struct nfsbuf *bp)
{
	int error = 0;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;

	np = bp->nb_np;
	cred = bp->nb_rcred;
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread();

	/* sanity checks */
	if (!ISSET(bp->nb_flags, NB_READ))
		panic("nfs_buf_read: !NB_READ");
	if (ISSET(bp->nb_flags, NB_DONE))
		CLR(bp->nb_flags, NB_DONE);

	NFS_BUF_MAP(bp);

	OSAddAtomic64(1, &nfsstats.read_bios);

	error = nfs_buf_read_rpc(bp, thd, cred);
	/*
	 * For async I/O, the callbacks will finish up the
	 * read.  Otherwise, the read has already been finished.
	 */

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
	return (error);
}

/*
 * finish the reading of a buffer
 */
void
nfs_buf_read_finish(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_ERROR)) {
		/* update valid range */
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_endio;
		if (bp->nb_endio < (int)bp->nb_bufsize) {
			/*
			 * The read may be short because we have unflushed writes
			 * that are extending the file size and the reads hit the
			 * (old) EOF on the server.  So, just make sure nb_validend
			 * correctly tracks EOF.
			 * Note that the missing data should have already been zeroed
			 * in nfs_buf_read_rpc_finish().
			 */
			off_t boff = NBOFF(bp);
			if ((off_t)np->n_size >= (boff + bp->nb_bufsize))
				bp->nb_validend = bp->nb_bufsize;
			else if ((off_t)np->n_size >= boff)
				bp->nb_validend = np->n_size - boff;
			else
				bp->nb_validend = 0;
		}
		if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) &&
		    ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL))
			bp->nb_validend = 0x100000000LL - NBOFF(bp);
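		/*
		 * Mark all pages up to validend valid; e.g. (hypothetical,
		 * 4K pages) a validend of 0x2800 rounds up to 3 pages and
		 * yields the bitmap 0b111.
		 */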
		bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1;
		if (bp->nb_validend & PAGE_MASK) {
			/* zero-fill remainder of last page */
			bzero(bp->nb_data + bp->nb_validend, PAGE_SIZE - (bp->nb_validend & PAGE_MASK));
		}
	}
	nfs_buf_iodone(bp);
}

/*
 * initiate the NFS READ RPC(s) for a buffer
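 *
 * The buffer is split into chunks of at most nmrsize bytes, one READ
 * RPC per chunk; e.g. (hypothetical sizes) a 64K buffer with a 16K
 * rsize issues 4 RPCs.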
| 1537 | */ |
| 1538 | int |
| 1539 | nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) |
| 1540 | { |
| 1541 | struct nfsmount *nmp; |
| 1542 | nfsnode_t np = bp->nb_np; |
| 1543 | int error = 0, nfsvers, async; |
| 1544 | int offset, nrpcs; |
| 1545 | uint32_t nmrsize, length, len; |
| 1546 | off_t boff; |
| 1547 | struct nfsreq *req; |
| 1548 | struct nfsreq_cbinfo cb; |
| 1549 | |
| 1550 | nmp = NFSTONMP(np); |
| 1551 | if (nfs_mount_gone(nmp)) { |
| 1552 | bp->nb_error = error = ENXIO; |
| 1553 | SET(bp->nb_flags, NB_ERROR); |
| 1554 | nfs_buf_iodone(bp); |
| 1555 | return (error); |
| 1556 | } |
| 1557 | nfsvers = nmp->nm_vers; |
| 1558 | nmrsize = nmp->nm_rsize; |
| 1559 | |
| 1560 | boff = NBOFF(bp); |
| 1561 | offset = 0; |
| 1562 | length = bp->nb_bufsize; |
| 1563 | |
| 1564 | if (nfsvers == NFS_VER2) { |
| 1565 | if (boff > 0xffffffffLL) { |
| 1566 | bp->nb_error = error = EFBIG; |
| 1567 | SET(bp->nb_flags, NB_ERROR); |
| 1568 | nfs_buf_iodone(bp); |
| 1569 | return (error); |
| 1570 | } |
| 1571 | if ((boff + length - 1) > 0xffffffffLL) |
| 1572 | length = 0x100000000LL - boff; |
| 1573 | } |
| 1574 | |
| 1575 | /* Note: Can only do async I/O if nfsiods are configured. */ |
| 1576 | async = (bp->nb_flags & NB_ASYNC); |
| 1577 | cb.rcb_func = async ? nfs_buf_read_rpc_finish : NULL; |
| 1578 | cb.rcb_bp = bp; |
| 1579 | |
| 1580 | bp->nb_offio = bp->nb_endio = 0; |
| 1581 | bp->nb_rpcs = nrpcs = (length + nmrsize - 1) / nmrsize; |
| 1582 | if (async && (nrpcs > 1)) { |
| 1583 | SET(bp->nb_flags, NB_MULTASYNCRPC); |
| 1584 | } else { |
| 1585 | CLR(bp->nb_flags, NB_MULTASYNCRPC); |
| 1586 | } |
| 1587 | |
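| | /* Issue the read as a series of RPCs, each at most nmrsize bytes, until the whole range is sent. */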
| 1588 | while (length > 0) { |
| 1589 | if (ISSET(bp->nb_flags, NB_ERROR)) { |
| 1590 | error = bp->nb_error; |
| 1591 | break; |
| 1592 | } |
| 1593 | len = (length > nmrsize) ? nmrsize : length; |
| 1594 | cb.rcb_args[0] = offset; |
| 1595 | cb.rcb_args[1] = len; |
| 1596 | if (nmp->nm_vers >= NFS_VER4) |
| 1597 | cb.rcb_args[2] = nmp->nm_stategenid; |
| 1598 | req = NULL; |
| 1599 | error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req); |
| 1600 | if (error) |
| 1601 | break; |
| 1602 | offset += len; |
| 1603 | length -= len; |
| 1604 | if (async) |
| 1605 | continue; |
| 1606 | nfs_buf_read_rpc_finish(req); |
| 1607 | if (ISSET(bp->nb_flags, NB_ERROR)) { |
| 1608 | error = bp->nb_error; |
| 1609 | break; |
| 1610 | } |
| 1611 | } |
| 1612 | |
| 1613 | if (length > 0) { |
| 1614 | /* |
| 1615 | * Something bad happened while trying to send the RPC(s). |
| 1616 | * Wait for any outstanding requests to complete. |
| 1617 | */ |
| 1618 | bp->nb_error = error; |
| 1619 | SET(bp->nb_flags, NB_ERROR); |
| 1620 | if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) { |
| 1621 | nrpcs = (length + nmrsize - 1) / nmrsize; |
| 1622 | lck_mtx_lock(nfs_buf_mutex); |
| 1623 | bp->nb_rpcs -= nrpcs; |
| 1624 | if (bp->nb_rpcs == 0) { |
| 1625 | /* No RPCs left, so the buffer's done */ |
| 1626 | lck_mtx_unlock(nfs_buf_mutex); |
| 1627 | nfs_buf_iodone(bp); |
| 1628 | } else { |
| 1629 | /* wait for the last RPC to mark it done */ |
| 1630 | while (bp->nb_rpcs > 0) |
| 1631 | msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, |
| 1632 | "nfs_buf_read_rpc_cancel" , NULL); |
| 1633 | lck_mtx_unlock(nfs_buf_mutex); |
| 1634 | } |
| 1635 | } else { |
| 1636 | nfs_buf_iodone(bp); |
| 1637 | } |
| 1638 | } |
| 1639 | |
| 1640 | return (error); |
| 1641 | } |
| 1642 | |
| 1643 | /* |
| 1644 | * finish up an NFS READ RPC on a buffer |
| 1645 | */ |
| 1646 | void |
| 1647 | nfs_buf_read_rpc_finish(struct nfsreq *req) |
| 1648 | { |
| 1649 | struct nfsmount *nmp; |
| 1650 | size_t rlen; |
| 1651 | struct nfsreq_cbinfo cb; |
| 1652 | struct nfsbuf *bp; |
| 1653 | int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished; |
| 1654 | void *wakeme = NULL; |
| 1655 | struct nfsreq *rreq = NULL; |
| 1656 | nfsnode_t np; |
| 1657 | thread_t thd; |
| 1658 | kauth_cred_t cred; |
| 1659 | uio_t auio; |
| 1660 | char uio_buf [ UIO_SIZEOF(1) ]; |
| 1661 | |
| 1662 | finish: |
| 1663 | np = req->r_np; |
| 1664 | thd = req->r_thread; |
| 1665 | cred = req->r_cred; |
| 1666 | if (IS_VALID_CRED(cred)) |
| 1667 | kauth_cred_ref(cred); |
| 1668 | cb = req->r_callback; |
| 1669 | bp = cb.rcb_bp; |
| 1670 | if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ |
| 1671 | nfs_request_ref(req, 0); |
| 1672 | |
| 1673 | nmp = NFSTONMP(np); |
| 1674 | if (nfs_mount_gone(nmp)) { |
| 1675 | SET(bp->nb_flags, NB_ERROR); |
| 1676 | bp->nb_error = error = ENXIO; |
| 1677 | } |
| 1678 | if (error || ISSET(bp->nb_flags, NB_ERROR)) { |
| 1679 | /* just drop it */ |
| 1680 | nfs_request_async_cancel(req); |
| 1681 | goto out; |
| 1682 | } |
| 1683 | |
| 1684 | nfsvers = nmp->nm_vers; |
| 1685 | offset = cb.rcb_args[0]; |
| 1686 | rlen = length = cb.rcb_args[1]; |
| 1687 | |
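| | /* set up a uio describing where this RPC's reply data lands in the buffer */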
| 1688 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
| 1689 | UIO_READ, &uio_buf, sizeof(uio_buf)); |
| 1690 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); |
| 1691 | |
| 1692 | /* finish the RPC */ |
| 1693 | error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof); |
| 1694 | if ((error == EINPROGRESS) && cb.rcb_func) { |
| 1695 | /* async request restarted */ |
| 1696 | nfs_request_rele(req);
| 1698 | if (IS_VALID_CRED(cred)) |
| 1699 | kauth_cred_unref(&cred); |
| 1700 | return; |
| 1701 | } |
| 1702 | if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { |
| 1703 | lck_mtx_lock(&nmp->nm_lock); |
| 1704 | if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { |
| 1705 | NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
| 1706 | error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid); |
| 1707 | nfs_need_recover(nmp, error); |
| 1708 | } |
| 1709 | lck_mtx_unlock(&nmp->nm_lock); |
| 1710 | if (np->n_flag & NREVOKE) { |
| 1711 | error = EIO; |
| 1712 | } else { |
| 1713 | if (error == NFSERR_GRACE) { |
| 1714 | if (cb.rcb_func) { |
| 1715 | /* |
| 1716 | * For an async I/O request, handle a grace delay just like |
| 1717 | * jukebox errors. Set the resend time and queue it up. |
| 1718 | */ |
| 1719 | struct timeval now; |
| 1720 | if (req->r_nmrep.nmc_mhead) { |
| 1721 | mbuf_freem(req->r_nmrep.nmc_mhead); |
| 1722 | req->r_nmrep.nmc_mhead = NULL; |
| 1723 | } |
| 1724 | req->r_error = 0; |
| 1725 | microuptime(&now); |
| 1726 | lck_mtx_lock(&req->r_mtx); |
| 1727 | req->r_resendtime = now.tv_sec + 2; |
| 1728 | req->r_xid = 0; // get a new XID |
| 1729 | req->r_flags |= R_RESTART; |
| 1730 | req->r_start = 0; |
| 1731 | nfs_asyncio_resend(req); |
| 1732 | lck_mtx_unlock(&req->r_mtx); |
| 1733 | if (IS_VALID_CRED(cred)) |
| 1734 | kauth_cred_unref(&cred); |
| 1735 | /* Note: nfsreq reference taken will be dropped later when finished */ |
| 1736 | return; |
| 1737 | } |
| 1738 | /* otherwise, just pause a couple seconds and retry */ |
| 1739 | tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
| 1740 | } |
| 1741 | if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { |
| 1742 | rlen = 0; |
| 1743 | goto readagain; |
| 1744 | } |
| 1745 | } |
| 1746 | } |
| 1747 | if (error) { |
| 1748 | SET(bp->nb_flags, NB_ERROR); |
| 1749 | bp->nb_error = error; |
| 1750 | goto out; |
| 1751 | } |
| 1752 | |
| 1753 | if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen))) |
| 1754 | bp->nb_endio = offset + rlen; |
| 1755 | |
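| | /* (NFSv2 READ replies carry no EOF flag, so for v2 we always take the zeroing path below.) */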
| 1756 | if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) { |
| 1757 | /* zero out the remaining data (up to EOF) */ |
| 1758 | off_t rpcrem, eofrem, rem; |
| 1759 | rpcrem = (length - rlen); |
| 1760 | eofrem = np->n_size - (NBOFF(bp) + offset + rlen); |
| 1761 | rem = (rpcrem < eofrem) ? rpcrem : eofrem; |
| 1762 | if (rem > 0) |
| 1763 | bzero(bp->nb_data + offset + rlen, rem); |
| 1764 | } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) { |
| 1765 | /* |
| 1766 | * short read |
| 1767 | * |
| 1768 | * We haven't hit EOF and we didn't get all the data |
| 1769 | * requested, so we need to issue another read for the rest. |
| 1770 | * (Don't bother if the buffer already hit an error.) |
| 1771 | */ |
| 1772 | readagain: |
| 1773 | offset += rlen; |
| 1774 | length -= rlen; |
| 1775 | cb.rcb_args[0] = offset; |
| 1776 | cb.rcb_args[1] = length; |
| 1777 | if (nmp->nm_vers >= NFS_VER4) |
| 1778 | cb.rcb_args[2] = nmp->nm_stategenid; |
| 1779 | error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq); |
| 1780 | if (!error) { |
| 1781 | if (IS_VALID_CRED(cred)) |
| 1782 | kauth_cred_unref(&cred); |
| 1783 | if (!cb.rcb_func) { |
| 1784 | /* if !async we'll need to wait for this RPC to finish */ |
| 1785 | req = rreq; |
| 1786 | rreq = NULL; |
| 1787 | goto finish; |
| 1788 | } |
| 1789 | nfs_request_rele(req); |
| 1790 | /* |
| 1791 | * We're done here. |
| 1792 | * Outstanding RPC count is unchanged. |
| 1793 | * Callback will be called when RPC is done. |
| 1794 | */ |
| 1795 | return; |
| 1796 | } |
| 1797 | SET(bp->nb_flags, NB_ERROR); |
| 1798 | bp->nb_error = error; |
| 1799 | } |
| 1800 | |
| 1801 | out: |
| 1802 | if (cb.rcb_func) |
| 1803 | nfs_request_rele(req); |
| 1804 | if (IS_VALID_CRED(cred)) |
| 1805 | kauth_cred_unref(&cred); |
| 1806 | |
| 1807 | /* |
| 1808 | * Decrement outstanding RPC count on buffer |
| 1809 | * and call nfs_buf_read_finish on last RPC. |
| 1810 | * |
| 1811 | * (Note: when there are multiple async RPCs issued for a |
| 1812 | * buffer we need nfs_buf_mutex to avoid problems when
| 1813 | * aborting a partially-initiated set of RPCs) |
| 1814 | */ |
| 1815 | |
| 1816 | multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); |
| 1817 | if (multasyncrpc) |
| 1818 | lck_mtx_lock(nfs_buf_mutex); |
| 1819 | |
| 1820 | bp->nb_rpcs--; |
| 1821 | finished = (bp->nb_rpcs == 0); |
| 1822 | |
| 1823 | if (multasyncrpc) |
| 1824 | lck_mtx_unlock(nfs_buf_mutex); |
| 1825 | |
| 1826 | if (finished) { |
| 1827 | if (multasyncrpc) |
| 1828 | wakeme = &bp->nb_rpcs; |
| 1829 | nfs_buf_read_finish(bp); |
| 1830 | if (wakeme) |
| 1831 | wakeup(wakeme); |
| 1832 | } |
| 1833 | } |
| 1834 | |
| 1835 | /* |
| 1836 | * Do buffer readahead. |
| 1837 | * Initiate async I/O to read buffers not in cache. |
| 1838 | */ |
| 1839 | int |
| 1840 | nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred) |
| 1841 | { |
| 1842 | struct nfsmount *nmp = NFSTONMP(np); |
| 1843 | struct nfsbuf *bp; |
| 1844 | int error = 0; |
| 1845 | uint32_t nra; |
| 1846 | |
| 1847 | if (nfs_mount_gone(nmp)) |
| 1848 | return (ENXIO); |
| 1849 | if (nmp->nm_readahead <= 0) |
| 1850 | return (0); |
| 1851 | if (*rabnp > lastrabn) |
| 1852 | return (0); |
| 1853 | |
| 1854 | for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) { |
| 1855 | /* check if block exists and is valid. */ |
| 1856 | if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) { |
| 1857 | /* stop reading ahead if we're beyond EOF */ |
| 1858 | *rabnp = lastrabn; |
| 1859 | break; |
| 1860 | } |
| 1861 | error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp); |
| 1862 | if (error) |
| 1863 | break; |
| 1864 | nfs_node_lock_force(np); |
| 1865 | np->n_lastrahead = *rabnp; |
| 1866 | nfs_node_unlock(np); |
| 1867 | if (!bp) |
| 1868 | continue; |
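| | /* for NOCACHE readahead, toss clean cached data so the buffer gets re-read from the server */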
| 1869 | if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) && |
| 1870 | !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) { |
| 1871 | CLR(bp->nb_flags, NB_CACHE); |
| 1872 | bp->nb_valid = 0; |
| 1873 | bp->nb_validoff = bp->nb_validend = -1; |
| 1874 | } |
| 1875 | if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty && |
| 1876 | !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) { |
| 1877 | SET(bp->nb_flags, (NB_READ|NB_ASYNC)); |
| 1878 | if (ioflag & IO_NOCACHE) |
| 1879 | SET(bp->nb_flags, NB_NCRDAHEAD); |
| 1880 | if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { |
| 1881 | kauth_cred_ref(cred); |
| 1882 | bp->nb_rcred = cred; |
| 1883 | } |
| 1884 | if ((error = nfs_buf_read(bp))) |
| 1885 | break; |
| 1886 | continue; |
| 1887 | } |
| 1888 | nfs_buf_release(bp, 1); |
| 1889 | } |
| 1890 | return (error); |
| 1891 | } |
| 1892 | |
| 1893 | /* |
| 1894 | * NFS buffer I/O for reading files. |
| 1895 | */ |
| 1896 | int |
| 1897 | nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) |
| 1898 | { |
| 1899 | vnode_t vp = NFSTOV(np); |
| 1900 | struct nfsbuf *bp = NULL; |
| 1901 | struct nfsmount *nmp = VTONMP(vp); |
| 1902 | daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1; |
| 1903 | off_t diff; |
| 1904 | int error = 0, n = 0, on = 0; |
| 1905 | int nfsvers, biosize, modified, readaheads = 0; |
| 1906 | thread_t thd; |
| 1907 | kauth_cred_t cred; |
| 1908 | int64_t io_resid; |
| 1909 | |
| 1910 | FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag); |
| 1911 | |
| 1912 | nfsvers = nmp->nm_vers; |
| 1913 | biosize = nmp->nm_biosize; |
| 1914 | thd = vfs_context_thread(ctx); |
| 1915 | cred = vfs_context_ucred(ctx); |
| 1916 | |
| 1917 | if (vnode_vtype(vp) != VREG) { |
| 1918 | printf("nfs_bioread: type %x unexpected\n" , vnode_vtype(vp)); |
| 1919 | FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL); |
| 1920 | return (EINVAL); |
| 1921 | } |
| 1922 | |
| 1923 | /* |
| 1924 | * For NFS, cache consistency can only be maintained approximately. |
| 1925 | * Although RFC1094 does not specify the criteria, the following is |
| 1926 | * believed to be compatible with the reference port. |
| 1927 | * |
| 1928 | * If the file has changed since the last read RPC or you have |
| 1929 | * written to the file, you may have lost data cache consistency |
| 1930 | * with the server. So, check for a change, and flush all of the |
| 1931 | * file's data out of the cache. |
| 1932 | * NB: This implies that cache data can be read when up to |
| 1933 | * NFS_MAXATTRTIMO seconds out of date. If you find that you |
| 1934 | * need current attributes, nfs_getattr() can be forced to fetch |
| 1935 | * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED). |
| 1936 | */ |
| 1937 | |
| 1938 | if (ISSET(np->n_flag, NUPDATESIZE)) |
| 1939 | nfs_data_update_size(np, 0); |
| 1940 | |
| 1941 | if ((error = nfs_node_lock(np))) { |
| 1942 | FSDBG_BOT(514, np, 0xd1e0222, 0, error); |
| 1943 | return (error); |
| 1944 | } |
| 1945 | |
| 1946 | if (np->n_flag & NNEEDINVALIDATE) { |
| 1947 | np->n_flag &= ~NNEEDINVALIDATE; |
| 1948 | nfs_node_unlock(np); |
| 1949 | error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); |
| 1950 | if (!error) |
| 1951 | error = nfs_node_lock(np); |
| 1952 | if (error) { |
| 1953 | FSDBG_BOT(514, np, 0xd1e0322, 0, error); |
| 1954 | return (error); |
| 1955 | } |
| 1956 | } |
| 1957 | |
| 1958 | modified = (np->n_flag & NMODIFIED); |
| 1959 | nfs_node_unlock(np); |
| 1960 | /* nfs_getattr() will check changed and purge caches */ |
| 1961 | error = nfs_getattr(np, NULL, ctx, modified ? NGA_UNCACHED : NGA_CACHED); |
| 1962 | if (error) { |
| 1963 | FSDBG_BOT(514, np, 0xd1e0004, 0, error); |
| 1964 | return (error); |
| 1965 | } |
| 1966 | |
| 1967 | if (uio_resid(uio) == 0) { |
| 1968 | FSDBG_BOT(514, np, 0xd1e0001, 0, 0); |
| 1969 | return (0); |
| 1970 | } |
| 1971 | if (uio_offset(uio) < 0) { |
| 1972 | FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL); |
| 1973 | return (EINVAL); |
| 1974 | } |
| 1975 | |
| 1976 | /* |
| 1977 | * set up readahead - which may be limited by: |
| 1978 | * + current request length (for IO_NOCACHE) |
| 1979 | * + readahead setting |
| 1980 | * + file size |
| 1981 | */ |
| 1982 | if (nmp->nm_readahead > 0) { |
| 1983 | off_t end = uio_offset(uio) + uio_resid(uio); |
| 1984 | if (end > (off_t)np->n_size) |
| 1985 | end = np->n_size; |
| 1986 | rabn = uio_offset(uio) / biosize; |
| 1987 | maxrabn = (end - 1) / biosize; |
| 1988 | nfs_node_lock_force(np); |
| 1989 | if (!(ioflag & IO_NOCACHE) && |
| 1990 | (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) { |
| 1991 | maxrabn += nmp->nm_readahead; |
| 1992 | if ((maxrabn * biosize) >= (off_t)np->n_size) |
| 1993 | maxrabn = ((off_t)np->n_size - 1)/biosize; |
| 1994 | } |
| 1995 | if (maxrabn < np->n_lastrahead) |
| 1996 | np->n_lastrahead = -1; |
| 1997 | if (rabn < np->n_lastrahead) |
| 1998 | rabn = np->n_lastrahead + 1; |
| 1999 | nfs_node_unlock(np); |
| 2000 | } else { |
| 2001 | rabn = maxrabn = 0; |
| 2002 | } |
| 2003 | |
| 2004 | do { |
| 2005 | |
| 2006 | nfs_data_lock(np, NFS_DATA_LOCK_SHARED); |
| 2007 | lbn = uio_offset(uio) / biosize; |
| 2008 | |
| 2009 | /* |
| 2010 | * Copy directly from any cached pages without grabbing the bufs. |
| 2011 | * (If we are NOCACHE and we've issued readahead requests, we need |
| 2012 | * to grab the NB_NCRDAHEAD bufs to drop them.) |
| 2013 | */ |
| 2014 | if ((!(ioflag & IO_NOCACHE) || !readaheads) && |
| 2015 | ((uio->uio_segflg == UIO_USERSPACE32 || |
| 2016 | uio->uio_segflg == UIO_USERSPACE64 || |
| 2017 | uio->uio_segflg == UIO_USERSPACE))) { |
| 2018 | io_resid = uio_resid(uio); |
| 2019 | diff = np->n_size - uio_offset(uio); |
| 2020 | if (diff < io_resid) |
| 2021 | io_resid = diff; |
| 2022 | if (io_resid > 0) { |
| 2023 | int count = (io_resid > INT_MAX) ? INT_MAX : io_resid; |
| 2024 | error = cluster_copy_ubc_data(vp, uio, &count, 0); |
| 2025 | if (error) { |
| 2026 | nfs_data_unlock(np); |
| 2027 | FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error); |
| 2028 | return (error); |
| 2029 | } |
| 2030 | } |
| 2031 | /* count any biocache reads that we just copied directly */ |
| 2032 | if (lbn != (uio_offset(uio)/biosize)) { |
| 2033 | OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads); |
| 2034 | FSDBG(514, np, 0xcacefeed, uio_offset(uio), error); |
| 2035 | } |
| 2036 | } |
| 2037 | |
| 2038 | lbn = uio_offset(uio) / biosize; |
| 2039 | on = uio_offset(uio) % biosize; |
| 2040 | nfs_node_lock_force(np); |
| 2041 | np->n_lastread = (uio_offset(uio) - 1) / biosize; |
| 2042 | nfs_node_unlock(np); |
| 2043 | |
| 2044 | if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) { |
| 2045 | nfs_data_unlock(np); |
| 2046 | FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa); |
| 2047 | return (0); |
| 2048 | } |
| 2049 | |
| 2050 | /* adjust readahead block number, if necessary */ |
| 2051 | if (rabn < lbn) |
| 2052 | rabn = lbn; |
| 2053 | lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead); |
| 2054 | if (rabn <= lastrabn) { /* start readaheads */ |
| 2055 | error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred); |
| 2056 | if (error) { |
| 2057 | nfs_data_unlock(np); |
| 2058 | FSDBG_BOT(514, np, 0xd1e000b, 1, error); |
| 2059 | return (error); |
| 2060 | } |
| 2061 | readaheads = 1; |
| 2062 | } |
| 2063 | |
| 2064 | OSAddAtomic64(1, &nfsstats.biocache_reads); |
| 2065 | |
| 2066 | /* |
| 2067 | * If the block is in the cache and has the required data |
| 2068 | * in a valid region, just copy it out. |
| 2069 | * Otherwise, get the block and write back/read in, |
| 2070 | * as required. |
| 2071 | */ |
| 2072 | again: |
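| | /* how much to copy this pass: limited by the request, the block boundary, and EOF */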
| 2073 | io_resid = uio_resid(uio); |
| 2074 | n = (io_resid > (biosize - on)) ? (biosize - on) : io_resid; |
| 2075 | diff = np->n_size - uio_offset(uio); |
| 2076 | if (diff < n) |
| 2077 | n = diff; |
| 2078 | |
| 2079 | error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp); |
| 2080 | if (error) { |
| 2081 | nfs_data_unlock(np); |
| 2082 | FSDBG_BOT(514, np, 0xd1e000c, 0, error); |
| 2083 | return (error); |
| 2084 | } |
| 2085 | |
| 2086 | if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) { |
| 2087 | /* |
| 2088 | * IO_NOCACHE found a cached buffer. |
| 2089 | * Flush the buffer if it's dirty. |
| 2090 | * Invalidate the data if it wasn't just read |
| 2091 | * in as part of a "nocache readahead". |
| 2092 | */ |
| 2093 | if (bp->nb_dirty || (bp->nb_dirtyend > 0)) { |
| 2094 | /* so write the buffer out and try again */ |
| 2095 | SET(bp->nb_flags, NB_NOCACHE); |
| 2096 | goto flushbuffer; |
| 2097 | } |
| 2098 | if (ISSET(bp->nb_flags, NB_NCRDAHEAD)) { |
| 2099 | CLR(bp->nb_flags, NB_NCRDAHEAD); |
| 2100 | SET(bp->nb_flags, NB_NOCACHE); |
| 2101 | } |
| 2102 | } |
| 2103 | |
| 2104 | /* if any pages are valid... */ |
| 2105 | if (bp->nb_valid) { |
| 2106 | /* ...check for any invalid pages in the read range */ |
| 2107 | int pg, firstpg, lastpg, dirtypg; |
| 2108 | dirtypg = firstpg = lastpg = -1; |
| 2109 | pg = on/PAGE_SIZE; |
| 2110 | while (pg <= (on + n - 1)/PAGE_SIZE) { |
| 2111 | if (!NBPGVALID(bp,pg)) { |
| 2112 | if (firstpg < 0) |
| 2113 | firstpg = pg; |
| 2114 | lastpg = pg; |
| 2115 | } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp,pg)) |
| 2116 | dirtypg = pg; |
| 2117 | pg++; |
| 2118 | } |
| 2119 | |
| 2120 | /* if there are no invalid pages, we're all set */ |
| 2121 | if (firstpg < 0) { |
| 2122 | if (bp->nb_validoff < 0) { |
| 2123 | /* valid range isn't set up, so */ |
| 2124 | /* set it to what we know is valid */ |
| 2125 | bp->nb_validoff = trunc_page(on); |
| 2126 | bp->nb_validend = round_page(on+n); |
| 2127 | nfs_buf_normalize_valid_range(np, bp); |
| 2128 | } |
| 2129 | goto buffer_ready; |
| 2130 | } |
| 2131 | |
| 2132 | /* there are invalid pages in the read range */ |
| 2133 | if (((dirtypg > firstpg) && (dirtypg < lastpg)) || |
| 2134 | (((firstpg*PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg+1)*PAGE_SIZE) > bp->nb_dirtyoff))) { |
| 2135 | /* there are also dirty page(s) (or range) in the read range, */ |
| 2136 | /* so write the buffer out and try again */ |
| 2137 | flushbuffer: |
| 2138 | CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); |
| 2139 | SET(bp->nb_flags, NB_ASYNC); |
| 2140 | if (!IS_VALID_CRED(bp->nb_wcred)) { |
| 2141 | kauth_cred_ref(cred); |
| 2142 | bp->nb_wcred = cred; |
| 2143 | } |
| 2144 | error = nfs_buf_write(bp); |
| 2145 | if (error) { |
| 2146 | nfs_data_unlock(np); |
| 2147 | FSDBG_BOT(514, np, 0xd1e000d, 0, error); |
| 2148 | return (error); |
| 2149 | } |
| 2150 | goto again; |
| 2151 | } |
| 2152 | if (!bp->nb_dirty && bp->nb_dirtyend <= 0 && |
| 2153 | (lastpg - firstpg + 1) > (biosize/PAGE_SIZE)/2) { |
| 2154 | /* we need to read in more than half the buffer and the */ |
| 2155 | /* buffer's not dirty, so just fetch the whole buffer */ |
| 2156 | bp->nb_valid = 0; |
| 2157 | } else { |
| 2158 | /* read the page range in */ |
| 2159 | uio_t auio; |
| 2160 | char uio_buf[ UIO_SIZEOF(1) ]; |
| 2161 | |
| 2162 | NFS_BUF_MAP(bp); |
| 2163 | auio = uio_createwithbuffer(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64), |
| 2164 | UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); |
| 2165 | if (!auio) { |
| 2166 | error = ENOMEM; |
| 2167 | } else { |
| 2168 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)), |
| 2169 | ((lastpg - firstpg + 1) * PAGE_SIZE)); |
| 2170 | error = nfs_read_rpc(np, auio, ctx); |
| 2171 | } |
| 2172 | if (error) { |
| 2173 | if (ioflag & IO_NOCACHE) |
| 2174 | SET(bp->nb_flags, NB_NOCACHE); |
| 2175 | nfs_buf_release(bp, 1); |
| 2176 | nfs_data_unlock(np); |
| 2177 | FSDBG_BOT(514, np, 0xd1e000e, 0, error); |
| 2178 | return (error); |
| 2179 | } |
| 2180 | /* Make sure that the valid range is set to cover this read. */ |
| 2181 | bp->nb_validoff = trunc_page_32(on); |
| 2182 | bp->nb_validend = round_page_32(on+n); |
| 2183 | nfs_buf_normalize_valid_range(np, bp); |
| 2184 | if (uio_resid(auio) > 0) { |
| 2185 | /* if short read, must have hit EOF, */ |
| 2186 | /* so zero the rest of the range */ |
| 2187 | bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio)); |
| 2188 | } |
| 2189 | /* mark the pages (successfully read) as valid */ |
| 2190 | for (pg=firstpg; pg <= lastpg; pg++) |
| 2191 | NBPGVALID_SET(bp,pg); |
| 2192 | } |
| 2193 | } |
| 2194 | /* if no pages are valid, read the whole block */ |
| 2195 | if (!bp->nb_valid) { |
| 2196 | if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { |
| 2197 | kauth_cred_ref(cred); |
| 2198 | bp->nb_rcred = cred; |
| 2199 | } |
| 2200 | SET(bp->nb_flags, NB_READ); |
| 2201 | CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); |
| 2202 | error = nfs_buf_read(bp); |
| 2203 | if (ioflag & IO_NOCACHE) |
| 2204 | SET(bp->nb_flags, NB_NOCACHE); |
| 2205 | if (error) { |
| 2206 | nfs_data_unlock(np); |
| 2207 | nfs_buf_release(bp, 1); |
| 2208 | FSDBG_BOT(514, np, 0xd1e000f, 0, error); |
| 2209 | return (error); |
| 2210 | } |
| 2211 | } |
| 2212 | buffer_ready: |
| 2213 | /* validate read range against valid range and clip */ |
| 2214 | if (bp->nb_validend > 0) { |
| 2215 | diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on); |
| 2216 | if (diff < n) |
| 2217 | n = diff; |
| 2218 | } |
| 2219 | if (n > 0) { |
| 2220 | NFS_BUF_MAP(bp); |
| 2221 | error = uiomove(bp->nb_data + on, n, uio); |
| 2222 | } |
| 2223 | |
| 2224 | nfs_buf_release(bp, 1); |
| 2225 | nfs_data_unlock(np); |
| 2226 | nfs_node_lock_force(np); |
| 2227 | np->n_lastread = (uio_offset(uio) - 1) / biosize; |
| 2228 | nfs_node_unlock(np); |
| 2229 | } while (error == 0 && uio_resid(uio) > 0 && n > 0); |
| 2230 | FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error); |
| 2231 | return (error); |
| 2232 | } |
| 2233 | |
| 2234 | /* |
| 2235 | * limit the number of outstanding async I/O writes |
| 2236 | */ |
| 2237 | int |
| 2238 | nfs_async_write_start(struct nfsmount *nmp) |
| 2239 | { |
| 2240 | int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0; |
| 2241 | struct timespec ts = {1, 0}; |
| 2242 | |
| 2243 | if (nfs_max_async_writes <= 0) |
| 2244 | return (0); |
| 2245 | lck_mtx_lock(&nmp->nm_lock); |
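| | /* wait while we're at the limit, rechecking for signals about once a second */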
| 2246 | while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) { |
| 2247 | if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) |
| 2248 | break; |
| 2249 | msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag|(PZERO-1), "nfsasyncwrites", &ts);
| 2250 | slpflag = 0; |
| 2251 | } |
| 2252 | if (!error) |
| 2253 | nmp->nm_asyncwrites++; |
| 2254 | lck_mtx_unlock(&nmp->nm_lock); |
| 2255 | return (error); |
| 2256 | } |
| 2257 | void |
| 2258 | nfs_async_write_done(struct nfsmount *nmp) |
| 2259 | { |
| 2260 | if (nmp->nm_asyncwrites <= 0) |
| 2261 | return; |
| 2262 | lck_mtx_lock(&nmp->nm_lock); |
| 2263 | if (nmp->nm_asyncwrites-- >= nfs_max_async_writes) |
| 2264 | wakeup(&nmp->nm_asyncwrites); |
| 2265 | lck_mtx_unlock(&nmp->nm_lock); |
| 2266 | } |
| 2267 | |
| 2268 | /* |
| 2269 | * write (or commit) the given NFS buffer |
| 2270 | * |
| 2271 | * Commit the buffer if we can. |
| 2272 | * Write out any dirty range. |
| 2273 | * If any dirty pages remain, write them out. |
| 2274 | * Mark buffer done. |
| 2275 | * |
| 2276 | * For async requests, all the work beyond sending the initial |
| 2277 | * write RPC is handled in the RPC callback(s). |
| 2278 | */ |
| 2279 | int |
| 2280 | nfs_buf_write(struct nfsbuf *bp) |
| 2281 | { |
| 2282 | int error = 0, oldflags, async; |
| 2283 | nfsnode_t np; |
| 2284 | thread_t thd; |
| 2285 | kauth_cred_t cred; |
| 2286 | proc_t p = current_proc(); |
| 2287 | int iomode, doff, dend, firstpg, lastpg; |
| 2288 | uint32_t pagemask; |
| 2289 | |
| 2290 | FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0); |
| 2291 | |
| 2292 | if (!ISSET(bp->nb_lflags, NBL_BUSY)) |
| 2293 | panic("nfs_buf_write: buffer is not busy???");
| 2294 | |
| 2295 | np = bp->nb_np; |
| 2296 | async = ISSET(bp->nb_flags, NB_ASYNC); |
| 2297 | oldflags = bp->nb_flags; |
| 2298 | |
| 2299 | CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); |
| 2300 | if (ISSET(oldflags, NB_DELWRI)) { |
| 2301 | lck_mtx_lock(nfs_buf_mutex); |
| 2302 | nfs_nbdwrite--; |
| 2303 | NFSBUFCNTCHK(); |
| 2304 | lck_mtx_unlock(nfs_buf_mutex); |
| 2305 | wakeup(&nfs_nbdwrite); |
| 2306 | } |
| 2307 | |
| 2308 | /* move to clean list */ |
| 2309 | if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) { |
| 2310 | lck_mtx_lock(nfs_buf_mutex); |
| 2311 | if (bp->nb_vnbufs.le_next != NFSNOLIST) |
| 2312 | LIST_REMOVE(bp, nb_vnbufs); |
| 2313 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); |
| 2314 | lck_mtx_unlock(nfs_buf_mutex); |
| 2315 | } |
| 2316 | nfs_node_lock_force(np); |
| 2317 | np->n_numoutput++; |
| 2318 | nfs_node_unlock(np); |
| 2319 | vnode_startwrite(NFSTOV(np)); |
| 2320 | |
| 2321 | if (p && p->p_stats) |
| 2322 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); |
| 2323 | |
| 2324 | cred = bp->nb_wcred; |
| 2325 | if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ)) |
| 2326 | cred = bp->nb_rcred; /* shouldn't really happen, but... */ |
| 2327 | if (IS_VALID_CRED(cred)) |
| 2328 | kauth_cred_ref(cred); |
| 2329 | thd = async ? NULL : current_thread(); |
| 2330 | |
| 2331 | /* We need to make sure the pages are locked before doing I/O. */ |
| 2332 | if (!ISSET(bp->nb_flags, NB_META)) { |
| 2333 | if (UBCINFOEXISTS(NFSTOV(np))) { |
| 2334 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) { |
| 2335 | error = nfs_buf_upl_setup(bp); |
| 2336 | if (error) { |
| 2337 | printf("nfs_buf_write: upl create failed %d\n", error);
| 2338 | SET(bp->nb_flags, NB_ERROR); |
| 2339 | bp->nb_error = error = EIO; |
| 2340 | nfs_buf_iodone(bp); |
| 2341 | goto out; |
| 2342 | } |
| 2343 | nfs_buf_upl_check(bp); |
| 2344 | } |
| 2345 | } else { |
| 2346 | /* We should never be in nfs_buf_write() with no UBCINFO. */ |
| 2347 | printf("nfs_buf_write: ubcinfo already gone\n");
| 2348 | SET(bp->nb_flags, NB_ERROR); |
| 2349 | bp->nb_error = error = EIO; |
| 2350 | nfs_buf_iodone(bp); |
| 2351 | goto out; |
| 2352 | } |
| 2353 | } |
| 2354 | |
| 2355 | /* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */ |
| 2356 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) |
| 2357 | nfs_buf_check_write_verifier(np, bp); |
| 2358 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { |
| 2359 | struct nfsmount *nmp = NFSTONMP(np); |
| 2360 | if (nfs_mount_gone(nmp)) { |
| 2361 | SET(bp->nb_flags, NB_ERROR); |
| 2362 | bp->nb_error = error = EIO; |
| 2363 | nfs_buf_iodone(bp); |
| 2364 | goto out; |
| 2365 | } |
| 2366 | SET(bp->nb_flags, NB_WRITEINPROG); |
| 2367 | error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff, |
| 2368 | bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf); |
| 2369 | CLR(bp->nb_flags, NB_WRITEINPROG); |
| 2370 | if (error) { |
| 2371 | if (error != NFSERR_STALEWRITEVERF) { |
| 2372 | SET(bp->nb_flags, NB_ERROR); |
| 2373 | bp->nb_error = error; |
| 2374 | } |
| 2375 | nfs_buf_iodone(bp); |
| 2376 | goto out; |
| 2377 | } |
| 2378 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; |
| 2379 | CLR(bp->nb_flags, NB_NEEDCOMMIT); |
| 2380 | nfs_node_lock_force(np); |
| 2381 | np->n_needcommitcnt--; |
| 2382 | CHECK_NEEDCOMMITCNT(np); |
| 2383 | nfs_node_unlock(np); |
| 2384 | } |
| 2385 | if (!error && (bp->nb_dirtyend > 0)) { |
| 2386 | /* sanity check the dirty range */ |
| 2387 | if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) { |
| 2388 | bp->nb_dirtyend = np->n_size - NBOFF(bp); |
| 2389 | if (bp->nb_dirtyoff >= bp->nb_dirtyend) |
| 2390 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; |
| 2391 | } |
| 2392 | } |
| 2393 | if (!error && (bp->nb_dirtyend > 0)) { |
| 2394 | /* there's a dirty range that needs to be written out */ |
| 2395 | NFS_BUF_MAP(bp); |
| 2396 | |
| 2397 | doff = bp->nb_dirtyoff; |
| 2398 | dend = bp->nb_dirtyend; |
| 2399 | |
| 2400 | /* if doff page is dirty, move doff to start of page */ |
| 2401 | if (NBPGDIRTY(bp, doff / PAGE_SIZE)) |
| 2402 | doff -= doff & PAGE_MASK; |
| 2403 | /* try to expand write range to include preceding dirty pages */ |
| 2404 | if (!(doff & PAGE_MASK)) |
| 2405 | while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE)) |
| 2406 | doff -= PAGE_SIZE; |
| 2407 | /* if dend page is dirty, move dend to start of next page */ |
| 2408 | if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) |
| 2409 | dend = round_page_32(dend); |
| 2410 | /* try to expand write range to include trailing dirty pages */ |
| 2411 | if (!(dend & PAGE_MASK)) |
| 2412 | while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE)) |
| 2413 | dend += PAGE_SIZE; |
| 2414 | /* make sure to keep dend clipped to EOF */ |
| 2415 | if ((NBOFF(bp) + dend) > (off_t) np->n_size) |
| 2416 | dend = np->n_size - NBOFF(bp); |
| 2417 | /* calculate range of complete pages being written */ |
| 2418 | firstpg = round_page_32(doff) / PAGE_SIZE; |
| 2419 | lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE; |
| 2420 | /* calculate mask for that page range */ |
| 2421 | pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); |
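| | /* e.g. firstpg 1, lastpg 3 -> pagemask 0b1110 (pages 1-3) */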
| 2422 | |
| 2423 | /* |
| 2424 | * compare page mask to nb_dirty; if there are other dirty pages |
| 2425 | * then write FILESYNC; otherwise, write UNSTABLE if async and |
| 2426 | * not needcommit/stable; otherwise write FILESYNC |
| 2427 | */ |
| 2428 | if (bp->nb_dirty & ~pagemask) |
| 2429 | iomode = NFS_WRITE_FILESYNC; |
| 2430 | else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) |
| 2431 | iomode = NFS_WRITE_UNSTABLE; |
| 2432 | else |
| 2433 | iomode = NFS_WRITE_FILESYNC; |
| 2434 | |
| 2435 | /* write the whole contiguous dirty range */ |
| 2436 | bp->nb_offio = doff; |
| 2437 | bp->nb_endio = dend; |
| 2438 | |
| 2439 | OSAddAtomic64(1, &nfsstats.write_bios); |
| 2440 | |
| 2441 | SET(bp->nb_flags, NB_WRITEINPROG); |
| 2442 | error = nfs_buf_write_rpc(bp, iomode, thd, cred); |
| 2443 | /* |
| 2444 | * For async I/O, the callbacks will finish up the |
| 2445 | * write and push out any dirty pages. Otherwise, |
| 2446 | * the write has already been finished and any dirty |
| 2447 | * pages pushed out. |
| 2448 | */ |
| 2449 | } else { |
| 2450 | if (!error && bp->nb_dirty) /* write out any dirty pages */ |
| 2451 | error = nfs_buf_write_dirty_pages(bp, thd, cred); |
| 2452 | nfs_buf_iodone(bp); |
| 2453 | } |
| 2454 | /* note: bp is still valid only for !async case */ |
| 2455 | out: |
| 2456 | if (!async) { |
| 2457 | error = nfs_buf_iowait(bp); |
| 2458 | /* move to clean list */ |
| 2459 | if (oldflags & NB_DELWRI) { |
| 2460 | lck_mtx_lock(nfs_buf_mutex); |
| 2461 | if (bp->nb_vnbufs.le_next != NFSNOLIST) |
| 2462 | LIST_REMOVE(bp, nb_vnbufs); |
| 2463 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); |
| 2464 | lck_mtx_unlock(nfs_buf_mutex); |
| 2465 | } |
| 2466 | FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, error); |
| 2467 | nfs_buf_release(bp, 1); |
| 2468 | /* check if we need to invalidate (and we can) */ |
| 2469 | if ((np->n_flag & NNEEDINVALIDATE) && |
| 2470 | !(np->n_bflag & (NBINVALINPROG|NBFLUSHINPROG))) { |
| 2471 | int invalidate = 0; |
| 2472 | nfs_node_lock_force(np); |
| 2473 | if (np->n_flag & NNEEDINVALIDATE) { |
| 2474 | invalidate = 1; |
| 2475 | np->n_flag &= ~NNEEDINVALIDATE; |
| 2476 | } |
| 2477 | nfs_node_unlock(np); |
| 2478 | if (invalidate) { |
| 2479 | /* |
| 2480 | * There was a write error and we need to |
| 2481 | * invalidate attrs and flush buffers in |
| 2482 | * order to sync up with the server. |
| 2483 | * (if this write was extending the file, |
| 2484 | * we may no longer know the correct size) |
| 2485 | * |
| 2486 | * But we couldn't call vinvalbuf while holding |
| 2487 | * the buffer busy. So we call vinvalbuf() after |
| 2488 | * releasing the buffer. |
| 2489 | */ |
| 2490 | nfs_vinvalbuf2(NFSTOV(np), V_SAVE|V_IGNORE_WRITEERR, thd, cred, 1); |
| 2491 | } |
| 2492 | } |
| 2493 | } |
| 2494 | |
| 2495 | if (IS_VALID_CRED(cred)) |
| 2496 | kauth_cred_unref(&cred); |
| 2497 | return (error); |
| 2498 | } |
| 2499 | |
| 2500 | /* |
| 2501 | * finish the writing of a buffer |
| 2502 | */ |
| 2503 | void |
| 2504 | nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) |
| 2505 | { |
| 2506 | nfsnode_t np = bp->nb_np; |
| 2507 | int error = (bp->nb_flags & NB_ERROR) ? bp->nb_error : 0; |
| 2508 | int firstpg, lastpg; |
| 2509 | uint32_t pagemask; |
| 2510 | |
| 2511 | if ((error == EINTR) || (error == ERESTART)) { |
| 2512 | CLR(bp->nb_flags, NB_ERROR); |
| 2513 | SET(bp->nb_flags, NB_EINTR); |
| 2514 | } |
| 2515 | |
| 2516 | if (!error) { |
| 2517 | /* calculate range of complete pages being written */ |
| 2518 | firstpg = round_page_32(bp->nb_offio) / PAGE_SIZE; |
| 2519 | lastpg = (trunc_page_32(bp->nb_endio) - 1) / PAGE_SIZE; |
| 2520 | /* calculate mask for that page range written */ |
| 2521 | pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); |
| 2522 | /* clear dirty bits for pages we've written */ |
| 2523 | bp->nb_dirty &= ~pagemask; |
| 2524 | } |
| 2525 | |
| 2526 | /* manage needcommit state */ |
| 2527 | if (!error && (bp->nb_commitlevel == NFS_WRITE_UNSTABLE)) { |
| 2528 | if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { |
| 2529 | nfs_node_lock_force(np); |
| 2530 | np->n_needcommitcnt++; |
| 2531 | nfs_node_unlock(np); |
| 2532 | SET(bp->nb_flags, NB_NEEDCOMMIT); |
| 2533 | } |
| 2534 | /* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */ |
| 2535 | bp->nb_dirtyoff = bp->nb_offio; |
| 2536 | bp->nb_dirtyend = bp->nb_endio; |
| 2537 | } else if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { |
| 2538 | nfs_node_lock_force(np); |
| 2539 | np->n_needcommitcnt--; |
| 2540 | CHECK_NEEDCOMMITCNT(np); |
| 2541 | nfs_node_unlock(np); |
| 2542 | CLR(bp->nb_flags, NB_NEEDCOMMIT); |
| 2543 | } |
| 2544 | |
| 2545 | CLR(bp->nb_flags, NB_WRITEINPROG); |
| 2546 | |
| 2547 | /* |
| 2548 | * For an unstable write, the buffer is still treated as dirty until |
| 2549 | * a commit (or stable (re)write) is performed. Buffers needing only |
| 2550 | * a commit are marked with the NB_DELWRI and NB_NEEDCOMMIT flags. |
| 2551 | * |
| 2552 | * If the write was interrupted we set NB_EINTR. Don't set NB_ERROR |
| 2553 | * because that would cause the buffer to be dropped. The buffer is |
| 2554 | * still valid and simply needs to be written again. |
| 2555 | */ |
| 2556 | if ((error == EINTR) || (error == ERESTART) || (!error && (bp->nb_flags & NB_NEEDCOMMIT))) { |
| 2557 | CLR(bp->nb_flags, NB_INVAL); |
| 2558 | if (!ISSET(bp->nb_flags, NB_DELWRI)) { |
| 2559 | SET(bp->nb_flags, NB_DELWRI); |
| 2560 | lck_mtx_lock(nfs_buf_mutex); |
| 2561 | nfs_nbdwrite++; |
| 2562 | NFSBUFCNTCHK(); |
| 2563 | lck_mtx_unlock(nfs_buf_mutex); |
| 2564 | } |
| 2565 | /* |
| 2566 | * Since for the NB_ASYNC case, we've reassigned the buffer to the |
| 2567 | * clean list, we have to reassign it back to the dirty one. Ugh. |
| 2568 | */ |
| 2569 | if (ISSET(bp->nb_flags, NB_ASYNC)) { |
| 2570 | /* move to dirty list */ |
| 2571 | lck_mtx_lock(nfs_buf_mutex); |
| 2572 | if (bp->nb_vnbufs.le_next != NFSNOLIST) |
| 2573 | LIST_REMOVE(bp, nb_vnbufs); |
| 2574 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); |
| 2575 | lck_mtx_unlock(nfs_buf_mutex); |
| 2576 | } |
| 2577 | } else { |
| 2578 | /* either there's an error or we don't need to commit */ |
| 2579 | if (error) { |
| 2580 | /* |
| 2581 | * There was a write error and we need to invalidate |
| 2582 | * attrs and flush buffers in order to sync up with the |
| 2583 | * server. (if this write was extending the file, we |
| 2584 | * may no longer know the correct size) |
| 2585 | * |
| 2586 | * But we can't call vinvalbuf while holding this |
| 2587 | * buffer busy. Set a flag to do it after releasing |
| 2588 | * the buffer. |
| 2589 | */ |
| 2590 | nfs_node_lock_force(np); |
| 2591 | np->n_error = error; |
| 2592 | np->n_flag |= (NWRITEERR | NNEEDINVALIDATE); |
| 2593 | NATTRINVALIDATE(np); |
| 2594 | nfs_node_unlock(np); |
| 2595 | } |
| 2596 | /* clear the dirty range */ |
| 2597 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; |
| 2598 | } |
| 2599 | |
| 2600 | if (!error && bp->nb_dirty) |
| 2601 | nfs_buf_write_dirty_pages(bp, thd, cred); |
| 2602 | nfs_buf_iodone(bp); |
| 2603 | } |
| 2604 | |
| 2605 | /* |
| 2606 | * write out any pages marked dirty in a buffer |
| 2607 | * |
| 2608 | * We use unstable writes and follow up with a commit.
| 2609 | * If we catch the write verifier changing, we restart
| 2610 | * and redo the writes FILESYNC.
| 2611 | */ |
| 2612 | int |
| 2613 | nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) |
| 2614 | { |
| 2615 | nfsnode_t np = bp->nb_np; |
| 2616 | struct nfsmount *nmp = NFSTONMP(np); |
| 2617 | int error = 0, commit, iomode, iomode2, len, pg, count, npages, off; |
| 2618 | uint32_t dirty = bp->nb_dirty; |
| 2619 | uint64_t wverf; |
| 2620 | uio_t auio; |
| 2621 | char uio_buf [ UIO_SIZEOF(1) ]; |
| 2622 | |
| 2623 | if (!bp->nb_dirty) |
| 2624 | return (0); |
| 2625 | |
| 2626 | /* there are pages marked dirty that need to be written out */ |
| 2627 | OSAddAtomic64(1, &nfsstats.write_bios); |
| 2628 | NFS_BUF_MAP(bp); |
| 2629 | SET(bp->nb_flags, NB_WRITEINPROG); |
| 2630 | npages = bp->nb_bufsize / PAGE_SIZE; |
| 2631 | iomode = NFS_WRITE_UNSTABLE; |
| 2632 | |
| 2633 | auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, |
| 2634 | &uio_buf, sizeof(uio_buf)); |
| 2635 | |
| 2636 | again: |
| 2637 | dirty = bp->nb_dirty; |
| 2638 | wverf = bp->nb_verf; |
| 2639 | commit = NFS_WRITE_FILESYNC; |
| 2640 | for (pg = 0; pg < npages; pg++) { |
| 2641 | if (!NBPGDIRTY(bp, pg)) |
| 2642 | continue; |
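| | /* coalesce this page with any contiguous dirty pages that follow it */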
| 2643 | count = 1; |
| 2644 | while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count)) |
| 2645 | count++; |
| 2646 | /* write count pages starting with page pg */ |
| 2647 | off = pg * PAGE_SIZE; |
| 2648 | len = count * PAGE_SIZE; |
| 2649 | /* clip writes to EOF */ |
| 2650 | if (NBOFF(bp) + off + len > (off_t) np->n_size) |
| 2651 | len -= (NBOFF(bp) + off + len) - np->n_size; |
| 2652 | if (len > 0) { |
| 2653 | iomode2 = iomode; |
| 2654 | uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE); |
| 2655 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len); |
| 2656 | error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf); |
| 2657 | if (error) |
| 2658 | break; |
| 2659 | if (iomode2 < commit) /* Retain the lowest commitment level returned. */ |
| 2660 | commit = iomode2; |
| 2661 | if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) { |
| 2662 | /* verifier changed, redo all the writes filesync */ |
| 2663 | iomode = NFS_WRITE_FILESYNC; |
| 2664 | goto again; |
| 2665 | } |
| 2666 | } |
| 2667 | /* clear dirty bits */ |
| 2668 | while (count--) { |
| 2669 | dirty &= ~(1 << pg); |
| 2670 | if (count) /* leave pg on last page */ |
| 2671 | pg++; |
| 2672 | } |
| 2673 | } |
| 2674 | CLR(bp->nb_flags, NB_WRITEINPROG); |
| 2675 | |
| 2676 | if (!error && (commit != NFS_WRITE_FILESYNC)) { |
| 2677 | error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred, wverf); |
| 2678 | if (error == NFSERR_STALEWRITEVERF) { |
| 2679 | /* verifier changed, so we need to restart all the writes */ |
| 2680 | iomode = NFS_WRITE_FILESYNC; |
| 2681 | goto again; |
| 2682 | } |
| 2683 | } |
| 2684 | if (!error) { |
| 2685 | bp->nb_dirty = dirty; |
| 2686 | } else { |
| 2687 | SET(bp->nb_flags, NB_ERROR); |
| 2688 | bp->nb_error = error; |
| 2689 | } |
| 2690 | return (error); |
| 2691 | } |
| 2692 | |
| 2693 | /* |
| 2694 | * initiate the NFS WRITE RPC(s) for a buffer |
| 2695 | */ |
| 2696 | int |
| 2697 | nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred) |
| 2698 | { |
| 2699 | struct nfsmount *nmp; |
| 2700 | nfsnode_t np = bp->nb_np; |
| 2701 | int error = 0, nfsvers, async; |
| 2702 | int offset, nrpcs; |
| 2703 | uint32_t nmwsize, length, len; |
| 2704 | struct nfsreq *req; |
| 2705 | struct nfsreq_cbinfo cb; |
| 2706 | uio_t auio; |
| 2707 | char uio_buf [ UIO_SIZEOF(1) ]; |
| 2708 | |
| 2709 | nmp = NFSTONMP(np); |
| 2710 | if (nfs_mount_gone(nmp)) { |
| 2711 | bp->nb_error = error = ENXIO; |
| 2712 | SET(bp->nb_flags, NB_ERROR); |
| 2713 | nfs_buf_iodone(bp); |
| 2714 | return (error); |
| 2715 | } |
| 2716 | nfsvers = nmp->nm_vers; |
| 2717 | nmwsize = nmp->nm_wsize; |
| 2718 | |
| 2719 | offset = bp->nb_offio; |
| 2720 | length = bp->nb_endio - bp->nb_offio; |
| 2721 | |
| 2722 | /* Note: Can only do async I/O if nfsiods are configured. */ |
| 2723 | async = (bp->nb_flags & NB_ASYNC) && (NFSIOD_MAX > 0); |
| 2724 | bp->nb_commitlevel = NFS_WRITE_FILESYNC; |
| 2725 | cb.rcb_func = async ? nfs_buf_write_rpc_finish : NULL; |
| 2726 | cb.rcb_bp = bp; |
| 2727 | |
| 2728 | if ((nfsvers == NFS_VER2) && ((NBOFF(bp) + bp->nb_endio) > 0xffffffffLL)) { |
| 2729 | bp->nb_error = error = EFBIG; |
| 2730 | SET(bp->nb_flags, NB_ERROR); |
| 2731 | nfs_buf_iodone(bp); |
| 2732 | return (error); |
| 2733 | } |
| 2734 | |
| 2735 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
| 2736 | UIO_WRITE, &uio_buf, sizeof(uio_buf)); |
| 2737 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); |
| 2738 | |
| 2739 | bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize; |
| 2740 | if (async && (nrpcs > 1)) { |
| 2741 | SET(bp->nb_flags, NB_MULTASYNCRPC); |
| 2742 | } else { |
| 2743 | CLR(bp->nb_flags, NB_MULTASYNCRPC); |
| 2744 | } |
| 2745 | |
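| | /* Issue the write as a series of RPCs, each at most nmwsize bytes; async writes are throttled by nfs_async_write_start(). */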
| 2746 | while (length > 0) { |
| 2747 | if (ISSET(bp->nb_flags, NB_ERROR)) { |
| 2748 | error = bp->nb_error; |
| 2749 | break; |
| 2750 | } |
| 2751 | len = (length > nmwsize) ? nmwsize : length; |
| 2752 | cb.rcb_args[0] = offset; |
| 2753 | cb.rcb_args[1] = len; |
| 2754 | if (nmp->nm_vers >= NFS_VER4) |
| 2755 | cb.rcb_args[2] = nmp->nm_stategenid; |
| 2756 | if (async && ((error = nfs_async_write_start(nmp)))) |
| 2757 | break; |
| 2758 | req = NULL; |
| 2759 | error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred, |
| 2760 | iomode, &cb, &req); |
| 2761 | if (error) { |
| 2762 | if (async) |
| 2763 | nfs_async_write_done(nmp); |
| 2764 | break; |
| 2765 | } |
| 2766 | offset += len; |
| 2767 | length -= len; |
| 2768 | if (async) |
| 2769 | continue; |
| 2770 | nfs_buf_write_rpc_finish(req); |
| 2771 | } |
| 2772 | |
| 2773 | if (length > 0) { |
| 2774 | /* |
| 2775 | * Something bad happened while trying to send the RPCs. |
| 2776 | * Wait for any outstanding requests to complete. |
| 2777 | */ |
| 2778 | bp->nb_error = error; |
| 2779 | SET(bp->nb_flags, NB_ERROR); |
| 2780 | if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) { |
| 2781 | nrpcs = (length + nmwsize - 1) / nmwsize; |
| 2782 | lck_mtx_lock(nfs_buf_mutex); |
| 2783 | bp->nb_rpcs -= nrpcs; |
| 2784 | if (bp->nb_rpcs == 0) { |
| 2785 | /* No RPCs left, so the buffer's done */ |
| 2786 | lck_mtx_unlock(nfs_buf_mutex); |
| 2787 | nfs_buf_write_finish(bp, thd, cred); |
| 2788 | } else { |
| 2789 | /* wait for the last RPC to mark it done */ |
| 2790 | while (bp->nb_rpcs > 0) |
| 2791 | msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, |
| 2792 | "nfs_buf_write_rpc_cancel" , NULL); |
| 2793 | lck_mtx_unlock(nfs_buf_mutex); |
| 2794 | } |
| 2795 | } else { |
| 2796 | nfs_buf_write_finish(bp, thd, cred); |
| 2797 | } |
| 2798 | /* It may have just been an interrupt... that's OK */ |
| 2799 | if (!ISSET(bp->nb_flags, NB_ERROR)) |
| 2800 | error = 0; |
| 2801 | } |
| 2802 | |
| 2803 | return (error); |
| 2804 | } |
| 2805 | |
| 2806 | /* |
| 2807 | * finish up an NFS WRITE RPC on a buffer |
| 2808 | */ |
| 2809 | void |
| 2810 | nfs_buf_write_rpc_finish(struct nfsreq *req) |
| 2811 | { |
| 2812 | int error = 0, nfsvers, offset, length, multasyncrpc, finished; |
| 2813 | int committed = NFS_WRITE_FILESYNC; |
| 2814 | uint64_t wverf = 0; |
| 2815 | size_t rlen; |
| 2816 | void *wakeme = NULL; |
| 2817 | struct nfsreq_cbinfo cb; |
| 2818 | struct nfsreq *wreq = NULL; |
| 2819 | struct nfsbuf *bp; |
| 2820 | struct nfsmount *nmp; |
| 2821 | nfsnode_t np; |
| 2822 | thread_t thd; |
| 2823 | kauth_cred_t cred; |
| 2824 | uio_t auio; |
| 2825 | char uio_buf [ UIO_SIZEOF(1) ]; |
| 2826 | |
| 2827 | finish: |
| 2828 | np = req->r_np; |
| 2829 | thd = req->r_thread; |
| 2830 | cred = req->r_cred; |
| 2831 | if (IS_VALID_CRED(cred)) |
| 2832 | kauth_cred_ref(cred); |
| 2833 | cb = req->r_callback; |
| 2834 | bp = cb.rcb_bp; |
| 2835 | if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ |
| 2836 | nfs_request_ref(req, 0); |
| 2837 | |
| 2838 | nmp = NFSTONMP(np); |
| 2839 | if (nfs_mount_gone(nmp)) { |
| 2840 | SET(bp->nb_flags, NB_ERROR); |
| 2841 | bp->nb_error = error = ENXIO; |
| 2842 | } |
| 2843 | if (error || ISSET(bp->nb_flags, NB_ERROR)) { |
| 2844 | /* just drop it */ |
| 2845 | nfs_request_async_cancel(req); |
| 2846 | goto out; |
| 2847 | } |
| 2848 | nfsvers = nmp->nm_vers; |
| 2849 | |
| 2850 | offset = cb.rcb_args[0]; |
| 2851 | rlen = length = cb.rcb_args[1]; |
| 2852 | |
| 2853 | /* finish the RPC */ |
| 2854 | error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf); |
| 2855 | if ((error == EINPROGRESS) && cb.rcb_func) { |
| 2856 | /* async request restarted */ |
| 2857 | nfs_request_rele(req);
| 2859 | if (IS_VALID_CRED(cred)) |
| 2860 | kauth_cred_unref(&cred); |
| 2861 | return; |
| 2862 | } |
| 2863 | if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { |
| 2864 | lck_mtx_lock(&nmp->nm_lock); |
| 2865 | if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { |
| 2866 | NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
| 2867 | error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid); |
| 2868 | nfs_need_recover(nmp, error); |
| 2869 | } |
| 2870 | lck_mtx_unlock(&nmp->nm_lock); |
| 2871 | if (np->n_flag & NREVOKE) { |
| 2872 | error = EIO; |
| 2873 | } else { |
| 2874 | if (error == NFSERR_GRACE) { |
| 2875 | if (cb.rcb_func) { |
| 2876 | /* |
| 2877 | * For an async I/O request, handle a grace delay just like |
| 2878 | * jukebox errors. Set the resend time and queue it up. |
| 2879 | */ |
| 2880 | struct timeval now; |
| 2881 | if (req->r_nmrep.nmc_mhead) { |
| 2882 | mbuf_freem(req->r_nmrep.nmc_mhead); |
| 2883 | req->r_nmrep.nmc_mhead = NULL; |
| 2884 | } |
| 2885 | req->r_error = 0; |
| 2886 | microuptime(&now); |
| 2887 | lck_mtx_lock(&req->r_mtx); |
| 2888 | req->r_resendtime = now.tv_sec + 2; |
| 2889 | req->r_xid = 0; // get a new XID |
| 2890 | req->r_flags |= R_RESTART; |
| 2891 | req->r_start = 0; |
| 2892 | nfs_asyncio_resend(req); |
| 2893 | lck_mtx_unlock(&req->r_mtx); |
| 2894 | if (IS_VALID_CRED(cred)) |
| 2895 | kauth_cred_unref(&cred); |
| 2896 | /* Note: nfsreq reference taken will be dropped later when finished */ |
| 2897 | return; |
| 2898 | } |
| 2899 | /* otherwise, just pause a couple seconds and retry */ |
| 2900 | tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
| 2901 | } |
| 2902 | if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { |
| 2903 | rlen = 0; |
| 2904 | goto writeagain; |
| 2905 | } |
| 2906 | } |
| 2907 | } |
| 2908 | if (error) { |
| 2909 | SET(bp->nb_flags, NB_ERROR); |
| 2910 | bp->nb_error = error; |
| 2911 | } |
| 2912 | if (error || (nfsvers == NFS_VER2)) |
| 2913 | goto out; |
| 2914 | if (rlen <= 0) { |
| 2915 | SET(bp->nb_flags, NB_ERROR); |
| 2916 | bp->nb_error = error = EIO; |
| 2917 | goto out; |
| 2918 | } |
| 2919 | |
| 2920 | /* save lowest commit level returned */ |
| 2921 | if (committed < bp->nb_commitlevel) |
| 2922 | bp->nb_commitlevel = committed; |
| 2923 | |
| 2924 | /* check the write verifier */ |
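| | /* (a changed verifier means the server rebooted and may have dropped unstable writes) */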
| 2925 | if (!bp->nb_verf) { |
| 2926 | bp->nb_verf = wverf; |
| 2927 | } else if (bp->nb_verf != wverf) { |
| 2928 | /* verifier changed, so buffer will need to be rewritten */ |
| 2929 | bp->nb_flags |= NB_STALEWVERF; |
| 2930 | bp->nb_commitlevel = NFS_WRITE_UNSTABLE; |
| 2931 | bp->nb_verf = wverf; |
| 2932 | } |
| 2933 | |
| 2934 | /* |
| 2935 | * check for a short write |
| 2936 | * |
| 2937 | * If the server didn't write all the data, then we |
| 2938 | * need to issue another write for the rest of it. |
| 2939 | * (Don't bother if the buffer hit an error or stale wverf.) |
| 2940 | */ |
| 2941 | if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF|NB_ERROR))) { |
| 2942 | writeagain: |
| 2943 | offset += rlen; |
| 2944 | length -= rlen; |
| 2945 | |
| 2946 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
| 2947 | UIO_WRITE, &uio_buf, sizeof(uio_buf)); |
| 2948 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); |
| 2949 | |
| 2950 | cb.rcb_args[0] = offset; |
| 2951 | cb.rcb_args[1] = length; |
| 2952 | if (nmp->nm_vers >= NFS_VER4) |
| 2953 | cb.rcb_args[2] = nmp->nm_stategenid; |
| 2954 | |
| 2955 | // XXX iomode should really match the original request |
| 2956 | error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred, |
| 2957 | NFS_WRITE_FILESYNC, &cb, &wreq); |
| 2958 | if (!error) { |
| 2959 | if (IS_VALID_CRED(cred)) |
| 2960 | kauth_cred_unref(&cred); |
| 2961 | if (!cb.rcb_func) { |
| 2962 | /* if !async we'll need to wait for this RPC to finish */ |
| 2963 | req = wreq; |
| 2964 | wreq = NULL; |
| 2965 | goto finish; |
| 2966 | } |
| 2967 | nfs_request_rele(req); |
| 2968 | /* |
| 2969 | * We're done here. |
| 2970 | * Outstanding RPC count is unchanged. |
| 2971 | * Callback will be called when RPC is done. |
| 2972 | */ |
| 2973 | return; |
| 2974 | } |
| 2975 | SET(bp->nb_flags, NB_ERROR); |
| 2976 | bp->nb_error = error; |
| 2977 | } |
| 2978 | |
| 2979 | out: |
| 2980 | if (cb.rcb_func) { |
| 2981 | nfs_async_write_done(nmp); |
| 2982 | nfs_request_rele(req); |
| 2983 | } |
| 2984 | /* |
| 2985 | * Decrement outstanding RPC count on buffer |
| 2986 | * and call nfs_buf_write_finish on last RPC. |
| 2987 | * |
| 2988 | * (Note: when there are multiple async RPCs issued for a |
| 2989 | * buffer we need nfs_buf_mutex to avoid problems when
| 2990 | * aborting a partially-initiated set of RPCs) |
| 2991 | */ |
| 2992 | multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); |
| 2993 | if (multasyncrpc) |
| 2994 | lck_mtx_lock(nfs_buf_mutex); |
| 2995 | |
| 2996 | bp->nb_rpcs--; |
| 2997 | finished = (bp->nb_rpcs == 0); |
| 2998 | |
| 2999 | if (multasyncrpc) |
| 3000 | lck_mtx_unlock(nfs_buf_mutex); |
| 3001 | |
| 3002 | if (finished) { |
| 3003 | if (multasyncrpc) |
| 3004 | wakeme = &bp->nb_rpcs; |
| 3005 | nfs_buf_write_finish(bp, thd, cred); |
| 3006 | if (wakeme) |
| 3007 | wakeup(wakeme); |
| 3008 | } |
| 3009 | |
| 3010 | if (IS_VALID_CRED(cred)) |
| 3011 | kauth_cred_unref(&cred); |
| 3012 | } |
| 3013 | |
| 3014 | /* |
| 3015 | * Send commit(s) for the given node's "needcommit" buffers |
| 3016 | */ |
| 3017 | int |
| 3018 | nfs_flushcommits(nfsnode_t np, int nowait) |
| 3019 | { |
| 3020 | struct nfsmount *nmp; |
| 3021 | struct nfsbuf *bp, *prevlbp, *lbp; |
| 3022 | struct nfsbuflists blist, commitlist; |
| 3023 | int error = 0, retv, wcred_set, flags, dirty; |
| 3024 | u_quad_t off, endoff, toff; |
| 3025 | uint64_t wverf; |
| 3026 | u_int32_t count; |
| 3027 | kauth_cred_t wcred = NULL; |
| 3028 | |
| 3029 | FSDBG_TOP(557, np, 0, 0, 0); |
| 3030 | |
| 3031 | /* |
| 3032 | * A block with nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) has been written to the
| 3033 | * server but has not yet been committed to stable storage there.
| 3034 | * The byte range is worked out for as many nfsbufs as we can handle
| 3035 | * and the commit RPC is done.
| 3036 | */ |
| 3037 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) { |
| 3038 | error = nfs_node_lock(np); |
| 3039 | if (error) |
| 3040 | goto done; |
| 3041 | np->n_flag |= NMODIFIED; |
| 3042 | nfs_node_unlock(np); |
| 3043 | } |
| 3044 | |
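| | /* work out a single range covering all the dirty buffers; off starts at the max value so the first buffer sets it */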
| 3045 | off = (u_quad_t)-1; |
| 3046 | endoff = 0; |
| 3047 | wcred_set = 0; |
| 3048 | LIST_INIT(&commitlist); |
| 3049 | |
| 3050 | nmp = NFSTONMP(np); |
| 3051 | if (nfs_mount_gone(nmp)) { |
| 3052 | error = ENXIO; |
| 3053 | goto done; |
| 3054 | } |
| 3055 | if (nmp->nm_vers == NFS_VER2) { |
| 3056 | error = EINVAL; |
| 3057 | goto done; |
| 3058 | } |
| 3059 | |
| 3060 | flags = NBI_DIRTY; |
| 3061 | if (nowait) |
| 3062 | flags |= NBI_NOWAIT; |
| 3063 | lck_mtx_lock(nfs_buf_mutex); |
| 3064 | wverf = nmp->nm_verf; |
| 3065 | if (!nfs_buf_iterprepare(np, &blist, flags)) { |
| 3066 | while ((bp = LIST_FIRST(&blist))) { |
| 3067 | LIST_REMOVE(bp, nb_vnbufs); |
| 3068 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); |
| 3069 | error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0); |
| 3070 | if (error) |
| 3071 | continue; |
| 3072 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) |
| 3073 | nfs_buf_check_write_verifier(np, bp); |
| 3074 | if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) != (NB_DELWRI | NB_NEEDCOMMIT)) || |
| 3075 | (bp->nb_verf != wverf)) { |
| 3076 | nfs_buf_drop(bp); |
| 3077 | continue; |
| 3078 | } |
| 3079 | nfs_buf_remfree(bp); |
| 3080 | |
| 3081 | /* buffer UPLs will be grabbed *in order* below */ |
| 3082 | |
| 3083 | FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty); |
| 3084 | FSDBG(557, bp->nb_validoff, bp->nb_validend, |
| 3085 | bp->nb_dirtyoff, bp->nb_dirtyend); |
| 3086 | |
| 3087 | /* |
| 3088 | * Work out if all buffers are using the same cred |
| 3089 | * so we can deal with them all with one commit. |
| 3090 | * |
| 3091 | * Note: creds in bp's must be obtained by kauth_cred_ref |
| 3092 | * on the same original cred in order for them to be equal. |
| 3093 | */ |
| 3094 | if (wcred_set == 0) { |
| 3095 | wcred = bp->nb_wcred; |
| 3096 | if (!IS_VALID_CRED(wcred)) |
| 3097 | panic("nfs: needcommit w/out wcred");
| 3098 | wcred_set = 1; |
| 3099 | } else if ((wcred_set == 1) && wcred != bp->nb_wcred) { |
| 3100 | wcred_set = -1; |
| 3101 | } |
| 3102 | SET(bp->nb_flags, NB_WRITEINPROG); |
| 3103 | |
| 3104 | /* |
| 3105 | * Add this buffer to the list of buffers we are committing. |
| 3106 | * Buffers are inserted into the list in ascending order so that |
| 3107 | * we can take the UPLs in order after the list is complete. |
| 3108 | */ |
| 3109 | prevlbp = NULL; |
| 3110 | LIST_FOREACH(lbp, &commitlist, nb_vnbufs) { |
| 3111 | if (bp->nb_lblkno < lbp->nb_lblkno) |
| 3112 | break; |
| 3113 | prevlbp = lbp; |
| 3114 | } |
| 3115 | LIST_REMOVE(bp, nb_vnbufs); |
| 3116 | if (prevlbp) |
| 3117 | LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs); |
| 3118 | else |
| 3119 | LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs); |
| 3120 | |
| 3121 | /* update commit range start, end */ |
| 3122 | toff = NBOFF(bp) + bp->nb_dirtyoff; |
| 3123 | if (toff < off) |
| 3124 | off = toff; |
| 3125 | toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff); |
| 3126 | if (toff > endoff) |
| 3127 | endoff = toff; |
| 3128 | } |
| 3129 | nfs_buf_itercomplete(np, &blist, NBI_DIRTY); |
| 3130 | } |
| 3131 | lck_mtx_unlock(nfs_buf_mutex); |
| 3132 | |
| 3133 | if (LIST_EMPTY(&commitlist)) { |
| 3134 | error = ENOBUFS; |
| 3135 | goto done; |
| 3136 | } |
| 3137 | |
| 3138 | /* |
| 3139 | * We need a UPL to prevent others from accessing the buffers during |
| 3140 | * our commit RPC(s). |
| 3141 | * |
| 3142 | * We used to also check for dirty pages here; if there were any we'd |
| 3143 | * abort the commit and force the entire buffer to be written again. |
| 3144 | * Instead of doing that, we just go ahead and commit the dirty range, |
| 3145 | * and then leave the buffer around with dirty pages that will be |
| 3146 | * written out later. |
| 3147 | */ |
| 3148 | LIST_FOREACH(bp, &commitlist, nb_vnbufs) { |
| 3149 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) { |
| 3150 | retv = nfs_buf_upl_setup(bp); |
| 3151 | if (retv) { |
| 3152 | /* Unable to create the UPL, the VM object probably no longer exists. */ |
| 3153 | printf("nfs_flushcommits: upl create failed %d\n" , retv); |
| 3154 | bp->nb_valid = bp->nb_dirty = 0; |
| 3155 | } |
| 3156 | } |
| 3157 | nfs_buf_upl_check(bp); |
| 3158 | } |
| 3159 | |
| 3160 | /* |
| 3161 | * Commit data on the server, as required. |
| 3162 | * If all bufs are using the same wcred, then use that with |
| 3163 | * one call for all of them, otherwise commit each one |
| 3164 | * separately. |
| 3165 | */ |
| 3166 | if (wcred_set == 1) { |
| 3167 | /* |
| 3168 | * Note, it's possible the commit range could be >2^32-1. |
| 3169 | * If it is, we'll send one commit that covers the whole file. |
| 3170 | */ |
| 3171 | if ((endoff - off) > 0xffffffff) |
| 3172 | count = 0; |
| 3173 | else |
| 3174 | count = (endoff - off); |
| 3175 | retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred, wverf); |
| 3176 | } else { |
| 3177 | retv = 0; |
| 3178 | LIST_FOREACH(bp, &commitlist, nb_vnbufs) { |
| 3179 | toff = NBOFF(bp) + bp->nb_dirtyoff; |
| 3180 | count = bp->nb_dirtyend - bp->nb_dirtyoff; |
| 3181 | retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred, wverf); |
| 3182 | if (retv) |
| 3183 | break; |
| 3184 | } |
| 3185 | } |
| 3186 | |
| 3187 | /* |
| 3188 | * Now, either mark the blocks I/O done or mark the |
| 3189 | * blocks dirty, depending on whether the commit |
| 3190 | * succeeded. |
| 3191 | */ |
| 3192 | while ((bp = LIST_FIRST(&commitlist))) { |
| 3193 | LIST_REMOVE(bp, nb_vnbufs); |
| 3194 | FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty); |
| 3195 | nfs_node_lock_force(np); |
| 3196 | CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG)); |
| 3197 | np->n_needcommitcnt--; |
| 3198 | CHECK_NEEDCOMMITCNT(np); |
| 3199 | nfs_node_unlock(np); |
| 3200 | |
| 3201 | if (retv) { |
| 3202 | /* move back to dirty list */ |
| 3203 | lck_mtx_lock(nfs_buf_mutex); |
| 3204 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); |
| 3205 | lck_mtx_unlock(nfs_buf_mutex); |
| 3206 | nfs_buf_release(bp, 1); |
| 3207 | continue; |
| 3208 | } |
| 3209 | |
| 3210 | nfs_node_lock_force(np); |
| 3211 | np->n_numoutput++; |
| 3212 | nfs_node_unlock(np); |
| 3213 | vnode_startwrite(NFSTOV(np)); |
| 3214 | if (ISSET(bp->nb_flags, NB_DELWRI)) { |
| 3215 | lck_mtx_lock(nfs_buf_mutex); |
| 3216 | nfs_nbdwrite--; |
| 3217 | NFSBUFCNTCHK(); |
| 3218 | lck_mtx_unlock(nfs_buf_mutex); |
| 3219 | wakeup(&nfs_nbdwrite); |
| 3220 | } |
| 3221 | CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); |
| 3222 | /* if block still has dirty pages, we don't want it to */ |
| 3223 | /* be released in nfs_buf_iodone(). So, don't set NB_ASYNC. */ |
| 3224 | if (!(dirty = bp->nb_dirty)) |
| 3225 | SET(bp->nb_flags, NB_ASYNC); |
| 3226 | else |
| 3227 | CLR(bp->nb_flags, NB_ASYNC); |
| 3228 | |
| 3229 | /* move to clean list */ |
| 3230 | lck_mtx_lock(nfs_buf_mutex); |
| 3231 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); |
| 3232 | lck_mtx_unlock(nfs_buf_mutex); |
| 3233 | |
| 3234 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; |
| 3235 | |
| 3236 | nfs_buf_iodone(bp); |
| 3237 | if (dirty) { |
| 3238 | /* throw it back in as a delayed write buffer */ |
| 3239 | CLR(bp->nb_flags, NB_DONE); |
| 3240 | nfs_buf_write_delayed(bp); |
| 3241 | } |
| 3242 | } |
| 3243 | |
| 3244 | done: |
| 3245 | FSDBG_BOT(557, np, 0, 0, error); |
| 3246 | return (error); |
| 3247 | } |
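
/*
 * Illustrative sketch (not part of the build): how the single-cred path
 * above coalesces per-buffer dirty ranges into one COMMIT and clamps the
 * count to 32 bits -- a count of 0 asks the server to commit the whole
 * file, which is why ranges wider than 2^32-1 collapse to 0.  The demo_*
 * names and the standalone range struct are hypothetical stand-ins, and
 * the sketch assumes at least one range.
 */
#if 0
#include <stdint.h>

struct demo_range {
	uint64_t start;		/* stands in for NBOFF(bp) + bp->nb_dirtyoff */
	uint64_t len;		/* stands in for nb_dirtyend - nb_dirtyoff */
};

static uint32_t
demo_commit_range(const struct demo_range *r, int n, uint64_t *offp)
{
	uint64_t off = (uint64_t)-1, endoff = 0, toff;
	int i;

	for (i = 0; i < n; i++) {
		toff = r[i].start;
		if (toff < off)
			off = toff;	/* lowest dirty offset seen */
		toff += r[i].len;
		if (toff > endoff)
			endoff = toff;	/* highest dirty end seen */
	}
	*offp = off;
	/* clamp: a zero count means "commit the whole file" */
	return (((endoff - off) > 0xffffffff) ? 0 : (uint32_t)(endoff - off));
}
#endif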

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 */
int
nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, error2, slptimeo = 0, slpflag = 0;
	int nfsvers, flags, passone = 1;

	FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto out;
	}
	nfsvers = nmp->nm_vers;
	if (NMFLAG(nmp, INTR))
		slpflag = PCATCH;

	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
		nfs_node_lock_force(np);
		np->n_flag |= NMODIFIED;
		nfs_node_unlock(np);
	}

	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBFLUSHINPROG) {
		np->n_bflag |= NBFLUSHWANT;
		error = msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL);
		if ((error && (error != EWOULDBLOCK)) ||
		    ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))) {
			lck_mtx_unlock(nfs_buf_mutex);
			goto out;
		}
	}
	np->n_bflag |= NBFLUSHINPROG;

	/*
	 * On the first pass, start async/unstable writes on all
	 * delayed write buffers.  Then wait for all writes to complete
	 * and call nfs_flushcommits() to commit any uncommitted buffers.
	 * On all subsequent passes, start STABLE writes on any remaining
	 * dirty buffers.  Then wait for all writes to complete.
	 */
again:
	FSDBG(518, LIST_FIRST(&np->n_dirtyblkhd), np->n_flag, 0, 0);
	if (!NFSTONMP(np)) {
		lck_mtx_unlock(nfs_buf_mutex);
		error = ENXIO;
		goto done;
	}

	/* Start/do any write(s) that are required. */
	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? NBAC_NOWAIT : 0;
			if (flags != NBAC_NOWAIT)
				nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) {
				FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags);
				if (error == EBUSY)
					break;
				if (error) {
					error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
					if (error2) {
						if (flags != NBAC_NOWAIT)
							nfs_buf_refrele(bp);
						nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
						lck_mtx_unlock(nfs_buf_mutex);
						error = error2;
						goto done;
					}
					if (slpflag == PCATCH) {
						slpflag = 0;
						slptimeo = 2 * hz;
					}
				}
			}
			if (flags != NBAC_NOWAIT)
				nfs_buf_refrele(bp);
			if (error == EBUSY)
				continue;
			if (!bp->nb_np) {
				/* buffer is no longer valid */
				nfs_buf_drop(bp);
				continue;
			}
			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
				nfs_buf_check_write_verifier(np, bp);
			if (!ISSET(bp->nb_flags, NB_DELWRI)) {
				/* buffer is no longer dirty */
				nfs_buf_drop(bp);
				continue;
			}
			FSDBG(525, bp, passone, bp->nb_lflags, bp->nb_flags);
			if ((passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) &&
			    ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
				nfs_buf_drop(bp);
				continue;
			}
			nfs_buf_remfree(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			if (ISSET(bp->nb_flags, NB_ERROR)) {
				nfs_node_lock_force(np);
				np->n_error = bp->nb_error ? bp->nb_error : EIO;
				np->n_flag |= NWRITEERR;
				nfs_node_unlock(np);
				nfs_buf_release(bp, 1);
				lck_mtx_lock(nfs_buf_mutex);
				continue;
			}
			SET(bp->nb_flags, NB_ASYNC);
			if (!passone) {
				/* NB_STABLE forces this to be written FILESYNC */
				SET(bp->nb_flags, NB_STABLE);
			}
			nfs_buf_write(bp);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) {
			error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
			if (error2) {
				error = error2;
				goto done;
			}
			if (slpflag == PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			}
		}
	}

	if (nfsvers != NFS_VER2) {
		/* loop while it looks like there are still buffers to be */
		/* committed and nfs_flushcommits() seems to be handling them. */
		while (np->n_needcommitcnt)
			if (nfs_flushcommits(np, 0))
				break;
	}

	if (passone) {
		passone = 0;
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		goto again;
	}

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		if (!LIST_EMPTY(&np->n_dirtyblkhd))
			goto again;
		lck_mtx_unlock(nfs_buf_mutex);
		nfs_node_lock_force(np);
		/*
		 * OK, it looks like there are no dirty blocks.  If we have no
		 * writes in flight and no one in the write code, we can clear
		 * the modified flag.  In order to make sure we see the latest
		 * attributes and size, we also invalidate the attributes and
		 * advance the attribute cache XID to guarantee that attributes
		 * newer than our clearing of NMODIFIED will get loaded next.
		 * (If we don't do this, it's possible for the flush's final
		 * write/commit (xid1) to be executed in parallel with a subsequent
		 * getattr request (xid2).  The getattr could return attributes
		 * from *before* the write/commit completed but the stale attributes
		 * would be preferred because of the xid ordering.)
		 */
		if (!np->n_wrbusy && !np->n_numoutput) {
			np->n_flag &= ~NMODIFIED;
			NATTRINVALIDATE(np);
			nfs_get_xid(&np->n_xid);
		}
	} else {
		nfs_node_lock_force(np);
	}

	FSDBG(526, np->n_flag, np->n_error, 0, 0);
	if (!ignore_writeerr && (np->n_flag & NWRITEERR)) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
	nfs_node_unlock(np);
done:
	lck_mtx_lock(nfs_buf_mutex);
	flags = np->n_bflag;
	np->n_bflag &= ~(NBFLUSHINPROG|NBFLUSHWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (flags & NBFLUSHWANT)
		wakeup(&np->n_bflag);
out:
	FSDBG_BOT(517, np, error, ignore_writeerr, 0);
	return (error);
}
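
/*
 * Illustrative sketch (not part of the build): a minimal fsync-style
 * caller of nfs_flush() above.  MNT_WAIT requests both passes: unstable
 * writes plus commits on pass one, then FILESYNC rewrites of whatever
 * remains dirty.  The demo_ name is hypothetical; the call itself mirrors
 * the V_SAVE path taken by nfs_vinvalbuf_internal() below.
 */
#if 0
static int
demo_fsync(nfsnode_t np, vfs_context_t ctx)
{
	/* ignore_writeerr == 0: report any earlier async write failure here */
	return (nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), 0));
}
#endif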

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
nfs_vinvalbuf_internal(
	nfsnode_t np,
	int flags,
	thread_t thd,
	kauth_cred_t cred,
	int slpflag,
	int slptimeo)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	int list, error = 0;

	if (flags & V_SAVE) {
		if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR))))
			return (error);
	}

	lck_mtx_lock(nfs_buf_mutex);
	for (;;) {
		list = NBI_CLEAN;
		if (nfs_buf_iterprepare(np, &blist, list)) {
			list = NBI_DIRTY;
			if (nfs_buf_iterprepare(np, &blist, list))
				break;
		}
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			if (list == NBI_CLEAN)
				LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			else
				LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) {
				FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags);
				if (error != EAGAIN) {
					FSDBG(554, np, bp, -1, error);
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, list);
					lck_mtx_unlock(nfs_buf_mutex);
					return (error);
				}
			}
			nfs_buf_refrele(bp);
			FSDBG(554, np, bp, NBOFF(bp), bp->nb_flags);
			lck_mtx_unlock(nfs_buf_mutex);
			if ((flags & V_SAVE) && UBCINFOEXISTS(NFSTOV(np)) && bp->nb_np &&
			    (NBOFF(bp) < (off_t)np->n_size)) {
				/* extra paranoia: make sure we're not */
				/* somehow leaving any dirty data around */
				int mustwrite = 0;
				int end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ?
				    ((off_t)np->n_size - NBOFF(bp)) : bp->nb_bufsize;
				if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
					error = nfs_buf_upl_setup(bp);
					if (error == EINVAL) {
						/* vm object must no longer exist */
						/* hopefully we don't need to do */
						/* anything for this buffer */
					} else if (error)
						printf("nfs_vinvalbuf: upl setup failed %d\n", error);
					bp->nb_valid = bp->nb_dirty = 0;
				}
				nfs_buf_upl_check(bp);
				/* check for any dirty data before the EOF */
				if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) {
					/* clip dirty range to EOF */
					if (bp->nb_dirtyend > end) {
						bp->nb_dirtyend = end;
						if (bp->nb_dirtyoff >= bp->nb_dirtyend)
							bp->nb_dirtyoff = bp->nb_dirtyend = 0;
					}
					if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end))
						mustwrite++;
				}
				bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1;
				if (bp->nb_dirty)
					mustwrite++;
				/* also make sure we'll have a credential to do the write */
				if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) {
					printf("nfs_vinvalbuf: found dirty buffer with no write creds\n");
					mustwrite = 0;
				}
				if (mustwrite) {
					FSDBG(554, np, bp, 0xd00dee, bp->nb_flags);
					if (!ISSET(bp->nb_flags, NB_PAGELIST))
						panic("nfs_vinvalbuf: dirty buffer without upl");
					/* gotta write out dirty data before invalidating */
					/* (NB_STABLE indicates that data writes should be FILESYNC) */
					/* (NB_NOCACHE indicates buffer should be discarded) */
					CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC));
					SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
					if (!IS_VALID_CRED(bp->nb_wcred)) {
						kauth_cred_ref(cred);
						bp->nb_wcred = cred;
					}
					error = nfs_buf_write(bp);
					// Note: bp has been released
					if (error) {
						FSDBG(554, bp, 0xd00dee, 0xbad, error);
						nfs_node_lock_force(np);
						if ((error != EINTR) && (error != ERESTART)) {
							np->n_error = error;
							np->n_flag |= NWRITEERR;
						}
						/*
						 * There was a write error and we need to
						 * invalidate attrs to sync with server.
						 * (if this write was extending the file,
						 * we may no longer know the correct size)
						 */
						NATTRINVALIDATE(np);
						nfs_node_unlock(np);
						if ((error == EINTR) || (error == ERESTART)) {
							/*
							 * Abort on EINTR.  If we don't, we could
							 * be stuck in this loop forever because
							 * the buffer will continue to stay dirty.
							 */
							lck_mtx_lock(nfs_buf_mutex);
							nfs_buf_itercomplete(np, &blist, list);
							lck_mtx_unlock(nfs_buf_mutex);
							return (error);
						}
						error = 0;
					}
					lck_mtx_lock(nfs_buf_mutex);
					continue;
				}
			}
			SET(bp->nb_flags, NB_INVAL);
			// hold off on FREEUPs until we're done here
			nfs_buf_release(bp, 0);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, list);
	}
	if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd))
		panic("nfs_vinvalbuf: flush/inval failed");
	lck_mtx_unlock(nfs_buf_mutex);
	nfs_node_lock_force(np);
	if (!(flags & V_SAVE))
		np->n_flag &= ~NMODIFIED;
	if (vnode_vtype(NFSTOV(np)) == VREG)
		np->n_lastrahead = -1;
	nfs_node_unlock(np);
	NFS_BUF_FREEUP();
	return (0);
}
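
/*
 * Illustrative sketch (not part of the build): the nb_dirty clip used
 * above, on a standalone page bitmap.  With 4K pages and end = 0x2400
 * (EOF lands 9K into the buffer), the rounded page count is 3 and the
 * mask is 0b111, so dirty bits for pages wholly beyond EOF are discarded.
 * The demo_ names are hypothetical, and the sketch assumes buffers of at
 * most 31 pages, matching the 32-bit dirty bitmap.
 */
#if 0
#include <stdint.h>

#define DEMO_PAGE_SIZE		4096u
#define demo_round_page(x)	(((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

static uint32_t
demo_clip_dirty(uint32_t dirty, uint32_t end)
{
	/* keep only the dirty bits for pages that start before 'end' */
	return (dirty & ((1u << (demo_round_page(end) / DEMO_PAGE_SIZE)) - 1));
}
#endif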

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vnode_t vp, int flags, vfs_context_t ctx, int intrflg)
{
	return nfs_vinvalbuf2(vp, flags, vfs_context_thread(ctx), vfs_context_ucred(ctx), intrflg);
}

int
nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrflg)
{
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp = VTONMP(vp);
	int error, slpflag, slptimeo, nflags, retry = 0;
	int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
	struct timespec ts = { 2, 0 };
	off_t size;

	FSDBG_TOP(554, np, flags, intrflg, 0);

	/*
	 * If the mount is gone, there's no sense trying to write anything
	 * and hanging while trying to do I/O.
	 */
	if (nfs_mount_gone(nmp)) {
		flags &= ~V_SAVE;
		ubcflags &= ~UBC_PUSHALL;
	}

	if (nmp && !NMFLAG(nmp, INTR))
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/* First wait for any other process doing a flush to complete. */
	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", &ts);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			return (error);
		}
		if (np->n_bflag & NBINVALINPROG)
			slpflag = 0;
	}
	np->n_bflag |= NBINVALINPROG;
	lck_mtx_unlock(nfs_buf_mutex);

	/* Now, flush as required. */
again:
	error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0);
	while (error) {
		FSDBG(554, np, 0, 0, error);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0)))
			goto done;
		error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo);
	}

	/* get the pages out of vm also */
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
		if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
			if (error == EINVAL)
				panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
			if (retry++ < 10) { /* retry invalidating a few times */
				if (retry > 1 || error == ENXIO)
					ubcflags &= ~UBC_PUSHALL;
				goto again;
			}
			/* give up */
			printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
		}
done:
	lck_mtx_lock(nfs_buf_mutex);
	nflags = np->n_bflag;
	np->n_bflag &= ~(NBINVALINPROG|NBINVALWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (nflags & NBINVALWANT)
		wakeup(&np->n_bflag);

	FSDBG_BOT(554, np, flags, intrflg, error);
	return (error);
}
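
/*
 * Illustrative sketch (not part of the build): a typical caller of
 * nfs_vinvalbuf() above, e.g. tossing cached data that is known stale.
 * V_SAVE pushes dirty data to the server first; intrflg == 1 lets a
 * signal interrupt the wait on an INTR mount.  The demo_ name is
 * hypothetical.
 */
#if 0
static int
demo_invalidate_cache(vnode_t vp, vfs_context_t ctx)
{
	return (nfs_vinvalbuf(vp, V_SAVE, ctx, 1));
}
#endif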

/*
 * Wait for any busy buffers to complete.
 */
void
nfs_wait_bufs(nfsnode_t np)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	int error = 0;

	lck_mtx_lock(nfs_buf_mutex);
	if (!nfs_buf_iterprepare(np, &blist, NBI_CLEAN)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
				if (error != EAGAIN) {
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
					lck_mtx_unlock(nfs_buf_mutex);
					return;
				}
			}
			nfs_buf_refrele(bp);
			nfs_buf_drop(bp);
		}
		nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
	}
	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
				if (error != EAGAIN) {
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
					lck_mtx_unlock(nfs_buf_mutex);
					return;
				}
			}
			nfs_buf_refrele(bp);
			nfs_buf_drop(bp);
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);
}
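
/*
 * Illustrative sketch (not part of the build): the buffer-list iteration
 * protocol used by nfs_wait_bufs() above and throughout this file.
 * nfs_buf_iterprepare() privatizes the node's list into blist under
 * nfs_buf_mutex; each buffer is moved back onto the node's list *before*
 * it is examined, so an early bailout never strands buffers on blist;
 * nfs_buf_itercomplete() then reconciles anything left over.  The demo_
 * name is hypothetical.
 */
#if 0
static void
demo_iterate_clean_bufs(nfsnode_t np)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;

	lck_mtx_lock(nfs_buf_mutex);
	if (!nfs_buf_iterprepare(np, &blist, NBI_CLEAN)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			/* ... examine or acquire bp here ... */
		}
		nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
	}
	lck_mtx_unlock(nfs_buf_mutex);
}
#endif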

/*
 * Add an async I/O request to the mount's async I/O queue and make
 * sure that an nfsiod will service it.
 */
void
nfs_asyncio_finish(struct nfsreq *req)
{
	struct nfsmount *nmp;
	struct nfsiod *niod;
	int started = 0;

	FSDBG_TOP(552, req->r_nmp, 0, 0, 0);
again:
	nmp = req->r_nmp;

	if (nmp == NULL)
		return;

	lck_mtx_lock(nfsiod_mutex);
	niod = nmp->nm_niod;

	/* grab an nfsiod if we don't have one already */
	if (!niod) {
		niod = TAILQ_FIRST(&nfsiodfree);
		if (niod) {
			TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
			TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link);
			niod->niod_nmp = nmp;
		} else if (((nfsiod_thread_count < NFSIOD_MAX) || (nfsiod_thread_count <= 0)) && (started < 4)) {
			/*
			 * Try starting a new thread.
			 * We may try a couple times if other callers
			 * get the new threads before we do.
			 */
			lck_mtx_unlock(nfsiod_mutex);
			started++;
			if (!nfsiod_start())
				goto again;
			lck_mtx_lock(nfsiod_mutex);
		}
	}

	/*
	 * If we got here while on the resendq we need to get off.  This
	 * happens when the timer fires and errors out requests from nfs_sigintr
	 * or we receive a reply (UDP case) while on the resend queue, so
	 * we're just finishing up and are not going to be resent.
	 */
	lck_mtx_lock(&req->r_mtx);
	if (req->r_flags & R_RESENDQ) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_rchain.tqe_next != NFSREQNOLIST) {
			NFS_BIO_DBG("Processing async request on resendq. Removing");
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			assert(req->r_refs > 1);
			/* Remove resendq reference */
			req->r_refs--;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		req->r_flags &= ~R_RESENDQ;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (req->r_achain.tqe_next == NFSREQNOLIST)
		TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);

	/* If this mount doesn't already have an nfsiod working on it... */
	if (!nmp->nm_niod) {
		if (niod) { /* give it the nfsiod we just grabbed */
			nmp->nm_niod = niod;
			lck_mtx_unlock(nfsiod_mutex);
			wakeup(niod);
		} else if (nfsiod_thread_count > 0) {
			/* just queue it up on the nfsiod mounts queue if needed */
			if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
				TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
			lck_mtx_unlock(nfsiod_mutex);
		} else {
			printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
			lck_mtx_unlock(nfsiod_mutex);
			/* we have no other option but to be persistent */
			started = 0;
			goto again;
		}
	} else {
		lck_mtx_unlock(nfsiod_mutex);
	}

	FSDBG_BOT(552, nmp, 0, 0, 0);
}

/*
 * Queue up an async I/O request for resend.
 */
void
nfs_asyncio_resend(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;

	if (nfs_mount_gone(nmp))
		return;

	nfs_gss_clnt_rpcdone(req);
	lck_mtx_lock(&nmp->nm_lock);
	if (!(req->r_flags & R_RESENDQ)) {
		TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
		req->r_flags |= R_RESENDQ;
		/*
		 * We take a reference on this request so that it can't be
		 * destroyed while a resend is queued or in progress.
		 */
		nfs_request_ref(req, 1);
	}
	nfs_mount_sock_thread_wake(nmp);
	lck_mtx_unlock(&nmp->nm_lock);
}
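
/*
 * Illustrative sketch (not part of the build): the queue-owns-a-reference
 * discipline used by nfs_asyncio_resend() above.  Linking a request onto
 * the resend queue takes a reference so the request can't be destroyed
 * while queued; whoever unlinks it (the resend path or
 * nfs_asyncio_finish()) drops that reference.  The demo_ types and names
 * are hypothetical stand-ins.
 */
#if 0
#include <stdint.h>

struct demo_req {
	uint32_t refs;		/* lifetime reference count */
	int	 on_resendq;	/* stands in for R_RESENDQ */
};

static void
demo_queue_for_resend(struct demo_req *req)
{
	if (!req->on_resendq) {
		req->on_resendq = 1;
		req->refs++;	/* the queue's reference keeps req alive */
	}
}

static void
demo_unqueue(struct demo_req *req)
{
	if (req->on_resendq) {
		req->on_resendq = 0;
		req->refs--;	/* drop the queue's reference */
	}
}
#endif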

/*
 * Read directory data into a buffer.
 *
 * Buffer will be filled (unless EOF is hit).
 * Buffers after this one may also be completely/partially filled.
 */
int
nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0;

	if (nfs_mount_gone(nmp))
		return (ENXIO);

	if (nmp->nm_vers < NFS_VER4)
		error = nfs3_readdir_rpc(np, bp, ctx);
	else
		error = nfs4_readdir_rpc(np, bp, ctx);

	if (error && (error != NFSERR_DIRBUFDROPPED)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	return (error);
}