Lines Matching refs:new (zfs_rlock.c; source line numbers on the left)

66  * new proxy locks created for non-overlapping ranges.
104 zfs_range_lock_writer(znode_t *zp, rl_t *new)
110 uint64_t off = new->r_off;
111 uint64_t len = new->r_len;
130 if (new->r_type == RL_APPEND)
131 new->r_off = zp->z_size;
138 end_size = MAX(zp->z_size, new->r_off + len);
141 new->r_off = 0;
142 new->r_len = UINT64_MAX;
150 new->r_type = RL_WRITER; /* convert to writer */
151 avl_add(tree, new);
158 rl = avl_find(tree, new, &where);
163 if (rl && (rl->r_off < new->r_off + new->r_len))
167 if (rl && rl->r_off + rl->r_len > new->r_off)
170 new->r_type = RL_WRITER; /* convert possible RL_APPEND */
171 avl_insert(tree, new, where);
181 new->r_off = off;
182 new->r_len = len;
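
A note on the writer path above: because the tree is ordered by starting offset, the conflict test at lines 163 and 167 only has to look at two neighbors, the first lock starting at or after the new range and the nearest lock starting before it. Lines 181 and 182 then restore the caller's original offset and length, which the append/grow logic at lines 130-142 may have rewritten, presumably before the routine waits and retries (the retry loop itself is not among the matched lines). Below is a minimal userland sketch of the two-neighbor overlap test, using a sorted array in place of the kernel AVL tree; the rl_t field names follow the listing, everything else is illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down range lock: offset and length only. */
typedef struct {
	uint64_t r_off;
	uint64_t r_len;
} rl_t;

/*
 * Two-neighbor conflict test (cf. lines 158-167): with locks sorted
 * by r_off, only the nearest lock starting before the new range and
 * the first lock starting at or after it can possibly overlap.
 */
static bool
range_overlaps(const rl_t *locks, size_t n, uint64_t off, uint64_t len)
{
	size_t i = 0;

	/* Lower bound: first lock with r_off >= off. */
	while (i < n && locks[i].r_off < off)
		i++;

	/* Previous lock conflicts if it extends past our start. */
	if (i > 0 && locks[i - 1].r_off + locks[i - 1].r_len > off)
		return (true);

	/* Next lock conflicts if it starts before our end. */
	if (i < n && locks[i].r_off < off + len)
		return (true);

	return (false);
}

int
main(void)
{
	rl_t locks[] = { { 0, 100 }, { 200, 50 } };

	printf("%d\n", range_overlaps(locks, 2, 100, 100)); /* 0: fits the gap */
	printf("%d\n", range_overlaps(locks, 2, 150, 100)); /* 1: hits [200, 250) */
	return (0);
}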
251 * Create and add a new proxy range lock for the supplied range.
271 zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
274 uint64_t off = new->r_off;
275 uint64_t len = new->r_len;
281 * range may overlap with the new range
282 * - null, if there were no ranges starting before the new one
304 /* no overlaps, use the original new rl_t in the tree */
305 avl_insert(tree, new, where);
314 new->r_cnt = 0; /* will use proxies in tree */
317 * of the new range. For each entry we make it a proxy if it
319 * gaps between the ranges then we create a new proxy range.
337 /* new range ends in the middle of this block */
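
The reader path shares rather than blocks: entries that overlap the new range become reference-counted proxies, gaps get fresh proxies (lines 304-337), and the caller's own rl_t drops to r_cnt = 0 (line 314) because the proxies in the tree now carry the counts. This is also what the header comment at line 66 is describing. A sketch of the two bookkeeping moves follows; range_new_proxy and range_share are hypothetical stand-ins for the file's internal helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { B_FALSE, B_TRUE } boolean_t;

/* Reader-side entry; r_cnt counts the readers sharing a proxy. */
typedef struct {
	uint64_t	r_off;
	uint64_t	r_len;
	uint64_t	r_cnt;
	boolean_t	r_proxy;
} rl_t;

/*
 * Gap filler (cf. the comment at line 251): a fresh proxy covering
 * [off, off + len) on behalf of exactly one reader.
 */
static rl_t *
range_new_proxy(uint64_t off, uint64_t len)
{
	rl_t *p = malloc(sizeof (rl_t));

	p->r_off = off;
	p->r_len = len;
	p->r_cnt = 1;
	p->r_proxy = B_TRUE;
	return (p);
}

/*
 * Sharing an existing entry (cf. lines 314-319): mark it as a proxy
 * if it is not one already, then count the new reader in.
 */
static void
range_share(rl_t *rl)
{
	rl->r_proxy = B_TRUE;
	rl->r_cnt++;
}

int
main(void)
{
	rl_t *p = range_new_proxy(0, 512);	/* gap proxy, one reader */

	range_share(p);				/* a second reader arrives */
	printf("r_cnt = %llu\n", (unsigned long long)p->r_cnt);	/* 2 */
	free(p);
	return (0);
}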
356 zfs_range_lock_reader(znode_t *zp, rl_t *new)
361 uint64_t off = new->r_off;
362 uint64_t len = new->r_len;
368 prev = avl_find(tree, new, &where);
416 zfs_range_add_reader(tree, new, prev, where);
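
zfs_range_lock_reader() starts by locating the existing entry whose range could cover the start of the new one (line 368); with the tree keyed on r_off, that is the last entry starting at or before the new offset. A sorted-array equivalent of that lookup, purely illustrative (the kernel walks the AVL tree, and find_prev is a hypothetical name):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t r_off;
	uint64_t r_len;
} rl_t;

/*
 * Stand-in for the lookup at line 368: return the last lock starting
 * at or before off, since only that entry can cover the start of the
 * new range. NULL when every lock starts later.
 */
static rl_t *
find_prev(rl_t *locks, size_t n, uint64_t off)
{
	rl_t *prev = NULL;
	size_t i;

	for (i = 0; i < n && locks[i].r_off <= off; i++)
		prev = &locks[i];
	return (prev);
}

int
main(void)
{
	rl_t locks[] = { { 0, 100 }, { 200, 50 } };
	rl_t *prev = find_prev(locks, 2, 150);

	printf("prev starts at %llu\n",
	    (unsigned long long)(prev ? prev->r_off : 0));	/* 0 */
	return (0);
}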
428 rl_t *new;
432 new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
433 new->r_zp = zp;
434 new->r_off = off;
437 new->r_len = len;
438 new->r_cnt = 1; /* assume it's going to be in the tree */
439 new->r_type = type;
440 new->r_proxy = B_FALSE;
441 new->r_write_wanted = B_FALSE;
442 new->r_read_wanted = B_FALSE;
450 avl_add(&zp->z_range_avl, new);
452 zfs_range_lock_reader(zp, new);
454 zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
456 return (new);
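
Finally, the entry point at lines 428-456: it allocates the rl_t, records the caller's request (lines 433-442), takes the fast path of a direct avl_add() when no other locks exist (line 450), and otherwise dispatches to the reader or writer path. The returned rl_t is an opaque handle that the caller holds across its I/O and later hands to the matching unlock routine. A userland model of that handle lifecycle; only the bookkeeping is shown, since the real function serializes on a per-znode mutex and may block:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { B_FALSE, B_TRUE } boolean_t;
typedef enum { RL_READER, RL_WRITER, RL_APPEND } rl_type_t;

/* Cut-down rl_t carrying only the fields initialized at lines 433-442. */
typedef struct {
	uint64_t	r_off;
	uint64_t	r_len;
	uint64_t	r_cnt;
	rl_type_t	r_type;
	boolean_t	r_proxy;
	boolean_t	r_write_wanted;
	boolean_t	r_read_wanted;
} rl_t;

/*
 * Model of the entry point: allocate the handle, record the request,
 * then dispatch on type. No waiting or tree surgery here.
 */
static rl_t *
range_lock(uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new = malloc(sizeof (rl_t));

	new->r_off = off;
	new->r_len = len;
	new->r_cnt = 1;		/* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;
	/* the reader and writer/append paths would diverge here */
	return (new);
}

static void
range_unlock(rl_t *rl)
{
	free(rl);
}

int
main(void)
{
	rl_t *rl = range_lock(0, 4096, RL_READER);

	/* in the kernel, I/O on [0, 4096) would happen here */
	range_unlock(rl);
	return (0);
}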