Lines Matching refs:ip

102 ud_bmap_has_holes(struct ud_inode *ip)
109 ASSERT(RW_LOCK_HELD(&ip->i_contents));
112 if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
113 if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
114 for (i = 0; i < ip->i_ext_used; i++) {
115 iext = &ip->i_ext[i];
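
A file stored as a single embedded allocation descriptor (ICB_FLAG_ONE_AD) is always contiguous, so the scan above only runs for extent-based files. A minimal userland sketch of the hole test, assuming a simplified icb_ext and an IB_UN_RE_AL flag value marking an unrecorded (hole) extent; both are stand-ins for kernel definitions the fragments do not show:

    #include <stdint.h>

    #define IB_UN_RE_AL 2   /* assumed: extent is unallocated */

    struct icb_ext {
        uint32_t ib_flags;   /* allocation state */
        uint64_t ib_offset;  /* file offset of extent */
        uint64_t ib_count;   /* length in bytes */
    };

    /* Return 1 if any extent is a hole, 0 otherwise. */
    static int
    bmap_has_holes(const struct icb_ext *ext, int32_t used)
    {
        for (int32_t i = 0; i < used; i++) {
            if (ext[i].ib_flags == IB_UN_RE_AL)
                return (1);
        }
        return (0);
    }
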
128 ud_bmap_read(struct ud_inode *ip, u_offset_t off, daddr_t *bnp, int32_t *lenp)
137 ASSERT(RW_LOCK_HELD(&ip->i_contents));
139 lbmask = ip->i_udf->udf_lbmask;
140 l2b = ip->i_udf->udf_l2b_shift;
141 l2d = ip->i_udf->udf_l2d_shift;
143 if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
144 for (i = 0; i < ip->i_ext_used; i++) {
145 iext = &ip->i_ext[i];
161 bno = ud_xlate_to_daddr(ip->i_udf,
171 if (i == ip->i_ext_used) {
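
The loop above locates the extent covering `off` and converts its starting block to a device address via ud_xlate_to_daddr, using the lbmask/l2b/l2d values cached from the mount. A simplified sketch of the lookup arithmetic, dropping partition translation and using a flat block numbering (struct ext and its fields are assumptions, not the kernel layout):

    #include <errno.h>
    #include <stdint.h>

    struct ext {
        uint64_t off;    /* file offset covered */
        uint64_t len;    /* bytes in this extent */
        uint32_t block;  /* starting logical block */
    };

    /*
     * Map file offset `off` to a starting block and the number of
     * contiguous bytes available there.  l2b = log2(block size).
     */
    static int
    bmap_read(const struct ext *e, int n, uint64_t off,
        int l2b, uint64_t *bnp, uint32_t *lenp)
    {
        for (int i = 0; i < n; i++, e++) {
            if (off >= e->off && off < e->off + e->len) {
                *bnp = e->block + ((off - e->off) >> l2b);
                *lenp = (uint32_t)(e->len - (off - e->off));
                return (0);
            }
        }
        return (EINVAL);  /* loop ran off the end: offset not mapped */
    }
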
196 ud_bmap_write(struct ud_inode *ip,
214 ASSERT(RW_WRITE_HELD(&ip->i_contents));
216 udf_vfsp = ip->i_udf;
225 issync = ((ip->i_flag & ISYNC) != 0);
227 isdir = (ip->i_type == VDIR);
233 if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
234 if (end_req < ip->i_max_emb) {
238 if (ip->i_size != 0) {
239 error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp);
249 ip->i_desc_type = ICB_FLAG_SHORT_AD;
253 ASSERT(ip->i_ext == NULL);
254 ASSERT(ip->i_astrat == STRAT_TYPE4);
256 ip->i_ext_used = 0;
257 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
258 ip->i_cur_max_ext--;
264 ip->i_ext_count =
266 iext = ip->i_ext = (struct icb_ext *)kmem_zalloc(
267 ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
278 if ((PCEIL(ip->i_size) < PBASE(off)) &&
279 ((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {
281 if (ip->i_size != 0) {
289 if (error = ud_create_ext(ip, ip->i_ext_used,
298 * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
301 count = PBASE(off) - PCEIL(ip->i_size);
302 (void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
320 if (error = ud_create_ext(ip, ip->i_ext_used,
331 ip->i_desc_type = ICB_FLAG_ONE_AD;
333 for (i = 0; i < ip->i_ext_used; i++) {
334 iext = &ip->i_ext[i];
336 ud_free_space(ip->i_udf->udf_vfs,
343 kmem_free(ip->i_ext,
344 ip->i_ext_count *
346 ip->i_ext = NULL;
347 ip->i_ext_count = ip->i_ext_used = 0;
361 if (ip->i_ext == NULL) {
368 if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
373 isize = CEIL(ip->i_size);
382 if (ip->i_ext == NULL) {
384 } else if (ip->i_ext_used == 0) {
388 error = ud_last_alloc_ext(ip, off, size, alloc_only);
398 iext = &ip->i_ext[ip->i_ext_used - 1];
414 ASSERT(ip->i_ext);
419 for (i = 0; i < ip->i_ext_used; i++) {
420 iext = &ip->i_ext[i];
433 iext = &ip->i_ext[i];
464 ip, i, BASE(iext->ib_offset) -
475 if ((error = ud_break_create_new_icb(ip, i,
479 iext = &ip->i_ext[i];
493 if ((error = ud_break_create_new_icb(ip, i,
513 pext = &ip->i_ext[i - 1];
520 iext = &ip->i_ext[i];
523 if ((error = ud_alloc_space(ip->i_vfs,
524 ip->i_icb_prn, prox, blkcount,
528 ip->i_lbr += sz;
534 error = ud_zero_it(ip, blkno, sz);
549 pext = &ip->i_ext[i - 1];
560 ud_remove_ext_at_index(ip, i);
574 ip, i, sz << l2b)) != 0) {
578 iext = &ip->i_ext[i];
580 iext->ib_prn = ip->i_icb_prn;
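
Two things stand out in the write path above: an embedded (ICB_FLAG_ONE_AD) file is converted to short_ad extents once the requested end offset no longer fits under i_max_emb, and a hole is recorded only when the page-rounded gap between the old EOF and the new write offset is at least one page (source lines 278-302). A sketch of that hole test, assuming 4 KB pages and the usual round-down/round-up definitions of PBASE and PCEIL:

    #include <stdint.h>

    #define PAGESIZE 4096ULL
    #define PBASE(x) ((x) & ~(PAGESIZE - 1))                  /* round down */
    #define PCEIL(x) (((x) + PAGESIZE - 1) & ~(PAGESIZE - 1)) /* round up */

    /*
     * Bytes of hole to record between the current EOF and a write
     * starting at `off`, or 0 if the page-rounded gap is under one page.
     */
    static uint64_t
    hole_bytes(uint64_t i_size, uint64_t off)
    {
        if ((PCEIL(i_size) < PBASE(off)) &&
            ((PBASE(off) - PCEIL(i_size)) >= PAGESIZE))
            return (PBASE(off) - PCEIL(i_size));
        return (0);
    }
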
602 ud_common_ad(struct ud_inode *ip, struct buf *bp)
617 if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
621 } else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
631 count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
634 bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
635 kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
636 ip->i_ext = addr;
637 ip->i_ext_count = count;
642 lbmask = ip->i_udf->udf_lbmask;
643 iext = &ip->i_ext[ip->i_ext_used - 1];
655 if (ip->i_con_used == ip->i_con_count) {
659 old = ip->i_con;
660 old_count = ip->i_con_count *
662 ip->i_con_count += EXT_PER_MALLOC;
663 ip->i_con = kmem_zalloc(ip->i_con_count *
667 bcopy(old, ip->i_con, old_count);
671 con = &ip->i_con[ip->i_con_used];
676 con->ib_prn = ip->i_icb_prn;
681 ip->i_con_used++;
700 ip->i_ext_used++;
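
When the descriptors read from disk no longer fit, ud_common_ad reallocates the in-core extent array up to the next multiple of EXT_PER_MALLOC and copies the old entries over (source lines 631-637); the continuation array i_con is grown the same way. A userland sketch of that pattern, with calloc/free standing in for kmem_zalloc/kmem_free and an assumed chunk size:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define EXT_PER_MALLOC 8   /* assumed chunk size */

    struct icb_ext { uint64_t ib_offset, ib_count; };

    /* Grow *extp (capacity *countp, `used` live) to fit used + ndesc. */
    static int
    grow_ext_array(struct icb_ext **extp, int32_t *countp,
        int32_t used, int32_t ndesc)
    {
        int32_t count = (((used + ndesc) / EXT_PER_MALLOC) + 1) *
            EXT_PER_MALLOC;
        struct icb_ext *addr = calloc(count, sizeof (struct icb_ext));

        if (addr == NULL)
            return (-1);
        if (*extp != NULL) {
            memcpy(addr, *extp, used * sizeof (struct icb_ext));
            free(*extp);
        }
        *extp = addr;
        *countp = count;
        return (0);
    }
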
707 ud_read_next_cont(struct ud_inode *ip)
715 cont = &ip->i_con[ip->i_con_read];
718 bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
720 bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
732 ud_common_ad(ip, bp);
740 ud_read_icb_till_off(struct ud_inode *ip, u_offset_t offset)
747 if (ip->i_desc_type == ICB_FLAG_ONE_AD)
749 else if ((ip->i_astrat != STRAT_TYPE4) &&
750 (ip->i_astrat != STRAT_TYPE4096))
752 else if (ip->i_ext_used == 0)
753 return ((ip->i_size == 0) ? 0 : EINVAL);
760 mutex_enter(&ip->i_con_lock);
761 iext = &ip->i_ext[ip->i_ext_used - 1];
763 if (ip->i_con_used == ip->i_con_read) {
767 if ((error = ud_read_next_cont(ip)) != 0)
769 ip->i_con_read++;
770 iext = &ip->i_ext[ip->i_ext_used - 1];
772 mutex_exit(&ip->i_con_lock);
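
The function above is the lazy reader the other paths rely on: it returns immediately for embedded files, rejects unknown allocation strategies, and otherwise pulls continuation blocks one at a time, under i_con_lock, until the last in-core extent reaches the requested offset. A sketch of that loop (the node type, pthread lock, and read_next_cont callback are illustrative stand-ins, not the kernel interfaces; at least one extent is assumed present, matching the i_ext_used == 0 early return above):

    #include <pthread.h>
    #include <stdint.h>

    struct icb_ext { uint64_t ib_offset, ib_count; };

    struct node {
        pthread_mutex_t con_lock;
        struct icb_ext *ext;        /* may move when grown */
        int32_t ext_used;
        int32_t con_used, con_read;
        int (*read_next_cont)(struct node *);  /* appends extents */
    };

    static int
    read_icb_till_off(struct node *ip, uint64_t offset)
    {
        int error = 0;
        struct icb_ext *last;

        pthread_mutex_lock(&ip->con_lock);
        last = &ip->ext[ip->ext_used - 1];
        while (last->ib_offset + last->ib_count < offset) {
            if (ip->con_read == ip->con_used)
                break;              /* no more continuations */
            if ((error = ip->read_next_cont(ip)) != 0)
                break;
            ip->con_read++;
            /* re-fetch: the array may have been reallocated */
            last = &ip->ext[ip->ext_used - 1];
        }
        pthread_mutex_unlock(&ip->con_lock);
        return (error);
    }
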
779 * Assumption is that off is beyond ip->i_size
783 ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
794 udf_vfsp = ip->i_udf;
808 iext = &ip->i_ext[ip->i_ext_used - 1];
832 iext = &ip->i_ext[ip->i_ext_used - 1];
843 error = ud_create_ext(ip, ip->i_ext_used,
855 error = ud_create_ext(ip, ip->i_ext_used - 1,
865 iext = &ip->i_ext[ip->i_ext_used - 1];
876 (void) ud_create_ext(ip, ip->i_ext_used - 1,
886 (void) ud_create_ext(ip, ip->i_ext_used,
899 iext = &ip->i_ext[ip->i_ext_used - 1];
905 iext = &ip->i_ext[ip->i_ext_used - 1];
916 error = ud_create_ext(ip, ip->i_ext_used,
925 error = ud_create_ext(ip, ip->i_ext_used - 1,
939 ud_break_create_new_icb(struct ud_inode *ip,
947 iext = &ip->i_ext[index];
951 if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
955 for (i = ip->i_ext_used; i > index; i--) {
956 ip->i_ext[i] = ip->i_ext[i - 1];
959 next = &ip->i_ext[index + 1];
960 iext = &ip->i_ext[index];
967 iext->ib_count >> ip->i_udf->udf_l2b_shift;
969 ip->i_ext_used++;
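
ud_break_create_new_icb splits the extent at `index` in two so a write can land on a clean boundary: later entries shift right one slot, the first piece keeps `count` bytes, and the second inherits the remainder with its offset and starting block advanced. An equivalent sketch (struct ext is a simplified record; `count` is assumed block-aligned):

    #include <stdint.h>
    #include <string.h>

    struct ext { uint64_t off, len; uint32_t block; };

    /*
     * Split e[index] so its first piece is `count` bytes.  The caller
     * must already have room for used + 1 entries (ud_bump_ext_count
     * in the original).  l2b = log2(block size).
     */
    static void
    split_ext(struct ext *e, int32_t *used, int32_t index,
        uint64_t count, int l2b)
    {
        /* shift everything from `index` onward right by one slot */
        memmove(&e[index + 1], &e[index],
            (*used - index) * sizeof (struct ext));

        e[index].len = count;
        e[index + 1].off += count;
        e[index + 1].len -= count;
        e[index + 1].block += (uint32_t)(count >> l2b);
        (*used)++;
    }
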
974 ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
978 ASSERT(index <= ip->i_ext_used);
980 for (i = index; i < ip->i_ext_used; i++) {
981 if ((i + 1) < ip->i_ext_count) {
982 ip->i_ext[i] = ip->i_ext[i + 1];
984 bzero(&ip->i_ext[i], sizeof (struct icb_ext));
987 ip->i_ext_used--;
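
Removal is the mirror image: entries after `index` shift left one slot and the vacated tail slot is zeroed, as the bzero on source line 984 suggests. A sketch, assuming 0 <= index < *used:

    #include <stdint.h>
    #include <string.h>

    struct ext { uint64_t off, len; uint32_t block; };

    static void
    remove_ext_at_index(struct ext *e, int32_t *used, int32_t index)
    {
        memmove(&e[index], &e[index + 1],
            (*used - index - 1) * sizeof (struct ext));
        memset(&e[*used - 1], 0, sizeof (struct ext));
        (*used)--;
    }
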
991 ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
997 ASSERT(ip);
1002 if (ip->i_ext_used >= ip->i_ext_count) {
1004 old_count = sizeof (struct icb_ext) * ip->i_ext_count;
1005 ip->i_ext_count += EXT_PER_MALLOC;
1007 ip->i_ext_count, sleep_flag);
1008 bcopy(ip->i_ext, iext, old_count);
1009 kmem_free(ip->i_ext, old_count);
1010 ip->i_ext = iext;
1013 if (ip->i_ext_used >= ip->i_cur_max_ext) {
1019 lbmask = ip->i_udf->udf_lbmask;
1020 l2b = ip->i_udf->udf_l2b_shift;
1022 if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
1030 if (ip->i_con_used != 0) {
1031 icon = &ip->i_con[ip->i_con_used - 1];
1040 if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
1050 if (ip->i_con_used == ip->i_con_count) {
1054 old = ip->i_con;
1055 old_count = ip->i_con_count *
1057 ip->i_con_count += EXT_PER_MALLOC;
1058 ip->i_con = kmem_zalloc(ip->i_con_count *
1061 bcopy(old, ip->i_con, old_count);
1065 icon = &ip->i_con[ip->i_con_used++];
1067 icon->ib_prn = ip->i_icb_prn;
1078 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1080 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1086 ip->i_cur_max_ext += sz / elen;
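
ud_bump_ext_count does two jobs: grow the in-core array (the same EXT_PER_MALLOC pattern as above) and, once the descriptor count exceeds what the ICB plus the continuation blocks read so far can hold (i_cur_max_ext, which the decrement at source line 258 apparently keeps one slot short to leave room for a continuation descriptor), allocate a fresh continuation block and raise the budget by however many allocation descriptors fit in it. A sketch of that sizing, using the UDF/ECMA-167 layout sizes (short_ad is 8 bytes, long_ad 16, the allocation-extent descriptor header 24; these come from the format specification, not from the fragments):

    #include <stdint.h>

    #define SHORT_AD_SIZE       8   /* 4-byte length + 4-byte position */
    #define LONG_AD_SIZE        16  /* adds lb_addr + implementation use */
    #define ALLOC_EXT_DESC_SIZE 24  /* tag + prev location + desc length */

    /* Descriptors gained per continuation block of `lbsize` bytes. */
    static uint32_t
    descs_per_cont_block(uint32_t lbsize, int use_long_ad)
    {
        uint32_t elen = use_long_ad ? LONG_AD_SIZE : SHORT_AD_SIZE;

        return ((lbsize - ALLOC_EXT_DESC_SIZE) / elen);
    }
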
1092 ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
1106 udf_vfsp = ip->i_udf;
1111 if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
1115 iext = &ip->i_ext[index];
1118 (ip->i_ext_count == 0)) {
1121 iext->ib_prn = ip->i_icb_prn;
1127 if ((error = ud_alloc_space(ip->i_vfs,
1128 ip->i_icb_prn, 0, blkcount,
1135 ip->i_lbr += sz;
1146 if (ip->i_ext_used <= index)
1147 ip->i_ext_used++;
1160 if ((error = ud_alloc_space(ip->i_vfs,
1161 ip->i_icb_prn, prox, blkcount,
1175 ip->i_lbr += sz;
1179 if ((error = ud_bump_ext_count(ip, KM_SLEEP))
1183 pext = &ip->i_ext[index];
1184 iext = &ip->i_ext[index + 1];
1186 iext->ib_prn = ip->i_icb_prn;
1196 if (ip->i_ext_used <= index)
1197 ip->i_ext_used++;
1201 error = ud_zero_it(ip, blkno, sz);
1222 if (ip->i_ext_used <= index)
1223 ip->i_ext_used++;
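
ud_create_ext fills the slot at `index`, allocating disk space unless the caller asked for a hole (NEW_EXT in the fragments), and bumps i_ext_used only when the index lands at the current end of the array. A bookkeeping sketch; the flag values and the allocator callback are stand-ins for ud_alloc_space and the kernel flags:

    #include <stdint.h>

    #define NEW_EXT 0x01   /* record a hole, allocate nothing */

    struct ext { uint64_t off, len; uint32_t block; int hole; };

    /* alloc() returns a starting block for `blkcount` blocks, or -1. */
    static int
    create_ext(struct ext *e, int32_t *used, int32_t index, uint32_t flags,
        uint64_t off, uint64_t size, int l2b,
        int64_t (*alloc)(uint32_t blkcount))
    {
        uint32_t blkcount = (uint32_t)((size + (1ULL << l2b) - 1) >> l2b);

        e[index].off = off;
        e[index].len = size;
        if (flags & NEW_EXT) {
            e[index].hole = 1;          /* unrecorded extent */
        } else {
            int64_t blkno = alloc(blkcount);

            if (blkno < 0)
                return (-1);            /* ENOSPC analogue */
            e[index].block = (uint32_t)blkno;
            e[index].hole = 0;
        }
        if (*used <= index)
            (*used)++;
        return (0);
    }
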
1251 ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
1265 udf_vfsp = ip->i_udf;
1267 ip->i_icb_prn, start_block, block_count, &dummy);
1275 bp->b_edev = ip->i_dev;
1276 bp->b_dev = cmpdev(ip->i_dev);
1280 bp->b_file = ip->i_vnode;
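
ud_zero_it writes zeroes over freshly allocated blocks through a raw buf aimed at i_dev, so a reader can never see stale disk contents in a range that was just attached to the file. A userland analogue using pwrite on an open block-device descriptor (the fd, the flat block numbering, and the block size parameter are assumptions):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Zero `count` blocks of `bsize` bytes starting at block `start`. */
    static int
    zero_blocks(int fd, uint64_t start, uint32_t count, uint32_t bsize)
    {
        char *buf = calloc(1, bsize);

        if (buf == NULL)
            return (-1);
        for (uint32_t i = 0; i < count; i++) {
            if (pwrite(fd, buf, bsize,
                (off_t)((start + i) * bsize)) != (ssize_t)bsize) {
                free(buf);
                return (-1);
            }
        }
        free(buf);
        return (0);
    }
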