Lines Matching defs:chunk

161 #define	_ii_unlock_chunk(ip, chunk)	_ii_unlock_chunks(ip, chunk, 1)
1881 * copy, with the mapping of chunk locations stored in a bitmap file.
1892 /* do not add in partial chunk at end */
3999 /* take 1 off as chunk 0 contains header */
6509 * avoid deadlocks on the same chunk.
6674 * Avoid deadlock with COW on same chunk of sibling shadow
6675 * by unlocking this chunk before copying all other sibling
6680 * Only using a single chunk when copying to master avoids
6733 * Reacquire chunk lock and check that a COW by a sibling
6734 * has not already copied this chunk.
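
The deadlock-avoidance comments above (source lines 6509-6734) describe a pattern: drop the chunk lock before copying the chunk to sibling shadows, then reacquire it and re-check whether a sibling's copy-on-write already handled the chunk. Below is a minimal userland model of that pattern; shadow_t, chunk_copied and do_copy are stand-ins invented for illustration, not driver structures or functions, and pthread mutexes replace the driver's chunk locks.

#include <pthread.h>
#include <stdbool.h>

/* Stand-in for the per-shadow state guarding one chunk. */
typedef struct shadow {
    pthread_mutex_t lock;          /* stands in for the chunk lock */
    bool            chunk_copied;  /* stands in for the shd/copy bit */
} shadow_t;

static void
do_copy(shadow_t *sh)
{
    sh->chunk_copied = true;       /* placeholder for the data movement */
}

static void
cow_with_siblings(shadow_t *self, shadow_t **siblings, int nsib)
{
    int i;

    pthread_mutex_lock(&self->lock);
    if (self->chunk_copied) {              /* nothing left to do */
        pthread_mutex_unlock(&self->lock);
        return;
    }
    /*
     * Unlock this chunk before touching the sibling shadows, so a COW
     * running against a sibling cannot deadlock on the same chunk.
     */
    pthread_mutex_unlock(&self->lock);

    for (i = 0; i < nsib; i++) {
        pthread_mutex_lock(&siblings[i]->lock);
        if (!siblings[i]->chunk_copied)
            do_copy(siblings[i]);
        pthread_mutex_unlock(&siblings[i]->lock);
    }

    /* Reacquire, and check that a sibling has not already copied it. */
    pthread_mutex_lock(&self->lock);
    if (!self->chunk_copied)
        do_copy(self);
    pthread_mutex_unlock(&self->lock);
}
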
7245 * Locks access to the specified chunk
7250 _ii_lock_chunk(_ii_info_t *ip, chunkid_t chunk)
7252 if (chunk == II_NULLCHUNK) {
7269 while (DSW_BIT_ISSET(ip->bi_busy[chunk / DSW_BITS],
7270 chunk % DSW_BITS))
7272 DSW_BIT_SET(ip->bi_busy[chunk / DSW_BITS], chunk % DSW_BITS);
7281 * Tries to lock access to the specified chunk
7287 _ii_trylock_chunk(_ii_info_t *ip, chunkid_t chunk)
7291 ASSERT(chunk != II_NULLCHUNK);
7303 if (DSW_BIT_ISSET(ip->bi_busy[chunk / DSW_BITS], chunk % DSW_BITS)) {
7307 DSW_BIT_SET(ip->bi_busy[chunk / DSW_BITS], chunk % DSW_BITS);
7322 _ii_unlock_chunks(_ii_info_t *ip, chunkid_t chunk, int n)
7324 if (chunk == II_NULLCHUNK) {
7340 for (; n-- > 0; chunk++) {
7341 ASSERT(DSW_BIT_ISSET(ip->bi_busy[chunk / DSW_BITS],
7342 chunk % DSW_BITS));
7343 DSW_BIT_CLR(ip->bi_busy[chunk / DSW_BITS],
7344 chunk % DSW_BITS);
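
The _ii_lock_chunk, _ii_trylock_chunk and _ii_unlock_chunks fragments above show the shape of the per-chunk lock: one busy bit per chunk in the bi_busy bitmap, waited on and set under a driver mutex. A minimal userland sketch of the same scheme follows, assuming DSW_BITS is 8 and using a pthread mutex and condition variable in place of the kernel primitives; the bit macros and NCHUNKS are simplified stand-ins.

#include <pthread.h>

#define DSW_BITS  8      /* bits per bitmap byte (assumption) */
#define NCHUNKS   1024   /* arbitrary size for the sketch */

#define BIT_ISSET(v, b)  (((v)[(b) / DSW_BITS] >> ((b) % DSW_BITS)) & 1)
#define BIT_SET(v, b)    ((v)[(b) / DSW_BITS] |= (1u << ((b) % DSW_BITS)))
#define BIT_CLR(v, b)    ((v)[(b) / DSW_BITS] &= ~(1u << ((b) % DSW_BITS)))

static unsigned char busy[NCHUNKS / DSW_BITS];  /* stands in for bi_busy */
static pthread_mutex_t busy_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t busy_cv = PTHREAD_COND_INITIALIZER;

static void
lock_chunk(int chunk)
{
    pthread_mutex_lock(&busy_mtx);
    while (BIT_ISSET(busy, chunk))          /* wait for the current holder */
        pthread_cond_wait(&busy_cv, &busy_mtx);
    BIT_SET(busy, chunk);
    pthread_mutex_unlock(&busy_mtx);
}

static int
trylock_chunk(int chunk)
{
    int got;

    pthread_mutex_lock(&busy_mtx);
    got = !BIT_ISSET(busy, chunk);
    if (got)
        BIT_SET(busy, chunk);
    pthread_mutex_unlock(&busy_mtx);
    return (got);
}

static void
unlock_chunks(int chunk, int n)   /* a single-chunk unlock is the n == 1 case */
{
    pthread_mutex_lock(&busy_mtx);
    for (; n-- > 0; chunk++)
        BIT_CLR(busy, chunk);
    pthread_cond_broadcast(&busy_cv);       /* wake any waiters */
    pthread_mutex_unlock(&busy_mtx);
}

As with the macro at source line 161, unlocking a range in one call is what lets a caller release a run of adjacent chunks claimed by the copy-bit scan further down.
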
7791 * Determine if a chunk has been copied to the shadow device
7801 _ii_ab_tst_shd_bit(_ii_info_t *ip, chunkid_t chunk)
7810 fba = ip->bi_shdfba + chunk / (FBA_SIZE(1) * DSW_BITS);
7811 chunk %= FBA_SIZE(1) * DSW_BITS;
7821 rc = DSW_BIT_ISSET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7822 chunk%DSW_BITS);
7831 * Records that a chunk has been copied to the shadow device
7838 _ii_ab_set_shd_bit(_ii_info_t *ip, chunkid_t chunk)
7847 fba = ip->bi_shdfba + chunk / (FBA_SIZE(1) * DSW_BITS);
7848 chunk %= FBA_SIZE(1) * DSW_BITS;
7858 if (DSW_BIT_ISSET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7859 chunk%DSW_BITS) == 0) {
7860 DSW_BIT_SET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7861 chunk%DSW_BITS);
7878 * Determine if a chunk needs to be copied during updates.
7881 * Returns 1 if the copy bit for the chunk is set
7882 * Returns 0 if the copy bit for the chunk is not set
7887 _ii_ab_tst_copy_bit(_ii_info_t *ip, chunkid_t chunk)
7896 fba = ip->bi_copyfba + chunk / (FBA_SIZE(1) * DSW_BITS);
7897 chunk %= FBA_SIZE(1) * DSW_BITS;
7907 rc = DSW_BIT_ISSET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7908 chunk%DSW_BITS);
7917 * Records that a chunk needs to be copied during updates
7924 _ii_ab_set_copy_bit(_ii_info_t *ip, chunkid_t chunk)
7933 fba = ip->bi_copyfba + chunk / (FBA_SIZE(1) * DSW_BITS);
7934 chunk %= FBA_SIZE(1) * DSW_BITS;
7944 if (DSW_BIT_ISSET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7945 chunk%DSW_BITS) == 0) {
7946 DSW_BIT_SET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7947 chunk%DSW_BITS);
7965 * Records that a chunk has been cleared on the shadow device; this
7974 _ii_ab_clr_copy_bits(_ii_info_t *ip, chunkid_t chunk, int nchunks)
7983 fba = ip->bi_copyfba + chunk / (FBA_SIZE(1) * DSW_BITS);
7984 chunk %= FBA_SIZE(1) * DSW_BITS;
7994 for (; nchunks-- > 0; chunk++) {
7995 DSW_BIT_CLR(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
7996 chunk%DSW_BITS);
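
The _ii_ab_* routines above all begin with the same address arithmetic: the chunk number is split into a disk-block (FBA) offset within the bitmap volume and a bit position within that block. The short standalone example below walks through that arithmetic, assuming FBA_SIZE(1) is a 512-byte block and DSW_BITS is 8; the sample values are arbitrary.

#include <stdio.h>

#define FBA_SIZE(n)  ((n) * 512)  /* bytes in n disk blocks (assumption) */
#define DSW_BITS     8            /* bits per bitmap byte (assumption) */

int
main(void)
{
    long bi_shdfba = 10;   /* first block of the shadow bitmap (example) */
    long chunk = 9000;     /* chunk being tested or set (example) */

    /* Each bitmap block covers FBA_SIZE(1) * DSW_BITS = 4096 chunks. */
    long fba = bi_shdfba + chunk / (FBA_SIZE(1) * DSW_BITS);
    long bit = chunk % (FBA_SIZE(1) * DSW_BITS);

    /* Within that block, the usual chunk/DSW_BITS, chunk%DSW_BITS split. */
    printf("chunk %ld -> block %ld, byte %ld, bit %ld\n",
        chunk, fba, bit / DSW_BITS, bit % DSW_BITS);
    return (0);
}

The copy-bit routines do the same arithmetic with bi_copyfba as the base block instead of bi_shdfba.
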
8090 chunkid_t chunk;
8104 chunk = startchunk % bits_per_fba;
8117 for (; startchunk < high; chunk++, startchunk++) {
8118 if (DSW_BIT_ISSET(tmp->sb_vec->sv_addr[chunk/DSW_BITS],
8119 chunk%DSW_BITS)) {
8131 * chunk while we were acquiring
8132 * the chunk lock.
8146 chunk++;
8148 nextchunk++, chunk++) {
8150 [chunk/DSW_BITS], chunk%DSW_BITS)) {
8544 * Determine if a chunk has been copied to the shadow device
8552 _ii_km_tst_shd_bit(_ii_info_t *ip, chunkid_t chunk)
8562 rc = DSW_BIT_ISSET(bmp[chunk/DSW_BITS], chunk%DSW_BITS);
8571 * Records that a chunk has been copied to the shadow device
8575 _ii_km_set_shd_bit(_ii_info_t *ip, chunkid_t chunk)
8584 if (DSW_BIT_ISSET(bmp[chunk/DSW_BITS], chunk%DSW_BITS) == 0) {
8585 DSW_BIT_SET(bmp[chunk/DSW_BITS], chunk%DSW_BITS);
8596 * Determine if a chunk needs to be copied during updates.
8599 * Returns 1 if the copy bit for the chunk is set,
8604 _ii_km_tst_copy_bit(_ii_info_t *ip, chunkid_t chunk)
8614 rc = DSW_BIT_ISSET(bmp[chunk/DSW_BITS], chunk%DSW_BITS);
8623 * Records that a chunk needs to be copied during updates
8627 _ii_km_set_copy_bit(_ii_info_t *ip, chunkid_t chunk)
8636 if (DSW_BIT_ISSET(bmp[chunk/DSW_BITS], chunk%DSW_BITS) == 0) {
8637 DSW_BIT_SET(bmp[chunk/DSW_BITS], chunk%DSW_BITS);
8649 * Records that a chunk has been cleared on the shadow device
8653 _ii_km_clr_copy_bits(_ii_info_t *ip, chunkid_t chunk, int nchunks)
8662 for (; nchunks-- > 0; chunk++) {
8663 DSW_BIT_CLR(bmp[chunk/DSW_BITS], chunk%DSW_BITS);
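
The _ii_km_* fragments above are the in-memory counterparts of the _ii_ab_ routines: the whole bitmap sits in a kernel-memory buffer, so testing, setting and clearing a chunk's bit is plain array arithmetic with no bitmap block to read back or rewrite. A small sketch of that shape, again assuming an 8-bit DSW_BITS:

#define DSW_BITS  8   /* bits per bitmap byte (assumption) */

static int
tst_bit(const unsigned char *bmp, long chunk)
{
    return ((bmp[chunk / DSW_BITS] >> (chunk % DSW_BITS)) & 1);
}

static void
set_bit(unsigned char *bmp, long chunk)
{
    bmp[chunk / DSW_BITS] |= (1u << (chunk % DSW_BITS));
}

static void
clr_bits(unsigned char *bmp, long chunk, int nchunks)
{
    /* Clear a run of bits, as _ii_km_clr_copy_bits does for nchunks. */
    for (; nchunks-- > 0; chunk++)
        bmp[chunk / DSW_BITS] &= ~(1u << (chunk % DSW_BITS));
}
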
8776 _ii_km_next_copy_bit(_ii_info_t *ip, chunkid_t chunk, chunkid_t maxchunk,
8788 for (; chunk < maxchunk; chunk++) {
8789 if (DSW_BIT_ISSET(bmp[chunk/DSW_BITS], chunk%DSW_BITS)) {
8794 if (!_ii_trylock_chunk(ip, chunk)) {
8796 _ii_lock_chunk(ip, chunk);
8801 return (chunk);
8804 for (nextchunk = chunk + 1;
8817 return (chunk);
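
Both next-copy-bit scans listed above (the _ii_ab_ loop around source line 8117 and _ii_km_next_copy_bit at 8776) follow the same pattern: walk the copy bitmap for the next set bit, try to lock that chunk, and if the trylock fails block on the lock and then re-test the bit, since whoever held the chunk may have copied it in the meantime (the comment at lines 8131-8132). The loop below is a sketch of that pattern, reusing the bit macros and lock helpers from the locking sketch earlier; it is illustrative, not the driver routine.

static int
next_copy_chunk(unsigned char *copy_bmp, int chunk, int maxchunk)
{
    for (; chunk < maxchunk; chunk++) {
        if (!BIT_ISSET(copy_bmp, chunk))
            continue;
        if (!trylock_chunk(chunk)) {
            /*
             * Someone else holds this chunk: wait for it, then
             * re-test the bit in case that thread copied the
             * chunk while we were acquiring the lock.
             */
            lock_chunk(chunk);
            if (!BIT_ISSET(copy_bmp, chunk)) {
                unlock_chunks(chunk, 1);
                continue;
            }
        }
        return (chunk);   /* returned with the chunk locked */
    }
    return (-1);          /* no set copy bits left */
}

The real routines go on to extend the claim to a run of adjacent set bits (source line 8804 onward), which the caller later releases in one call to _ii_unlock_chunks.
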
8947 /* convert chunk number from tsearch into final fba */
9198 * made to read that chunk.
10091 * write buffer will copy-on-write the chunk.
10131 /* move on to next chunk */