Searched defs:chunk (Results 26 - 48 of 48) sorted by relevance

/illumos-gate/usr/src/uts/common/xen/io/
xnb.c  722 size_t chunk = ml->b_wptr - ml->b_rptr; local
724 bcopy(ml->b_rptr, valoop, chunk);
725 valoop += chunk;
726 len += chunk;
1036 * gnttab_copy_t for each sub-page chunk in each data
1041 size_t chunk = ml->b_wptr - ml->b_rptr; local
1055 mblk_t *ml_new = replace_msg(ml, chunk,
1072 if (d_offset + chunk > PAGESIZE)
1075 "(%lu) + chunk (%lu) > PAGESIZE %d!",
1078 d_offset, chunk, (in
1096 (mblk_t *), ml, int, chunk, int, local
1123 chunk, int, len, int, item_count); local
[all...]
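
The xnb hits above show the usual mblk-chain flattening loop: each message block contributes b_wptr - b_rptr bytes, copied end to end into one contiguous buffer. A minimal user-space sketch of the same coalescing idea over a generic buffer chain (buf_seg_t and coalesce_chain are stand-ins for illustration, not the xnb code itself):

    #include <stddef.h>
    #include <string.h>

    /* Stand-in for a kernel mblk_t segment: [rptr, wptr) holds valid data. */
    typedef struct buf_seg {
        struct buf_seg      *next;
        const unsigned char *rptr;
        const unsigned char *wptr;
    } buf_seg_t;

    /* Copy every segment's payload, in order, into dst; return bytes copied. */
    static size_t
    coalesce_chain(const buf_seg_t *seg, unsigned char *dst, size_t dstlen)
    {
        size_t len = 0;

        for (; seg != NULL; seg = seg->next) {
            size_t chunk = (size_t)(seg->wptr - seg->rptr);

            if (len + chunk > dstlen)
                break;              /* destination full: stop early */
            memcpy(dst + len, seg->rptr, chunk);
            len += chunk;
        }
        return (len);
    }
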
/illumos-gate/usr/src/uts/common/io/
fssnap.c  81 * "chunks" should be considered for copy-on-write (a chunk is the unit of
86 * If a chunk has no allocated blocks, it does not need to be copied before
96 * described above, and the "hastrans bitmap" which tells it whether the chunk
97 * has been copied already or not. If the chunk is a candidate but has not
100 * task queue is dispatched for each old chunk read in which writes the old
111 * information to satisfy read requests. If the requested chunk is not a
112 * candidate, it returns a zeroed buffer. If the chunk is a candidate but
165 static int snap_getchunk(struct snapshot_id *sidp, chunknumber_t chunk,
652 * cycles through each chunk in the requested buffer and calls
653 * snap_getchunk() on each chunk t
662 chunknumber_t chunk; local
772 snap_getchunk(struct snapshot_id *sidp, chunknumber_t chunk, int offset, int len, char *buffer) argument
1189 transtbl_add(cow_map_t *cmap, chunknumber_t chunk, caddr_t buf) argument
1220 transtbl_get(cow_map_t *cmap, chunknumber_t chunk) argument
[all...]
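
The fssnap.c excerpt describes two per-chunk bitmaps: a "candidate bitmap" marking chunks with allocated blocks, and a "hastrans bitmap" marking chunks whose old contents have already been copied. A minimal sketch of that decision logic, with hypothetical names (cow_maps_t, chunk_needs_copy) standing in for the driver's own structures:

    #include <stdbool.h>
    #include <stdint.h>

    #define BITS_PER_WORD 32

    /* Hypothetical per-snapshot state: one bit per chunk in each map. */
    typedef struct {
        uint32_t *candidate;    /* chunk has allocated blocks, may need COW */
        uint32_t *hastrans;     /* old contents have already been copied */
    } cow_maps_t;

    static bool
    test_bit(const uint32_t *map, uint64_t chunk)
    {
        return ((map[chunk / BITS_PER_WORD] >> (chunk % BITS_PER_WORD)) & 1);
    }

    /*
     * True when the old contents of 'chunk' must be copied before an
     * incoming write is allowed through: the chunk is a candidate and
     * has not been translated yet.
     */
    static bool
    chunk_needs_copy(const cow_maps_t *cm, uint64_t chunk)
    {
        return (test_bit(cm->candidate, chunk) &&
            !test_bit(cm->hastrans, chunk));
    }
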
devinfo.c  104 * Size of each chunk is buf_size.
107 struct di_mem *next; /* link to next chunk */
273 static uint_t di_chunk = 32; /* I/O chunk size in pages */
693 * Copy one chunk at a time
863 * Get a chunk of memory >= size, for the snapshot
887 if (st->mem_size == 0) { /* first chunk */
891 * locate end of linked list and add a chunk at the end
1049 * going on. Otherwise, start on a new chunk.
3760 size_t chunk; local
3854 chunk
3917 size_t map_size, sz, chunk; local
[all...]
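
devinfo.c grows its snapshot buffer as a linked list of fixed-size chunks: the first allocation starts the list, later ones are appended at the tail. A reduced user-space sketch of that growth pattern (mem_chunk_t and snapshot_grow are made-up names for illustration, not the di_mem machinery):

    #include <stdlib.h>

    typedef struct mem_chunk {
        struct mem_chunk *next;    /* link to next chunk */
        size_t            size;
        char              buf[];   /* chunk payload */
    } mem_chunk_t;

    /* Append one more chunk of 'chunk_size' bytes to the snapshot list. */
    static mem_chunk_t *
    snapshot_grow(mem_chunk_t **head, size_t chunk_size)
    {
        mem_chunk_t *mc = malloc(sizeof (*mc) + chunk_size);

        if (mc == NULL)
            return (NULL);
        mc->next = NULL;
        mc->size = chunk_size;

        if (*head == NULL) {            /* first chunk */
            *head = mc;
        } else {                        /* walk to the tail and append */
            mem_chunk_t *tail = *head;

            while (tail->next != NULL)
                tail = tail->next;
            tail->next = mc;
        }
        return (mc);
    }
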
/illumos-gate/usr/src/uts/common/io/rge/
rge_main.c  208 * Utility routine to carve a slice off a chunk of allocated memory,
209 * updating the chunk descriptor accordingly. The size of the slice
213 rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, argument
220 ASSERT(totsize <= chunk->alength);
222 *slice = *chunk;
228 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
229 chunk->alength -= totsize;
230 chunk->offset += totsize;
231 chunk
[all...]
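
The rge, nge, and bge drivers below all carry the same helper: hand out a slice from the front of a pre-allocated DMA area and advance the remaining chunk descriptor past it. A simplified sketch of that bookkeeping, with a reduced dma_area-like struct (the mem_va, alength, and offset fields follow the excerpt; everything else is assumed):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Reduced stand-in for the drivers' dma_area_t. */
    typedef struct {
        void     *mem_va;       /* start of the remaining memory */
        size_t    alength;      /* bytes still available */
        uint64_t  offset;       /* offset of mem_va within the original area */
    } dma_area_t;

    /*
     * Carve 'count' slices of 'sliceunit' bytes off the front of 'chunk'
     * into 'slice', then shrink 'chunk' so the next caller starts where
     * this slice ends.
     */
    static void
    slice_chunk(dma_area_t *slice, dma_area_t *chunk,
        uint32_t count, size_t sliceunit)
    {
        size_t totsize = count * sliceunit;

        assert(totsize <= chunk->alength);

        *slice = *chunk;
        slice->alength = totsize;

        chunk->mem_va = (char *)chunk->mem_va + totsize;
        chunk->alength -= totsize;
        chunk->offset += totsize;
    }
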
/illumos-gate/usr/src/uts/common/io/sfxge/common/
ef10_nvram.c  1139 /* Read initial chunk of the segment, starting at offset */
1873 size_t chunk; local
1877 chunk = MIN(size, EF10_NVRAM_CHUNK);
1880 data, chunk, mode)) != 0) {
1884 size -= chunk;
1885 data += chunk;
1886 offset += chunk;
1966 size_t chunk; local
1976 * Check that the size is a multiple of the write chunk size if
1977 * the write chunk siz
[all...]
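
The ef10_nvram.c hits show the standard bounded-transfer loop: clamp each transfer to the device's chunk size, then advance size, data, and offset together. A hedged, generic version of that loop (read_fn and the 128-byte chunk size are placeholders, not the sfxge API):

    #include <stddef.h>
    #include <stdint.h>

    #define NVRAM_CHUNK 128     /* placeholder transfer granularity */
    #define MIN(a, b)   ((a) < (b) ? (a) : (b))

    typedef int (*read_fn_t)(uint32_t offset, uint8_t *buf, size_t len);

    /* Read 'size' bytes starting at 'offset', one bounded chunk at a time. */
    static int
    chunked_read(read_fn_t read_fn, uint32_t offset, uint8_t *data, size_t size)
    {
        while (size > 0) {
            size_t chunk = MIN(size, (size_t)NVRAM_CHUNK);
            int rc = read_fn(offset, data, chunk);

            if (rc != 0)
                return (rc);
            size   -= chunk;
            data   += chunk;
            offset += chunk;
        }
        return (0);
    }
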
/illumos-gate/usr/src/boot/sys/boot/zfs/
zfs.c  294 int chunk; local
307 * Figure out which chunk we are currently looking at
309 * low bits of f_seekp as a simple chunk index.
312 chunk = fp->f_seekp & (bsize - 1);
313 if (chunk == ZAP_LEAF_NUMCHUNKS(&zl)) {
315 chunk = 0;
331 zc = &ZAP_LEAF_CHUNK(&zl, chunk);
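The boot-loader zfs.c folds the ZAP chunk index into the low bits of the file seek pointer; because the block size is a power of two, the index comes back out with a simple mask. A small sketch of that encoding (cursor_pack/cursor_unpack are illustrative names, not the loader's API):

    #include <stdint.h>

    /*
     * Pack a (block, chunk) cursor into one 64-bit seek pointer, assuming
     * bsize is a power of two and chunk < bsize.
     */
    static uint64_t
    cursor_pack(uint64_t block, uint64_t chunk, uint64_t bsize)
    {
        return ((block * bsize) | chunk);
    }

    static void
    cursor_unpack(uint64_t seekp, uint64_t bsize,
        uint64_t *block, uint64_t *chunk)
    {
        *chunk = seekp & (bsize - 1);   /* low bits: chunk index in the leaf */
        *block = seekp / bsize;         /* high bits: which leaf block */
    }
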
/illumos-gate/usr/src/lib/librsm/common/
rsmlib.c  401 rsm_pollfd_chunk_t *chunk; local
407 chunk = pollfd_table.buckets[hash];
408 while (chunk) {
409 if (chunk->nfree > 0)
411 chunk = chunk->next;
414 if (!chunk) { /* couldn't find a free chunk - allocate a new one */
415 chunk = malloc(sizeof (rsm_pollfd_chunk_t));
416 if (!chunk) {
462 rsm_pollfd_chunk_t *chunk; local
502 rsm_pollfd_chunk_t *chunk; local
[all...]
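
librsm keeps pollfd slots in chunks chained off hash buckets; a lookup walks the bucket chain for a chunk with a free slot and allocates a fresh chunk only if none has room. A simplified sketch of that search-then-allocate step (the types here are stand-ins, not the librsm structures):

    #include <stdlib.h>

    #define SLOTS_PER_CHUNK 32

    typedef struct pollfd_chunk {
        struct pollfd_chunk *next;
        int                  nfree;                 /* unused slots remaining */
        int                  fds[SLOTS_PER_CHUNK];
    } pollfd_chunk_t;

    /* Find a chunk with room in 'bucket', or chain on a new empty one. */
    static pollfd_chunk_t *
    find_or_add_chunk(pollfd_chunk_t **bucket)
    {
        pollfd_chunk_t *chunk = *bucket;

        while (chunk != NULL) {
            if (chunk->nfree > 0)
                return (chunk);                     /* reuse existing space */
            chunk = chunk->next;
        }

        /* no chunk with a free slot: allocate one and push it on the bucket */
        chunk = calloc(1, sizeof (*chunk));
        if (chunk == NULL)
            return (NULL);
        chunk->nfree = SLOTS_PER_CHUNK;
        chunk->next = *bucket;
        *bucket = chunk;
        return (chunk);
    }
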
/illumos-gate/usr/src/cmd/praudit/
format.c  1483 hexconvert(char *c, int size, int chunk) argument
1496 if (chunk > size || chunk <= 0)
1497 chunk = size;
1499 numchunks = size / chunk;
1500 leftovers = size % chunk;
1509 for (k = 0; k < chunk; k++) {
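praudit's hexconvert() walks a buffer in fixed-size chunks, with a final partial pass for the remainder (numchunks whole chunks plus leftovers bytes). A minimal stand-alone version of the same division-into-chunks idea, not the praudit formatting itself:

    #include <stdio.h>

    /* Print 'size' bytes of 'buf' as hex, 'chunk' bytes per output line. */
    static void
    hex_by_chunk(const unsigned char *buf, int size, int chunk)
    {
        int numchunks, leftovers, i, k;

        if (size <= 0)
            return;
        if (chunk > size || chunk <= 0)
            chunk = size;               /* fall back to one big chunk */

        numchunks = size / chunk;
        leftovers = size % chunk;

        for (i = 0; i < numchunks; i++) {
            for (k = 0; k < chunk; k++)
                (void) printf("%02x", buf[i * chunk + k]);
            (void) printf("\n");
        }
        for (k = 0; k < leftovers; k++)
            (void) printf("%02x", buf[numchunks * chunk + k]);
        if (leftovers)
            (void) printf("\n");
    }
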
/illumos-gate/usr/src/cmd/ptools/pmap/
pmap.c  84 * Per-page information for a memory chunk.
87 * chunks. The chunk information is stored in the memory_chunk structure.
1468 mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz) argument
1470 chunk->end_addr = end;
1471 chunk->page_size = psz;
1472 chunk->page_index = 0;
1473 chunk->chunk_start = chunk->chunk_end = 0;
1477 * Create a new chunk of addresses starting from vaddr.
1478 * Pass the whole chunk t
1482 mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr) argument
1611 addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz) argument
[all...]
/illumos-gate/usr/src/cmd/cmd-inet/usr.sbin/snoop/
snoop_ldap.c  1419 int chunk = 16; /* 16 bytes per line */ local
1423 for (p = data; p < data + datalen; p += chunk) {
1426 len = MIN(chunk, left);
1432 for (i = 0; i < (chunk - left) / 2; i++)
/illumos-gate/usr/src/cmd/isns/isnsd/
obj.c  345 * chunk- which chunk of the hash table.
355 uint16_t chunk,
386 * chunk- which chunk of the hash table.
396 uint16_t chunk,
426 * chunk- which chunk of the hash table.
436 uint16_t chunk,
479 * chunk
352 entity_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
393 iscsi_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
433 portal_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
486 pg_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
556 dd_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
596 dds_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
636 obj_hval( void *p, uint16_t chunk, uint32_t *flags ) argument
[all...]
/illumos-gate/usr/src/uts/common/io/idm/
idm_so.c  2565 size_t remainder, chunk; local
2585 /* check to see if we need to chunk the data */
2587 chunk = max_dataseglen;
2589 chunk = remainder;
2614 hton24(bhs->dlength, chunk);
2619 pdu->isp_datalen = (uint_t)chunk;
2621 if (chunk == remainder) {
2635 remainder -= chunk;
2636 data_offset += chunk;
2664 idt->idt_tx_bytes += chunk;
[all...]
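
idm_so.c splits a payload into PDU-sized pieces: each pass sends at most max_dataseglen bytes, and the final PDU is the one whose chunk consumes the whole remainder. A hedged sketch of that segmentation loop (send_pdu is a placeholder callback, not the iDM API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef int (*send_pdu_fn_t)(const uint8_t *data, size_t len,
        size_t offset, bool final);

    /* Send 'len' bytes as a sequence of PDUs no larger than max_dataseglen. */
    static int
    send_in_chunks(send_pdu_fn_t send_pdu, const uint8_t *data, size_t len,
        size_t max_dataseglen)
    {
        size_t remainder = len;
        size_t data_offset = 0;

        while (remainder > 0) {
            size_t chunk = (remainder > max_dataseglen) ?
                max_dataseglen : remainder;
            bool final = (chunk == remainder);  /* last PDU of the burst */
            int rc = send_pdu(data + data_offset, chunk, data_offset, final);

            if (rc != 0)
                return (rc);
            remainder -= chunk;
            data_offset += chunk;
        }
        return (0);
    }
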
/illumos-gate/usr/src/uts/common/io/uath/
uath.c  1560 struct uath_chunk *chunk; local
1597 chunk = (struct uath_chunk *)rxbuf;
1598 if (chunk->seqnum == 0 && chunk->flags == 0 && chunk->length == 0) {
1606 if (chunk->seqnum != sc->sc_intrx_nextnum) {
1609 chunk->seqnum, sc->sc_intrx_nextnum);
1616 /* check multi-chunk frames */
1617 if ((chunk->seqnum == 0 && !(chunk
1989 struct uath_chunk *chunk; local
2324 struct uath_chunk *chunk; local
[all...]
/illumos-gate/usr/src/uts/common/io/lvm/mirror/
mirror_resync.c  1900 diskaddr_t chunk; local
1984 chunk = un->c.un_total_blocks / 1000;
1986 chunk = un->c.un_total_blocks / 100;
1987 if (chunk == 0)
1988 chunk = un->c.un_total_blocks;
1990 * If a MN set, round the chunk size up to a multiple of
1994 chunk = ((chunk + MD_DEF_RESYNC_BLK_SZ)/MD_DEF_RESYNC_BLK_SZ)
1996 if (chunk > un->c.un_total_blocks)
1997 chunk
[all...]
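
The mirror resync code picks a progress chunk as a fraction of the total block count, then rounds it up to a whole resync block and clamps it to the total. A self-contained sketch of that sizing arithmetic (the RESYNC_BLK_SZ value is assumed, the 1/1000 vs 1/100 choice is collapsed into a divisor parameter, and the ceiling rounding here is a standard form rather than the driver's exact expression):

    #include <stdint.h>

    #define RESYNC_BLK_SZ 8192ULL   /* assumed resync block size, in blocks */

    /*
     * Pick a resync chunk: roughly total_blocks/divisor, never zero,
     * rounded up to a multiple of RESYNC_BLK_SZ, and never larger than
     * the total.
     */
    static uint64_t
    resync_chunk_size(uint64_t total_blocks, uint64_t divisor)
    {
        uint64_t chunk = total_blocks / divisor;

        if (chunk == 0)
            chunk = total_blocks;

        /* round up to a whole resync block */
        chunk = ((chunk + RESYNC_BLK_SZ - 1) / RESYNC_BLK_SZ) * RESYNC_BLK_SZ;

        if (chunk > total_blocks)
            chunk = total_blocks;

        return (chunk);
    }
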
/illumos-gate/usr/src/uts/common/io/nge/
nge_main.c  268 * Utility routine to carve a slice off a chunk of allocated memory,
269 * updating the chunk descriptor accordingly. The size of the slice
273 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, argument
280 ASSERT(totsize <= chunk->alength);
282 *slice = *chunk;
287 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
288 chunk->alength -= totsize;
289 chunk->offset += totsize;
290 chunk
[all...]
/illumos-gate/usr/src/uts/sun4u/io/
opl_cfg.c  242 int chunk; local
245 for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
246 printf("\t%d 0x%lx 0x%lx\n", chunk,
247 mem->mem_chunks[chunk].chnk_start_address,
248 mem->mem_chunks[chunk].chnk_size);
/illumos-gate/usr/src/grub/grub-0.97/stage2/
fsys_zfs.c  630 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk, argument
637 &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
640 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
645 chunk = la->la_next;
663 uint16_t chunk; local
672 for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
673 chunk != CHAIN_END; chunk = le->le_next) {
675 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
678 le = ZAP_LEAF_ENTRY(l, blksft, chunk);
[all...]
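
The GRUB zfs reader walks a ZAP leaf's hash chain as indices into a chunk array, treating an out-of-range index as corruption and CHAIN_END as the terminator. A generic sketch of that index-linked walk, with the entry layout heavily simplified for illustration:

    #include <stdint.h>

    #define CHAIN_END 0xffff        /* list terminator used by the format */

    typedef struct {
        uint16_t le_next;           /* index of next entry, or CHAIN_END */
        uint64_t le_hash;
    } leaf_entry_t;

    /*
     * Follow the chain starting at 'head' looking for 'hash'.  Returns the
     * matching index, CHAIN_END if absent, or -1 on a corrupt link.
     */
    static int32_t
    leaf_chain_lookup(const leaf_entry_t *entries, uint16_t numchunks,
        uint16_t head, uint64_t hash)
    {
        uint16_t chunk;

        for (chunk = head; chunk != CHAIN_END;
            chunk = entries[chunk].le_next) {
            if (chunk >= numchunks)
                return (-1);        /* corrupt chain */
            if (entries[chunk].le_hash == hash)
                return (chunk);
        }
        return (CHAIN_END);
    }
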
/illumos-gate/usr/src/uts/common/io/bge/
bge_main2.c  2172 * Utility routine to carve a slice off a chunk of allocated memory,
2173 * updating the chunk descriptor accordingly. The size of the slice
2177 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, argument
2184 ASSERT(totsize <= chunk->alength);
2186 *slice = *chunk;
2192 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
2193 chunk->alength -= totsize;
2194 chunk->offset += totsize;
2195 chunk
[all...]
/illumos-gate/usr/src/uts/common/crypto/io/
crypto.c  460 minor_t chunk = crypto_minor_chunk; local
465 big_count = crypto_minors_count + chunk;
473 (crypto_minors_table_count + chunk);
504 crypto_minors_table_count += chunk;
/illumos-gate/usr/src/cmd/mdb/common/modules/genunix/
kmem.c  1840 kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *) local
1844 klw->klw_sorted[k++] = &chunk[j];
3616 "representing a single chunk of data. Only ALLOC segments have debugging\n"
/illumos-gate/usr/src/uts/common/avs/ns/dsw/
dsw_dev.c  161 #define _ii_unlock_chunk(ip, chunk) _ii_unlock_chunks(ip, chunk, 1)
1881 * copy with a bitmap file stored mapping for chunk locations.
1892 /* do not add in partial chunk at end */
3999 /* take 1 off as chunk 0 contains header */
6509 * avoid deadlocks on the same chunk.
6674 * Avoid deadlock with COW on same chunk of sibling shadow
6675 * by unlocking this chunk before copying all other sibling
6680 * Only using a single chunk when copying to master avoids
6733 * Reacquire chunk loc
7250 _ii_lock_chunk(_ii_info_t *ip, chunkid_t chunk) argument
7287 _ii_trylock_chunk(_ii_info_t *ip, chunkid_t chunk) argument
7322 _ii_unlock_chunks(_ii_info_t *ip, chunkid_t chunk, int n) argument
7801 _ii_ab_tst_shd_bit(_ii_info_t *ip, chunkid_t chunk) argument
7838 _ii_ab_set_shd_bit(_ii_info_t *ip, chunkid_t chunk) argument
7887 _ii_ab_tst_copy_bit(_ii_info_t *ip, chunkid_t chunk) argument
7924 _ii_ab_set_copy_bit(_ii_info_t *ip, chunkid_t chunk) argument
7974 _ii_ab_clr_copy_bits(_ii_info_t *ip, chunkid_t chunk, int nchunks) argument
8090 chunkid_t chunk; local
8552 _ii_km_tst_shd_bit(_ii_info_t *ip, chunkid_t chunk) argument
8575 _ii_km_set_shd_bit(_ii_info_t *ip, chunkid_t chunk) argument
8604 _ii_km_tst_copy_bit(_ii_info_t *ip, chunkid_t chunk) argument
8627 _ii_km_set_copy_bit(_ii_info_t *ip, chunkid_t chunk) argument
8653 _ii_km_clr_copy_bits(_ii_info_t *ip, chunkid_t chunk, int nchunks) argument
8776 _ii_km_next_copy_bit(_ii_info_t *ip, chunkid_t chunk, chunkid_t maxchunk, int want, int *got) argument
[all...]
/illumos-gate/usr/src/cmd/mdb/common/modules/libumem/
umem.c  1773 caddr_t chunk = (caddr_t) local
1778 ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk;
1779 chunk += UMEM_BUFCTL_AUDIT_SIZE;
3271 "representing a single chunk of data. Only ALLOC segments have debugging\n"
3874 size_t um_malloc_overhead; /* sum of in-chunk overheads */
4242 * compute the per-slab overhead, we just subtract the chunk usage
4247 * |////color///| chunk | chunk | ... | chunk |/color/|/slab//|
4254 * the external umem_slab_t and per-chunk bufct
[all...]
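
The umem dcmd excerpt explains that per-slab overhead is what remains after subtracting chunk usage from the slab: coloring at either end, per-chunk debugging space, and the slab bookkeeping itself. A hedged worked form of that subtraction (field names and the split into two terms are illustrative, not the mdb module's own accounting):

    #include <stddef.h>

    /*
     * Everything in a slab that is not chunk payload is overhead:
     * leading/trailing color, per-chunk debugging space (redzones,
     * bufctl pointers), and slab slack.  Assumes chunk_size >= buf_size
     * and slab_size >= nchunks * chunk_size.
     */
    static size_t
    slab_overhead(size_t slab_size, size_t nchunks, size_t chunk_size,
        size_t buf_size)
    {
        size_t chunk_usage   = nchunks * chunk_size;  /* laid out as chunks */
        size_t chunk_payload = nchunks * buf_size;    /* what callers get */

        return ((chunk_usage - chunk_payload) + (slab_size - chunk_usage));
    }
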
/illumos-gate/usr/src/uts/sun4u/starcat/io/
drmach.c  6205 mem_chunk_t *chunk; local
6224 chunk = gdcd->dcd_chunk_list.dcl_chunk;
6227 if ((chunk->mc_base_pa & mask) == pa) {
6228 sz += chunk->mc_mbytes * 1048576;
6231 ++chunk;
6624 mem_chunk_t *chunk; local
6639 chunk = gdcd->dcd_chunk_list.dcl_chunk;
6642 if ((chunk->mc_base_pa & mask) == pa) {
6643 mlist = memlist_add_span(mlist, chunk->mc_base_pa,
6644 chunk
[all...]
