Lines matching refs:ra (references to the receive-side argument struct receive_arg *ra; the leading number on each entry is the source line of the hit)
1875 receive_read(struct receive_arg *ra, int len, void *buf)
1888 ra->err = vn_rdwr(UIO_READ, ra->vp,
1890 ra->voff, UIO_SYSSPACE, FAPPEND,
1898 ra->err = SET_ERROR(ECKSUM);
1900 ra->voff += len - done - resid;
1902 if (ra->err != 0)
1903 return (ra->err);
1906 ra->bytes_read += len;
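
receive_read() pulls exactly len bytes of the send stream out of the vnode ra->vp, looping over vn_rdwr() until the request is satisfied and advancing ra->voff and ra->bytes_read as it goes; hitting end-of-stream early is reported as ECKSUM (truncated stream). A minimal user-space sketch of that read-exactly-N-bytes loop, using read(2) in place of vn_rdwr() and EIO in place of the kernel's SET_ERROR(ECKSUM); the names here are illustrative, not the kernel routine:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Keep issuing reads until `len` bytes have arrived, advancing a running
 * byte counter, and treat a premature end-of-stream as an error, the way
 * receive_read() treats a short vn_rdwr() result.
 */
static int
read_exact(int fd, void *buf, size_t len, uint64_t *bytes_read)
{
        size_t done = 0;

        while (done < len) {
                ssize_t n = read(fd, (char *)buf + done, len - done);
                if (n < 0)
                        return (errno); /* I/O error on the stream */
                if (n == 0)
                        return (EIO);   /* truncated; the kernel uses ECKSUM */
                done += n;
        }
        *bytes_read += len;             /* mirrors ra->bytes_read += len */
        return (0);
}
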
2418 receive_cksum(struct receive_arg *ra, int len, void *buf)
2420 if (ra->byteswap) {
2421 fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2423 fletcher_4_incremental_native(buf, len, &ra->cksum);
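
receive_cksum() folds everything read from the stream into the running fletcher-4 checksum kept in ra->cksum, using the byteswapping variant when the stream was produced on a host of the opposite endianness. A simplified sketch of the incremental update that fletcher_4_incremental_native() performs (the real routine also has SIMD implementations; this version assumes the buffer length is a multiple of four bytes):

#include <stddef.h>
#include <stdint.h>

/* Four running 64-bit sums, as in the zio_cksum_t that ra->cksum holds. */
typedef struct {
        uint64_t        zc_word[4];
} cksum4_t;

/*
 * Fold `size` bytes into the running checksum state: each 32-bit word
 * updates four cascading sums, so the state can be carried from one
 * record to the next exactly as ra->cksum is.
 */
static void
fletcher4_incremental(const void *buf, size_t size, cksum4_t *zcp)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a = zcp->zc_word[0], b = zcp->zc_word[1];
        uint64_t c = zcp->zc_word[2], d = zcp->zc_word[3];

        for (; ip < ipend; ip++) {
                a += *ip;
                b += a;
                c += b;
                d += c;
        }
        zcp->zc_word[0] = a;
        zcp->zc_word[1] = b;
        zcp->zc_word[2] = c;
        zcp->zc_word[3] = d;
}

The byteswapping twin used when ra->byteswap is set follows the same shape, byte-swapping each 32-bit word before folding it in.
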
2430 * Allocate ra->next_rrd and read the next record's header into
2431 * ra->next_rrd->header.
2435 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2441 err = receive_read(ra, len, buf);
2444 receive_cksum(ra, len, buf);
2447 if (ra->rrd != NULL) {
2448 ra->rrd->payload = buf;
2449 ra->rrd->payload_size = len;
2450 ra->rrd->bytes_read = ra->bytes_read;
2454 ra->prev_cksum = ra->cksum;
2456 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2457 err = receive_read(ra, sizeof (ra->next_rrd->header),
2458 &ra->next_rrd->header);
2459 ra->next_rrd->bytes_read = ra->bytes_read;
2461 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2462 ra->next_rrd = NULL;
2465 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2466 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2467 ra->next_rrd = NULL;
2477 receive_cksum(ra,
2479 &ra->next_rrd->header);
2482 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2484 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2486 if (ra->byteswap)
2487 byteswap_record(&ra->next_rrd->header);
2490 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2491 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2492 ra->next_rrd = NULL;
2496 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
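
receive_read_payload_and_next_header() is the heart of the reader: it attaches the current record's payload to ra->rrd, remembers the running checksum in ra->prev_cksum, then allocates ra->next_rrd and reads the following header, treating a DRR_BEGIN that shows up mid-stream as an error and verifying the checksum embedded in the header. The verification pattern is: checksum the header bytes that precede the embedded checksum, compare against the running state, then fold the checksum field itself back in so coverage continues into the next record. A sketch of that pattern, reusing the read_exact() and fletcher4_incremental() helpers above and a hypothetical, much smaller header than the real dmu_replay_record_t:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical stream header: a payload length plus the checksum of
 * every stream byte that precedes the checksum field itself.
 */
struct rec_header {
        uint64_t        len;
        cksum4_t        cksum;
};

static int
read_next_header(int fd, struct rec_header *hdr, cksum4_t *running,
    uint64_t *bytes_read)
{
        int err = read_exact(fd, hdr, sizeof (*hdr), bytes_read);
        if (err != 0)
                return (err);

        /* Checksum the header bytes up to, but not including, the checksum. */
        fletcher4_incremental(hdr, offsetof(struct rec_header, cksum), running);

        /* The value the sender embedded must match what we computed. */
        if (memcmp(&hdr->cksum, running, sizeof (*running)) != 0)
                return (EIO);   /* the kernel returns SET_ERROR(ECKSUM) here */

        /* The checksum field itself is covered by the next record's check. */
        fletcher4_incremental(&hdr->cksum, sizeof (hdr->cksum), running);
        return (0);
}

The kernel version additionally byteswaps the header when ra->byteswap is set, comparing against the checksum bytes as they appeared on the wire.
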
2580 receive_read_prefetch(struct receive_arg *ra,
2583 if (!objlist_exists(&ra->ignore_objlist, object)) {
2584 dmu_prefetch(ra->os, object, 1, offset, length,
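
receive_read_prefetch() issues a dmu_prefetch() for the range a write-type record is about to touch, unless the object is on ra->ignore_objlist, the list of objects whose on-disk contents the stream is about to replace and whose old blocks are therefore not worth prefetching. Because send-stream records arrive in non-decreasing object order, the ignore list can be consumed from the front rather than searched; a small array-based sketch of that idea (the kernel uses a linked objlist, and the names below are illustrative):

#include <stddef.h>
#include <stdint.h>

struct ignore_list {
        const uint64_t  *objs;          /* object numbers, ascending order */
        size_t          count;
        size_t          next;           /* first entry not yet passed */
};

/*
 * Discard entries for objects the stream has already moved past, then
 * check whether the current object is the next one to ignore.  This is
 * the same forward-only scan objlist_exists() performs on its list.
 */
static int
ignore_list_contains(struct ignore_list *il, uint64_t object)
{
        while (il->next < il->count && il->objs[il->next] < object)
                il->next++;
        return (il->next < il->count && il->objs[il->next] == object);
}

Objects land on the list from the DRR_OBJECT case (the objlist_insert() hit at line 2616) when the incoming object definition makes the existing data moot.
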
2593 receive_read_record(struct receive_arg *ra)
2597 switch (ra->rrd->header.drr_type) {
2600 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2604 err = receive_read_payload_and_next_header(ra, size, buf);
2609 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2616 objlist_insert(&ra->ignore_objlist, drro->drr_object);
2623 err = receive_read_payload_and_next_header(ra, 0, NULL);
2628 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2637 dmu_objset_spa(ra->os),
2641 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2645 err = receive_read_payload_and_next_header(ra,
2651 ra->rrd->write_buf = abuf;
2652 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2659 &ra->rrd->header.drr_u.drr_write_byref;
2660 err = receive_read_payload_and_next_header(ra, 0, NULL);
2661 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2668 &ra->rrd->header.drr_u.drr_write_embedded;
2672 err = receive_read_payload_and_next_header(ra, size, buf);
2678 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2688 err = receive_read_payload_and_next_header(ra, 0, NULL);
2693 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2694 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2700 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2702 err = receive_read_payload_and_next_header(ra, drrs->drr_length,
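
receive_read_record() is the per-record dispatcher: each drr_type decides how much payload accompanies the record (zero for free, byref, and end records), every case finishes by calling receive_read_payload_and_next_header(), and write-style records also prefetch their target range so the apply thread finds the blocks cached. A trimmed-down sketch of that shape, with hypothetical record types standing in for the real drr_* values and reusing the helpers sketched above:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

enum rec_type { REC_OBJECT, REC_WRITE, REC_FREE, REC_END };

struct record {
        enum rec_type   type;
        uint64_t        object;
        uint64_t        offset;
        uint64_t        length;         /* payload bytes carried by this record */
};

static int
read_one_record(int fd, struct record *rec, void **payloadp,
    cksum4_t *running, uint64_t *bytes_read)
{
        uint64_t psize;

        switch (rec->type) {
        case REC_OBJECT:
        case REC_WRITE:
                psize = rec->length;
                break;
        case REC_FREE:
        case REC_END:
                psize = 0;              /* header only, nothing to read */
                break;
        default:
                return (EINVAL);
        }

        if (psize != 0) {
                *payloadp = malloc(psize);
                if (*payloadp == NULL)
                        return (ENOMEM);
                int err = read_exact(fd, *payloadp, psize, bytes_read);
                if (err != 0) {
                        free(*payloadp);
                        *payloadp = NULL;
                        return (err);
                }
                fletcher4_incremental(*payloadp, psize, running);
        }
        /* Write-type records would prefetch the target range here. */
        return (0);
}

The real DRR_WRITE case avoids the allocate-and-copy above: it loans a buffer from the ARC (arc_loan_buf(), line 2641), reads the payload straight into it, and stashes it in ra->rrd->write_buf for the apply thread. DRR_END instead cross-checks drre->drr_checksum against ra->prev_cksum (line 2694).
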
2821 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2824 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2825 uint64_t dsobj = dmu_objset_id(ra->os);
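
resume_check() runs only for resumed receives: it pulls the resume object and offset out of the DRR_BEGIN record's nvlist payload and compares them against the checkpoint the destination dataset recorded in its MOS ZAP entry when the earlier receive was interrupted; any mismatch fails the receive before a single record is applied. A sketch of that comparison with plain structs standing in for the nvlist and ZAP lookups (field names are illustrative):

#include <stdint.h>

/*
 * Hypothetical resume checkpoint: the object/offset the interrupted
 * receive had reached.  The destination keeps these on disk and the
 * sender echoes them back in the DRR_BEGIN payload of a resumed stream.
 */
struct resume_state {
        uint64_t        resume_object;
        uint64_t        resume_offset;
};

static int
resume_state_matches(const struct resume_state *from_stream,
    const struct resume_state *saved_on_disk)
{
        return (from_stream->resume_object == saved_on_disk->resume_object &&
            from_stream->resume_offset == saved_on_disk->resume_offset);
}
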
2863 struct receive_arg ra = { 0 };
2868 ra.byteswap = drc->drc_byteswap;
2869 ra.cksum = drc->drc_cksum;
2870 ra.vp = vp;
2871 ra.voff = *voffp;
2876 sizeof (ra.bytes_read), 1, &ra.bytes_read);
2879 objlist_create(&ra.ignore_objlist);
2889 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2900 ra.err = SET_ERROR(EBADF);
2903 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2904 if (ra.err != 0) {
2918 if (ra.err != 0)
2923 if (ra.err != 0)
2935 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2949 err = resume_check(&ra, begin_nvl);
2958 rwa.os = ra.os;
2974 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2977 * we free ra.rrd and exit.
2985 ASSERT3P(ra.rrd, ==, NULL);
2986 ra.rrd = ra.next_rrd;
2987 ra.next_rrd = NULL;
2988 /* Allocates and loads header into ra.next_rrd */
2989 err = receive_read_record(&ra);
2991 if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2992 kmem_free(ra.rrd, sizeof (*ra.rrd));
2993 ra.rrd = NULL;
2997 bqueue_enqueue(&rwa.q, ra.rrd,
2998 sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2999 ra.rrd = NULL;
3001 if (ra.next_rrd == NULL)
3002 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
3003 ra.next_rrd->eos_marker = B_TRUE;
3004 bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
3032 *voffp = ra.voff;
3033 objlist_destroy(&ra.ignore_objlist);
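
dmu_recv_stream() seeds ra from the drc handle (byteswap flag, running checksum, vnode, offset), restores ra.bytes_read from the resume ZAP when the receive is resumable, reads the DRR_BEGIN payload, and then runs the producer side of a two-thread pipeline: each pass promotes ra.next_rrd to ra.rrd, calls receive_read_record() to read that record's payload plus the following header, and hands completed records to the receive-writer thread over a bounded queue, terminating the queue with an eos_marker record. A sketch of that hand-off shape, with enqueue() and read_record() as hypothetical stand-ins for bqueue_enqueue() and receive_read_record():

#include <stdlib.h>

struct rec_arg {
        int     eos_marker;             /* last entry the consumer will see */
        /* header, payload, payload_size, bytes_read ... */
};

/* Hypothetical hooks for bqueue_enqueue() / receive_read_record(). */
extern void enqueue(struct rec_arg *rec);
extern int read_record(struct rec_arg *rec, struct rec_arg **next_recp,
    int *is_end);

static void
produce_records(struct rec_arg *next_rec)
{
        int err = 0;

        while (err == 0) {
                struct rec_arg *rec = next_rec;         /* ra.rrd = ra.next_rrd */
                int is_end = 0;

                next_rec = NULL;
                /* Reads rec's payload and parses the header that follows it. */
                err = read_record(rec, &next_rec, &is_end);
                if (is_end || err != 0) {
                        free(rec);                      /* never reaches the worker */
                        break;
                }
                enqueue(rec);                           /* consumed by the apply thread */
        }

        /* Always terminate the queue so the consumer thread exits. */
        if (next_rec == NULL) {
                next_rec = calloc(1, sizeof (*next_rec));
                if (next_rec == NULL)
                        return;         /* the kernel's KM_SLEEP cannot fail */
        }
        next_rec->eos_marker = 1;
        enqueue(next_rec);
}

After the pipeline drains, the caller gets the final stream offset back through *voffp (line 3032) and the prefetch ignore list is torn down with objlist_destroy() (line 3033).
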