Lines Matching defs:krdc

118 rdc_k_info_t *krdc;
125 krdc = &rdc_k_info[index];
127 ASSERT(krdc->index == index);
178 * the nsc_fd to krdc->diskqfd
181 rdc_open_diskq(rdc_k_info_t *krdc)
190 grp = krdc->group;
191 urdc = &rdc_u_info[krdc->index];
347 rdc_fail_diskq(rdc_k_info_t *krdc, int wait, int flag)
350 rdc_u_info_t *q = &rdc_u_info[krdc->index];
351 rdc_group_t *group = krdc->group;
352 disk_queue *dq = &krdc->group->diskq;
361 rdc_group_enter(krdc);
362 rdc_group_log(krdc, RDC_NOFLUSH | RDC_ALLREMOTE,
364 rdc_group_exit(krdc);
382 bzero(krdc->bitmap_ref, krdc->bitmap_size * BITS_IN_BYTE *
386 rdc_group_enter(krdc);
396 for (p = krdc->group_next; p != krdc; p = p->group_next) {
410 rdc_group_exit(krdc);
426 rdc_stamp_diskq(rdc_k_info_t *krdc, int rsrvd, int failflags)
435 grp = krdc->group;
436 q = &krdc->group->diskq;
440 urdc = &rdc_u_info[krdc->index];
446 rdc_fail_diskq(krdc, RDC_NOWAIT, failflags);
457 rdc_fail_diskq(krdc, RDC_NOWAIT, failflags);
483 rdc_fail_diskq(krdc, RDC_NOWAIT, failflags);
541 rdc_unfail_diskq(rdc_k_info_t *krdc)
544 rdc_u_info_t *q = &rdc_u_info[krdc->index];
545 rdc_group_t *group = krdc->group;
548 rdc_group_enter(krdc);
552 rdc_group_exit(krdc);
556 for (p = krdc->group_next; p != krdc; p = p->group_next) {
565 rdc_group_exit(krdc);
572 krdc->aux_state &= ~RDC_AUXSYNCIP;
573 if (rdc_stamp_diskq(krdc, 0, RDC_GROUP_LOCKED | RDC_DOLOG) < 0) {
591 krdc->aux_state |= RDC_AUXSYNCIP;
597 rdc_read_diskq_header(rdc_k_info_t *krdc)
601 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
603 if (krdc->group->diskqfd == NULL) {
613 header = &krdc->group->diskq.disk_hdr.h;
614 if (_rdc_rsrv_diskq(krdc->group)) {
618 rc = rdc_ns_io(krdc->group->diskqfd, NSC_RDBUF, 0,
621 _rdc_rlse_diskq(krdc->group);
639 rdc_stop_diskq_flusher(rdc_k_info_t *krdc)
646 group = krdc->group;
647 qp = &krdc->group->diskq;
661 rdc_group_exit(krdc);
666 rdc_group_enter(krdc);
680 rdc_enable_diskq(rdc_k_info_t *krdc)
685 group = krdc->group;
688 if (rdc_open_diskq(krdc) < 0)
694 if (rdc_stamp_diskq(krdc, 0, RDC_NOLOG) < 0) {
718 rdc_resume_diskq(rdc_k_info_t *krdc)
725 urdc = &rdc_u_info[krdc->index];
726 group = krdc->group;
729 if (rdc_open_diskq(krdc) < 0) {
738 if (rdc_read_diskq_header(krdc) < 0) {
815 if (rdc_stamp_diskq(krdc, 0, RDC_NOLOG) < 0) {
816 rdc_fail_diskq(krdc, RDC_NOWAIT, RDC_NOLOG);
842 rdc_suspend_diskq(rdc_k_info_t *krdc)
845 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
848 q = &krdc->group->diskq;
854 if ((krdc->group->rdc_thrnum) && (!IS_QSTATE(q, RDC_STOPPINGFLUSH))) {
856 rdc_stop_diskq_flusher(krdc);
860 krdc->group->diskq.disk_hdr.h.state &= ~RDC_SHUTDOWN_BAD;
861 krdc->group->diskq.disk_hdr.h.state |= RDC_SHUTDOWN_OK;
862 krdc->group->diskq.disk_hdr.h.state &= ~RDC_QBADRESUME;
865 if (krdc->group->rdc_thrnum) {
868 rdc_group_exit(krdc);
870 while (krdc->group->rdc_thrnum)
873 rdc_group_enter(krdc);
878 if ((rc = rdc_write_refcount(krdc)) < 0) {
879 rdc_group_exit(krdc);
890 krdc->group->diskq.disk_hdr.h.state &= ~RDC_QDISABLEPEND;
900 if (krdc->group->count > 1) {
901 rdc_group_exit(krdc);
904 rdc_group_exit(krdc); /* in case this stamp fails */
907 rc = rdc_stamp_diskq(krdc, 0, RDC_NOLOG);
912 rdc_group_enter(krdc);
959 rdc_qfill_shldwakeup(rdc_k_info_t *krdc)
961 rdc_group_t *group = krdc->group;
962 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
968 if (!RDC_IS_DISKQ(krdc->group))
1014 rdc_diskq_enqueue(rdc_k_info_t *krdc, rdc_aio_t *aio)
1034 urdc = &rdc_u_info[krdc->index];
1035 group = krdc->group;
1173 (void) rdc_writer(krdc->index);
1205 rdc_fail_diskq(krdc, RDC_WAIT,
1249 * if (krdc->io_kstats) {
1250 * mutex_enter(krdc->io_kstats->ks_lock);
1251 * kstat_waitq_enter(KSTAT_IO_PTR(krdc->io_kstats));
1252 * mutex_exit(krdc->io_kstats->ks_lock);
1261 rdc_fail_diskq(krdc, RDC_WAIT, RDC_DOLOG);
1284 rdc_fail_diskq(krdc, RDC_WAIT, RDC_DOLOG);
1312 rdc_fail_diskq(krdc, RDC_WAIT, RDC_DOLOG);
1349 if ((!krdc->group->rdc_writer) && !IS_STATE(urdc, RDC_LOGGING))
1350 (void) rdc_writer(krdc->index);
1407 rdc_clr_iohdr(rdc_k_info_t *krdc, nsc_size_t qpos)
1409 rdc_group_t *group = krdc->group;
1503 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1549 rdc_k_info_t *krdc = &rdc_k_info[aio->index];
1552 RDC_CHECK_BIT(krdc, aio->pos, aio->len);
1682 rdc_k_info_t *krdc = &rdc_k_info[index];
1886 rdc_fail_diskq(krdc, RDC_NOWAIT, RDC_DOLOG);
1913 rdc_k_info_t *krdc = &rdc_k_info[index];
1916 group = krdc->group;
2163 rdc_fail_diskq(krdc, RDC_NOWAIT, RDC_DOLOG);
2235 rdc_calc_len(rdc_k_info_t *krdc, disk_queue *dq)
2258 len = min(len, krdc->maxfbas);
2315 rdc_k_info_t *krdc = &rdc_k_info[index];
2317 rdc_group_t *group = krdc->group;
2387 len = rdc_calc_len(krdc, dq);
2502 rdc_fail_diskq(krdc, RDC_NOWAIT, RDC_DOLOG);
2514 rdc_dequeue(rdc_k_info_t *krdc, int *rc)
2516 net_queue *q = &krdc->group->ra_queue;
2517 disk_queue *dq = &krdc->group->diskq;
2518 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
2579 if (rdc_qfill_shldwakeup(krdc))
2612 rdc_qfill_shldsleep(rdc_k_info_t *krdc)
2614 net_queue *nq = &krdc->group->ra_queue;
2615 disk_queue *dq = &krdc->group->diskq;
2616 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
2620 if (!RDC_IS_DISKQ(krdc->group))
2626 krdc->index);
2654 " seq: %d seqack %d", krdc->group->seq,
2655 krdc->group->seqack);
2708 rdc_qfiller_thr(rdc_k_info_t *krdc)
2710 rdc_group_t *grp = krdc->group;
2711 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
2714 int index = krdc->index;
2760 while (rdc_qfill_shldsleep(krdc)) {
2793 rdc_k_info_t *krdc, *kp;
2798 krdc = &rdc_k_info[index];
2800 group = krdc->group;
2819 rc = rdc_enable_diskq(krdc);
2826 RDC_ZERO_BITREF(krdc);
2827 for (kp = krdc->group_next; kp != krdc; kp = kp->group_next) {
2848 rdc_k_info_t *krdc, *this;
2864 krdc = &rdc_k_info[index];
2866 group = krdc->group;
2885 req_size = RDC_BITMAP_FBA + FBA_LEN(krdc->bitmap_size);
2886 req_size += FBA_LEN(krdc->bitmap_size * BITS_IN_BYTE);
2888 rc = _rdc_rsrv_devs(krdc, RDC_BMP, RDC_INTERNAL);
2899 (void) nsc_partsize(krdc->bitmapfd, &vol_size);
2901 _rdc_rlse_devs(krdc, RDC_BMP);
2910 krdc = krdc->group_next;
2911 urdc = &rdc_u_info[krdc->index];
2913 } while (krdc != this);
2939 rdc_group_enter(krdc);
2947 rdc_group_exit(krdc);
2953 _rdc_init_diskq(rdc_k_info_t *krdc)
2955 rdc_group_t *group = krdc->group;
2961 if (rdc_stamp_diskq(krdc, 0, RDC_NOLOG) < 0)
2978 rdc_k_info_t *krdc, *kp;
2996 krdc = &rdc_k_info[index];
2998 group = krdc->group;
3028 if (_rdc_init_diskq(krdc) < 0) {
3034 rdc_group_enter(krdc);
3037 for (kp = krdc->group_next; kp != krdc; kp = kp->group_next) {
3041 rdc_group_exit(krdc);
3059 rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
3060 rdc_group_t *group = krdc->group;
3083 for (p = krdc->group_next; p != krdc; p = p->group_next) {
3109 rdc_k_info_t *krdc;
3124 krdc = &rdc_k_info[index];
3126 if (!RDC_IS_DISKQ(krdc->group)) {
3142 rdc_unintercept_diskq(krdc->group); /* stop protecting queue */
3143 rdc_group_enter(krdc); /* to prevent further flushing */
3145 rdc_group_exit(krdc);
3164 rdc_k_info_t *krdc;
3182 krdc = &rdc_k_info[index];
3190 krdc = krdc->group_next;
3191 urdc = &rdc_u_info[krdc->index];
3193 } while (krdc != this);
3198 if (!(group = krdc->group) || !(diskq = &group->diskq))
3222 if (!(group = krdc->group) || !(diskq = &group->diskq))
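Two idioms recur throughout the matches above: a set is looked up by index in the rdc_k_info[] array, with its user-visible counterpart at the same index in rdc_u_info[] (lines 125, 127, 191), and the other members of the set's group are visited by walking the circular group_next list until the walk returns to the starting krdc (lines 396, 556, 2827, 3037). The fragment below is a minimal sketch of those two patterns only; the struct layouts are simplified stand-ins for illustration, not the real rdc_k_info_t / rdc_u_info_t / rdc_group_t definitions.

/*
 * Sketch of the krdc access patterns seen in the listing.
 * Simplified stand-in structures; the real ones carry much more
 * state (bitmaps, kstats, the disk queue, net queue, etc.).
 */
#include <assert.h>

typedef struct rdc_group {
	int	count;			/* number of sets in this group */
} rdc_group_t;

typedef struct rdc_k_info {
	int			index;		/* self index into rdc_k_info[] */
	rdc_group_t		*group;		/* state shared by the group */
	struct rdc_k_info	*group_next;	/* circular list of group members */
} rdc_k_info_t;

typedef struct rdc_u_info {
	int	index;				/* parallels rdc_k_info[].index */
} rdc_u_info_t;

#define	RDC_MAX_SETS	64
static rdc_k_info_t	rdc_k_info[RDC_MAX_SETS];
static rdc_u_info_t	rdc_u_info[RDC_MAX_SETS];

/* Pattern 1: look a set up by index, kernel and user halves in parallel. */
static rdc_u_info_t *
lookup_set(int index, rdc_k_info_t **krdcp)
{
	rdc_k_info_t *krdc = &rdc_k_info[index];

	assert(krdc->index == index);	/* mirrors the ASSERT at line 127 */
	*krdcp = krdc;
	return (&rdc_u_info[krdc->index]);
}

/* Pattern 2: visit every other member of krdc's group (circular list). */
static int
count_group_peers(rdc_k_info_t *krdc)
{
	rdc_k_info_t *p;
	int n = 0;

	for (p = krdc->group_next; p != krdc; p = p->group_next)
		n++;
	return (n);
}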