Lines Matching defs:rqp

115 struct smb_rq *rqp,
119 rqp->sr_flags |= flags;
120 rqp->sr_lerror = error;
121 rqp->sr_rpgen++;
122 rqp->sr_state = SMBRQ_NOTIFIED;
123 cv_broadcast(&rqp->sr_cond);
128 struct smb_rq *rqp,
133 SMBRQ_LOCK(rqp);
134 smb_iod_rqprocessed_LH(rqp, error, flags);
135 SMBRQ_UNLOCK(rqp);
141 struct smb_rq *rqp;
147 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
148 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
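The fragments above (smb_iod_rqprocessed_LH at 115-123, the locked wrapper at 128-135, and the ENOTCONN sweep at 141-148) show the notification pattern: the IOD marks a request processed under the per-request lock, bumps sr_rpgen, sets SMBRQ_NOTIFIED, and broadcasts sr_cond so any thread sleeping in smb_iod_waitrq() wakes up. As a rough user-space analogue, here is a minimal sketch using pthreads instead of the kernel kmutex/kcondvar primitives; the names (my_rq, my_rqprocessed, ...) are invented for illustration and are not part of the driver.

    #include <pthread.h>

    /* Minimal stand-in for struct smb_rq, illustration only. */
    struct my_rq {
        pthread_mutex_t rq_lock;
        pthread_cond_t  rq_cond;
        int             rq_error;   /* like sr_lerror */
        int             rq_flags;   /* like sr_flags */
        unsigned        rq_gen;     /* like sr_rpgen */
        int             rq_done;    /* like sr_state == SMBRQ_NOTIFIED */
    };

    /* Caller already holds rq_lock (the "_LH" naming convention). */
    void my_rqprocessed_locked(struct my_rq *rqp, int error, int flags)
    {
        rqp->rq_flags |= flags;
        rqp->rq_error = error;
        rqp->rq_gen++;
        rqp->rq_done = 1;
        pthread_cond_broadcast(&rqp->rq_cond);
    }

    /* Locked wrapper, mirroring smb_iod_rqprocessed(). */
    void my_rqprocessed(struct my_rq *rqp, int error, int flags)
    {
        pthread_mutex_lock(&rqp->rq_lock);
        my_rqprocessed_locked(rqp, error, flags);
        pthread_mutex_unlock(&rqp->rq_lock);
    }

The invalidate loop at 147-148 is then just a walk of the request list calling the locked wrapper with ENOTCONN and SMBR_RESTART for every pending request.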
201 smb_iod_sendrq(struct smb_rq *rqp)
203 struct smb_vc *vcp = rqp->sr_vc;
225 if (rqp->sr_sendcnt == 0) {
227 rqp->sr_mid = vcp->vc_next_mid++;
229 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
236 rqp->sr_seqno = vcp->vc_next_seq++;
237 rqp->sr_rseqno = vcp->vc_next_seq++;
241 smb_rq_fillhdr(rqp);
247 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
248 smb_rq_sign(rqp);
251 if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
252 smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
263 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
265 m = copymsg(rqp->sr_rq.mb_top);
269 (smb_rq_t *), rqp, (mblk_t *), m);
271 SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
281 rqp->sr_lerror = error;
283 SMBRQ_LOCK(rqp);
284 rqp->sr_flags |= SMBR_SENT;
285 rqp->sr_state = SMBRQ_SENT;
286 if (rqp->sr_flags & SMBR_SENDWAIT)
287 cv_broadcast(&rqp->sr_cond);
288 SMBRQ_UNLOCK(rqp);
305 /* If proc waiting on rqp was signaled... */
306 if (smb_rq_intr(rqp))
307 smb_iod_rqprocessed(rqp, EINTR, 0);
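The smb_iod_sendrq() fragments (201-307) show the first-send setup: when sr_sendcnt is 0 the request takes a MID from vc_next_mid, and if SMB signing is in effect (SMB_FLAGS2_SECURITY_SIGNATURE) it also consumes two sequence numbers from vc_next_seq, one for signing the request and one expected in the signed reply, before smb_rq_fillhdr() and smb_rq_sign(). Resends are counted, and after roughly a minute of attempts (60/SMBSBTIMO) the request is failed back with SMBR_RESTART. A hedged sketch of just the counter handling, with invented struct names standing in for the VC and request:

    #include <stdint.h>

    /* Invented mini versions of the counters used at lines 227-237. */
    struct my_vc {
        uint16_t vc_next_mid;   /* multiplex id generator */
        uint32_t vc_next_seq;   /* signing sequence generator */
    };

    struct my_req {
        uint16_t rq_mid;
        uint32_t rq_seqno;      /* sequence used to sign the request */
        uint32_t rq_rseqno;     /* sequence expected in the signed reply */
    };

    /*
     * First-send setup: one MID, and two signing sequence numbers,
     * because the reply will be signed with its own sequence.
     */
    void my_assign_ids(struct my_vc *vcp, struct my_req *rqp, int signing)
    {
        rqp->rq_mid = vcp->vc_next_mid++;
        if (signing) {
            rqp->rq_seqno  = vcp->vc_next_seq++;
            rqp->rq_rseqno = vcp->vc_next_seq++;
        }
    }

After the message is copied (copymsg at 265) and transmitted, the request moves to SMBRQ_SENT and sr_cond is broadcast if a sender is parked in SMBR_SENDWAIT (284-288).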
360 struct smb_rq *rqp;
490 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
492 if (rqp->sr_mid != mid)
496 (smb_rq_t *), rqp, (mblk_t *), m);
499 SMBRQ_LOCK(rqp);
500 if (rqp->sr_rp.md_top == NULL) {
501 md_initm(&rqp->sr_rp, m);
503 if (rqp->sr_flags & SMBR_MULTIPACKET) {
504 md_append_record(&rqp->sr_rp, m);
506 SMBRQ_UNLOCK(rqp);
512 smb_iod_rqprocessed_LH(rqp, 0, 0);
513 SMBRQ_UNLOCK(rqp);
517 if (rqp == NULL) {
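Lines 490-517 show how a received reply is demultiplexed back to its request: the IOD walks vcp->iod_rqlist comparing the reply's MID against sr_mid, attaches the mblk to sr_rp (md_initm for the first reply, md_append_record when SMBR_MULTIPACKET), and notifies the waiter via smb_iod_rqprocessed_LH(); a reply whose MID matches nothing falls through to the rqp == NULL case. A small user-space sketch of the lookup using the TAILQ macros from <sys/queue.h>; the demo_* names are invented for illustration.

    #include <sys/queue.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Invented request type; sr_link mirrors the linkage at line 490. */
    struct demo_rq {
        TAILQ_ENTRY(demo_rq) sr_link;
        uint16_t sr_mid;
        void    *sr_reply;      /* stands in for sr_rp.md_top */
    };
    TAILQ_HEAD(demo_rqhead, demo_rq);

    /* Return the pending request whose MID matches the reply, or NULL. */
    struct demo_rq *
    demo_find_rq(struct demo_rqhead *head, uint16_t mid)
    {
        struct demo_rq *rqp;

        TAILQ_FOREACH(rqp, head, sr_link) {
            if (rqp->sr_mid != mid)
                continue;
            return (rqp);
        }
        return (NULL);  /* like the rqp == NULL case at line 517 */
    }

The per-request SMBRQ_LOCK/SMBRQ_UNLOCK around the attach-and-notify step keeps the waiter from reading sr_rp while the reply chain is still being linked in.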
566 smb_iod_addrq(struct smb_rq *rqp)
568 struct smb_vc *vcp = rqp->sr_vc;
571 ASSERT(rqp->sr_cred);
590 rqp->sr_owner = curthread;
591 if (rqp->sr_owner == vcp->iod_thr) {
592 rqp->sr_flags |= SMBR_INTERNAL;
600 TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
613 error = smb_iod_sendrq(rqp);
625 smb_iod_removerq(rqp);
632 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
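smb_iod_addrq() (566-632) applies two insertion policies: a request whose owner is the IOD thread itself is flagged SMBR_INTERNAL, inserted at the head of iod_rqlist, sent immediately with smb_iod_sendrq(), and removed again if that send fails; an ordinary request is simply appended to the tail for the IOD loop to send. A hedged user-space sketch of that decision, with pthread_equal() standing in for the curthread comparison and the locking and immediate-send path omitted; all names here are invented.

    #include <pthread.h>
    #include <sys/queue.h>

    struct demo_rq2 {
        TAILQ_ENTRY(demo_rq2) sr_link;
        pthread_t sr_owner;
        int       sr_internal;      /* stands in for SMBR_INTERNAL */
    };
    TAILQ_HEAD(demo_rqhead2, demo_rq2);

    /*
     * Queue a request: one created on the IOD thread itself is marked
     * internal and goes to the head of the list (the real code also
     * sends it right away); everything else goes to the tail.
     */
    void
    demo_addrq(struct demo_rqhead2 *head, struct demo_rq2 *rqp,
        pthread_t iod_thr)
    {
        rqp->sr_owner = pthread_self();
        if (pthread_equal(rqp->sr_owner, iod_thr)) {
            rqp->sr_internal = 1;
            TAILQ_INSERT_HEAD(head, rqp, sr_link);
        } else {
            rqp->sr_internal = 0;
            TAILQ_INSERT_TAIL(head, rqp, sr_link);
        }
    }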
657 smb_iod_multirq(struct smb_rq *rqp)
659 struct smb_vc *vcp = rqp->sr_vc;
662 ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
664 if (rqp->sr_flags & SMBR_INTERNAL)
675 rqp->sr_state = SMBRQ_NOTSENT;
697 smb_iod_removerq(struct smb_rq *rqp)
699 struct smb_vc *vcp = rqp->sr_vc;
708 ASSERT(rqp->sr_link.tqe_next != (void *)1L);
710 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
724 smb_iod_waitrq(struct smb_rq *rqp)
726 struct smb_vc *vcp = rqp->sr_vc;
730 if (rqp->sr_flags & SMBR_INTERNAL) {
731 ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
732 smb_iod_removerq(rqp);
742 SMBRQ_LOCK(rqp);
758 while (rqp->sr_state == SMBRQ_NOTSENT) {
759 rqp->sr_flags |= SMBR_SENDWAIT;
760 if (rqp->sr_flags & SMBR_NOINTR_SEND) {
761 cv_wait(&rqp->sr_cond, &rqp->sr_lock);
764 rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
765 rqp->sr_flags &= ~SMBR_SENDWAIT;
767 SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
779 if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
783 tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
794 if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
795 if (rqp->sr_flags & SMBR_NOINTR_RECV)
796 tr = cv_reltimedwait(&rqp->sr_cond,
797 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
799 tr = cv_reltimedwait_sig(&rqp->sr_cond,
800 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
808 (smb_rq_t *), rqp);
815 rqp->sr_mid, smb_timo_notice);
823 while (rqp->sr_rpgen == rqp->sr_rplast) {
824 if (rqp->sr_flags & SMBR_NOINTR_RECV)
825 tr = cv_timedwait(&rqp->sr_cond,
826 &rqp->sr_lock, tmo2);
828 tr = cv_timedwait_sig(&rqp->sr_cond,
829 &rqp->sr_lock, tmo2);
837 (smb_rq_t *), rqp);
844 rqp->sr_mid, rqp->sr_timo);
851 error = rqp->sr_lerror;
852 rqp->sr_rplast++;
855 SMBRQ_UNLOCK(rqp);
861 if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
862 smb_iod_removerq(rqp);
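smb_iod_waitrq() (724-862) is the consumer side of the notification pattern: an SMBR_INTERNAL request is simply removed (it was already sent and handled synchronously); otherwise the caller first waits for the send (SMBR_SENDWAIT, interruptible unless SMBR_NOINTR_SEND), then waits for the reply in up to two stages, an optional notice timeout (smb_timo_notice, tmo1) used to warn that the server is slow, followed by the request's own timeout sr_timo (tmo2), each interruptible unless SMBR_NOINTR_RECV. Reply arrival is detected by sr_rpgen advancing past sr_rplast; on success sr_lerror is returned and sr_rplast is caught up. Below is a hedged user-space sketch of the two-stage reply wait with pthread_cond_timedwait(); the demo_wait type and function names are invented, signals are not handled, and notice_secs is assumed to be smaller than total_secs.

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    struct demo_wait {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        unsigned rpgen;     /* bumped when a reply lands */
        unsigned rplast;    /* last generation consumed by the waiter */
        int      lerror;    /* error reported by the sender/receiver */
    };

    /* Absolute CLOCK_REALTIME deadline "now + secs". */
    static struct timespec
    deadline(int secs)
    {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;
        return (ts);
    }

    /*
     * Two-stage reply wait, loosely after lines 779-852: wait through
     * the notice deadline first, then wait out the full timeout.
     * Returns the request's error, or ETIMEDOUT.
     */
    int
    demo_waitreply(struct demo_wait *w, int notice_secs, int total_secs)
    {
        struct timespec t1 = deadline(notice_secs);
        struct timespec t2 = deadline(total_secs);
        int rc = 0, error;

        pthread_mutex_lock(&w->lock);
        while (w->rpgen == w->rplast && rc != ETIMEDOUT)
            rc = pthread_cond_timedwait(&w->cond, &w->lock, &t1);
        if (w->rpgen == w->rplast) {
            /* Notice stage expired; the real code warns here. */
            rc = 0;
            while (w->rpgen == w->rplast && rc != ETIMEDOUT)
                rc = pthread_cond_timedwait(&w->cond, &w->lock, &t2);
        }
        if (w->rpgen == w->rplast) {
            error = ETIMEDOUT;
        } else {
            error = w->lerror;
            w->rplast++;
        }
        pthread_mutex_unlock(&w->lock);
        return (error);
    }

As at lines 861-862, a non-multipacket request is removed from the list once the wait completes; multipacket requests stay queued for further replies.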
885 struct smb_rq *rqp;
892 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
893 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
894 smb_iod_rqprocessed(rqp, EIO, 0);
906 struct smb_rq *rqp;
937 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
944 if (rqp->sr_state == SMBRQ_NOTSENT) {
945 error = smb_iod_sendrq(rqp);
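The final fragments (885-894 and 906-945) are the IOD's sweep loops: one marks every pending request on a dying share with EIO, and the send-all loop walks iod_rqlist calling smb_iod_sendrq() for anything still in SMBRQ_NOTSENT, which is how requests flagged SMBR_RESTART after a reconnect get retransmitted. A minimal hedged sketch of the send-all sweep, with invented demo_* names and a function pointer standing in for smb_iod_sendrq():

    #include <sys/queue.h>

    enum demo_state { DEMO_NOTSENT, DEMO_SENT };

    struct demo_rq3 {
        TAILQ_ENTRY(demo_rq3) sr_link;
        enum demo_state sr_state;
    };
    TAILQ_HEAD(demo_rqhead3, demo_rq3);

    /*
     * Resend anything still unsent, stopping on the first send error,
     * echoing the loop at lines 937-945.
     */
    int
    demo_sendall(struct demo_rqhead3 *head,
        int (*sendrq)(struct demo_rq3 *))
    {
        struct demo_rq3 *rqp;
        int error = 0;

        TAILQ_FOREACH(rqp, head, sr_link) {
            if (rqp->sr_state == DEMO_NOTSENT) {
                error = sendrq(rqp);
                if (error)
                    break;
            }
        }
        return (error);
    }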