Lines Matching defs:eqp

98  *	allocates an errorq element from eqp->eq_bitmap and returns a valid
105 * eqp->eq_bitmap and reset the associated nv_alloc structure.
121 * Unused elements are kept in the free pool, managed by eqp->eq_bitmap.
126 * pointed to by eqp->eq_pend, and linked together using eqe_prev. This
130 * The processing list is a doubly-linked list pointed to by eqp->eq_phead
131 * (the oldest element) and eqp->eq_ptail (the newest element). The
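Taken together, the comment excerpts above (lines 98-131) describe three views onto a single element array: a free pool tracked by eqp->eq_bitmap, a lock-free pending list rooted at eqp->eq_pend and chained newest-first through eqe_prev, and an ordered processing list running from eqp->eq_phead (oldest) to eqp->eq_ptail (newest). A minimal sketch of that layout follows; the field names mirror the excerpts, while the types and any omitted fields are assumptions for illustration only, not the real errorq_impl.h definitions.

/*
 * Illustrative sketch only -- field names follow the comment excerpts;
 * everything else (types, widths, omitted members) is assumed.
 */
typedef struct errorq_elem {
	struct errorq_elem *eqe_prev;	/* pending-list link, newest first; */
					/* also the processing back link */
	struct errorq_elem *eqe_next;	/* processing-list link, oldest first */
	struct errorq_elem *eqe_dump;	/* panic-dump list link */
	void *eqe_data;			/* caller's error record buffer */
} errorq_elem_t;

typedef struct errorq {
	errorq_elem_t *eq_pend;		/* pending list: lock-free dispatch side */
	errorq_elem_t *eq_phead;	/* processing list head (oldest element) */
	errorq_elem_t *eq_ptail;	/* processing list tail (newest element) */
	errorq_elem_t *eq_dump;		/* elements preserved for crash dump */
	ulong_t *eq_bitmap;		/* free pool: one bit per element */
	ulong_t eq_rotor;		/* search hint for the next free bit */
} errorq_t;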
280 errorq_intr(caddr_t eqp)
282 errorq_drain((errorq_t *)eqp);
297 errorq_t *eqp = kmem_alloc(sizeof (errorq_t), KM_SLEEP);
315 &ibc, NULL, errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
318 kmem_free(eqp, sizeof (errorq_t));
322 if ((eqp->eq_ksp = kstat_create("unix", 0, name, "errorq",
329 kmem_free(eqp, sizeof (errorq_t));
333 bcopy(&errorq_kstat_template, &eqp->eq_kstat,
335 eqp->eq_ksp->ks_data = &eqp->eq_kstat;
336 eqp->eq_ksp->ks_private = eqp;
337 kstat_install(eqp->eq_ksp);
339 (void) strncpy(eqp->eq_name, name, ERRORQ_NAMELEN);
340 eqp->eq_name[ERRORQ_NAMELEN] = '\0';
341 eqp->eq_func = func;
342 eqp->eq_private = private;
343 eqp->eq_data = kmem_alloc(qlen * size, KM_SLEEP);
344 eqp->eq_qlen = qlen;
345 eqp->eq_size = size;
346 eqp->eq_ipl = ipl;
347 eqp->eq_flags = flags | ERRORQ_ACTIVE;
348 eqp->eq_id = id;
349 mutex_init(&eqp->eq_lock, NULL, MUTEX_DEFAULT, NULL);
350 eqp->eq_elems = kmem_alloc(qlen * sizeof (errorq_elem_t), KM_SLEEP);
351 eqp->eq_phead = NULL;
352 eqp->eq_ptail = NULL;
353 eqp->eq_pend = NULL;
354 eqp->eq_dump = NULL;
355 eqp->eq_bitmap = kmem_zalloc(BT_SIZEOFMAP(qlen), KM_SLEEP);
356 eqp->eq_rotor = 0;
362 for (eep = eqp->eq_elems, data = eqp->eq_data; qlen > 1; qlen--) {
380 eqp->eq_next = errorq_list;
381 errorq_list = eqp;
384 return (eqp);
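The lines above trace errorq_create() from kstat setup through field initialization to insertion at the head of the global errorq_list. A hedged caller sketch, assuming the usual illumos signature errorq_create(name, func, private, qlen, size, ipl, flags); my_error_t, my_drain(), and my_init() are hypothetical names:

/* Sketch of a typical caller; the my_* names are made up. */
#include <sys/errorq.h>

typedef struct my_error {		/* hypothetical record type */
	uint64_t me_addr;
	uint32_t me_status;
} my_error_t;

static errorq_t *my_eqp;

static void
my_drain(void *private, const void *data, const errorq_elem_t *eqep)
{
	/* consume one queued my_error_t record */
}

void
my_init(void)
{
	my_eqp = errorq_create("my_error_queue", my_drain, NULL,
	    64,				/* qlen: number of queue elements */
	    sizeof (my_error_t),	/* size: bytes per eqe_data buffer */
	    1,				/* ipl: soft-interrupt priority level */
	    ERRORQ_VITAL);		/* flags: also drained on panic */
}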
396 errorq_t *eqp;
399 eqp = errorq_create(name, func, private, qlen,
402 if (eqp == NULL)
405 mutex_enter(&eqp->eq_lock);
407 for (eep = eqp->eq_elems; qlen != 0; eep++, qlen--) {
413 mutex_exit(&eqp->eq_lock);
414 return (eqp);
426 errorq_destroy(errorq_t *eqp)
432 ASSERT(eqp != NULL);
433 eqp->eq_flags &= ~ERRORQ_ACTIVE;
434 errorq_drain(eqp);
440 if (p == eqp) {
450 if (eqp->eq_flags & ERRORQ_NVLIST) {
451 for (eep = eqp->eq_elems, i = 0; i < eqp->eq_qlen; i++, eep++) {
457 mutex_destroy(&eqp->eq_lock);
458 kstat_delete(eqp->eq_ksp);
460 if (eqp->eq_id != NULL)
461 ddi_remove_softintr(eqp->eq_id);
463 kmem_free(eqp->eq_elems, eqp->eq_qlen * sizeof (errorq_elem_t));
464 kmem_free(eqp->eq_bitmap, BT_SIZEOFMAP(eqp->eq_qlen));
465 kmem_free(eqp->eq_data, eqp->eq_qlen * eqp->eq_size);
467 kmem_free(eqp, sizeof (errorq_t));
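errorq_destroy() is the mirror image of the create path: it clears ERRORQ_ACTIVE, drains once more, unlinks the queue from errorq_list, tears down per-element nv_alloc state on ERRORQ_NVLIST queues, and then frees the element array, bitmap, data buffer, and the errorq itself. A matching teardown sketch for the hypothetical queue created above:

/* Teardown paired with my_init(); my_eqp is the hypothetical handle above. */
void
my_fini(void)
{
	if (my_eqp != NULL) {
		errorq_destroy(my_eqp);	/* drains, then frees all resources */
		my_eqp = NULL;
	}
}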
522 errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
526 if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
534 if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
535 eqp->eq_rotor)) == -1) {
536 atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
539 BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
541 eqp->eq_rotor = i;
542 eep = &eqp->eq_elems[i];
547 ASSERT(len <= eqp->eq_size);
548 bcopy(data, eep->eqe_data, MIN(eqp->eq_size, len));
550 if (len < eqp->eq_size)
551 bzero((caddr_t)eep->eqe_data + len, eqp->eq_size - len);
554 old = eqp->eq_pend;
558 if (atomic_cas_ptr(&eqp->eq_pend, old, eep) == old)
562 atomic_inc_64(&eqp->eq_kstat.eqk_dispatched.value.ui64);
564 if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
565 ddi_trigger_softintr(eqp->eq_id);
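Lines 554-558 are the core of the lock-free producer path: the new element is prepended to eq_pend with a compare-and-swap retry loop, so errorq_dispatch() can run from high-level interrupt context without taking eq_lock. The same prepend pattern, sketched with C11 atomics in place of the kernel's atomic_cas_ptr():

/* CAS-prepend sketch with C11 atomics; the kernel uses atomic_cas_ptr(). */
#include <stdatomic.h>

struct node {
	struct node *prev;		/* newest-first chain, like eqe_prev */
};

static _Atomic(struct node *) pend;	/* stands in for eqp->eq_pend */

static void
push_pending(struct node *n)
{
	struct node *old = atomic_load(&pend);

	do {
		n->prev = old;		/* point at the current head */
	} while (!atomic_compare_exchange_weak(&pend, &old, n));
}

Before this step, the real code also claims the element in eq_bitmap (lines 534-541), searching from eq_rotor so successive dispatches spread across the array rather than contending on bit 0.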
585 errorq_drain(errorq_t *eqp)
589 ASSERT(eqp != NULL);
590 mutex_enter(&eqp->eq_lock);
604 while ((eep = eqp->eq_pend) != NULL) {
605 eqp->eq_ptail = eep;
608 if (atomic_cas_ptr(&eqp->eq_pend, eep, NULL) == eep)
617 ASSERT(eqp->eq_ptail == NULL);
618 mutex_exit(&eqp->eq_lock);
641 eqp->eq_phead = eep;
644 eqp->eq_ptail = NULL;
652 if (panicstr && (dep = eqp->eq_dump) != NULL) {
665 while ((eep = eqp->eq_phead) != NULL) {
666 eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
667 eqp->eq_kstat.eqk_logged.value.ui64++;
669 eqp->eq_phead = eep->eqe_next;
680 if (panicstr && (eqp->eq_flags & ERRORQ_NVLIST)) {
681 if (eqp->eq_dump == NULL)
682 dep = eqp->eq_dump = eep;
690 BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
693 mutex_exit(&eqp->eq_lock);
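On the consumer side, errorq_drain() first detaches the whole pending chain with a single CAS (lines 604-608) and then, in the elided lines leading up to 641-644, walks the eqe_prev links to rebuild it as the oldest-first processing list before invoking eq_func on each element. A standalone sketch of that detach-and-reverse step, under the same newest-first chaining assumption:

/* Detach the newest-first pending chain atomically; return it oldest-first. */
#include <stdatomic.h>

struct pnode {
	struct pnode *prev;		/* dispatch-side link (like eqe_prev) */
	struct pnode *next;		/* drain-side link (like eqe_next) */
};

static struct pnode *
detach_and_reverse(_Atomic(struct pnode *) *pend)
{
	struct pnode *snap = atomic_load(pend);
	struct pnode *head = NULL;

	/* One successful CAS claims the entire chain at once. */
	while (snap != NULL &&
	    !atomic_compare_exchange_weak(pend, &snap, NULL))
		;

	/* Prepend while walking prev links: the result runs oldest-first. */
	while (snap != NULL) {
		struct pnode *older = snap->prev;

		snap->next = head;
		head = snap;
		snap = older;
	}
	return (head);
}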
706 errorq_t *eqp;
713 for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
715 (ddi_iblock_cookie_t)(uintptr_t)ipltospl(eqp->eq_ipl);
717 if (eqp->eq_id != NULL)
721 errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
723 "for queue %s", eqp->eq_ipl, eqp->eq_name);
726 eqp->eq_id = id;
727 errorq_drain(eqp);
743 errorq_t *eqp;
747 for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
748 if ((eqp->eq_flags & (ERRORQ_VITAL | ERRORQ_NVLIST)) != what)
751 loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
763 for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
764 if (eep == eqp->eq_ptail) {
765 ASSERT(eqp->eq_phead == NULL);
766 eqp->eq_ptail = NULL;
779 if (eqp->eq_phead == NULL && (eep = eqp->eq_ptail) != NULL) {
784 eqp->eq_phead = eep;
785 eqp->eq_ptail = NULL;
799 for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
800 eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
801 eqp->eq_kstat.eqk_logged.value.ui64++;
812 if (eqp->eq_flags & ERRORQ_NVLIST) {
813 if (eqp->eq_dump == NULL)
814 dep = eqp->eq_dump = eep;
822 BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
831 errorq_drain(eqp);
833 logged += eqp->eq_kstat.eqk_logged.value.ui64 - loggedtmp;
865 errorq_reserve(errorq_t *eqp)
869 if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
877 if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
878 eqp->eq_rotor)) == -1) {
879 atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
882 BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
884 eqp->eq_rotor = i;
885 eqep = &eqp->eq_elems[i];
890 if (eqp->eq_flags & ERRORQ_NVLIST) {
896 atomic_inc_64(&eqp->eq_kstat.eqk_reserved.value.ui64);
906 errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
910 if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
911 atomic_inc_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64);
916 old = eqp->eq_pend;
920 if (atomic_cas_ptr(&eqp->eq_pend, old, eqep) == old)
924 atomic_inc_64(&eqp->eq_kstat.eqk_committed.value.ui64);
926 if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
927 ddi_trigger_softintr(eqp->eq_id);
935 errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep)
937 if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE))
940 BT_ATOMIC_CLEAR(eqp->eq_bitmap, eqep - eqp->eq_elems);
942 atomic_inc_64(&eqp->eq_kstat.eqk_cancelled.value.ui64);
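errorq_reserve(), errorq_commit(), and errorq_cancel() form the two-phase alternative to errorq_dispatch(): reserve an element from the bitmap, build the record in eqe_data in place with no intermediate copy, then commit it onto eq_pend, or cancel to return the bit to the free pool. A usage sketch consistent with the excerpts; build_record() is a hypothetical callback:

/* Two-phase enqueue sketch: reserve, fill in place, commit or cancel. */
#include <sys/errorq.h>

void
my_report(errorq_t *eqp, int (*build_record)(void *))
{
	errorq_elem_t *eqep;

	if ((eqep = errorq_reserve(eqp)) == NULL)
		return;			/* queue full or inactive: dropped */

	if (build_record(eqep->eqe_data) != 0) {
		errorq_cancel(eqp, eqep);	/* free the element again */
		return;
	}

	errorq_commit(eqp, eqep, ERRORQ_ASYNC);	/* triggers soft interrupt */
}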
953 errorq_t *eqp;
958 for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
959 if (!(eqp->eq_flags & ERRORQ_NVLIST) ||
960 !(eqp->eq_flags & ERRORQ_ACTIVE))
963 for (eep = eqp->eq_dump; eep != NULL; eep = eep->eqe_dump) {
975 eqp->eq_name, (void *)eep, len);
984 eqp->eq_name, (void *)eep, err);
1004 errorq_elem_nvl(errorq_t *eqp, const errorq_elem_t *eqep)
1008 ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
1014 errorq_elem_nva(errorq_t *eqp, const errorq_elem_t *eqep)
1018 ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
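For ERRORQ_NVLIST queues, errorq_elem_nvl() and errorq_elem_nva() hand back the element's preallocated nvlist and its nv_alloc handle, so a reserved element can be populated without further allocation. A hedged sketch of that flow; the error-class string and "detail" member are made up, and nvlist_xalloc() is assumed available for allocating a sub-list from the element's nv_alloc:

/* Sketch: populating a reserved ERRORQ_NVLIST element before commit. */
#include <sys/errorq.h>
#include <sys/nvpair.h>

void
my_nv_report(errorq_t *eqp)
{
	errorq_elem_t *eqep;
	nvlist_t *nvl, *sub;
	nv_alloc_t *nva;

	if ((eqep = errorq_reserve(eqp)) == NULL)
		return;

	nvl = errorq_elem_nvl(eqp, eqep);
	nva = errorq_elem_nva(eqp, eqep);

	(void) nvlist_add_string(nvl, "class", "hypothetical.error.class");

	/* Sub-lists should come from the element's own nv_alloc. */
	if (nvlist_xalloc(&sub, NV_UNIQUE_NAME, nva) == 0) {
		(void) nvlist_add_nvlist(nvl, "detail", sub);
		nvlist_free(sub);
	}

	errorq_commit(eqp, eqep, ERRORQ_ASYNC);
}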
1027 errorq_elem_dup(errorq_t *eqp, const errorq_elem_t *eqep, errorq_elem_t **neqep)
1029 ASSERT(eqp->eq_flags & ERRORQ_ACTIVE);
1030 ASSERT(!(eqp->eq_flags & ERRORQ_NVLIST));
1032 if ((*neqep = errorq_reserve(eqp)) == NULL)
1035 bcopy(eqep->eqe_data, (*neqep)->eqe_data, eqp->eq_size);