Lines Matching defs:immu

36 #include <sys/immu.h>
302 init_unit(immu_t *immu)
334 ASSERT(IMMU_ECAP_GET_QI(immu->immu_regs_excap));
337 if (!IMMU_ECAP_GET_EIM(immu->immu_regs_excap)) {
348 if (ddi_dma_alloc_handle(immu->immu_dip,
383 immu->immu_intrmap = intrmap;
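
The init_unit() lines above show the bring-up order: queued invalidation must already be usable (the ASSERT at 334), extended interrupt mode (EIM) is probed at 337 to choose between xAPIC- and x2APIC-format entries, and the interrupt remapping table is DMA-allocated (348) before being hung off the unit (383). A minimal user-space sketch of that flow, with calloc() standing in for the ddi_dma_alloc_handle(9F) path and hypothetical bit macros for the VT-d extended-capability fields:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* VT-d extended-capability bits; macro names here are hypothetical */
    #define ECAP_QI(e)  (((e) >> 1) & 1)    /* queued invalidation supported */
    #define ECAP_EIM(e) (((e) >> 4) & 1)    /* extended (x2APIC) interrupt mode */

    struct intrmap { void *table; size_t nents; };
    struct unit { uint64_t excap; struct intrmap *intrmap; };

    static int
    init_unit(struct unit *u, size_t nents)
    {
        struct intrmap *im;

        if (!ECAP_QI(u->excap))             /* IR requires queued invalidation */
            return (-1);
        if (!ECAP_EIM(u->excap))            /* fall back to 8-bit APIC ids */
            fprintf(stderr, "no EIM: xAPIC-format entries\n");

        if ((im = calloc(1, sizeof (*im))) == NULL)
            return (-1);
        /* the real code DMA-allocates a 4K-aligned, physically contiguous table */
        if ((im->table = calloc(nents, 16)) == NULL) {  /* 128-bit IRTEs */
            free(im);
            return (-1);
        }
        im->nents = nents;
        u->intrmap = im;                    /* mirrors immu->immu_intrmap = intrmap */
        return (0);
    }
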
391 immu_t *immu = NULL;
394 immu = immu_dmar_ioapic_immu(ioapic_index);
397 immu = immu_dmar_get_immu(dip);
400 return (immu);
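
get_immu() (391-400) resolves which remapping unit owns an interrupt source: fixed interrupts are looked up by their IOAPIC (the ACPI DMAR table binds each IOAPIC to a unit, line 394), everything else by the requesting device's dip (397). A hedged sketch of that dispatch, with the two DMAR lookups reduced to stubs standing in for immu_dmar_ioapic_immu() and immu_dmar_get_immu():

    #include <stddef.h>

    struct unit { int id; };

    /* Stubs for the DMAR-table lookups; always-NULL here */
    static struct unit *
    by_ioapic(int ioapic_index) { (void)ioapic_index; return (NULL); }
    static struct unit *
    by_device(void *dip) { (void)dip; return (NULL); }

    enum src { SRC_FIXED, SRC_MSI };

    static struct unit *
    get_unit(void *dip, enum src type, int ioapic_index)
    {
        if (type == SRC_FIXED)              /* routed through an IOAPIC */
            return (by_ioapic(ioapic_index));
        return (dip != NULL ? by_device(dip) : NULL);
    }
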
479 intrmap_enable(immu_t *immu)
484 intrmap = immu->immu_intrmap;
491 immu_regs_intrmap_enable(immu, irta_reg);
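
intrmap_enable() (479-491) fetches the unit's table and hands immu_regs_intrmap_enable() an IRTA value. Per the VT-d specification (the field layout below comes from the spec, not from this file), IRTA packs the 4K-aligned table base into bits 63:12, the extended-interrupt-mode enable into bit 11, and a size field in bits 3:0 encoding 2^(size+1) entries:

    #include <assert.h>
    #include <stdint.h>

    #define IRTA_EIME (1ULL << 11)          /* extended interrupt mode enable */

    /* Compose an IRTA register value for a table of 2^log2_nents entries. */
    static uint64_t
    irta_reg(uint64_t table_paddr, unsigned log2_nents, int eim)
    {
        assert((table_paddr & 0xfff) == 0); /* base must be 4K aligned */
        assert(log2_nents >= 1 && log2_nents <= 16);
        return (table_paddr | (eim ? IRTA_EIME : 0) |
            (uint64_t)(log2_nents - 1));    /* size field: 2^(size+1) entries */
    }
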
498 * the fault event handler for a single immu unit
501 immu_intr_handler(immu_t *immu)
509 mutex_enter(&(immu->immu_intr_lock));
510 mutex_enter(&(immu->immu_regs_lock));
513 status = immu_regs_get32(immu, IMMU_REG_FAULT_STS);
515 idip = immu->immu_dip;
518 /* check if we have a pending fault for this immu unit */
520 mutex_exit(&(immu->immu_regs_lock));
521 mutex_exit(&(immu->immu_intr_lock));
529 max_fault_index = IMMU_CAP_GET_NFR(immu->immu_regs_cap) - 1;
530 fault_reg_offset = IMMU_CAP_GET_FRO(immu->immu_regs_cap);
543 val = immu_regs_get64(immu, fault_reg_offset + index * 16 + 8);
558 val = immu_regs_get64(immu, fault_reg_offset + index * 16);
563 immu_regs_put32(immu, fault_reg_offset + index * 16 + 12,
568 /* immu-remapping fault */
604 immu_regs_put32(immu, IMMU_REG_FAULT_STS, 1);
605 mutex_exit(&(immu->immu_regs_lock));
606 mutex_exit(&(immu->immu_intr_lock));
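
The handler lines encode the fault-record layout: the fault status register (513) says whether anything is pending, the NFR and FRO capability fields (529-530) give the record count and their offset in the register file, and each record is 16 bytes. Reading the high qword at +8 (543) yields the valid (F) bit and the fault reason; writing the top dword back at +12 with F set (563) retires the record, and a final write to the status register (604) acknowledges the primary fault. A user-space sketch of the scan, modeling the register file as a byte array (macro names are hypothetical; field positions follow the VT-d spec):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FRCD_F          (1ULL << 63)    /* record-valid bit of the high qword */
    #define FRCD_REASON(hi) ((unsigned)((hi) >> 32) & 0xff)

    static uint64_t
    get64(const uint8_t *r, unsigned off) { uint64_t v; memcpy(&v, r + off, 8); return (v); }
    static void
    put32(uint8_t *r, unsigned off, uint32_t v) { memcpy(r + off, &v, 4); }

    /* Scan all nfr records at offset fro; mirrors the index*16 stride above. */
    static void
    scan_faults(uint8_t *regs, unsigned fro, unsigned nfr)
    {
        for (unsigned i = 0; i < nfr; i++) {
            uint64_t hi = get64(regs, fro + i * 16 + 8);
            if (!(hi & FRCD_F))
                continue;                   /* slot not in use */
            uint64_t lo = get64(regs, fro + i * 16);
            printf("fault reason 0x%x, info 0x%llx\n",
                FRCD_REASON(hi), (unsigned long long)lo);
            /* write-1-to-clear the F bit: top dword of the record, offset +12 */
            put32(regs, fro + i * 16 + 12, (uint32_t)(FRCD_F >> 32));
        }
    }

The real handler does all of this while holding both immu_intr_lock and immu_regs_lock, bailing out early (520-521) when no fault is pending.
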
619 immu_t *immu;
628 immu = list_head(&immu_list);
629 for (; immu; immu = list_next(&immu_list, immu)) {
630 if ((immu->immu_intrmap_running == B_TRUE) &&
631 IMMU_ECAP_GET_IR(immu->immu_regs_excap)) {
632 if (init_unit(immu) == DDI_SUCCESS) {
651 immu_t *immu;
656 immu = list_head(&immu_list);
657 for (; immu; immu = list_next(&immu_list, immu)) {
658 if (immu->immu_intrmap_setup == B_TRUE) {
659 intrmap_enable(immu);
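
The two loops above (628-632 and 656-659) are the standard illumos list(9F) walk: list_head() for the first element, list_next() to advance, NULL to stop. The first pass calls init_unit() on every unit that is running and advertises IR in its extended capabilities; the second enables interrupt remapping on every unit that completed setup. A sketch of the same two passes over a plain linked list (stubs and the success-branch flag are stand-ins for elided code):

    #include <stddef.h>
    #include <stdint.h>

    #define ECAP_IR(e) (((e) >> 3) & 1)    /* VT-d: interrupt remapping supported */

    struct unit {
        struct unit *next;                 /* list_next() stand-in */
        uint64_t excap;
        int running, setup;
    };

    static int  init_unit(struct unit *u)   { (void)u; return (0); }  /* stub */
    static void enable_unit(struct unit *u) { (void)u; }              /* stub */

    static void
    init_pass(struct unit *head)
    {
        for (struct unit *u = head; u != NULL; u = u->next)
            if (u->running && ECAP_IR(u->excap) && init_unit(u) == 0)
                u->setup = 1;   /* stand-in for the elided branch at 632 */
    }

    static void
    enable_pass(struct unit *head)
    {
        for (struct unit *u = head; u != NULL; u = u->next)
            if (u->setup)
                enable_unit(u);
    }
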
669 immu_t *immu;
685 immu = get_immu(dip, type, ioapic_index);
686 if ((immu != NULL) && (immu->immu_intrmap_running == B_TRUE)) {
687 intrmap_private->ir_immu = immu;
692 intrmap = immu->immu_intrmap;
712 if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
713 immu_qinv_intr_one_cache(immu, idx, iwp);
715 immu_regs_wbf_flush(immu);
724 INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_immu = immu;
730 if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
731 immu_qinv_intr_caches(immu, idx, count, iwp);
733 immu_regs_wbf_flush(immu);
749 immu_t *immu;
763 immu = INTRMAP_PRIVATE(intrmap_private)->ir_immu;
765 intrmap = immu->immu_intrmap;
805 immu_qinv_intr_one_cache(immu, idx, iwp);
820 immu_qinv_intr_caches(immu, idx, count, iwp);
828 immu_t *immu;
838 immu = INTRMAP_PRIVATE(*intrmap_privatep)->ir_immu;
840 intrmap = immu->immu_intrmap;
846 immu_qinv_intr_one_cache(immu, idx, iwp);
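
Lines 685-846 trace an entry's lifecycle: allocation stashes the owning unit in the interrupt's private data (ir_immu, 687 and 724) and hands out one index or a contiguous block, programming writes the IRTE, and free releases the index. After every table write the change is made visible to hardware: when the Caching Mode capability bit is set (hardware, typically virtualized, may cache even not-present entries) the interrupt-entry cache is invalidated through queued invalidation, otherwise a write-buffer flush suffices. A sketch of that decision, with stubs for immu_qinv_intr_caches() and immu_regs_wbf_flush():

    #include <stdint.h>

    #define CAP_CM(c) (((c) >> 7) & 1)     /* VT-d: caching mode */

    struct unit { uint64_t cap; };

    static void qinv_irte(struct unit *u, unsigned idx, unsigned cnt)
    { (void)u; (void)idx; (void)cnt; /* queue an IEC invalidation descriptor */ }
    static void wbf_flush(struct unit *u)
    { (void)u; /* drain the unit's internal write buffers */ }

    /* Publish IRTE updates covering [idx, idx + cnt). */
    static void
    flush_after_update(struct unit *u, unsigned idx, unsigned cnt)
    {
        if (CAP_CM(u->cap))
            qinv_irte(u, idx, cnt);        /* entries may be cached: invalidate */
        else
            wbf_flush(u);                  /* only write buffers to drain */
    }
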
916 immu_t *immu;
935 immu = list_head(listp);
936 for (; immu; immu = list_next(listp, immu)) {
937 mutex_init(&(immu->immu_intrmap_lock), NULL,
939 mutex_enter(&(immu->immu_intrmap_lock));
940 immu_init_inv_wait(&immu->immu_intrmap_inv_wait,
942 immu->immu_intrmap_setup = B_TRUE;
943 mutex_exit(&(immu->immu_intrmap_lock));
948 immu_intrmap_startup(immu_t *immu)
951 mutex_enter(&(immu->immu_intrmap_lock));
952 if (immu->immu_intrmap_setup == B_TRUE) {
953 immu->immu_intrmap_running = B_TRUE;
955 mutex_exit(&(immu->immu_intrmap_lock));
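
The setup loop (935-943) and immu_intrmap_startup() (948-955) form a two-flag handshake guarded by the per-unit immu_intrmap_lock: setup initializes the lock and the invalidation-wait state and marks the unit immu_intrmap_setup; startup later flips immu_intrmap_running, but only for units that finished setup. A user-space sketch of the same handshake with pthreads:

    #include <pthread.h>
    #include <stdbool.h>

    struct unit {
        pthread_mutex_t lock;   /* immu_intrmap_lock stand-in */
        bool setup;             /* immu_intrmap_setup */
        bool running;           /* immu_intrmap_running */
    };

    static void
    unit_setup(struct unit *u)
    {
        pthread_mutex_init(&u->lock, NULL);
        pthread_mutex_lock(&u->lock);
        /* the real code also initializes the inv-wait descriptor here */
        u->setup = true;
        pthread_mutex_unlock(&u->lock);
    }

    static void
    unit_startup(struct unit *u)
    {
        pthread_mutex_lock(&u->lock);
        if (u->setup)           /* never start a unit that was never set up */
            u->running = true;
        pthread_mutex_unlock(&u->lock);
    }
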
963 immu_intr_register(immu_t *immu)
995 "%s-intr-handler", immu->immu_name);
999 (caddr_t)immu, NULL, NULL, NULL);
1001 immu_regs_intr_enable(immu, msi_addr, msi_data, uaddr);
1003 (void) immu_intr_handler(immu);
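
Finally, immu_intr_register() (963-1003) builds a per-unit handler name (995), registers immu_intr_handler() with the unit as its sole argument (999), programs the MSI address/data pair into the fault-event registers via immu_regs_intr_enable() (1001), and then calls the handler once directly: a fault latched before the interrupt route existed would otherwise never be delivered. A sketch of that drain-after-enable idiom, with registration reduced to a stub:

    #include <stdio.h>

    struct unit { const char *name; };

    static unsigned int
    fault_handler(void *arg)
    {
        struct unit *u = arg;
        /* ... scan and retire fault records, as sketched earlier ... */
        printf("%s: drained pending faults\n", u->name);
        return (1);             /* DDI_INTR_CLAIMED in the real handler */
    }

    static void
    register_fault_intr(struct unit *u)
    {
        /* stub for handler registration + MSI fault-event programming */
        (void) fault_handler(u); /* mirrors (void) immu_intr_handler(immu) */
    }
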