Lines Matching defs:idCpu

140     RTCPUID             idCpu;
306 static int gvmmR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM, PGVMM *ppGVMM);
408 pGVMM->aHostCpus[iCpu].idCpu = RTMpCpuIdFromSetIndex(iCpu);
434 pGVMM->aHostCpus[iCpu].idCpu = NIL_RTCPUID;
984 int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
1021 int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
1045 int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
1119 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1125 VMMR0ThreadCtxHooksRelease(&pVM->aCpus[idCpu]);
1331 * @param idCpu VCPU id.
1333 GVMMR0DECL(int) GVMMR0RegisterVCpu(PVM pVM, VMCPUID idCpu)
1335 AssertReturn(idCpu != 0, VERR_NOT_OWNER);
1346 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
1347 AssertReturn(pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD, VERR_ACCESS_DENIED);
1349 Assert(pVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
1351 pVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
1353 rc = VMMR0ThreadCtxHooksCreate(&pVM->aCpus[idCpu]);
1492 * @param idCpu The Virtual CPU ID of the calling EMT.
1499 static int gvmmR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM, PGVMM *ppGVMM)
1527 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
1528 AssertReturn(pGVM->aCpus[idCpu].hEMT == hAllegedEMT, VERR_NOT_OWNER);
1542 * @param idCpu The Virtual CPU ID of the calling EMT.
1546 GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM)
1550 return gvmmR0ByVMAndEMT(pVM, idCpu, ppGVM, &pGVMM);
1613 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1614 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1667 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1669 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1708 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1710 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1738 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1740 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1773 * @param idCpu The Virtual CPU ID of the calling EMT.
1775 * @thread EMT(idCpu).
1777 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
1786 int rc = gvmmR0ByVMAndEMT(pVM, idCpu, &pGVM, &pGVMM);
1791 PGVMCPU pCurGVCpu = &pGVM->aCpus[idCpu];
1908 * @param idCpu The Virtual CPU ID of the EMT to wake up.
1912 GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PVM pVM, VMCPUID idCpu, bool fTakeUsedLock)
1922 if (idCpu < pGVM->cCpus)
1927 rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
1962 * @param idCpu The Virtual CPU ID of the EMT to wake up.
1965 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM, VMCPUID idCpu)
1967 return GVMMR0SchedWakeUpEx(pVM, idCpu, true /* fTakeUsedLock */);
2006 * @param idCpu The ID of the virtual CPU to poke.
2009 GVMMR0DECL(int) GVMMR0SchedPokeEx(PVM pVM, VMCPUID idCpu, bool fTakeUsedLock)
2019 if (idCpu < pGVM->cCpus)
2020 rc = gvmmR0SchedPokeOne(pGVM, &pVM->aCpus[idCpu]);
2044 * @param idCpu The ID of the virtual CPU to poke.
2046 GVMMR0DECL(int) GVMMR0SchedPoke(PVM pVM, VMCPUID idCpu)
2048 return GVMMR0SchedPokeEx(pVM, idCpu, true /* fTakeUsedLock */);
2076 VMCPUID idCpu = pGVM->cCpus;
2077 while (idCpu-- > 0)
2080 if (pGVM->aCpus[idCpu].hEMT == hSelf)
2084 if (VMCPUSET_IS_PRESENT(pSleepSet, idCpu))
2085 gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2086 else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
2087 gvmmR0SchedPokeOne(pGVM, &pVM->aCpus[idCpu]);
2128 * @param idCpu The Virtual CPU ID of the calling EMT.
2132 * @thread EMT(idCpu).
2134 GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, VMCPUID idCpu, bool fYield)
2141 int rc = gvmmR0ByVMAndEMT(pVM, idCpu, &pGVM, &pGVMM);
2279 && pCpu->idCpu == idHostCpu,
2280 ("u32Magic=%#x idCpu=%d idHostCpu=%d\n", pCpu->u32Magic, pCpu->idCpu, idHostCpu));
2422 if (pGVMM->aHostCpus[iSrcCpu].idCpu != NIL_RTCPUID)
2424 pStats->aHostCpus[iDstCpu].idCpu = pGVMM->aHostCpus[iSrcCpu].idCpu;