Lines Matching refs:vaddr

502 target_ulong vaddr);
1572 wp->vaddr = addr;
1597 if (addr == wp->vaddr && len_mask == wp->len_mask
1615 tlb_flush_page(env, watchpoint->vaddr);
2066 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
2200 target_ulong vaddr)
2337 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2339 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2340 tlb_entry->addr_write = vaddr;
2343 /* update the TLB corresponding to virtual page vaddr
2345 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2350 vaddr &= TARGET_PAGE_MASK;
2351 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2353 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
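
The matches at 2337-2353 are the dirty-marking path: when a guest page's backing RAM becomes dirty, the TLB entry that write-protected it (tagged with TLB_NOTDIRTY in the sub-page bits of addr_write) has the tag stripped so later stores stop trapping. A minimal stand-alone sketch of that indexing and flag handling follows; TARGET_PAGE_BITS, CPU_TLB_SIZE, the TLB_NOTDIRTY bit value and the cut-down, single-mmu_idx CPUTLBEntry table are illustrative assumptions, not the real definitions.

/* Minimal stand-alone sketch of the dirty-marking path matched above.
 * Constants, the TLB_NOTDIRTY value and this cut-down CPUTLBEntry are
 * assumptions; only the indexing and flag test mirror the listing. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))
#define CPU_TLB_SIZE     256
#define TLB_NOTDIRTY     (1 << 4)   /* assumed flag bit kept in the sub-page bits */

typedef uint32_t target_ulong;

typedef struct CPUTLBEntry {
    target_ulong addr_write;        /* page tag plus flag bits for the store path */
} CPUTLBEntry;

static CPUTLBEntry tlb_table[CPU_TLB_SIZE];  /* the real table has an mmu_idx dimension */

/* Clear TLB_NOTDIRTY in one entry if it maps vaddr (cf. line 2337). */
static void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* Direct-mapped slot: page number modulo table size (cf. lines 2350-2353). */
static void tlb_set_dirty(target_ulong vaddr)
{
    vaddr &= TARGET_PAGE_MASK;
    unsigned i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&tlb_table[i], vaddr);
}

int main(void)
{
    target_ulong va = 0x12345678 & TARGET_PAGE_MASK;
    unsigned i = (va >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    tlb_table[i].addr_write = va | TLB_NOTDIRTY;   /* pretend the page trapped stores */
    tlb_set_dirty(va);
    printf("slot %u addr_write=0x%08" PRIx32 "\n", i, tlb_table[i].addr_write);
    return 0;
}

The slot computation (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1) is the same one the listing shows again at 2351 and 2491.
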
2358 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2364 env->tlb_flush_addr = vaddr & mask;
2372 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
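
The matches at 2358-2372 are the large-page bookkeeping: instead of remembering every large mapping individually, the code keeps a single (tlb_flush_addr, tlb_flush_mask) region covering all large pages seen so far, widening the mask until a newly added page fits; a later page flush inside that region then degrades to a full TLB flush, since the individual slots the large mapping occupies are unknown. A stand-alone sketch of the widening loop follows; the empty-region sentinel and exact field handling are assumptions, only the "vaddr & mask" assignment and the loop condition come from the listing.

/* Sketch of the large-page region widening matched at 2358-2372. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t target_ulong;

static target_ulong tlb_flush_addr = (target_ulong)-1;  /* assumed "no large page yet" marker */
static target_ulong tlb_flush_mask;

static void tlb_add_large_page(target_ulong vaddr, target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (tlb_flush_addr == (target_ulong)-1) {
        tlb_flush_addr = vaddr & mask;
        tlb_flush_mask = mask;
        return;
    }
    /* Widen the mask until the existing region and the new page agree on
       every bit the mask still keeps, then shrink the region base to match. */
    mask &= tlb_flush_mask;
    while (((tlb_flush_addr ^ vaddr) & mask) != 0)
        mask <<= 1;
    tlb_flush_addr &= mask;
    tlb_flush_mask = mask;
}

int main(void)
{
    tlb_add_large_page(0x00400000, 0x00200000);   /* 2 MiB page at 4 MiB   */
    tlb_add_large_page(0x10000000, 0x00200000);   /* 2 MiB page at 256 MiB */
    printf("large-page region: addr=0x%08" PRIx32 " mask=0x%08" PRIx32 "\n",
           tlb_flush_addr, tlb_flush_mask);
    return 0;
}
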
2382 void tlb_set_page(CPUState *env, target_ulong vaddr,
2401 tlb_add_large_page(env, vaddr, size);
2410 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
2411 vaddr, (int)paddr, prot, mmu_idx, size, (long)pd);
2414 address = vaddr;
2481 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2491 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2492 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2494 te->addend = addend - vaddr;
2529 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2534 remR3FlushPage(env, vaddr);
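
The matches from 2382 through 2534 sit inside tlb_set_page. The detail worth calling out is at 2491-2494 (and the phys_addends line at 2529 in this tree): the entry's addend and the iotlb value are stored as base minus vaddr, so a TLB hit turns a guest virtual address into a host pointer or I/O offset with a single addition and no masking. A self-contained illustration of that addend scheme follows; the one-field CPUTLBEntry, the 0xff slow-path stand-in and the static "guest RAM" buffer are assumptions for the sketch.

/* Illustration of the addend scheme matched at 2491-2494. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))
#define CPU_TLB_SIZE     256

typedef uint32_t target_ulong;

typedef struct CPUTLBEntry {
    target_ulong addr_read;   /* guest page tag checked on loads */
    uintptr_t    addend;      /* host page base minus guest page base */
} CPUTLBEntry;

static CPUTLBEntry tlb[CPU_TLB_SIZE];
static uint8_t guest_ram[TARGET_PAGE_SIZE];     /* host backing for one guest page */

/* Install a read mapping for one guest page (heavily reduced tlb_set_page). */
static void tlb_set_page(target_ulong vaddr, uint8_t *host_page)
{
    unsigned index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &tlb[index];

    te->addr_read = vaddr & TARGET_PAGE_MASK;
    te->addend = (uintptr_t)host_page - (vaddr & TARGET_PAGE_MASK);
}

/* Fast-path byte load: tag compare, then guest address plus addend. */
static uint8_t ldub(target_ulong addr)
{
    unsigned index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &tlb[index];

    if (te->addr_read == (addr & TARGET_PAGE_MASK))
        return *(uint8_t *)(addr + te->addend);
    return 0xff;    /* the real code refills the TLB or goes through the iotlb here */
}

int main(void)
{
    memset(guest_ram, 0xab, sizeof(guest_ram));
    tlb_set_page(0x00401000, guest_ram);
    printf("ldub(0x00401234) = 0x%02x\n", ldub(0x00401234));
    return 0;
}
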
2809 uintptr_t addr, target_ulong vaddr)
3465 target_ulong vaddr;
3476 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3478 if ((vaddr == (wp->vaddr & len_mask) ||
3479 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
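
The remaining matches (1572, 1597, 2066, 3465-3479) are the watchpoint machinery. A watchpoint of power-of-two length len is stored as vaddr plus len_mask = ~(len - 1), which is why line 2066 can recover the length as (~wp->len_mask) + 1 when cloning a CPU, and why the hit test at 3478-3479 reduces to a mask-and-compare (the real test masks with both the access's and the watchpoint's length masks, so either range containing the other counts as a hit). A simplified sketch of that convention follows; the struct layout, the flag value and the one-direction hit test are assumptions.

/* Sketch of the len_mask convention behind lines 1572, 1597, 2066 and 3478-3479. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t target_ulong;

typedef struct CPUWatchpoint {
    target_ulong vaddr;     /* start of the watched range */
    target_ulong len_mask;  /* ~(len - 1) for a power-of-two len */
    int flags;
} CPUWatchpoint;

static int watchpoint_insert(CPUWatchpoint *wp, target_ulong addr,
                             target_ulong len, int flags)
{
    target_ulong len_mask = ~(len - 1);

    /* The encoding only works for a power-of-two length with the start
       address aligned to it, so reject anything else. */
    if (len == 0 || (len & (len - 1)) != 0 || (addr & ~len_mask) != 0)
        return -1;

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;
    return 0;
}

/* One-direction version of the hit test at 3478-3479: does the accessed
   address fall inside the watched range, with a matching access type? */
static int watchpoint_hit(const CPUWatchpoint *wp, target_ulong vaddr, int flags)
{
    return (vaddr & wp->len_mask) == wp->vaddr && (wp->flags & flags) != 0;
}

int main(void)
{
    CPUWatchpoint wp;

    if (watchpoint_insert(&wp, 0x1000, 8, 0x1 /* assumed "write" flag */) == 0) {
        /* Length recovered from the mask, as line 2066 does. */
        printf("len = %u\n", (unsigned)((~wp.len_mask) + 1));
        printf("hit 0x1004: %d   hit 0x1008: %d\n",
               watchpoint_hit(&wp, 0x1004, 0x1),
               watchpoint_hit(&wp, 0x1008, 0x1));
    }
    return 0;
}
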