/
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
}

/*
 * Build the "broken tbia" workaround scatter-gather map and point PCI
 * window WINDOW at it.
 *
 * A minimal 1K translation table is allocated; every PTE in it maps
 * the table's own first page (its physical address shifted into PTE
 * form, valid bit set), so any DMA through this window hits known,
 * harmless memory.  Used together with cia_pci_tbi_try2 when the
 * chip's TLB invalidate-all is unusable.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map.  NOTE(review): the 32K alignment is
	   presumably a window-register requirement -- confirm against
	   the CIA chip documentation.  */
	ppte = memblock_alloc_or_panic(CIA_BROKEN_TBIA_SIZE, 32768);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	/* Point every entry at the table itself; valid bit is set.  */
	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	/* Program the window.  The low two bits of the base register
	   presumably enable the window and select scatter-gather
	   translation -- confirm vs. chip docs.  */
	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
		= (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}

/*
 * Probe the CIA/PYXIS scatter-gather TLB in PCI loopback mode to find
 * out which of the chip's known misbehaviors this part exhibits, and
 * install the matching workarounds:
 *
 *   - if the tbia (invalidate-all) test fails, switch mv_pci_tbi to
 *     cia_pci_tbi_try2 and disable TLB tags 0-3;
 *   - if reloading an invalid PTE under a valid tag machine-checks,
 *     force 4-page alignment of new arena allocations;
 *   - if the basic register-update or loopback-read tests fail, give
 *     up and disable the scatter-gather windows entirely.
 *
 * Runs once at init time against the ISA hose's sg arena.  The chip
 * is placed in PCI loopback mode for the duration and restored (and
 * the test mapping torn down) on both the success and failure paths.
 */
static void __init
verify_tb_operation(void)
{
	/* One page of known data for the loopback reads to target.  */
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken; skip straight to the workaround.
	   (A set dense_io_base identifies the PYXIS variant here.)  */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode so our own reads come
	   back through the sg window.  Save CTRL for restore below.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;	/* read back to force the write out */
	mb();

	/* Write a valid entry directly into the TLB registers: tag 0
	   maps the arena's dma_base to our test page; all other tags
	   and the remaining sub-pages of tag 0 are cleared.  */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;	/* valid bit */
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address covering the 8 pages the tests
	   below will read through.  */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail.  */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work.  */
	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry.
	   A loopback read of dma_base must return what we stored in
	   the test page, without a machine check.  */
	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB.  After cia_pci_tbi, tag 0
	   should have its valid bit clear; if it doesn't, fall back to
	   the try2 workaround.  */
	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			use_tbia_try2 = 1;

			/* This didn't work; don't fail, since we've
			   got a workaround.  */
			printk("pci: failed tbia test; "
			       "workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when doing
	   a tlb fill: write a pte into the arena page table and read
	   through it immediately, without an explicit flush.  */
	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table.  Some chips machine-check here
	   instead; that is survivable with an allocation-alignment
	   workaround rather than a hard failure.  */
	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries.  */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid pte
	   under the same valid tag as we used above.  This read is
	   expected to machine-check; "failed" here means the mcheck
	   machinery itself is broken, which is only reported.  */
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests.  */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaround. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;	/* read back to force the write out */
	mb();
	return;

failed:
	/* Hard failure: turn off both scatter-gather windows and the
	   tbi hook so nothing ever tries to use sg DMA on this chip.  */
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}