/*
 * Invalidate every TLB entry belonging to the given mm's context.
 * A no-op if the mm has never been assigned a hardware context.
 * NOTE(review): this variant uses the old int-typed mm->context and the
 * deprecated save_and_cli/restore_flags locking API.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	/* No context allocated -> nothing of ours can be in the TLB. */
	if(page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O ?
	 */
	save_and_cli(flags); /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		/* Select entry i; subsequent R_TLB_HI/R_TLB_LO accesses
		 * refer to that entry. */
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			/* Invalidate: write INVALID_PAGEID, and give each
			 * entry a distinct vpn (i & 0xf) — presumably to
			 * avoid multiple entries matching the same vpn in
			 * hardware; TODO confirm against chip docs. */
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
			/* Clear all permission/valid bits and the pfn. */
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel,no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	restore_flags(flags);
}
/*
 * Invalidate the single TLB entry (if any) that maps @addr in the
 * context of @vma's mm. A no-op if the mm has no hardware context.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));

	/* No context allocated -> nothing of ours can be in the TLB. */
	if(page_id == NO_CONTEXT)
		return;

	/* Compare on page granularity only. */
	addr &= PAGE_MASK;

	/* Walk all entries; invalidate those matching both the context
	 * and the virtual page. Must be atomic w.r.t. interrupts since
	 * R_TLB_SELECT/R_TLB_HI form a select-then-access sequence. */
	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			/* Keep the same vpn but mark the page_id invalid. */
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr;
			/* Clear all permission/valid bits and the pfn. */
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel,no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	local_irq_restore(flags);
}
void flush_tlb_mm(struct mm_struct *mm) { int i; int page_id = mm->context.page_id; unsigned long flags; D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); if(page_id == NO_CONTEXT) return; local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) { *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } } local_irq_restore(flags); }
void handle_mmu_bus_fault(struct pt_regs *regs) { int cause; int select; #ifdef DEBUG int index; int page_id; int acc, inv; #endif pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); pmd_t *pmd; pte_t pte; int miss, we, writeac; unsigned long address; unsigned long flags; cause = *R_MMU_CAUSE; address = cause & PAGE_MASK; /* get faulting address */ select = *R_TLB_SELECT; #ifdef DEBUG page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause); acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause); inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause); index = IO_EXTRACT(R_TLB_SELECT, index, select); #endif miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause); we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause); writeac = IO_EXTRACT(R_MMU_CAUSE, wr_rd, cause); D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, dx %d pid %d\n", regs->irp, address, miss, inv, we, acc, index, page_id)); /* leave it to the MM system fault handler */ if (miss) do_page_fault(address, regs, 0, writeac); else do_page_fault(address, regs, 1, we); /* Reload TLB with new entry to avoid an extra miss exception. * do_page_fault may have flushed the TLB so we have to restore * the MMU registers. */ local_save_flags(flags); local_irq_disable(); pmd = (pmd_t *)(pgd + pgd_index(address)); if (pmd_none(*pmd)) goto exit; pte = *pte_offset_kernel(pmd, address); if (!pte_present(pte)) goto exit; *R_TLB_SELECT = select; *R_TLB_HI = cause; *R_TLB_LO = pte_val(pte); exit: local_irq_restore(flags); }
static unsigned char parport_etrax_read_control( struct parport *p) { unsigned char ret = 0; struct etrax100par_struct *info = (struct etrax100par_struct *)p->private_data; if (IO_EXTRACT(R_PAR0_CTRL_DATA, strb, info->reg_ctrl_data_shadow)) ret |= PARPORT_CONTROL_STROBE; if (IO_EXTRACT(R_PAR0_CTRL_DATA, autofd, info->reg_ctrl_data_shadow)) ret |= PARPORT_CONTROL_AUTOFD; if (!IO_EXTRACT(R_PAR0_CTRL_DATA, init, info->reg_ctrl_data_shadow)) ret |= PARPORT_CONTROL_INIT; if (IO_EXTRACT(R_PAR0_CTRL_DATA, seli, info->reg_ctrl_data_shadow)) ret |= PARPORT_CONTROL_SELECT; DPRINTK("* E100 PP %d: etrax_read_control %02x\n", p->portnum, ret); return ret; }
/*
 * Read the data lines from the R_PAR0_STATUS_DATA hardware register.
 */
static unsigned char
parport_etrax_read_data(struct parport *p)
{
	struct etrax100par_struct *info =
		(struct etrax100par_struct *)p->private_data;
	unsigned char data;

	data = IO_EXTRACT(R_PAR0_STATUS_DATA, data, *info->reg_status_data);
	DPRINTK("* E100 PP %d: etrax_read_data %02X\n", p->portnum, data);
	return data;
}
/*
 * Low-level MMU bus fault handler: decode R_MMU_CAUSE, hand the fault
 * to the generic do_page_fault(), then pre-load the TLB with the (now
 * hopefully valid) translation so we do not take an immediate second
 * miss on return.
 */
void handle_mmu_bus_fault(struct pt_regs *regs)
{
	int cause;
	int select;
#ifdef DEBUG
	int index;
	int page_id;
	int acc, inv;
#endif
	pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id());
	pmd_t *pmd;
	pte_t pte;
	int miss, we, writeac;
	unsigned long address;
	unsigned long flags;

	/* Snapshot cause and selected TLB entry before anything else can
	 * disturb the MMU registers. */
	cause = *R_MMU_CAUSE;
	address = cause & PAGE_MASK; /* faulting virtual address */
	select = *R_TLB_SELECT;

#ifdef DEBUG
	page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
	acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
	inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
	index = IO_EXTRACT(R_TLB_SELECT, index, select);
#endif
	miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
	we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
	writeac = IO_EXTRACT(R_MMU_CAUSE, wr_rd, cause);

	D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, dx %d pid %d\n",
		 regs->irp, address, miss, inv, we, acc, index, page_id));

	/* Hand off to the generic MM fault handler: a miss is a
	 * not-present fault, anything else a protection fault. */
	if (miss)
		do_page_fault(address, regs, 0, writeac);
	else
		do_page_fault(address, regs, 1, we);

	/* Refill the TLB entry the hardware selected so we don't take an
	 * immediate second miss; do_page_fault may have flushed the TLB,
	 * so the MMU registers are restored under disabled interrupts. */
	local_irq_save(flags);
	pmd = (pmd_t *)(pgd + pgd_index(address));
	if (pmd_none(*pmd))
		goto exit;
	pte = *pte_offset_kernel(pmd, address);
	if (!pte_present(pte))
		goto exit;
	*R_TLB_SELECT = select;
	*R_TLB_HI = cause;
	*R_TLB_LO = pte_val(pte);
exit:
	local_irq_restore(flags);
}
static unsigned char parport_etrax_read_status(struct parport *p) { unsigned char ret = 0; struct etrax100par_struct *info = (struct etrax100par_struct *)p->private_data; if (IO_EXTRACT(R_PAR0_STATUS_DATA, fault, *info->reg_status_data)) ret |= PARPORT_STATUS_ERROR; if (IO_EXTRACT(R_PAR0_STATUS_DATA, sel, *info->reg_status_data)) ret |= PARPORT_STATUS_SELECT; if (IO_EXTRACT(R_PAR0_STATUS_DATA, perr, *info->reg_status_data)) ret |= PARPORT_STATUS_PAPEROUT; if (IO_EXTRACT(R_PAR0_STATUS_DATA, ack, *info->reg_status_data)) ret |= PARPORT_STATUS_ACK; if (!IO_EXTRACT(R_PAR0_STATUS_DATA, busy, *info->reg_status_data)) ret |= PARPORT_STATUS_BUSY; DPRINTK("* E100 PP %d: status register %04x\n", p->portnum, *info->reg_status_data); DPRINTK("* E100 PP %d: read_status %02x\n", p->portnum, ret); return ret; }
/*
 * Invalidate all TLB entries in the given mm's context whose virtual
 * page falls in [start, end). A no-op if the mm has no context.
 * NOTE(review): old-API variant (mm_struct argument, save_and_cli).
 */
void flush_tlb_range(struct mm_struct *mm,
		     unsigned long start,
		     unsigned long end)
{
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
		 start, end, page_id, mm));

	/* No context allocated -> nothing of ours can be in the TLB. */
	if(page_id == NO_CONTEXT)
		return;

	start &= PAGE_MASK;  /* probably not necessary */
	end &= PAGE_MASK;    /* dito */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address range
	 */
	save_and_cli(flags); /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi, vpn;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		vpn = tlb_hi & PAGE_MASK;
		/* Range check is end-exclusive on page addresses. */
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    vpn >= start && vpn < end) {
			/* Invalidate: INVALID_PAGEID plus a per-entry vpn
			 * (i & 0xf) — presumably to keep entries from
			 * aliasing; TODO confirm against chip docs. */
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
			/* Clear all permission/valid bits and the pfn. */
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel,no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	restore_flags(flags);
}
/*
 * Invalidate the single TLB entry (if any) that maps @addr in the
 * context of @vma's mm. A no-op if the mm has no context.
 * NOTE(review): old-API variant (int mm->context, save_and_cli).
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));

	/* No context allocated -> nothing of ours can be in the TLB. */
	if(page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */
	save_and_cli(flags); /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr; /* same addr as before works. */
			/* Clear all permission/valid bits and the pfn. */
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel,no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	restore_flags(flags);
}
void cris_free_dma(unsigned int dmanr, const char * device_id) { unsigned long flags; if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) { printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr); return; } local_irq_save(flags); if (!used_dma_channels[dmanr]) { printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated\n", dmanr); } else if (device_id != used_dma_channels_users[dmanr]) { printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated by device\n", dmanr); } else { switch(dmanr) { case 0: *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH0_CMD, cmd, *R_DMA_CH0_CMD) == IO_STATE_VALUE(R_DMA_CH0_CMD, cmd, reset)); break; case 1: *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH1_CMD, cmd, *R_DMA_CH1_CMD) == IO_STATE_VALUE(R_DMA_CH1_CMD, cmd, reset)); break; case 2: *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH2_CMD, cmd, *R_DMA_CH2_CMD) == IO_STATE_VALUE(R_DMA_CH2_CMD, cmd, reset)); break; case 3: *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH3_CMD, cmd, *R_DMA_CH3_CMD) == IO_STATE_VALUE(R_DMA_CH3_CMD, cmd, reset)); break; case 4: *R_DMA_CH4_CMD = IO_STATE(R_DMA_CH4_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH4_CMD, cmd, *R_DMA_CH4_CMD) == IO_STATE_VALUE(R_DMA_CH4_CMD, cmd, reset)); break; case 5: *R_DMA_CH5_CMD = IO_STATE(R_DMA_CH5_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH5_CMD, cmd, *R_DMA_CH5_CMD) == IO_STATE_VALUE(R_DMA_CH5_CMD, cmd, reset)); break; case 6: *R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *R_DMA_CH6_CMD) == IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset)); break; case 7: *R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH7_CMD, cmd, *R_DMA_CH7_CMD) == IO_STATE_VALUE(R_DMA_CH7_CMD, cmd, reset)); break; case 8: *R_DMA_CH8_CMD = IO_STATE(R_DMA_CH8_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH8_CMD, cmd, *R_DMA_CH8_CMD) == 
IO_STATE_VALUE(R_DMA_CH8_CMD, cmd, reset)); break; case 9: *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, reset); while (IO_EXTRACT(R_DMA_CH9_CMD, cmd, *R_DMA_CH9_CMD) == IO_STATE_VALUE(R_DMA_CH9_CMD, cmd, reset)); break; } used_dma_channels[dmanr] = 0; } local_irq_restore(flags); }