/*
 * Push the cached interrupt mask word containing irq_nr out to the PMAC
 * interrupt controller, then re-inject any interrupt that may have been
 * lost while the source was masked.
 *
 * irq_nr: controller interrupt number; bit (irq_nr & 0x1f) of word
 *         (irq_nr >> 5) of the mask/flag registers.
 */
static void __pmac pmac_set_irq_mask(unsigned int irq_nr)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;

	/* Out-of-range interrupt numbers are silently ignored. */
	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we return to user */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 * So: if the line is enabled, asserted (level) but not latched
	 * (flag), record it as a lost interrupt so it gets replayed.
	 */
	if ((bit & ppc_cached_irq_mask[i])
	    && (ld_le32(&pmac_irq_hw[i]->level) & bit)
	    && !(ld_le32(&pmac_irq_hw[i]->flag) & bit)) {
		/* test_and_set_bit() keeps the lost-IRQ count accurate
		 * even if this races with another recording of the same
		 * interrupt. */
		if (!test_and_set_bit(irq_nr, ppc_lost_interrupts))
			atomic_inc(&ppc_n_lost_interrupts);
	}
}
/*
 * Probe PReP residual data for an MPIC (OpenPIC) interrupt controller
 * and, if found, map it and wire up the OpenPIC IRQ handling globals.
 * Does nothing when no residual data is available.
 */
void __init ibm_prep_init(void)
{
	if (have_residual_data) {
		u32 addr, real_addr, len, offset;
		PPC_DEVICE *mpic;
		PnP_TAG_PACKET *pkt;

		/* Use the PReP residual data to determine if an OpenPIC is
		 * present. If so, get the large vendor packet which will
		 * tell us the base address and length in memory.
		 * If we are successful, ioremap the memory area and set
		 * OpenPIC_Addr (this indicates that the OpenPIC was found).
		 */
		mpic = residual_find_device(-1, NULL, SystemPeripheral,
				ProgrammableInterruptController, MPIC, 0);
		if (!mpic)
			return;

		/* Large vendor packet 9 carries the address/length data. */
		pkt = PnP_find_large_vendor_packet(res->DevicePnPHeap +
				mpic->AllocatedOffset, 9, 0);
		if (!pkt)
			return;

#define p pkt->L4_Pack.L4_Data.L4_PPCPack
		if (p.PPCData[1] == 32) {
			/* PPCData[0] says which address space the MPIC
			 * lives in; pick the matching CPU-visible base. */
			switch (p.PPCData[0]) {
			case 1:
				offset = PREP_ISA_IO_BASE;
				break;
			case 2:
				offset = PREP_ISA_MEM_BASE;
				break;
			default:
				return; /* Not I/O or memory?? */
			}
		} else
			return; /* Not a 32-bit address */

		/* Residual data is stored little-endian. */
		real_addr = ld_le32((unsigned int *) (p.PPCData + 4));
		if (real_addr == 0xffffffff)
			return;

		/* Adjust address to be as seen by CPU */
		addr = real_addr + offset;

		len = ld_le32((unsigned int *) (p.PPCData + 12));
		if (!len)
			return;
#undef p
		OpenPIC_Addr = ioremap(addr, len);
		ppc_md.get_irq = openpic_get_irq;

		OpenPIC_InitSenses = prep_openpic_initsenses;
		OpenPIC_NumInitSenses = sizeof(prep_openpic_initsenses);

		printk(KERN_INFO "MPIC at 0x%08x (0x%08x), length 0x%08x "
		       "mapped to 0x%p\n", addr, real_addr, len, OpenPIC_Addr);
	}
}
/*
 * Finish an emulated MMIO load: copy the data userland placed in
 * run->mmio.data into the GPR the faulting instruction targeted
 * (recorded in vcpu->arch.io_gpr when the load was started).
 *
 * NOTE(review): neither switch has a case for len == 8 (or a default),
 * although the length check only rejects len > sizeof(ulong), which is 8
 * on 64-bit; an 8-byte MMIO load would leave *gpr unchanged — confirm
 * whether 8-byte loads can reach here.
 */
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		/* Data is already in guest byte order. */
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}
/*
 * Handle DEAD DMA transfers:
 * if the TX status comes up "DEAD" - reported on some Power Computing machines
 * we need to re-start the dbdma - but from a different physical start address
 * and with a different transfer length.  It would get very messy to do this
 * with the normal dbdma_cmd blocks - we would have to re-write the buffer start
 * addresses each time.  So, we will keep a single dbdma_cmd block which can be
 * fiddled with.
 * When DEAD status is first reported the content of the faulted dbdma block is
 * copied into the emergency buffer and we note that the buffer is in use.
 * we then bump the start physical address by the amount that was successfully
 * output before it died.
 * On any subsequent DEAD result we just do the bump-ups (we know that we are
 * already using the emergency dbdma_cmd).
 * CHECK: this just tries to "do it".  It is possible that we should abandon
 * xfers when the number of residual bytes gets below a certain value - I can
 * see that this might cause a loop-forever if a too small transfer causes
 * DEAD status.  However this is a TODO for now - we'll see what gets reported.
 * When we get a successful transfer result with the emergency buffer we just
 * pretend that it completed using the original dbdma_cmd and carry on.  The
 * 'next_cmd' field will already point back to the original loop of blocks.
 */
static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
					  volatile struct dbdma_cmd __iomem *cp)
{
	unsigned short req, res ;
	unsigned int phy ;

	/* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */

	/* to clear DEAD status we must first clear RUN
	   set it to quiescent to be on the safe side */
	(void)in_le32(&rec->dma->status);
	out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);

	if (!emergency_in_use) { /* new problem */
		/* First DEAD for this xfer: snapshot the faulted command
		 * into the emergency block and reset the original one so
		 * it is clean when we eventually loop back to it. */
		memcpy((void *)emergency_dbdma.cmds, (void *)cp,
		       sizeof(struct dbdma_cmd));
		emergency_in_use = 1;
		st_le16(&cp->xfer_status, 0);
		st_le16(&cp->req_count, rec->period_size);
		cp = emergency_dbdma.cmds;
	}

	/* now bump the values to reflect the amount
	   we haven't yet shifted */
	req = ld_le16(&cp->req_count);
	res = ld_le16(&cp->res_count);
	phy = ld_le32(&cp->phy_addr);
	/* (req - res) bytes made it out before the controller died;
	 * restart from just past them with only the residue to send. */
	phy += (req - res);
	st_le16(&cp->req_count, res);
	st_le16(&cp->res_count, 0);
	st_le16(&cp->xfer_status, 0);
	st_le32(&cp->phy_addr, phy);

	/* Branch back into the normal ring at the next period. */
	st_le32(&cp->cmd_dep, rec->cmd.addr
		+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));

	st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);

	/* point at our patched up command block */
	out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);

	/* we must re-start the controller */
	(void)in_le32(&rec->dma->status);

	/* should complete clearing the DEAD status */
	out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
}
/* config_addr size value -- status */
/*
 * RTAS write-pci-config: write args[2] (size args[1] bytes) to PCI
 * config space at register args[0]&0xff of bus/devfn encoded in args[0].
 * Always reports success in ret[0].
 */
static int rc_write_pcicfg( ulong args[], ulong ret[] )
{
	pci_addr_t addr;

	/* printm("RTAS: write_pci_config (%ld:%02lX) + 0x%02lx %08lX [%ld]\n",
	   (args[0]>>16)&0xff, (args[0]>>8)&0xff, args[0]&0xff, args[2], args[1] ); */

	/* Byte-swap the value into the layout write_pci_config() expects;
	 * 16-bit values additionally need the shift after the 32-bit swap. */
	if( args[1] == 2 )
		args[2] = ld_le32( &args[2] ) >> 16;
	if( args[1] == 4 )
		args[2] = ld_le32( &args[2] );

	/* XXX: don't know how to handle different PCI domains... */
	addr = PCIADDR_FROM_BUS_DEVFN( 0, (args[0]>>16)&0xff, (args[0]>>8)&0xff );
	write_pci_config( addr, args[0]&0xff, args[2], args[1] );

	ret[0] = 0;
	return 0;
}
/*
 * Cascade handler for the secondary (Gatwick) interrupt controller:
 * scan its flag registers (plus any recorded lost interrupts) from the
 * highest word down, pick the highest pending source, and dispatch it.
 */
static void gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	int irq, bits;

	for (irq = max_irqs - 1; irq > max_real_irqs; irq -= 32) {
		int i = irq >> 5;
		bits = ld_le32(&pmac_irq_hw[i]->flag) | ppc_lost_interrupts[i];
		if (bits == 0)
			continue;
		/* cntlzw counts leading zeros: highest set bit wins. */
		irq -= cntlzw(bits);
		break;
	}
	/* The previous version of this code allowed for this case, we
	 * don't. Put this here to check for it.
	 * -- Cort
	 */
	if ( irq_desc[irq].ctl != &gatwick_pic )
		printk("gatwick irq not from gatwick pic\n");
	else
		ppc_irq_dispatch_handler( regs, irq );
}
/* config_addr size -- status value */
/*
 * RTAS read-pci-config: read args[1] bytes of PCI config space at
 * register args[0]&0xff of the bus/devfn encoded in args[0]; the value
 * is returned in ret[1] and status (always success) in ret[0].
 */
static int rc_read_pcicfg( ulong args[], ulong ret[] )
{
	pci_addr_t addr;
	ulong v;

	/* XXX: don't know how to handle different PCI domains... */
	/* 0,bus,devfn,reg */
	addr = PCIADDR_FROM_BUS_DEVFN( 0, (args[0]>>16)&0xff, (args[0]>>8)&0xff );
	v = read_pci_config( addr, args[0]&0xff, args[1] );

	/* Byte-swap the raw value back to host layout; 16-bit reads
	 * additionally need the shift after the 32-bit swap. */
	if( args[1] == 2 )
		v = ld_le32(&v) >> 16;
	if( args[1] == 4 )
		v = ld_le32(&v);
	ret[1] = v;

	/* printm("RTAS: read_pci_config (%ld:%02lX) + 0x%02lx [%ld] : %08lX\n",
	   (args[0]>>16)&0xff, (args[0]>>8)&0xff, args[0]&0xff, args[1], ret[1] );*/

	ret[0] = 0;
	return 0;
}
/* bandit, chaos and uni-north. * * CFA0: [21 bits, device select (dev 11-31)] [3 bits function] [8 bits offset ~0x3] * CFA1: [zeros?] [8 bits bus] [8 bits devfn] [8 bits offset ~0x3] */ static pci_addr_t macrisc_interpret_addr( pci_bridge_t *b, ulong pciaddr_le, int *offs ) { ulong tbit, pciaddr_be; int val, hit=0; int dev_fn, bus; pciaddr_be = ld_le32( &pciaddr_le ); *offs = (pciaddr_be & ~3) & 0xff; if( !(pciaddr_be & 1) ) { /* CFA0 */ dev_fn = (pciaddr_be >> 8) & 7; bus = b->first_bus; for( val=11, tbit=0x800; tbit; tbit=tbit<<1, val++ ) { if( pciaddr_be & tbit ) { hit++; dev_fn |= val<<3; } } if( hit != 1 ) return -1; } else {
/*
 * Main PowerMac interrupt entry: find the highest-priority pending
 * interrupt (OpenPIC if present, otherwise the legacy controller's
 * flag registers merged with recorded lost interrupts) and dispatch it.
 * On SMP, interrupts on secondary CPUs are treated as IPIs.
 */
void pmac_do_IRQ(struct pt_regs *regs, int cpu, int isfake)
{
	int irq;
	unsigned long bits = 0;

#ifdef __SMP__
	/* IPI's are a hack on the powersurge -- Cort */
	if ( cpu != 0 ) {
		if (!isfake) {
#ifdef CONFIG_XMON
			static int xmon_2nd;
			if (xmon_2nd) xmon(regs);
#endif
			pmac_smp_message_recv();
			goto out;
		}
		/* could be here due to a do_fake_interrupt call but we
		   don't mess with the controller from the second cpu -- Cort */
		goto out;
	}

	{
		/* Spin (with diagnostics) until the global IRQ lock is free. */
		unsigned int loops = MAXCOUNT;
		while (test_bit(0, &global_irq_lock)) {
			if (smp_processor_id() == global_irq_holder) {
				printk("uh oh, interrupt while we hold global irq lock!\n");
#ifdef CONFIG_XMON
				xmon(0);
#endif
				break;
			}
			if (loops-- == 0) {
				printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
#ifdef CONFIG_XMON
				xmon(0);
#endif
			}
		}
	}
#endif /* __SMP__ */

	/* Yeah, I know, this could be a separate do_IRQ function */
	if (has_openpic) {
		irq = openpic_irq(0);
		if (irq == OPENPIC_VEC_SPURIOUS) {
			/* Spurious interrupts should never be ack'ed */
			ppc_spurious_interrupts++;
		} else {
			/* Can this happen ? (comes from CHRP code) */
			if (irq < 0) {
				printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
				       irq, regs->nip);
				ppc_spurious_interrupts++;
			} else {
				/* Level interrupts are EOI'ed after the
				 * handler runs, edge before. */
				if (!irq_desc[irq].level)
					openpic_eoi(0);
				ppc_irq_dispatch_handler( regs, irq );
				if (irq_desc[irq].level)
					openpic_eoi(0);
			}
		}
		return;
	}

	/* Legacy controller: scan flag words from the top; highest set
	 * bit (via count-leading-zeros) selects the interrupt. */
	for (irq = max_real_irqs - 1; irq > 0; irq -= 32) {
		int i = irq >> 5;
		bits = ld_le32(&pmac_irq_hw[i]->flag) | ppc_lost_interrupts[i];
		if (bits == 0)
			continue;
		irq -= cntlzw(bits);
		break;
	}

	if (irq < 0) {
		printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
		       irq, regs->nip);
		ppc_spurious_interrupts++;
	} else {
		ppc_irq_dispatch_handler( regs, irq );
	}
#ifdef CONFIG_SMP
out:
#endif /* CONFIG_SMP */
}

/* This routine will fix some missing interrupt values in the device tree
 * on the gatwick mac-io controller used by some PowerBooks
 */
static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
{
	struct device_node *node;
	int count;

	memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
	node = gw->child;
	count = 0;
	while(node) {
		/* Fix SCC */
		if (strcasecmp(node->name, "escc") == 0)
			if (node->child) {
				/* Allocate intrs entries from the pool only
				 * when the node doesn't already have enough. */
				if (node->child->n_intrs < 3) {
					node->child->intrs = &gatwick_int_pool[count];
					count += 3;
				}
				node->child->n_intrs = 3;
				node->child->intrs[0].line = 15+irq_base;
				node->child->intrs[1].line = 4+irq_base;
				node->child->intrs[2].line = 5+irq_base;
				printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
					node->child->intrs[0].line,
					node->child->intrs[1].line,
					node->child->intrs[2].line);
			}
		/* Fix media-bay & left SWIM */
		if (strcasecmp(node->name, "media-bay") == 0) {
			struct device_node* ya_node;

			if (node->n_intrs == 0)
				node->intrs = &gatwick_int_pool[count++];
			node->n_intrs = 1;
			node->intrs[0].line = 29+irq_base;
			printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
				node->intrs[0].line);

			/* Children of the media-bay: floppy and IDE. */
			ya_node = node->child;
			while(ya_node) {
				if (strcasecmp(ya_node->name, "floppy") == 0) {
					if (ya_node->n_intrs < 2) {
						ya_node->intrs = &gatwick_int_pool[count];
						count += 2;
					}
					ya_node->n_intrs = 2;
					ya_node->intrs[0].line = 19+irq_base;
					ya_node->intrs[1].line = 1+irq_base;
					printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
						ya_node->intrs[0].line, ya_node->intrs[1].line);
				}
				if (strcasecmp(ya_node->name, "ata4") == 0) {
					if (ya_node->n_intrs < 2) {
						ya_node->intrs = &gatwick_int_pool[count];
						count += 2;
					}
					ya_node->n_intrs = 2;
					ya_node->intrs[0].line = 14+irq_base;
					ya_node->intrs[1].line = 3+irq_base;
					printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
						ya_node->intrs[0].line, ya_node->intrs[1].line);
				}
				ya_node = ya_node->sibling;
			}
		}
		node = node->sibling;
	}
	/* NOTE(review): overflow is only detected after the fact; the pool
	 * writes above have already happened if count exceeded the pool. */
	if (count > 10) {
		printk("WARNING !! Gatwick interrupt pool overflow\n");
		printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
		printk(" requested = %d\n", count);
	}
}

/*
 * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
 * card which includes an ohare chip that acts as a second interrupt
 * controller. If we find this second ohare, set it up and fix the
 * interrupt value in the device tree for the ethernet chip.
 */
static void __init enable_second_ohare(void)
{
	unsigned char bus, devfn;
	unsigned short cmd;
	unsigned long addr;
	int second_irq;
	struct device_node *irqctrler = find_devices("pci106b,7");
	struct device_node *ether;

	if (irqctrler == NULL || irqctrler->n_addrs <= 0)
		return;
	addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
	pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
	max_irqs = 64;
	if (pci_device_loc(irqctrler, &bus, &devfn) == 0) {
		/* Enable memory space and bus mastering; the chip has no
		 * I/O space. */
		pmac_pcibios_read_config_word(bus, devfn, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
		cmd &= ~PCI_COMMAND_IO;
		pmac_pcibios_write_config_word(bus, devfn, PCI_COMMAND, cmd);
	}

	second_irq = irqctrler->intrs[0].line;
	printk(KERN_INFO "irq: secondary controller on irq %d\n", second_irq);
	request_irq(second_irq, gatwick_action, SA_INTERRUPT,
		    "interrupt cascade", 0 );

	/* Fix interrupt for the modem/ethernet combo controller. The number
	   in the device tree (27) is bogus (correct for the ethernet-only
	   board but not the combo ethernet/modem board). The real interrupt
	   is 28 on the second controller -> 28+32 = 60. */
	ether = find_devices("pci1011,14");
	if (ether && ether->n_intrs > 0) {
		ether->intrs[0].line = 60;
		printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
			ether->intrs[0].line);
	}
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { u64 uninitialized_var(gpr); if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); return; } if (vcpu->arch.mmio_is_bigendian) { switch (run->mmio.len) { case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; } } else { /* Convert BE data from userland back to LE. */ switch (run->mmio.len) { case 4: gpr = ld_le32((u32 *)run->mmio.data); break; case 2: gpr = ld_le16((u16 *)run->mmio.data); break; case 1: gpr = *(u8 *)run->mmio.data; break; } } if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 case 4: gpr = (s64)(s32)gpr; break; #endif case 2: gpr = (s64)(s16)gpr; break; case 1: gpr = (s64)(s8)gpr; break; } } kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif default: BUG(); } }
/* returns 1 if GPRs have been modified */
/*
 * Emulate a store instruction that faulted on an I/O address.
 * Decodes mregs->inst_opcode, computes the effective address and the
 * value to store, forwards the store to do_io_write(), and advances
 * nip past the instruction (or stops emulation if it is unknown).
 */
int _rvec_io_write( int dummy_rvec, ulong mphys_ioaddr, void *usr_data )
{
	int op, op_ext, rS, rA, rB, d, flag=0, ret=0;
	ulong ea, cont, len, inst=mregs->inst_opcode;
	/* 'flag' encodes operand size plus addressing-mode attributes. */
	enum { byte=1, half=2, word=4, len_mask=7, indexed=8, update=16,
	       reverse=32, nop=64, fpinst=128 };

	/* do_io_write() is considered FPU-unsafe */
	shield_fpu( mregs );

	/* break instruction into parts */
	op = OPCODE_PRIM( inst );
	op_ext = OPCODE_EXT( inst );
	rS = B1( inst );	/* bit 6-10 */
	rA = B2( inst );	/* bit 11-15 */
	rB = B3( inst );	/* bit 16-20 */
	d = BD( inst );		/* bit 16-31 sign extended */

	/* Primary opcodes: d-form stores. */
	switch( op ) {
	case 38: /* stb rS,d(rA) */	flag = byte ; break;
	case 39: /* stbu rS,d(rA) */	flag = byte | update; break;
	case 44: /* sth rS,d(rA) */	flag = half ; break;
	case 45: /* sthu rS,d(rA) */	flag = half | update; break;
	case 36: /* stw rS,d(rA) */	flag = word ; break;
	case 37: /* stwud rS,d(rA) */	flag = word | update; break;
	case 54: /* stfd frS,d(rA) */	/* FPU */
		flag = word | fpinst; break;
	case 55: /* stfdu frS,d(rA) */	/* FPU */
		flag = word | fpinst | update; break;
	}

	/* Extended opcodes under primary 31: x-form (indexed) stores. */
	if( !flag && op==31 ) {
		switch( op_ext ) {
		case 215: /* stbx rS,rA,rB */	flag = byte | indexed ; break;
		case 247: /* stbux rS,rA,rB */	flag = byte | indexed | update; break;
		case 407: /* sthx rS,rA,rB */	flag = half | indexed ; break;
		case 439: /* sthux rS,rA,rB */	flag = half | indexed | update; break;
		case 151: /* stwx rS,rA,rB */	flag = word | indexed ; break;
		case 183: /* stwux rS,rA,rB */	flag = word | indexed | update; break;
		case 918: /* sthbrx rS,rA,rB */	flag = half | indexed | reverse; break;
		case 662: /* stwbrx rS,rA,rB */	flag = word | indexed | reverse; break;
		case 727: /* stfdx frS,rA,rB */
			/* printm("FPU store inst\n"); */
			flag = word | indexed | fpinst;
			break;
		case 759: /* stfdux frS,rA,rB */
			/* printm("FPU store inst\n"); */
			flag = word | indexed | update | fpinst;
			break;
		}
	}

	if( flag & len_mask ) {
		/* instruction found */
		/* Effective address: (rA|0) + rB for indexed forms,
		 * (rA|0) + d for d-forms. */
		if( flag & indexed ) {	/* stxxx rS,rA,rB */
			ea = mregs->gpr[rB];
			ea += rA ? mregs->gpr[rA] : 0;
		} else {		/* stxxx rS,d(rA) */
			ea = rA ? mregs->gpr[rA] : 0;
			ea += d;
		}
		if( !(flag & fpinst ) )
			cont = mregs->gpr[rS];
		else {
			/* FPU stores use the high word of the double;
			 * make sure the register file is up to date. */
			save_fpu_completely( mregs );
			cont = mregs->fpr[rS].h;
		}

		len = flag & len_mask;
		/* Truncate / byte-reverse the value as the opcode requires. */
		if( flag & byte ) {
			cont &= 0xff;
		} else if( flag & half ) {
			if( !(flag & reverse) )
				cont &= 0xffff;
			else
				cont = bswap_16( cont );
		} else if( flag & reverse )
			cont = ld_le32(&cont);

		/* ea is the mac untranslated address,
		 * mregs->mmu_ioaddr holds the translated address */
		do_io_write( usr_data, mphys_ioaddr, cont, len );

		if( flag & fpinst ) {
			/* Second word of the double lives 4 bytes up; warn if
			 * that crosses a 4K page (translation might differ). */
			if( ((mphys_ioaddr+4) & 0xfff) < 4 )
				printm("emulate store data inst: Possible MMU translation error\n");
			do_io_write( usr_data, mphys_ioaddr+4, mregs->fpr[rS].l, 4 );
		}

		/* Update forms write the EA back to rA (rA==0 means no update). */
		if( (flag & update) && rA ) {
			mregs->gpr[rA] = ea;
			ret = 1;
		}
	}

	if( flag )
		mregs->nip += 4;
	else {
		printm("Unimplemented store instruction %08lX\n", inst );
		stop_emulation();
	}
	return ret;
}
/* returns 1 if GPRs have been modified */
/*
 * Emulate a load instruction that faulted on an I/O address.
 * Decodes mregs->inst_opcode, fetches the data through do_io_read(),
 * applies zero/sign extension and byte reversal, writes the target
 * register, and advances nip (or stops emulation if unknown).
 */
int _rvec_io_read( int dummy_rvec, ulong mphys_ioaddr, void *usr_data )
{
	ulong ea, cont, inst=mregs->inst_opcode;
	int op, op_ext, rD, rA, rB, d;
	int flag=0, ret=1;
	/* 'flag' encodes operand size plus addressing-mode attributes. */
	enum { byte=1, half=2, word=4, len_mask=7, indexed=8, update=16,
	       zero=32, reverse=64, nop=128, fpinst=256 };

	/* do_io_read() is considered FPU-unsafe */
	shield_fpu( mregs );

	/* break instruction into parts */
	op = OPCODE_PRIM( inst );	/* bit 0-5 */
	op_ext = OPCODE_EXT( inst );
	rD = B1( inst );	/* bit 6-10 */
	rA = B2( inst );	/* bit 11-15 */
	rB = B3( inst );	/* bit 16-20 */
	d = BD( inst );		/* bit 16-31 sign extended */

	/* Primary opcodes: d-form loads. */
	switch( op ) {
	case 34: /* lbz rD,d(rA) */	flag = byte | zero; break;
	case 35: /* lbzu rD,d(rA) */	flag = byte | zero | update; break;
	case 40: /* lhz rD,d(rA) */	flag = half | zero; break;
	case 41: /* lhzu rD,d(rA) */	flag = half | zero | update; break;
	case 42: /* lha rD,d(rA) */	flag = half; break;
	case 43: /* lhau rD,d(rA) */	flag = half | update; break;
	case 32: /* lwz rD,d(rA) */	flag = word | zero; break;
	case 33: /* lwzu, rD,d(rA) */	flag = word | zero | update; break;
	case 50: /* lfd frD,d(rA) */	/* FPU */
		flag = word | fpinst | zero; break;
	case 51: /* lfdu frD, d(rA) */	/* FPU */
		flag = word | fpinst | update | zero; break;
	}

	/* Extended opcodes under primary 31: x-form (indexed) loads. */
	if( !flag && op==31 ) {
		switch( op_ext ) { /* lxxx rD,rA,rB */
		case 87: /* lbzx rD,rA,rB */	flag = byte | indexed | zero; break;
		case 119: /* lbzux rD,rA,rB */	flag = byte | indexed | zero | update; break;
		case 279: /* lhzx rD,rA,rB */	flag = half | indexed | zero; break;
		case 311: /* lhzux rD,rA,rB */	flag = half | indexed | zero | update; break;
		case 343: /* lhax rD,rA,rB */	flag = half | indexed; break;
		case 375: /* lhaux rD,rA,rB */	flag = half | indexed | update; break;
		case 23: /* lwzx rD,rA,rB */	flag = word | indexed | zero; break;
		case 55: /* lwzux rD,rA,rB */	flag = word | indexed | zero | update; break;
		case 790: /* lhbrx rS,rA,rB */	flag = half | indexed | zero | reverse; break;
		case 534: /* lwbrx rS,rA,rB */	flag = word | indexed | zero | reverse; break;
		case 599: /* lfdx frD,rA,rB */	/* FPU */
			flag = word | indexed | zero | fpinst; break;
		case 631: /* lfdux frD,rA,rB */	/* FPU */
			flag = word | indexed | zero | update | fpinst; break;
		case 86: /* dcbf rA,rB - cache instruction*/
			/* treat as nop if data-translation is off */
			flag = (mregs->msr & MSR_DR) ? 0 : nop;
			break;
		}
	}

	if( flag & len_mask) {
		/* instruction found */
		/* Effective address: (rA|0) + rB for indexed forms,
		 * (rA|0) + d for d-forms. */
		if( flag & indexed ) {	/* lxxx rD,rA,rB */
			ea = mregs->gpr[rB];
			ea += rA ? mregs->gpr[rA] : 0;
		} else {		/* lxxx rD,d(rA) */
			ea = rA ? mregs->gpr[rA] : 0;
			ea += d;
		}

		/* ea is the mac untranslated address, */
		/* mphys_ioaddr is the mac-physical address */
		cont = 0;
		do_io_read( usr_data, mphys_ioaddr, (flag & len_mask), &cont );

		/* Apply zero/sign extension and byte reversal. */
		if( flag & byte ){
			cont &= 0xff;
		} else if( flag & half ) {
			cont &= 0xffff;
			if( !(flag & zero) )	/* algebraic */
				cont |= (cont & 0x8000)? 0xffff0000 : 0;
			if( flag & reverse )
				cont = bswap_16( cont );
		} else if( flag & reverse)
			cont = ld_le32(&cont);

		if( !(flag & fpinst) )
			mregs->gpr[rD] = cont;
		else {
			/* FPU instruction: the double is loaded as two
			 * 32-bit words into fpr[rD].h / .l. */
			save_fpu_completely( mregs );
			ret = 0;
			mregs->fpr[rD].h = cont;

			/* check for 4K boundary crossings... */
			if( ((mphys_ioaddr+4) & 0xfff) < 4 )
				printm("emulate_load_data_inst: MMU translation might be bad\n");
			do_io_read( usr_data, mphys_ioaddr+4, 4, &cont );
			mregs->fpr[rD].l = cont;
			reload_tophalf_fpu( mregs );
		}

		/* Update forms write the EA back to rA; skipped when rA==0
		 * or rA==rD for integer loads (rD already holds the data). */
		if( (flag & update) && rA && (rA!=rD || (flag & fpinst)) ) {
			ret = 1;
			mregs->gpr[rA] = ea;
		}
	}

	if( flag )
		mregs->nip += 4;
	else {
		printm("Unimplemented load instruction %08lX\n", inst );
		stop_emulation();
	}
	return ret;
}
bfd_vma bfd_getl32( const unsigned char *p ) { return ld_le32( (unsigned long*)p ); }
/* Plain C-callable wrapper around the ld_le32() little-endian load,
 * so the operation can be reached through a real function symbol. */
uint32_t _ld_le32(uint32_t *addr)
{
	uint32_t value;

	value = ld_le32(addr);
	return value;
}
/* Little-endian 32-bit load from PCI memory space; simply forwards to
 * the generic ld_le32() byte-reversing load. */
uint32_t pci_mem_le_ld_le32(uint32_t *adr)
{
	uint32_t value;

	value = ld_le32(adr);
	return value;
}