Exemplo n.º 1
0
/*
 * sh_mmu_information:
 *	Print the CPU's MMU configuration to the console (DEBUG kernels
 *	only).  Reads MMUCR and reports the TLB index mode, single vs.
 *	multiple virtual storage mode and, on SH4, store-queue access
 *	mode and the number of wired (non-replaceable) UTLB entries.
 */
void
sh_mmu_information(void)
{
#ifdef DEBUG
	uint32_t r;
#ifdef SH3
	if (CPU_IS_SH3) {
		printf("cpu0: 4-way set-associative 128 TLB entries\n");
		r = _reg_read_4(SH3_MMUCR);
		printf("cpu0: %s mode, %s virtual storage mode\n",
		    r & SH3_MMUCR_IX ? "ASID+VPN" : "VPN",
		    r & SH3_MMUCR_SV ? "single" : "multiple");
	}
#endif
#ifdef SH4
	if (CPU_IS_SH4) {
		unsigned int urb;
		printf("cpu0: fully-associative 4 ITLB, 64 UTLB entries\n");
		r = _reg_read_4(SH4_MMUCR);
		urb = (r & SH4_MMUCR_URB_MASK) >> SH4_MMUCR_URB_SHIFT;
		/*
		 * NOTE(review): SH3_MMUCR_SV is tested against the SH4
		 * MMUCR value here -- presumably the SV bit occupies the
		 * same position in both layouts; confirm against the
		 * register definitions.
		 */
		printf("cpu0: %s virtual storage mode, SQ access: kernel%s, ",
		    r & SH3_MMUCR_SV ? "single" : "multiple",
		    r & SH4_MMUCR_SQMD ? "" : "/user");
		/* Nonzero URB marks the first replaceable entry; entries
		 * URB..63 are wired.  Zero means no wired entries. */
		printf("wired %d\n",
		    urb ? 64 - urb : 0);
	}
#endif
#endif /* DEBUG */
}
Exemplo n.º 2
0
//
// CacheDump:
//	Dump the CPU cache configuration (CCR register) via DPRINTF.
//	Reports enable state, RAM/index modes and the write-through vs.
//	write-back policy of the P0/U0/P3 and P1 address regions.
//
void
MemoryManager_SHMMU::CacheDump()
{
	static const char *able[] = {"dis", "en" };
	int write_through_p0_u0_p3;
	int write_through_p1;
	u_int32_t r;
	int kmode;

	DPRINTF_SETUP();

	// Privileged (kernel) mode is required to read CCR.
	kmode = SetKMode(1);
	switch (SHArchitecture::cpu_type()) {
	default:
		DPRINTF((TEXT("unknown architecture.\n")));		
		SetKMode(kmode);
		return;
	case 3:
		// SH3: single unified cache.
		r = _reg_read_4(SH3_CCR);
		DPRINTF((TEXT("cache %Sabled"),
		    able[(r & SH3_CCR_CE ? 1 : 0)]));
		if (r & SH3_CCR_RA)
			DPRINTF((TEXT(" ram-mode")));

		write_through_p0_u0_p3 = r & SH3_CCR_WT;
		// CB clear means P1 is write-through.
		write_through_p1 = !(r & SH3_CCR_CB);
		break;
	case 4:
		// SH4: separate instruction (IC) and operand (OC) caches.
		r = _reg_read_4(SH4_CCR);
		DPRINTF((TEXT("I-cache %Sabled"),
		    able[(r & SH4_CCR_ICE) ? 1 : 0]));
		if (r & SH4_CCR_IIX)
			DPRINTF((TEXT(" index-mode ")));
		DPRINTF((TEXT(" D-cache %Sabled"),
		    able[(r & SH4_CCR_OCE) ? 1 : 0]));
		if (r & SH4_CCR_OIX)
			DPRINTF((TEXT(" index-mode")));
		if (r & SH4_CCR_ORA)
			DPRINTF((TEXT(" ram-mode")));

		write_through_p0_u0_p3 = r & SH4_CCR_WT;
		write_through_p1 = !(r & SH4_CCR_CB);
		break;
	}
	DPRINTF((TEXT(".")));

	// Write-through/back 
	DPRINTF((TEXT(" P0, U0, P3 write-%S P1 write-%S\n"),
	    write_through_p0_u0_p3 ? "through" : "back",
	    write_through_p1 ? "through" : "back"));

	SetKMode(kmode);
}
Exemplo n.º 3
0
/*
 * intc_intr:
 *	SH4 interrupt dispatcher, entered from the exception vector with
 *	all interrupts disabled.  ssr/spc/ssp are the interrupted
 *	context's saved status register, program counter and stack
 *	pointer; they are forwarded to the clock handler as a clockframe.
 */
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	int s, evtcode;

	/* INTEVT identifies the interrupting source. */
	evtcode = _reg_read_4(SH4_INTEVT);

	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);
	/*
	 * On entry, all interrrupts are disabled, and exception is enabled.
	 * Enable higher level interrupt here.
	 */
	/* NOTE(review): `s` is never used after this call -- presumably
	 * the previous level is restored by the caller on return. */
	s = _cpu_intr_resume(ih->ih_level);

	if (evtcode == SH_INTEVT_TMU0_TUNI0) {	/* hardclock */
		/* Clock tick: build a clockframe from the saved context. */
		struct clockframe cf;
		cf.spc = spc;
		cf.ssr = ssr;
		cf.ssp = ssp;
		(*ih->ih_func)(&cf);
	} else {
		(*ih->ih_func)(ih->ih_arg);
	}
}
Exemplo n.º 4
0
//
// Get physical address from memory mapped TLB.
// SH3 version. SH4 can't do this method. because address/data array must be
// accessed from P2.
//
// vaddr: virtual address to translate (the current ASID in PTEH is used).
// Returns the corresponding physical address.
//
paddr_t
MemoryManager_SHMMU::searchPage(vaddr_t vaddr)
{
	u_int32_t vpn, idx, s, dum, aae, dae, entry_idx, asid;
	paddr_t paddr = ~0;	// sentinel: translation not found yet
	int way, kmode;

	vpn = vaddr & SH3_PAGE_MASK;
	// Windows CE uses VPN-only index-mode.
	idx = vaddr & SH3_MMU_VPN_MASK;

	kmode = SetKMode(1);
	// Get current ASID 
	asid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

	// to avoid another TLB access, disable external interrupt.
	s = suspendIntr();

	do {
		// load target address page to TLB
		// (a read-modify-write of the page forces the hardware to
		// load its translation into the TLB before we probe it)
		// NOTE(review): if the address can never be mapped this
		// loop does not terminate -- confirm callers only pass
		// mappable addresses.
		dum = _reg_read_4(vaddr);
		_reg_write_4(vaddr, dum);

		for (way = 0; way < SH3_MMU_WAY; way++) {
			entry_idx = idx | (way << SH3_MMU_WAY_SHIFT);
			// inquire MMU address array.
			aae = _reg_read_4(SH3_MMUAA | entry_idx);

			// skip invalid entries, other address spaces, and
			// VPN mismatches.
			if (!(aae & SH3_MMU_D_VALID) ||
			    ((aae & SH3_MMUAA_D_ASID_MASK) != asid) ||
			    (((aae | idx) & SH3_PAGE_MASK) != vpn))
				continue;

			// entry found.
			// inquire MMU data array to get its physical address.
			dae = _reg_read_4(SH3_MMUDA | entry_idx);
			paddr = (dae & SH3_PAGE_MASK) | (vaddr & ~SH3_PAGE_MASK);
			break;
		}
	} while (paddr == ~0);

	resumeIntr(s);
	SetKMode(kmode);

	return paddr;
}
Exemplo n.º 5
0
/* 
 * interrupt handler for clock interrupt (100Hz) 
 *
 * Acknowledges the timer-0 compare-match interrupt, bumps the clock
 * event counter and runs the system clock tick.  Always reports the
 * interrupt as handled.
 */
int
timer0_intr(void *arg)
{

	/* Acknowledge: writing T_MODE_EQUF back to the mode register
	 * clears the compare-equal interrupt flag. */
	_reg_write_4(T0_MODE_REG, _reg_read_4(T0_MODE_REG) | T_MODE_EQUF);

	/* Statistics. */
	_playstation2_evcnt.clock.ev_count++;

	/* Run the periodic clock tick with the prebuilt clockframe. */
	hardclock(&playstation2_clockframe);

	return (1);	/* interrupt handled */
}
Exemplo n.º 6
0
/*
 * shpcic_conf_read:
 *	Read one 32-bit PCI configuration register through the SH4
 *	on-chip PCI controller.  `tag` selects the device, `reg` the
 *	register offset.  Runs at splhigh so the PAR/PDR register pair
 *	is not clobbered by an interrupt.
 */
pcireg_t
shpcic_conf_read(void *v, pcitag_t tag, int reg)
{
	pcireg_t val;
	int spl;

	spl = splhigh();
	/* Select the target configuration register ... */
	_reg_write_4(SH4_PCIPAR, tag | reg);
	/* ... fetch its contents ... */
	val = _reg_read_4(SH4_PCIPDR);
	/* ... and deselect. */
	_reg_write_4(SH4_PCIPAR, 0);
	splx(spl);

	return val;
}
//
// tmu_channel_dump:
//	Dump one TMU channel's registers (TCR, TCNT, TCOR) via DPRINTF.
//	unit            - channel number; channel 2 additionally has an
//	                  input-capture facility.
//	tcor/tcnt/tcr   - addresses of the channel's constant, counter
//	                  and control registers.
//
void
SH3dev::tmu_channel_dump(int unit, paddr_t tcor, paddr_t tcnt,
    paddr_t tcr)
{
	uint32_t r32;
	uint16_t r16;

	DPRINTF((TEXT("TMU#%d:"), unit));
#define	DBG_BIT_PRINT(r, m)	_dbg_bit_print(r, SH3_TCR_##m, #m)
	/* TCR: timer control register bits */
	r16 = _reg_read_2(tcr);
	DBG_BIT_PRINT(r16, UNF);
	DBG_BIT_PRINT(r16, UNIE);
	DBG_BIT_PRINT(r16, CKEG1);
	DBG_BIT_PRINT(r16, CKEG0);
	DBG_BIT_PRINT(r16, TPSC2);
	DBG_BIT_PRINT(r16, TPSC1);
	DBG_BIT_PRINT(r16, TPSC0);
	/* channel 2 has input capture. */
	if (unit == 2) {
		DBG_BIT_PRINT(r16, ICPF);
		DBG_BIT_PRINT(r16, ICPE1);
		DBG_BIT_PRINT(r16, ICPE0);
	}
#undef DBG_BIT_PRINT
	/* TCNT0  timer counter */
	r32 = _reg_read_4(tcnt);
	DPRINTF((TEXT("\ncnt=0x%08x"), r32));
	/* TCOR0  timer constant register */
	/* NOTE(review): %04x truncates the display of this 32-bit value. */
	r32 = _reg_read_4(tcor);
	DPRINTF((TEXT(" constant=0x%04x"), r32));

	if (unit == 2)
		/*
		 * Bug fix: read the input-capture register; the original
		 * printed the register's address constant (SH3_TCPR2)
		 * instead of its contents.
		 */
		DPRINTF((TEXT(" input capture=0x%08x\n"),
		    _reg_read_4(SH3_TCPR2)));
	else
		DPRINTF((TEXT("\n")));
}
Exemplo n.º 8
0
/*
 * intc_intr:
 *	hpcsh interrupt dispatcher (SH3/SH4), entered with all interrupts
 *	disabled.  Dispatches the clock interrupt with a clockframe, the
 *	HD6446x companion-chip cascade interrupt to its sub-handlers, and
 *	everything else to the registered intc handler.
 */
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	int evtcode;
	u_int16_t r;

	/* INTEVT (SH7709 uses INTEVT2) identifies the source. */
	evtcode = _reg_read_4(CPU_IS_SH3 ? SH7709_INTEVT2 : SH4_INTEVT);

	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);
	/*
	 * On entry, all interrrupts are disabled,
	 * and exception is enabled for P3 access. (kernel stack is P3,
	 * SH3 may or may not cause TLB miss when access stack.)
	 * Enable higher level interrupt here.
	 */
	/* Latch the companion chip's pending-interrupt register before
	 * re-enabling interrupts. */
	r = _reg_read_2(HD6446X_NIRR);

	splx(ih->ih_level);

	if (evtcode == SH_INTEVT_TMU0_TUNI0) {
		/* Clock tick: hand the handler a clockframe. */
		struct clockframe cf;
		cf.spc = spc;
		cf.ssr = ssr;
		cf.ssp = ssp;
		(*ih->ih_func)(&cf);
		__dbg_heart_beat(HEART_BEAT_RED);
	} else if (evtcode ==
	    (CPU_IS_SH3 ? SH7709_INTEVT2_IRQ4 : SH_INTEVT_IRL11)) {
		/* HD6446x cascade: find the highest-priority enabled cause. */
		int cause = r & hd6446x_ienable;
		struct hd6446x_intrhand *hh = &hd6446x_intrhand[ffs(cause) - 1];
		if (cause == 0) {
			/* Spurious/masked: acknowledge and bail out. */
			printf("masked HD6446x interrupt.0x%04x\n", r);
			_reg_write_2(HD6446X_NIRR, 0x0000);
			return;
		}
		/* Enable higher level interrupt*/
		hd6446x_intr_resume(hh->hh_ipl);
		KDASSERT(hh->hh_func != NULL);
		(*hh->hh_func)(hh->hh_arg);
		__dbg_heart_beat(HEART_BEAT_GREEN);
	} else {
		(*ih->ih_func)(ih->ih_arg);
		__dbg_heart_beat(HEART_BEAT_BLUE);
	}
}
Exemplo n.º 9
0
/*
 * sh4_tlb_invalidate_asid:
 *	Invalidate every UTLB entry belonging to the given address space
 *	(asid), then flush the entire ITLB.
 */
void
sh4_tlb_invalidate_asid(int asid)
{
	u_int32_t a;
	int e;

	/* Invalidate entry attribute to ASID */
	RUN_P2;		/* memory-mapped TLB arrays must be accessed from P2 */
	for (e = 0; e < SH4_UTLB_ENTRY; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		if ((_reg_read_4(a) & SH4_UTLB_AA_ASID_MASK) == asid)
			_reg_write_4(a, 0);	/* clears V bit with the rest */
	}

	__sh4_itlb_invalidate_all();
	RUN_P1;
}
Exemplo n.º 10
0
/*
 * sh3_tlb_invalidate_asid:
 *	Walk every way/entry pair of the SH3 memory-mapped TLB address
 *	array and clear each entry tagged with the given address-space
 *	identifier (asid).
 */
void
sh3_tlb_invalidate_asid(int asid)
{
	int way, entry;

	/* Invalidate every entry whose ASID field matches. */
	for (way = 0; way < SH3_MMU_WAY; way++) {
		for (entry = 0; entry < SH3_MMU_ENTRY; entry++) {
			uint32_t addr = (way << SH3_MMU_WAY_SHIFT) |
			    (entry << SH3_MMU_VPN_SHIFT);

			if ((_reg_read_4(SH3_MMUAA | addr) &
			    SH3_MMUAA_D_ASID_MASK) == asid)
				_reg_write_4(SH3_MMUAA | addr, 0);
		}
	}
}
Exemplo n.º 11
0
/*
 * sh4_tlb_invalidate_all:
 *	Clear every replaceable UTLB entry (entries below the wired
 *	boundary URB, or all of them when URB is zero) and flush the
 *	whole ITLB.
 */
void
sh4_tlb_invalidate_all()
{
	u_int32_t a;
	int e, eend;

	/* If non-wired entry limit is zero, clear all entry. */
	a = _reg_read_4(SH4_MMUCR) & SH4_MMUCR_URB_MASK;
	eend = a ? (a >> SH4_MMUCR_URB_SHIFT) : SH4_UTLB_ENTRY;

	RUN_P2;		/* TLB address array is only accessible from P2 */
	for (e = 0; e < eend; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		_reg_write_4(a, 0);
	}
	__sh4_itlb_invalidate_all();
	RUN_P1;
}
Exemplo n.º 12
0
/*
 * sh4_tlb_invalidate_addr:
 *	Invalidate the UTLB (and, via hardware, ITLB) entry mapping
 *	virtual address `va` in address space `asid`, using the TLB
 *	associative-write mechanism.
 */
void
sh4_tlb_invalidate_addr(int asid, vaddr_t va)
{
	u_int32_t pteh;
	va &= SH4_PTEH_VPN_MASK;

	/* Save current ASID */
	pteh = _reg_read_4(SH4_PTEH);
	/* Set ASID for associative write */
	_reg_write_4(SH4_PTEH, asid);

	/* Associative write(UTLB/ITLB). not required ITLB invalidate. */
	RUN_P2;		/* address array access must run from P2 */
	_reg_write_4(SH4_UTLB_AA | SH4_UTLB_A, va); /* Clear D, V */
	RUN_P1;
	/* Restore ASID */
	_reg_write_4(SH4_PTEH, pteh);
}
Exemplo n.º 13
0
/*
 * shpcic_match:
 *	Autoconfiguration match routine for the SH4 on-chip PCI
 *	controller.  Returns 1 when running on an SH7751/SH7751R whose
 *	PCIC identifies itself as a Hitachi SH7751(R) host bridge and
 *	whose pins are not configured for port use; 0 otherwise.
 */
static int
shpcic_match(device_t parent, cfdata_t cf, void *aux)
{
	pcireg_t id;

	/* Only one instance is supported. */
	if (shpcic_found)
		return (0);

	/* The PCIC exists only on SH7751 and SH7751R. */
	if (cpu_product != CPU_PRODUCT_7751 &&
	    cpu_product != CPU_PRODUCT_7751R)
		return (0);

	/* The controller must identify as a Hitachi SH7751(R). */
	id = _reg_read_4(SH4_PCICONF0);
	if (PCI_VENDOR(id) != PCI_VENDOR_HITACHI)
		return (0);
	if (PCI_PRODUCT(id) != PCI_PRODUCT_HITACHI_SH7751 &&
	    PCI_PRODUCT(id) != PCI_PRODUCT_HITACHI_SH7751R)
		return (0);

	/* Pins multiplexed for port use mean no PCI on this board. */
	if (_reg_read_2(SH4_BCR2) & BCR2_PORTEN)
		return (0);

	return (1);
}
Exemplo n.º 14
0
/*
 * sh3_tlb_invalidate_addr:
 *	Probe the 4-way SH3 TLB for the entry mapping virtual address
 *	`va` in address space `asid` (assuming 4K pages) and clear it.
 *	At most one way can hold the mapping, so the probe stops at the
 *	first hit.
 */
void
sh3_tlb_invalidate_addr(int asid, vaddr_t va)
{
	uint32_t a, d;
	int w;

	d = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;  /* 4K page */
	va = va & SH3_MMU_VPN_MASK;   /* [16:12] entry index */

	/* Probe entry and invalidate it. */
	for (w = 0; w < SH3_MMU_WAY; w++) {
		a = va | (w << SH3_MMU_WAY_SHIFT); /* way [9:8] */
		if ((_reg_read_4(SH3_MMUAA | a) &
		    (SH3_MMUAA_D_VPN_MASK_4K | SH3_MMUAA_D_ASID_MASK)) == d) {
			_reg_write_4(SH3_MMUAA | a, 0);
			break;
		}
	}
}
Exemplo n.º 15
0
/*
 * sh3_tlb_update:
 *	Load a new SH3 TLB entry mapping `va` to the translation `pte`
 *	in address space `asid`.  Any stale entry for the same va/asid
 *	is invalidated first, then the entry is loaded with the ldtlb
 *	instruction via PTEH/PTEL.
 */
void
sh3_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
	uint32_t oasid;

	KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

	/* Save old ASID */
	oasid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

	/* Invalidate old entry (if any) */
	sh3_tlb_invalidate_addr(asid, va);

	/* Load new entry */
	_reg_write_4(SH3_PTEH, (va & ~PGOFSET) | asid);
	_reg_write_4(SH3_PTEL, pte & PG_HW_BITS);
	__asm volatile("ldtlb");

	/* Restore old ASID */
	if (asid != oasid)
		_reg_write_4(SH3_PTEH, oasid);
}
Exemplo n.º 16
0
void
sh4_tlb_update(int asid, vaddr_t va, u_int32_t pte)
{
	u_int32_t oasid;
	u_int32_t ptel;

	KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

	/* Save old ASID */
	oasid = _reg_read_4(SH4_PTEH) & SH4_PTEH_ASID_MASK;

	/* Invalidate old entry (if any) */
	sh4_tlb_invalidate_addr(asid, va);

	_reg_write_4(SH4_PTEH, asid);
	/* Load new entry */
	_reg_write_4(SH4_PTEH, (va & ~PGOFSET) | asid);
	ptel = pte & PG_HW_BITS;
	if (pte & _PG_PCMCIA) {
		_reg_write_4(SH4_PTEA,
		    (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK);
	} else {
Exemplo n.º 17
0
Arquivo: sbus.c Projeto: MarginC/kame
/*
 * sbus_intr:
 *	PlayStation2 SBUS interrupt handler.  Reads the interrupt flag
 *	register and dispatches PCMCIA and USB sub-interrupts; each flag
 *	is acknowledged (written back) before its handler runs.  Always
 *	reports the interrupt as handled.
 */
int
sbus_intr(void *arg)
{
	u_int32_t stat;

	_playstation2_evcnt.sbus.ev_count++;	/* statistics */
	stat = _reg_read_4(SBUS_SMFLG_REG);

	if (stat & SMFLG_PCMCIA_INT) {
		/* Clear the card-side cause first, then ack the flag. */
		(*sbus_pcmcia_intr_clear)();
		_reg_write_4(SBUS_SMFLG_REG, SMFLG_PCMCIA_INT);
		(*sbus_pcmcia_intr)(sbus_pcmcia_context);
	}

	if (stat & SMFLG_USB_INT) {
		_reg_write_4(SBUS_SMFLG_REG, SMFLG_USB_INT);
		(*sbus_usb_intr)(sbus_usb_context);
	}
	
	/* Re-arm the PCMCIA interrupt path. */
	(*sbus_pcmcia_intr_reinstall)();

	return (1);
}
Exemplo n.º 18
0
// INTC
void
SH4dev::icu_dump()
{
#define ON(x, c)	((x) & (c) ? check[1] : check[0])
#define _(n)		DPRINTF((TEXT("%S %S "), #n, ON(r, SH4_ICR_ ## n)))
    static const char *check[] = { "[_]", "[x]" };
    u_int16_t r;

    super::icu_dump_priority(_ipr_table);

    r = _reg_read_2(SH4_ICR);
    DPRINTF((TEXT("ICR: ")));
    _(NMIL);
    _(MAI);
    _(NMIB);
    _(NMIE);
    _(IRLM);
    DPRINTF((TEXT("0x%04x\n"), r));

#if 0 // monitoring SH4 interrupt request.
    // disable SH3 internal devices interrupt.
    suspendIntr();
    _reg_write_2(SH4_IPRA, 0);
    _reg_write_2(SH4_IPRB, 0);
    _reg_write_2(SH4_IPRC, 0);
//	_reg_write_2(SH4_IPRD, 0);  SH7709S only.
    resumeIntr(0);	// all interrupts enable.
    while (1) {
        DPRINTF((TEXT("%04x ", _reg_read_2(HD64465_NIRR))));
        bitdisp(_reg_read_4(SH4_INTEVT));
    }
    /* NOTREACHED */
#endif
#undef _
#undef ON
}
Exemplo n.º 19
0
/*
 * shpcic_attach:
 *	Attach and initialize the SH4 (SH7751/SH7751R) on-chip PCI
 *	controller: reset the PCIC, program bus/wait-state control
 *	mirrors, map PCI I/O and memory windows, expose local RAM via
 *	two local-address windows, enable bus-master operation, and
 *	finally attach the PCI bus below this device.
 */
static void
shpcic_attach(device_t parent, device_t self, void *aux)
{
	struct pcibus_attach_args pba;
#ifdef PCI_NETBSD_CONFIGURE
	struct extent *ioext, *memext;
#endif
	pcireg_t id, class;
	char devinfo[256];

	shpcic_found = 1;

	aprint_naive("\n");

	/* Identify ourselves from the PCIC's own config space. */
	id = _reg_read_4(SH4_PCICONF0);
	class = _reg_read_4(SH4_PCICONF2);
	pci_devinfo(id, class, 1, devinfo, sizeof(devinfo));
	aprint_normal(": %s\n", devinfo);

	/* allow PCIC request */
	_reg_write_4(SH4_BCR1, _reg_read_4(SH4_BCR1) | BCR1_BREQEN);

	/* Initialize PCIC */
	_reg_write_4(SH4_PCICR, PCICR_BASE | PCICR_RSTCTL);
	delay(10 * 1000);
	_reg_write_4(SH4_PCICR, PCICR_BASE);

	/* Class: Host-Bridge */
	_reg_write_4(SH4_PCICONF2,
	    PCI_CLASS_CODE(PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST, 0x00));

#if !defined(DONT_INIT_PCIBSC)
	/*
	 * Mirror the bus-state controller settings (BCR/WCR/MCR) into
	 * the PCIC's shadow registers, unless the port configuration
	 * provides explicit values.
	 */
#if defined(PCIBCR_BCR1_VAL)
	_reg_write_4(SH4_PCIBCR1, PCIBCR_BCR1_VAL);
#else
	_reg_write_4(SH4_PCIBCR1, _reg_read_4(SH4_BCR1) | BCR1_MASTER);
#endif
#if defined(PCIBCR_BCR2_VAL)
	_reg_write_4(SH4_PCIBCR2, PCIBCR_BCR2_VAL);
#else
	_reg_write_4(SH4_PCIBCR2, _reg_read_2(SH4_BCR2));
#endif
#if defined(SH4) && defined(SH7751R)
	if (cpu_product == CPU_PRODUCT_7751R) {
#if defined(PCIBCR_BCR3_VAL)
		_reg_write_4(SH4_PCIBCR3, PCIBCR_BCR3_VAL);
#else
		_reg_write_4(SH4_PCIBCR3, _reg_read_2(SH4_BCR3));
#endif
	}
#endif	/* SH4 && SH7751R && PCIBCR_BCR3_VAL */
#if defined(PCIBCR_WCR1_VAL)
	_reg_write_4(SH4_PCIWCR1, PCIBCR_WCR1_VAL);
#else
	_reg_write_4(SH4_PCIWCR1, _reg_read_4(SH4_WCR1));
#endif
#if defined(PCIBCR_WCR2_VAL)
	_reg_write_4(SH4_PCIWCR2, PCIBCR_WCR2_VAL);
#else
	_reg_write_4(SH4_PCIWCR2, _reg_read_4(SH4_WCR2));
#endif
#if defined(PCIBCR_WCR3_VAL)
	_reg_write_4(SH4_PCIWCR3, PCIBCR_WCR3_VAL);
#else
	_reg_write_4(SH4_PCIWCR3, _reg_read_4(SH4_WCR3));
#endif
#if defined(PCIBCR_MCR_VAL)
	_reg_write_4(SH4_PCIMCR, PCIBCR_MCR_VAL);
#else
	_reg_write_4(SH4_PCIMCR, _reg_read_4(SH4_MCR));
#endif
#endif	/* !DONT_INIT_PCIBSC */

	/* set PCI I/O, memory base address */
	_reg_write_4(SH4_PCIIOBR, SH4_PCIC_IO);
	_reg_write_4(SH4_PCIMBR, SH4_PCIC_MEM);

	/* set PCI local address 0 */
	_reg_write_4(SH4_PCILSR0, (64 - 1) << 20);	/* 64MB window */
	_reg_write_4(SH4_PCILAR0, 0xac000000);
	_reg_write_4(SH4_PCICONF5, 0xac000000);

	/* set PCI local address 1 */
	/* NOTE(review): window 1 maps local 0xac000000 but advertises
	 * PCI base 0x8c000000 in CONF6 -- presumably intentional;
	 * confirm against the board's address map. */
	_reg_write_4(SH4_PCILSR1, (64 - 1) << 20);
	_reg_write_4(SH4_PCILAR1, 0xac000000);
	_reg_write_4(SH4_PCICONF6, 0x8c000000);

	/* Enable I/O, memory, bus-master */
	_reg_write_4(SH4_PCICONF1, PCI_COMMAND_IO_ENABLE
	                           | PCI_COMMAND_MEM_ENABLE
	                           | PCI_COMMAND_MASTER_ENABLE
	                           | PCI_COMMAND_STEPPING_ENABLE
				   | PCI_STATUS_DEVSEL_MEDIUM);

	/* Initialize done. */
	_reg_write_4(SH4_PCICR, PCICR_BASE | PCICR_CFINIT);

	/* set PCI controller interrupt priority */
	intpri_intr_priority(SH4_INTEVT_PCIERR, shpcic_intr_priority[0]);
	intpri_intr_priority(SH4_INTEVT_PCISERR, shpcic_intr_priority[1]);

	/* PCI bus */
#ifdef PCI_NETBSD_CONFIGURE
	ioext  = extent_create("pciio",
	    SH4_PCIC_IO, SH4_PCIC_IO + SH4_PCIC_IO_SIZE - 1,
	    M_DEVBUF, NULL, 0, EX_NOWAIT);
	memext = extent_create("pcimem",
	    SH4_PCIC_MEM, SH4_PCIC_MEM + SH4_PCIC_MEM_SIZE - 1,
	    M_DEVBUF, NULL, 0, EX_NOWAIT);

	pci_configure_bus(NULL, ioext, memext, NULL, 0, sh_cache_line_size);

	extent_destroy(ioext);
	extent_destroy(memext);
#endif

	/* PCI bus */
	memset(&pba, 0, sizeof(pba));
	pba.pba_iot = shpcic_get_bus_io_tag();
	pba.pba_memt = shpcic_get_bus_mem_tag();
	pba.pba_dmat = shpcic_get_bus_dma_tag();
	pba.pba_dmat64 = NULL;
	pba.pba_pc = NULL;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;
	pba.pba_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
	config_found(self, &pba, NULL);
}
Exemplo n.º 20
0
/*
 * smap_rxeof:
 *	SMAP Ethernet receive-completion handler.  Drains completed
 *	receive descriptors starting at rx_done_index: for each received
 *	frame, copies the data out of the RX FIFO into an mbuf (cluster
 *	for large frames), recycles the descriptor, and passes the mbuf
 *	up the stack.  Finally re-enables the RX-descriptor-not-valid
 *	interrupt if any frames were processed.
 */
void
smap_rxeof(void *arg)
{
	struct smap_softc *sc = arg;
	struct smap_desc *d;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m;
	u_int16_t r16, stat;
	u_int32_t *p;
	int i, j, sz, rxsz, cnt;

	FUNC_ENTER();

	i = sc->rx_done_index;

	/* Descriptor ring is 64 entries; index wraps with & 0x3f. */
	for (cnt = 0;; cnt++, i = (i + 1) & 0x3f) {
		m = NULL;
		d = &sc->rx_desc[i];
		stat = d->stat;

		if ((stat & SMAP_RXDESC_EMPTY) != 0) {
			/* No more completed frames. */
			break;
		} else if (stat & 0x7fff) {
			/* Any error bit set: drop the frame. */
			ifp->if_ierrors++;
			goto next_packet;
		}

		sz = d->sz;
		rxsz = ROUND4(sz);	/* FIFO reads are 32-bit units */

		KDASSERT(sz >= ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN);
		KDASSERT(sz <= ETHER_MAX_LEN);

		/* load data from FIFO */
		_reg_write_2(SMAP_RXFIFO_PTR_REG16, d->ptr & 0x3ffc);
		p = sc->rx_buf;
		for (j = 0; j < rxsz; j += sizeof(u_int32_t)) {
			*p++ = _reg_read_4(SMAP_RXFIFO_DATA_REG);
		}

		/* put to mbuf */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n", DEVNAME);
			ifp->if_ierrors++;
			goto next_packet;
		}

		if (sz > (MHLEN - 2)) {
			/* Frame won't fit in the mbuf header: need a cluster. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    DEVNAME);
				m_freem(m);
				m = NULL;
				ifp->if_ierrors++;
				goto next_packet;
			}
		}

		m->m_data += 2; /* for alignment */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = sz;
		memcpy(mtod(m, void *), (void *)sc->rx_buf, sz);

	next_packet:
		/* NOTE(review): if_ipackets is incremented on the error
		 * paths as well (they jump here after if_ierrors++) --
		 * confirm whether that double-counting is intended. */
		ifp->if_ipackets++;

		/* Tell the hardware one frame has been consumed. */
		_reg_write_1(SMAP_RXFIFO_FRAME_DEC_REG8, 1);

		/* free descriptor */
		d->sz	= 0;
		d->ptr	= 0;
		d->stat	= SMAP_RXDESC_EMPTY;
		_wbflush();
		
		if (m != NULL) {
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
			(*ifp->if_input)(ifp, m);
		}
	}
	sc->rx_done_index = i;

	/* Re-enable the RXDNV interrupt if we made progress. */
	r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
	if (((r16 & SPD_INTR_RXDNV) == 0) && cnt > 0) {
		r16  |= SPD_INTR_RXDNV;
		_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
	}

	FUNC_EXIT();
}
Exemplo n.º 21
0
u_int
sh_timecounter_get(struct timecounter *tc)
{
	return 0xffffffff - _reg_read_4(SH_(TCNT2));
}
Exemplo n.º 22
0
//
// MMUDump:
//	Dump the MMU configuration and (on SH3) the full TLB contents
//	via DPRINTF.  The SH4 ITLB/UTLB dump is kept only as disabled
//	sample code because the memory-mapped arrays must be accessed
//	from P2.
//
void
MemoryManager_SHMMU::MMUDump()
{
#define ON(x, c)	((x) & (c) ? '|' : '.')
	u_int32_t r, e, a;
	int i, kmode;

	DPRINTF_SETUP();

	// Privileged mode is required for MMUCR / TLB array access.
	kmode = SetKMode(1);
	DPRINTF((TEXT("MMU:\n")));
	switch (SHArchitecture::cpu_type()) {
	default:
		DPRINTF((TEXT("unknown architecture.\n")));		
		SetKMode(kmode);
		return;
	case 3:
		r = _reg_read_4(SH3_MMUCR);
		if (!(r & SH3_MMUCR_AT))
			goto disabled;

		// MMU configuration.
		DPRINTF((TEXT("%s index-mode, %s virtual storage mode\n"),
		    r & SH3_MMUCR_IX
		    ? TEXT("ASID + VPN") : TEXT("VPN only"),
		    r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));

		// Dump TLB.
		DPRINTF((TEXT("---TLB---\n")));
		DPRINTF((TEXT("   VPN    ASID    PFN     VDCG PR SZ\n")));
		for (i = 0; i < SH3_MMU_WAY; i++) {
			DPRINTF((TEXT(" [way %d]\n"), i));
			for (e = 0; e < SH3_MMU_ENTRY; e++) {
				// address/data array common offset.
				a = (e << SH3_MMU_VPN_SHIFT) |
				    (i << SH3_MMU_WAY_SHIFT);

				// Address array: VPN + ASID.
				r = _reg_read_4(SH3_MMUAA | a);
				DPRINTF((TEXT("0x%08x %3d"),
				    r & SH3_MMUAA_D_VPN_MASK,
				    r & SH3_MMUAA_D_ASID_MASK));
				// Data array: PPN + attribute bits.
				r = _reg_read_4(SH3_MMUDA | a);
				DPRINTF((TEXT(" 0x%08x %c%c%c%c  %d %dK\n"),
				    r & SH3_MMUDA_D_PPN_MASK,
				    ON(r, SH3_MMUDA_D_V),
				    ON(r, SH3_MMUDA_D_D),
				    ON(r, SH3_MMUDA_D_C),
				    ON(r, SH3_MMUDA_D_SH),
				    (r & SH3_MMUDA_D_PR_MASK) >>
				    SH3_MMUDA_D_PR_SHIFT,
				    r & SH3_MMUDA_D_SZ ? 4 : 1));
			}
		}

		break;
	case 4:
		r = _reg_read_4(SH4_MMUCR);
		if (!(r & SH4_MMUCR_AT))
			goto disabled;
		// NOTE(review): SH3_MMUCR_SV tested against SH4 MMUCR --
		// presumably the SV bit position is shared; confirm.
		DPRINTF((TEXT("%s virtual storage mode,"), 
		    r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));
		DPRINTF((TEXT(" SQ access: (priviledge%S)"),
		    r & SH4_MMUCR_SQMD ? "" : "/user"));
		DPRINTF((TEXT("\n")));
#if sample_code
		//
		// Memory mapped TLB accessing program must run on P2. 
		// This is sample code.
		// 
		// Dump ITLB
		DPRINTF((TEXT("---ITLB---\n")));
		for (i = 0; i < 4; i++) {
			e = i << SH4_ITLB_E_SHIFT;
			r = _reg_read_4(SH4_ITLB_AA | e);
			DPRINTF((TEXT("%08x %3d _%c"),
			    r & SH4_ITLB_AA_VPN_MASK,
			    r & SH4_ITLB_AA_ASID_MASK,
			    ON(r, SH4_ITLB_AA_V)));
			r = _reg_read_4(SH4_ITLB_DA1 | e);
			DPRINTF((TEXT(" %08x %c%c_%c_ %1d"),
			    r & SH4_ITLB_DA1_PPN_MASK,
			    ON(r, SH4_ITLB_DA1_V),
			    ON(r, SH4_ITLB_DA1_C),
			    ON(r, SH4_ITLB_DA1_SH),
			    (r & SH4_ITLB_DA1_PR) >> SH4_UTLB_DA1_PR_SHIFT
			    ));
			r = _reg_read_4(SH4_ITLB_DA2 | e);
			DPRINTF((TEXT(" %c%d\n"),
			    ON(r, SH4_ITLB_DA2_TC), 
			    r & SH4_ITLB_DA2_SA_MASK));
		}
		// Dump UTLB
		DPRINTF((TEXT("---UTLB---\n")));
		for (i = 0; i < 64; i++) {
			e = i << SH4_UTLB_E_SHIFT;
			r = _reg_read_4(SH4_UTLB_AA | e);
			// NOTE(review): argument order appears mismatched
			// with the "%08x %3d %c%c" format (the %3d receives
			// ON(...D), and the ASID lands on a %c) -- compare
			// with the ITLB loop above.  Dead code (#if
			// sample_code), but would need fixing if enabled.
			DPRINTF((TEXT("%08x %3d %c%c"),
			    r & SH4_UTLB_AA_VPN_MASK,
			    ON(r, SH4_UTLB_AA_D),
			    ON(r, SH4_UTLB_AA_V),
			    r & SH4_UTLB_AA_ASID_MASK));
			r = _reg_read_4(SH4_UTLB_DA1 | e);
			DPRINTF((TEXT(" %08x %c%c%c%c%c %1d"),
			    r & SH4_UTLB_DA1_PPN_MASK,
			    ON(r, SH4_UTLB_DA1_V),
			    ON(r, SH4_UTLB_DA1_C),
			    ON(r, SH4_UTLB_DA1_D),
			    ON(r, SH4_UTLB_DA1_SH),
			    ON(r, SH4_UTLB_DA1_WT),
			    (r & SH4_UTLB_DA1_PR_MASK) >> SH4_UTLB_DA1_PR_SHIFT
			    ));
			r = _reg_read_4(SH4_UTLB_DA2 | e);
			DPRINTF((TEXT(" %c%d\n"),
			    ON(r, SH4_UTLB_DA2_TC),
			    r & SH4_UTLB_DA2_SA_MASK));
		}
#endif //sample_code
		break;
	}

	SetKMode(kmode);
	return;

 disabled:
	DPRINTF((TEXT("disabled.\n")));
	SetKMode(kmode);
#undef ON
}