Example #1
void octeon_check_cpu_bist(void)
{
    const int coreid = octeon_get_core_num();
    uint64_t mask;
    uint64_t bist_val;

    /* Check BIST results for COP0 registers */
    mask     = 0x1f00000000ull;
    bist_val = __read_64bit_c0_register($27,0);
    if (bist_val & mask)
        printk("Core%d BIST Failure: CacheErr(icache) = 0x%lx\n", coreid, bist_val);

    bist_val = __read_64bit_c0_register($27,1);
    if (bist_val & 1)
        printk("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%lx\n", coreid, bist_val);

    mask     = 0xfc00000000000000ull;
    bist_val = __read_64bit_c0_register($11,7);
    if (bist_val & mask)
        printk("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%lx\n", coreid, bist_val);

    __write_64bit_c0_register($27,1,0);

    mask     = 0x18ull;
    bist_val = octeon_read_csr(OCTEON_L2D_ERR);
#ifdef CONFIG_NK_SUPPORT_CN5010
    octeon_write_csr(OCTEON_L2D_ERR, bist_val); /* Clear error bits */
#else
    octeon_write_csr(OCTEON_L2D_ERR, mask); /* Clear error bits */
#endif
    if (bist_val & mask)
        printk("Core%d L2 Parity error: L2D_ERR = 0x%lx\n", coreid, bist_val);
}
Example #2
static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));

		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		/* write the cleared value back; without this write the
		 * masked value above would simply be discarded
		 */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no),
				 reg_val);

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

		/* clear IPTR */
		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

		/* set DPTR */
		reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
				 reg_val);
	}
}
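The function above repeats one idiom per queue: read a control CSR, clear some bits, set others, and write the result back. A minimal sketch of that read-modify-write pattern follows; octeon_csr_rmw64 is a hypothetical helper name, not part of the driver.

/* Hypothetical helper, for illustration only: read-modify-write a 64-bit
 * CSR, clearing the bits in 'clear' and then setting the bits in 'set'.
 */
static u64 octeon_csr_rmw64(struct octeon_device *oct, u32 reg,
			    u64 clear, u64 set)
{
	u64 val = octeon_read_csr64(oct, reg);

	val &= ~clear;
	val |= set;
	octeon_write_csr64(oct, reg, val);
	return val;
}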
Example #3
static void octeon_mask_irq_all(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8)
    {
        /* Core local interrupts, irq 0-7 */
        clear_c0_status(0x100 << irq);
    }
    else if (irq<72)
    {
        /* Interrupts from the CIU, irq 8-71 */
        uint64_t bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */

#ifdef CONFIG_SMP
        int cpu;
        for (cpu=0; cpu<NR_CPUS; cpu++)
        {
            if (cpu_present(cpu))
            {
                uint64_t coreid = cpu_logical_map(cpu);
                uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
                en0 &= ~(1ull<<bit);
                octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
            }
        }
        /* We need to do a read after the last update to make sure all of
            them are done */
        octeon_read_csr(OCTEON_CIU_INTX_EN0(octeon_get_core_num()*2));
#else
        const uint64_t coreid = octeon_get_core_num();
        uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
        en0 &= ~(1ull<<bit);
        octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
        octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
#endif
    }
    else if (irq<88)
    {
        /* Interrupts from the master 8259, irq 80-87 */
        outb(inb(0x21) | (1<<(irq-80)), 0x21);
    }
    else if (irq<96)
    {
        /* Interrupts from the slave 8259, irq 88-95 */
        outb(inb(0xa1) | (1<<(irq-88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
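For completeness, here is a hedged sketch of the inverse operation. octeon_unmask_irq is a hypothetical name, not part of the original source; it sets exactly the bits the masking paths above clear. An SMP-aware variant would loop over cpu_present() just like octeon_mask_irq_all().

static void octeon_unmask_irq(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8)
    {
        /* Core local interrupts, irq 0-7 */
        set_c0_status(0x100 << irq);
    }
    else if (irq < 72)
    {
        /* Interrupts from the CIU, irq 8-71 */
        const uint64_t coreid = octeon_get_core_num();
        uint64_t bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
        en0 |= 1ull << bit;
        octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
        octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
    }
    else if (irq < 88)
    {
        /* Master 8259: clearing the IMR bit unmasks, irq 80-87 */
        outb(inb(0x21) & ~(1 << (irq - 80)), 0x21);
    }
    else if (irq < 96)
    {
        /* Slave 8259: clearing the IMR bit unmasks, irq 88-95 */
        outb(inb(0xa1) & ~(1 << (irq - 88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}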
Example #4
static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
			       *intr_coal)
{
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u32 time_threshold, rx_coalesce_usecs;

	if (!intr_coal->rx_coalesce_usecs)
		rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
	else
		rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

	/* Disable adaptive interrupt modulation */
	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
	if (ret)
		return ret;

	/* Config Time based interrupt values */
	time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
	CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);

	return 0;
}
Example #5
static void octeon_mask_irq(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8)
    {
        /* Core local interrupts, irq 0-7 */
        clear_c0_status(0x100 << irq);
    }
    else if (irq<72)
    {
        /* Interrupts from the CIU, irq 8-71 */
        const uint64_t coreid = octeon_get_core_num();
        uint64_t bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
        en0 &= ~(1ull<<bit);
        octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
        octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
    }
    else if (irq<88)
    {
        /* Interrupts from the master 8259, irq 80-87 */
        outb(inb(0x21) | (1<<(irq-80)), 0x21);
    }
    else if (irq<96)
    {
        /* Interrupts from the slave 8259, irq 88-95 */
        outb(inb(0xa1) | (1<<(irq-88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
Example #6
static void octeon_irq_set_affinity(unsigned int irq, cpumask_t dest)
{
#ifdef CONFIG_SMP

    /* Interrupts from the CIU, irq 8-71 */
    if ((irq >= 8) && (irq < 72))
    {
        int             cpu;
        unsigned long   flags;
        irq_desc_t *    desc = irq_desc + irq;
        uint64_t        bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        spin_lock_irqsave(&desc->lock, flags);
        for (cpu=0; cpu<NR_CPUS; cpu++)
        {
            if (cpu_present(cpu))
            {
                uint64_t coreid = cpu_logical_map(cpu);
                uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
                if (cpu_isset(cpu, dest))
                    en0 |= 1ull<<bit;
                else
                    en0 &= ~(1ull<<bit);
                octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
            }
        }
        /* We need to do a read after the last update to make sure all of
            them are done */
        octeon_read_csr(OCTEON_CIU_INTX_EN0(octeon_get_core_num()*2));
        spin_unlock_irqrestore(&desc->lock, flags);
    }
#endif
}
Example #7
static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	u64 pkt_in_done;

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = readq(iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
		       iq->inst_cnt_reg);
	}
	iq->reset_instr_cnt = 0;
}
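A hedged usage sketch follows, assuming the conventional doorbell protocol: after queueing new commands, software writes the count to the doorbell address saved above so the hardware starts fetching them. The helper name is hypothetical.

/* Hypothetical helper: ring the input-queue doorbell for 'new_cmds'
 * freshly queued commands.
 */
static void octeon_ring_iq_doorbell_sketch(struct octeon_instr_queue *iq,
					   u32 new_cmds)
{
	writel(new_cmds, iq->doorbell_reg);
}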
Example #8
static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 (droq->buffer_size | (OCT_RH_SIZE << 16)));

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
}
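In the same hedged spirit, the credit register saved above is how refilled receive buffers are typically handed back to the hardware; the helper name is hypothetical.

/* Hypothetical helper: report 'count' refilled buffers back to the
 * hardware through the packets-credit register.
 */
static void octeon_droq_refill_credit_sketch(struct octeon_droq *droq,
					     u32 count)
{
	writel(count, droq->pkts_credit_reg);
}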
Example #9
static int __init octeon_pci_setup(void)
{
	int index;
	octeon_pci_bar1_indexx_t bar1_index;
	octeon_npi_mem_access_subid_t mem_access;
	/* PCI I/O and PCI MEM values */
	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
	ioport_resource.start = 0;
	ioport_resource.end   = OCTEON_PCI_IOSPACE_SIZE - 1;

	if (!(octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST))
	{
		printk("Not in host mode, PCI Controller not initialized\n");
		return 0;
	}

	octeon_pci_initialize();
	mem_access.u64 = 0;
	mem_access.s.esr = 1;	/**< Endian-Swap on read. */
	mem_access.s.esw = 1;	/**< Endian-Swap on write. */
	mem_access.s.nsr = 0;	/**< No-Snoop on read. */
	mem_access.s.nsw = 0;	/**< No-Snoop on write. */
	mem_access.s.ror = 0;	/**< Relax Read on read. */
	mem_access.s.row = 0;	/**< Relax Order on write. */
	mem_access.s.ba = OCTEON_PCI_MEMSPACE_BASE >> 36;	/**< PCI Address bits [63:36]. */
	octeon_write_csr(OCTEON_NPI_MEM_ACCESS_SUBID3, mem_access.u64);

    /* place Octeon BAR 0 at zero, so pci scan remaps */
	npi_write32(OCTEON_NPI_PCI_CFG04, 0);
	npi_write32(OCTEON_NPI_PCI_CFG05, 0);

	/* Remap the Octeon BAR 1 to map 0-128MB */
	bar1_index.u32 = 0;
	bar1_index.s.ca = 1;	/* 1 = Put in L2 cache */
	bar1_index.s.end_swp = 1;	/* 1 = Byte swapping */
	bar1_index.s.addr_v = 1;	/* This entry is valid */
	for (index = 0; index < 32; index++) {
		bar1_index.s.addr_idx = index;
		npi_write32(OCTEON_NPI_PCI_BAR1_INDEXX(index), bar1_index.u32);
	}
	npi_write32(OCTEON_NPI_PCI_CFG06, 0);
	npi_write32(OCTEON_NPI_PCI_CFG07, 0);

    /* place Octeon BAR 2 at zero, so pci scan remaps */
	npi_write32(OCTEON_NPI_PCI_CFG08, 0);
	npi_write32(OCTEON_NPI_PCI_CFG09, 0);

	register_pci_controller(&octeon_pci_controller);
	return 0;
}
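Since the loop above programs all 32 BAR1 index entries to cover 0-128MB, each entry maps a 4MB window. A hypothetical helper (not in the original source) for picking the entry that covers a given physical address:

/* Illustration only: 128MB / 32 entries = 4MB per BAR1 index entry,
 * so the covering entry for a physical address is simply addr >> 22.
 */
static inline int octeon_bar1_index_for(uint64_t phys_addr)
{
	return (int)(phys_addr >> 22);
}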
Example #10
static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
		u64 reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		u32 reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
			reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
		}
	}

	return 0;
}
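A hedged sketch of the inverse teardown, assuming that clearing the same RING_ENB bits is what disables the rings; a real teardown would additionally wait for in-flight traffic to drain. The function name is hypothetical.

static void cn23xx_disable_vf_io_queues_sketch(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
		u64 reg_val = octeon_read_csr64(
		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val &= ~CN23XX_PKT_INPUT_CTL_RING_ENB;
		octeon_write_csr64(
		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
	}
	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		u32 reg_val = octeon_read_csr(
		    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_RING_ENB;
		octeon_write_csr(
		    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
	}
}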
Example #11
static int
oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
{
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u32 rx_max_coalesced_frames;

	if (!intr_coal->rx_max_coalesced_frames)
		rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
	else
		rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;

	/* Disable adaptive interrupt modulation */
	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
	if (ret)
		return ret;

	/* Config Cnt based interrupt values */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 rx_max_coalesced_frames);
	CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
	return 0;
}
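A hedged sketch of how an ethtool set-coalesce handler might dispatch to this helper and to oct_cfg_rx_intrtime() from Example #4; GET_LIO and the entry-point name are assumptions here.

static int lio_set_rx_coalesce_sketch(struct net_device *netdev,
				      struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);	/* assumed netdev-to-lio accessor */
	int ret;

	/* Packet-count threshold first, then the time threshold */
	ret = oct_cfg_rx_intrcnt(lio, intr_coal);
	if (ret)
		return ret;

	return oct_cfg_rx_intrtime(lio, intr_coal);
}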
Example #12
static int octeon_l2_lock_line(uint64_t addr)
{
    int                     retval = 0;
    octeon_l2c_dbg_t        l2cdbg = {0};
    octeon_l2c_lckbase_t    lckbase = {0};
    octeon_l2c_lckoff_t     lckoff = {0};
    octeon_l2t_err_t        l2t_err;

    addr &= 0x7fffffff;

    /* Clear l2t error bits if set */
    l2t_err.u64 = octeon_read_csr(OCTEON_L2T_ERR);
    l2t_err.s.lckerr = 1;
    l2t_err.s.lckerr2 = 1;
    octeon_write_csr(OCTEON_L2T_ERR, l2t_err.u64);

    addr &= ~(cpu_icache_line_size()-1);

    /* Set this core as debug core */
    l2cdbg.s.ppnum = octeon_get_core_num();
    mb();
    octeon_write_csr(OCTEON_L2C_DBG, l2cdbg.u64);
    octeon_read_csr(OCTEON_L2C_DBG);

    lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
    octeon_write_csr(OCTEON_L2C_LCKOFF, lckoff.u64);
    octeon_read_csr(OCTEON_L2C_LCKOFF);

    if (((octeon_l2c_cfg_t)(octeon_read_csr(OCTEON_L2C_CFG))).s.idxalias)
    {
        struct cpuinfo_mips *c = &current_cpu_data;
        int l2_set_bits;
        int alias_shift;
        uint64_t addr_tmp;

        switch (c->cputype)
        {
            case CPU_CAVIUM_CN56XX:
            case CPU_CAVIUM_CN58XX:
                l2_set_bits =  11; /* 2048 sets */
                break;
            case CPU_CAVIUM_CN38XX:
                l2_set_bits =  10; /* 1024 sets */
                break;
            case CPU_CAVIUM_CN31XX:
                l2_set_bits =  9; /* 512 sets */
                break;
            case CPU_CAVIUM_CN30XX:
                l2_set_bits =  8; /* 256 sets */
                break;
            default:
                panic("Unknown L2 cache\n");
                break;
        }

        alias_shift = 7 + 2 * l2_set_bits - 1;
        addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> l2_set_bits;
        lckbase.s.lck_base = addr_tmp >> 7;
    }
    else
    {
        /* Index aliasing disabled: the lock base is just the address in
           units of 128-byte cache lines */
        lckbase.s.lck_base = addr >> 7;
    }

    lckbase.s.lck_ena = 1;
    octeon_write_csr(OCTEON_L2C_LCKBASE, lckbase.u64);
    octeon_read_csr(OCTEON_L2C_LCKBASE);    /* Make sure it gets there */

    /* Touch the line so it is brought into L2 and locked */
    *(volatile uint64_t *)octeon_phys_to_ptr(addr);

    lckbase.s.lck_ena = 0;
    octeon_write_csr(OCTEON_L2C_LCKBASE, lckbase.u64);
    octeon_read_csr(OCTEON_L2C_LCKBASE);    /* Make sure it gets there */

    /* Stop being the debug core */
    octeon_write_csr(OCTEON_L2C_DBG, 0);
    octeon_read_csr(OCTEON_L2C_DBG);

    /* Report failure if a lock error was flagged */
    l2t_err.u64 = octeon_read_csr(OCTEON_L2T_ERR);
    if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
        retval = 1;

    return retval;
}
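A hedged sketch of the range variant that octeon_hal_init() in Example #13 relies on, assuming OCTEON's 128-byte L2 cache line: walk the range one line at a time and lock each line.

int octeon_l2_lock_range_sketch(uint64_t start, uint64_t len)
{
    int retval = 0;
    uint64_t end = start + len;

    start &= ~127ull;   /* align down to the 128-byte L2 line */
    while (start < end)
    {
        retval |= octeon_l2_lock_line(start);
        start += 128;
    }
    return retval;
}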
Example #13
void octeon_hal_init(void)
{
    /* Make sure we got the boot descriptor block */
    if (octeon_boot_desc_ptr == (void *)0xDEADBEEFULL)
        panic("Boot descriptor block wasn't passed properly\n");

    octeon_bootinfo = octeon_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);

    spin_lock_init(&octeon_led_lock);
#ifndef CONFIG_CAVIUM_OCTEON_SIMULATOR
    /* Only enable the LED controller if we're running on a CN38XX or CN58XX.
        The CN30XX and CN31XX don't have an LED controller */
    if ((current_cpu_data.cputype == CPU_CAVIUM_CN38XX) ||
        (current_cpu_data.cputype == CPU_CAVIUM_CN58XX))
    {
        octeon_write_csr(OCTEON_LED_EN, 0);
        octeon_write_csr(OCTEON_LED_PRT, 0);
        octeon_write_csr(OCTEON_LED_DBG, 0);
        octeon_write_csr(OCTEON_LED_PRT_FMT, 0);
        octeon_write_csr(OCTEON_LED_UDD_CNTX(0), 32);
        octeon_write_csr(OCTEON_LED_UDD_CNTX(1), 32);
        octeon_write_csr(OCTEON_LED_UDD_DATX(0), 0);
        octeon_write_csr(OCTEON_LED_UDD_DATX(1), 0);
        octeon_write_csr(OCTEON_LED_EN, 1);
    }
#endif

#if CONFIG_CAVIUM_RESERVE32
    {
        cvmx_bootmem_desc_t *bootmem_desc = octeon_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
        octeon_reserve32_memory = octeon_phy_mem_named_block_alloc(bootmem_desc, CONFIG_CAVIUM_RESERVE32<<20, 0, 0, 2<<20, "CAVIUM_RESERVE32");
        if (octeon_reserve32_memory == 0)
            printk("Failed to allocate CAVIUM_RESERVE32 memory area\n");
    }
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
    if (octeon_read_csr(OCTEON_L2D_FUS3) & (3ull<<34))
    {
        printk("Skipping L2 locking due to reduced L2 cache size\n");
    }
    else
    {
	extern asmlinkage void octeon_handle_irq(void);
        uint64_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
        octeon_l2_lock_range(ebase, 0x100);        /* TLB refill */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
        octeon_l2_lock_range(ebase + 0x180, 0x80);  /* General exception */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
        octeon_l2_lock_range(ebase + 0x200, 0x80);  /* Interrupt handler */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
        octeon_l2_lock_range((uint64_t)octeon_handle_irq, 0x280);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
        octeon_l2_lock_range((uint64_t)memcpy, 0x480);
#endif
    }
#endif
}
Example #14
/**
 * Setup the counters using the current config
 */
static void proc_perf_setup(void)
{
    int i;
    proc_perf_l2_control_t l2control;

    proc_perf_counter_control[0] = 0;
    proc_perf_counter_control[1] = 0;
    proc_perf_l2counter_control[0] = 0;
    proc_perf_l2counter_control[1] = 0;
    proc_perf_l2counter_control[2] = 0;
    proc_perf_l2counter_control[3] = 0;

    /* Cleanup junk on end of param strings */
    clean_string(counter0, sizeof(counter0));
    clean_string(counter1, sizeof(counter1));
    clean_string(l2counter0, sizeof(l2counter0));
    clean_string(l2counter1, sizeof(l2counter1));
    clean_string(l2counter2, sizeof(l2counter2));
    clean_string(l2counter3, sizeof(l2counter3));

    /* Set the core counters to match the string parameters */
    for (i=0; i<PROC_PERF_CORE_MAX; i++)
    {
        if (proc_perf_label[i])
        {
            if (strcmp(proc_perf_label[i], counter0) == 0)
                proc_perf_counter_control[0] = i;
            if (strcmp(proc_perf_label[i], counter1) == 0)
                proc_perf_counter_control[1] = i;
        }
    }

    /* Set the L2 counters to match the string parameters */
    for (i=0; i<PROC_PERF_L2_MAX; i++)
    {
        if (proc_perf_l2label[i])
        {
            if (strcmp(proc_perf_l2label[i], l2counter0) == 0)
                proc_perf_l2counter_control[0] = i;
            if (strcmp(proc_perf_l2label[i], l2counter1) == 0)
                proc_perf_l2counter_control[1] = i;
            if (strcmp(proc_perf_l2label[i], l2counter2) == 0)
                proc_perf_l2counter_control[2] = i;
            if (strcmp(proc_perf_l2label[i], l2counter3) == 0)
                proc_perf_l2counter_control[3] = i;
        }
    }

    /* Update strings to match final config */
    strcpy(counter0, proc_perf_label[proc_perf_counter_control[0]]);
    strcpy(counter1, proc_perf_label[proc_perf_counter_control[1]]);
    strcpy(l2counter0, proc_perf_l2label[proc_perf_l2counter_control[0]]);
    strcpy(l2counter1, proc_perf_l2label[proc_perf_l2counter_control[1]]);
    strcpy(l2counter2, proc_perf_l2label[proc_perf_l2counter_control[2]]);
    strcpy(l2counter3, proc_perf_l2label[proc_perf_l2counter_control[3]]);

    on_each_cpu(proc_perf_setup_counters, NULL, 1, 1);

    l2control.u64 = 0;
    l2control.s.cnt3ena = 1;
    l2control.s.cnt3clr = 1;
    l2control.s.cnt3sel = proc_perf_l2counter_control[3];
    l2control.s.cnt2ena = 1;
    l2control.s.cnt2clr = 1;
    l2control.s.cnt2sel = proc_perf_l2counter_control[2];
    l2control.s.cnt1ena = 1;
    l2control.s.cnt1clr = 1;
    l2control.s.cnt1sel = proc_perf_l2counter_control[1];
    l2control.s.cnt0ena = 1;
    l2control.s.cnt0clr = 1;
    l2control.s.cnt0sel = proc_perf_l2counter_control[0];

    octeon_write_csr(OCTEON_L2C_PFCTL, l2control.u64);
}
Example #15
static int octeon_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int combined)
{
    uint64_t data = 0;
    int i;
    int timeout = 0;
    octeon_mio_tws_sw_twsi_t temp, mio_tws_sw_twsi;
    octeon_mio_tws_sw_twsi_ext_t mio_tws_sw_twsi_ext;

    DEB2("addr: 0x%04x, len: %d, flags: 0x%x, buf[0] = %x\n", msg->addr, msg->len, msg->flags, msg->buf[0]);


    mio_tws_sw_twsi.u64 = 0x0;
    mio_tws_sw_twsi_ext.u64 = 0x0;	// must be cleared: it is written to the CSR below even on reads

    mio_tws_sw_twsi.s.v = 1;

    //ten bit address op<1> = 1
    if( msg->flags & I2C_M_TEN) mio_tws_sw_twsi.s.op |= 0x2;
    mio_tws_sw_twsi.s.a = msg->addr & 0x3ff;

    // check msg->len: only transfers of 0 to 8 bytes are supported
    if( msg->len > 8 ){
	printk("%s %d Error len msg->len %d\n", __FILE__, __LINE__, msg->len);
	return (-1);
    }
    mio_tws_sw_twsi.s.sovr = 1;			// size override.
    if ( msg->len == 0 )	
       mio_tws_sw_twsi.s.size = 0;
    else
       mio_tws_sw_twsi.s.size = msg->len-1;	// Size: 0 = 1 byte, 1 = 2 bytes, ..., 7 = 8 bytes

    if( msg->flags & I2C_M_RD ){
	mio_tws_sw_twsi.s.r = 1;		// Enable Read bit 
    }else{	
	for(i =0; i <= mio_tws_sw_twsi.s.size; i++){
	    data = data << 8;	
	    data |= msg->buf[i];
	}

	mio_tws_sw_twsi.s.d = data;
	mio_tws_sw_twsi_ext.s.d = data >> 32;	
    }
	
#ifdef I2C_OCTEON_DEBUG
    if ( mio_tws_sw_twsi.s.r == 1 )
	printk("twsi-read  op: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
    else
        printk("twsi-write op: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
#endif

    octeon_write_csr(OCTEON_MIO_TWS_SW_TWSI_EXT, mio_tws_sw_twsi_ext.u64);
    octeon_write_csr(OCTEON_MIO_TWS_SW_TWSI, mio_tws_sw_twsi.u64);


    // Poll: wait for the transfer to complete, up to the timeout (10ms).
    do{
	temp.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI);	
	udelay(1);
    }while (temp.s.v && (timeout++ < I2C_MAX_TIMEOUT));

    mio_tws_sw_twsi.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI);

    if (timeout >= I2C_MAX_TIMEOUT) {
	printk("Octeon twsi I2C Timeout!\n");
	octeon_i2c_reset();
	return -EIO;
    }

    //transfer ERROR
    if (!mio_tws_sw_twsi.s.r){
	octeon_i2c_reset();
	return -EIO;
    }

    if (msg->flags & I2C_M_RD){

	mio_tws_sw_twsi_ext.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI_EXT);
	data = ((uint64_t) mio_tws_sw_twsi_ext.s.d << 32) | mio_tws_sw_twsi.s.d;
	
#ifdef I2C_OCTEON_DEBUG
	printk("twsi-read result: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
#endif

	for(i = mio_tws_sw_twsi.s.size; i >= 0; i--){
		msg->buf[i] = data;
		data = data >> 8; 
	}	
    }

    return 0;
}
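A hedged sketch of the i2c_algorithm glue that would drive octeon_i2c_xfer_msg() once per message; the i2c core expects the number of messages completed on success. The function name and the meaning of the 'combined' argument are assumptions.

static int octeon_i2c_xfer_sketch(struct i2c_adapter *adap,
                                  struct i2c_msg *msgs, int num)
{
    int i, ret;

    for (i = 0; i < num; i++)
    {
        ret = octeon_i2c_xfer_msg(adap, &msgs[i], num > 1);
        if (ret < 0)
            return ret;
    }
    return num;
}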
Example #16
/**
 * Low level initialize the Octeon PCI controller
 *
 * @return
 */
static inline void octeon_pci_initialize(void)
{
	int64_t stat;
	octeon_pci_cfg01_t cfg01;
	octeon_npi_ctl_status_t ctl_status;
	octeon_pci_ctl_status_2_t ctl_status_2;
	octeon_pci_cfg19_t cfg19;
	octeon_pci_cfg16_t cfg16;
	octeon_pci_cfg22_t cfg22;
	octeon_pci_cfg56_t cfg56;

	/* Reset the PCI Bus */
	octeon_write_csr(OCTEON_CIU_SOFT_PRST, 0x1);
	stat = octeon_read_csr(OCTEON_CIU_SOFT_PRST);

	_mdelay(2);		/* Hold  PCI reset for 2 ms */

	ctl_status.u64 = 0;
	ctl_status.s.max_word = 1;
	ctl_status.s.timer = 1;
	octeon_write_csr(OCTEON_NPI_CTL_STATUS, ctl_status.u64);

	/* Deassert PCI reset and advertise PCI-X Host Mode Device Capability (64b) */
	octeon_write_csr(OCTEON_CIU_SOFT_PRST, 0x4);
	stat = octeon_read_csr(OCTEON_CIU_SOFT_PRST);

	_mdelay(2);		/* Wait 2 ms after deasserting PCI reset */

	ctl_status_2.u32 = 0;
	ctl_status_2.s.bar2pres = 1;   /* bar2 present */
	ctl_status_2.s.bar2_enb = 1;   /* bar2 enable  */
	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set before any PCI reads. */
	npi_write32(OCTEON_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
	_mdelay(4);		/* Wait 4 ms before doing PCI reads */

	ctl_status_2.u32 = npi_read32(OCTEON_NPI_PCI_CTL_STATUS_2);
	printk("PCI Status: %s %s-bit\n",
	       ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
	       ctl_status_2.s.ap_64ad ? "64" : "32");

	/*
	 ** TDOMC must be set to one in PCI mode. TDOMC should be set to 4
	 ** in PCI-X mode to allow four outstanding splits. Otherwise,
	 ** should not change from its reset value. Don't write PCI_CFG19
	 ** in PCI mode (0x82000001 reset value), write it to 0x82000004
	 ** after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
	 ** MRBCM -> must be one.
	 */
	if (ctl_status_2.s.ap_pcix) {
		cfg19.u32 = 0;
		cfg19.s.tdomc = 4;	/* Target Delayed/Split request
					   outstanding maximum count. [1..31]
					   and 0=32.  NOTE: If the user
					   programs these bits beyond the
					   Designed Maximum outstanding count,
					   then the designed maximum table
					   depth will be used instead.  No
					   additional Deferred/Split
					   transactions will be accepted if
					   this outstanding maximum count is
					   reached. Furthermore, no additional
					   deferred/split transactions will be
					   accepted if the I/O delay/ I/O
					   Split Request outstanding maximum
					   is reached. */
		cfg19.s.mdrrmc = 2;	/* Master Deferred Read Request Outstanding Max
					   Count (PCI only).
					   CR4C[26:24]  Max SAC cycles   MAX DAC cycles
					   000              8                4
					   001              1                0
					   010              2                1
					   011              3                1
					   100              4                2
					   101              5                2
					   110              6                3
					   111              7                3
					   For example, if these bits are programmed to
					   100, the core can support 2 DAC cycles, 4 SAC
					   cycles or a combination of 1 DAC and 2 SAC cycles.
					   NOTE: For the PCI-X maximum outstanding split
					   transactions, refer to CRE0[22:20]  */

		cfg19.s.mrbcm = 1;	/* Master Request (Memory Read) Byte Count/Byte
					   Enable select.
					   0 = Byte Enables valid. In PCI mode, a burst
					   transaction cannot be performed using
					   Memory Read command=4'h6.
					   1 = DWORD Byte Count valid (default). In PCI
					   Mode, the memory read byte enables are
					   automatically generated by the core.
					   Note: N3 Master Request transaction sizes are
					   always determined through the
					   am_attr[<35:32>|<7:0>] field.  */
		npi_write32(OCTEON_NPI_PCI_CFG19, cfg19.u32);
	}

	cfg01.u32 = 0;
	cfg01.s.msae = 1;	/* Memory Space Access Enable */
	cfg01.s.me = 1;		/* Master Enable */
	cfg01.s.pee = 1;	/* PERR# Enable */
	cfg01.s.see = 1;	/* System Error Enable */
	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */

	npi_write32(OCTEON_NPI_PCI_CFG01, cfg01.u32);
	npi_read32(OCTEON_NPI_PCI_CFG01);

#ifdef USE_OCTEON_INTERNAL_ARBITER
	/*
	 ** When OCTEON is a PCI host, most systems will use OCTEON's
	 ** internal arbiter, so must enable it before any PCI/PCI-X
	 ** traffic can occur.
	 */
	{
		octeon_npi_pci_int_arb_cfg_t pci_int_arb_cfg;

		pci_int_arb_cfg.u64 = 0;
		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
		octeon_write_csr(OCTEON_NPI_PCI_INT_ARB_CFG,
				 pci_int_arb_cfg.u64);
	}
#endif				/* USE_OCTEON_INTERNAL_ARBITER */

	/*
	 ** Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
	 ** TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
	 ** 1..7.
	 */
	cfg16.u32 = 0;
	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
	npi_write32(OCTEON_NPI_PCI_CFG16, cfg16.u32);

	/*
	 ** Should be written to 0x4ff00. MTTV -> must be zero.
	 ** FLUSH -> must be 1. MRV -> should be 0xFF.
	 */
	cfg22.u32 = 0;
	cfg22.s.mrv = 0xff;	/* Master Retry Value [1..255] and 0=infinite */
	cfg22.s.flush = 1;	/* AM_DO_FLUSH_I control NOTE: This
				   bit MUST BE ONE for proper N3K
				   operation */
	npi_write32(OCTEON_NPI_PCI_CFG22, cfg22.u32);

	/*
	 ** MOST Indicates the maximum number of outstanding splits (in -1
	 ** notation) when OCTEON is in PCI-X mode.  PCI-X performance is
	 ** affected by the MOST selection.  Should generally be written
	 ** with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
	 ** depending on the desired MOST of 3, 2, 1, or 0, respectively.
	 */
	cfg56.u32 = 0;
	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count [0=512B,1=1024B,2=2048B,3=4096B] */
	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1 .. 7=32] */

	npi_write32(OCTEON_NPI_PCI_CFG56, cfg56.u32);

	/*
	 ** Affects PCI performance when OCTEON services reads to its
	 ** BAR1/BAR2. Refer to Section 10.6.1.  The recommended values are
	 ** 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
	 ** PCI_READ_CMD_E, respectively. Note that these values differ
	 ** from their reset values.
	 */
	npi_write32(OCTEON_NPI_PCI_READ_CMD_6, 0x22);
	npi_write32(OCTEON_NPI_PCI_READ_CMD_C, 0x33);
	npi_write32(OCTEON_NPI_PCI_READ_CMD_E, 0x33);
}
/**
 *
 * @return
 */
void octeon_user_io_init(void)
{
    octeon_cvmemctl_t cvmmemctl;
    octeon_iob_fau_timeout_t fau_timeout;
    octeon_pow_nw_tim_t nm_tim;

    /* Get the current settings for CP0_CVMMEMCTL_REG */
    cvmmemctl.u64 = __read_64bit_c0_register($11, 7);

    cvmmemctl.s.dismarkwblongto = 0;        /**< R/W If set, marked write-buffer entries time out the same
                                                as other entries; if clear, marked write-buffer entries use the
                                                maximum timeout. */
    cvmmemctl.s.dismrgclrwbto = 0;          /**< R/W If set, a merged store does not clear the write-buffer entry
                                                timeout state. */
    cvmmemctl.s.iobdmascrmsb = 0;           /**< R/W Two bits that are the MSBs of the resultant CVMSEG LM word
                                                location for an IOBDMA. The other 8 bits come from the SCRADDR
                                                field of the IOBDMA. */
    cvmmemctl.s.syncwsmarked = 0;           /**< R/W If set, SYNCWS and SYNCS only order marked stores; if clear,
                                                SYNCWS and SYNCS only order unmarked stores. SYNCWSMARKED has no
                                                effect when DISSYNCWS is set. */
    cvmmemctl.s.dissyncws = 0;              /**< R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
    if (octeon_is_pass1())
        cvmmemctl.s.diswbfst = 0;           /**< R/W If set, no stall happens on write buffer full. */
    else
        cvmmemctl.s.diswbfst = 1;           /**< R/W If set, no stall happens on write buffer full. */
    cvmmemctl.s.xkmemenas = 0;              /**< R/W If set (and SX set), supervisor-level loads/stores can use
                                                XKPHYS addresses with VA<48>==0 */
#ifdef CONFIG_CAVIUM_OCTEON_USER_MEM
    cvmmemctl.s.xkmemenau = 1;              /**< R/W If set (and UX set), user-level loads/stores can use XKPHYS
                                                addresses with VA<48>==0 */
#else
    cvmmemctl.s.xkmemenau = 0;
#endif
    cvmmemctl.s.xkioenas = 0;               /**< R/W If set (and SX set), supervisor-level loads/stores can use
                                                XKPHYS addresses with VA<48>==1 */
    cvmmemctl.s.xkioenau = 1;               /**< R/W If set (and UX set), user-level loads/stores can use XKPHYS
                                                addresses with VA<48>==1 */
    cvmmemctl.s.allsyncw = 0;               /**< R/W If set, all stores act as SYNCW (NOMERGE must be set when
                                                this is set) RW, reset to 0. */
    cvmmemctl.s.nomerge = 0;                /**< R/W If set, no stores merge, and all stores reach the coherent
                                                bus in order. */
    cvmmemctl.s.didtto = 0;                 /**< R/W Selects the bit in the counter used for DID time-outs
                                                0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is between
                                                1× and 2× this interval. For example, with DIDTTO=3, the expiration
                                                interval is between 16K and 32K. */
    cvmmemctl.s.csrckalwys = 0;             /**< R/W If set, the (mem) CSR clock never turns off. */
    cvmmemctl.s.mclkalwys = 0;              /**< R/W If set, mclk never turns off. */
    cvmmemctl.s.wbfltime = 0;               /**< R/W Selects the bit in the counter used for write buffer flush
                                                time-outs (WBFLT+11) is the bit position in an internal counter
                                                used to determine expiration. The write buffer expires between
                                                1× and 2× this interval. For example, with WBFLT = 0, a write
                                                buffer expires between 2K and 4K cycles after the write buffer
                                                entry is allocated. */
    cvmmemctl.s.istrnol2 = 0;               /**< R/W If set, do not put Istream in the L2 cache. */
    cvmmemctl.s.wbthresh = 10;              /**< R/W The write buffer threshold. */
    cvmmemctl.s.cvmsegenak = 1;             /**< R/W If set, CVMSEG is available for loads/stores in kernel/debug mode. */
    cvmmemctl.s.cvmsegenas = 0;             /**< R/W If set, CVMSEG is available for loads/stores in supervisor mode. */
    cvmmemctl.s.cvmsegenau = 0;             /**< R/W If set, CVMSEG is available for loads/stores in user mode. */
    cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; /**< R/W Size of local memory in cache blocks, 54 (6912 bytes) is max legal value. */

    if (smp_processor_id() == 0)
        printk("CVMSEG size: %d cache lines (%d bytes)\n",
               CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

    __write_64bit_c0_register($11, 7, cvmmemctl.u64);

    /* Set a default for the hardware timeouts */
    fau_timeout.u64 = 0;
    fau_timeout.s.tout_enb = 1;
    fau_timeout.s.tout_val = 16; /* 4096 cycles */
    octeon_write_csr(OCTEON_IOB_FAU_TIMEOUT, fau_timeout.u64);

    nm_tim.u64 = 0;
    nm_tim.s.nw_tim = 3; /* 4096 cycles */
    octeon_write_csr(OCTEON_POW_NW_TIM, nm_tim.u64);
}