/*
 * Disable (mask) delivery of the given irq on the current core.
 *
 * irq 0-7   : MIPS core-local interrupts (CP0 Status IM bits)
 * irq 8-71  : CIU sources (one bit in this core's CIU_INTX_EN0)
 * irq 80-87 : master 8259 PIC
 * irq 88-95 : slave 8259 PIC
 * Anything else (including the 72-79 gap) is silently ignored.
 */
static void octeon_mask_irq(unsigned int irq)
{
    unsigned long flags;

    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8) {
        /* Core local interrupts: clear the IM bit in CP0 Status */
        clear_c0_status(0x100 << irq);
    } else if (irq < 72) {
        /* CIU sources: clear this core's enable bit in EN0 */
        const uint64_t core = octeon_get_core_num();
        uint64_t ciu_bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        uint64_t enable = octeon_read_csr(OCTEON_CIU_INTX_EN0(core * 2));

        enable &= ~(1ull << ciu_bit);
        octeon_write_csr(OCTEON_CIU_INTX_EN0(core * 2), enable);
        /* Read back so the write is committed before we drop the lock */
        octeon_read_csr(OCTEON_CIU_INTX_EN0(core * 2));
    } else if (irq < 88) {
        /* Master 8259, irq 80-87: setting the IMR bit masks the line */
        outb(inb(0x21) | (1 << (irq - 80)), 0x21);
    } else if (irq < 96) {
        /* Slave 8259, irq 88-95: setting the IMR bit masks the line */
        outb(inb(0xa1) | (1 << (irq - 88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
/*
 * Route a CIU interrupt (irq 8-71) to the CPUs in 'dest' by setting or
 * clearing the per-core enable bit in each present core's CIU_INTX_EN0.
 * Non-CIU irqs (core-local and 8259 lines) are left untouched, and the
 * whole function is a no-op on non-SMP builds.
 */
static void octeon_irq_set_affinity(unsigned int irq, cpumask_t dest)
{
#ifdef CONFIG_SMP

    /* Interrupts from the CIU, irq 8-71.
       Bug fix: the lower bound was 'irq > 8', which wrongly excluded
       irq 8 — the first CIU source (see octeon_mask_irq). */
    if ((irq >= 8) && (irq < 72))
    {
        int             cpu;
        unsigned long   flags;
        irq_desc_t *    desc = irq_desc + irq;
        uint64_t        bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        spin_lock_irqsave(&desc->lock, flags);
        for (cpu=0; cpu<NR_CPUS; cpu++)
        {
            if (cpu_present(cpu))
            {
                uint64_t coreid = cpu_logical_map(cpu);
                uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
                if (cpu_isset(cpu, dest))
                    en0 |= 1ull<<bit;       /* deliver to this core */
                else
                    en0 &= ~(1ull<<bit);    /* don't deliver to this core */
                octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
            }
        }
        /* We need to do a read after the last update to make sure all of
            them are done */
        octeon_read_csr(OCTEON_CIU_INTX_EN0(octeon_get_core_num()*2));
        spin_unlock_irqrestore(&desc->lock, flags);
    }
#endif
}
Example #3
0
/*
 * Refresh the cached performance counter values: have every CPU update
 * its core counters, then snapshot the four L2 performance counters.
 */
static void proc_perf_update(void)
{
    /* L2 performance counter CSR addresses, in data-array order */
    const uint64_t l2_pfc_csr[4] = {
        OCTEON_L2C_PFC0, OCTEON_L2C_PFC1,
        OCTEON_L2C_PFC2, OCTEON_L2C_PFC3
    };
    int i;

    on_each_cpu(proc_perf_update_counters, NULL, 1, 1);
    mb();
    for (i = 0; i < 4; i++)
        proc_perf_l2counter_data[i] = octeon_read_csr(l2_pfc_csr[i]);
}
Example #4
0
/*
 * Configure the output-queue (OQ) packet-control settings for every ring
 * assigned to this VF: restore each queue's credit count, then select
 * pointer mode, ordering/snoop behaviour, and byte-swap mode.
 */
static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		/* Reset the ring's packet credit count to its maximum */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));

		/* NOTE(review): this masked value is never written back and
		 * reg_val is overwritten by the very next read, so this is a
		 * dead store (and the 64-bit mask is truncated into a u32).
		 * Presumably only the PKTS_SENT read itself matters (it may
		 * latch/clear hardware state) — TODO confirm against the
		 * CN23XX hardware reference manual. */
		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

		/* clear IPTR */
		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

		/* set DPTR */
		reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

		/* Endian swap for the scatterlist depends on host byte order */
#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
				 reg_val);
	}
}
/*
 * Disable (mask) the given irq everywhere: core-local sources on this
 * core, CIU sources on every present core (SMP) or this core (UP), and
 * the 8259 PICs for legacy lines.  Counterpart of octeon_mask_irq,
 * which only touches the current core.
 */
static void octeon_mask_irq_all(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8)
    {
        /* Core local interrupts, irq 0-7 */
        clear_c0_status(0x100 << irq);
    }
    else if (irq<72)
    {
        /* Interrupts from the CIU, irq 8-71 */
        uint64_t bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */

#ifdef CONFIG_SMP
        int cpu;
        for (cpu=0; cpu<NR_CPUS; cpu++)
        {
            if (cpu_present(cpu))
            {
                uint64_t coreid = cpu_logical_map(cpu);
                uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
                en0 &= ~(1ull<<bit);
                octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
            }
        }
        /* We need to do a read after the last update to make sure all of
            them are done */
        octeon_read_csr(OCTEON_CIU_INTX_EN0(octeon_get_core_num()*2));
#else
        const uint64_t coreid = octeon_get_core_num();
        uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
        en0 &= ~(1ull<<bit);
        octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
        octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
#endif
    }
    else if (irq<88)
    {
        /* Interrupts from the master 8259, irq 80-87.
           Bug fix: a set bit in the 8259 IMR masks the line, so masking
           must OR the bit in (as octeon_mask_irq does); the previous
           '& ~' cleared the bit and therefore UNmasked the interrupt. */
        outb(inb(0x21) | (1<<(irq-80)), 0x21);
    }
    else if (irq<96)
    {
        /* Interrupts from the slave 8259, irq 88-95 (same IMR fix) */
        outb(inb(0xa1) | (1<<(irq-88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
Example #6
0
void octeon_check_cpu_bist(void)
{
    const int coreid = octeon_get_core_num();
    uint64_t mask;
    uint64_t bist_val;

    /* Check BIST results for COP0 registers */
    mask     = 0x1f00000000ull;
    bist_val = __read_64bit_c0_register($27,0);
    if (bist_val & mask)
        printk("Core%d BIST Failure: CacheErr(icache) = 0x%lx\n", coreid, bist_val);

    bist_val = __read_64bit_c0_register($27,1);
    if (bist_val & 1)
        printk("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%lx\n", coreid, bist_val);

    mask     = 0xfc00000000000000ull;
    bist_val = __read_64bit_c0_register($11,7);
    if (bist_val & mask)
        printk("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%lx\n", coreid, bist_val);

    __write_64bit_c0_register($27,1,0);

    mask     = 0x18ull;
    bist_val = octeon_read_csr(OCTEON_L2D_ERR);
    octeon_write_csr(OCTEON_L2D_ERR, mask); /* Clear error bits */
    if (bist_val & mask)
        printk("Core%d L2 Parity error: L2D_ERR = 0x%lx\n", coreid, bist_val);
}
Example #7
0
/*
 * Enable the VF's configured I/O rings.  For every input queue flagged
 * in io_qmask, set the IS_64B instruction-size bit and/or the ring
 * enable bit; for every flagged output queue, set its ring enable bit.
 * Always returns 0.
 */
static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
	u32 q;

	for (q = 0; q < oct->num_iqs; q++) {
		u64 ctl;

		/* Mark the IQ as using 64-byte instructions if requested */
		if (oct->io_qmask.iq64B & BIT_ULL(q)) {
			ctl = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q));
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q),
			    ctl | CN23XX_PKT_INPUT_CTL_IS_64B);
		}

		/* Enable the input ring */
		if (oct->io_qmask.iq & BIT_ULL(q)) {
			ctl = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q));
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q),
			    ctl | CN23XX_PKT_INPUT_CTL_RING_ENB);
		}
	}

	for (q = 0; q < oct->num_oqs; q++) {
		/* Enable the output ring */
		if (oct->io_qmask.oq & BIT_ULL(q)) {
			u32 ctl = octeon_read_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q));
			octeon_write_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q),
			    ctl | CN23XX_PKT_OUTPUT_CTL_RING_ENB);
		}
	}

	return 0;
}
Example #8
0
static int octeon_l2_lock_line(uint64_t addr)
{
    int                     retval = 0;
    octeon_l2c_dbg_t        l2cdbg = {0};
    octeon_l2c_lckbase_t    lckbase = {0};
    octeon_l2c_lckoff_t     lckoff = {0};
    octeon_l2t_err_t        l2t_err;

    addr &= 0x7fffffff;

    /* Clear l2t error bits if set */
    l2t_err.u64 = octeon_read_csr(OCTEON_L2T_ERR);
    l2t_err.s.lckerr = 1;
    l2t_err.s.lckerr2 = 1;
    octeon_write_csr(OCTEON_L2T_ERR, l2t_err.u64);

    addr &= ~(cpu_icache_line_size()-1);

    /* Set this core as debug core */
    l2cdbg.s.ppnum = octeon_get_core_num();
    mb();
    octeon_write_csr(OCTEON_L2C_DBG, l2cdbg.u64);
    octeon_read_csr(OCTEON_L2C_DBG);

    lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
    octeon_write_csr(OCTEON_L2C_LCKOFF, lckoff.u64);
    octeon_read_csr(OCTEON_L2C_LCKOFF);

    if (((octeon_l2c_cfg_t)(octeon_read_csr(OCTEON_L2C_CFG))).s.idxalias)
    {
        struct cpuinfo_mips *c = &current_cpu_data;
        int l2_set_bits;
        int alias_shift;
        uint64_t addr_tmp;

        switch (c->cputype)
        {
            case CPU_CAVIUM_CN56XX:
            case CPU_CAVIUM_CN58XX:
                l2_set_bits =  11; /* 2048 sets */
                break;
            case CPU_CAVIUM_CN38XX:
                l2_set_bits =  10; /* 1024 sets */
                break;
            case CPU_CAVIUM_CN31XX:
                l2_set_bits =  9; /* 512 sets */
                break;
            case CPU_CAVIUM_CN30XX:
                l2_set_bits =  8; /* 256 sets */
                break;
            default:
                panic("Unknown L2 cache\n");
                break;
        }

        alias_shift = 7 + 2 * l2_set_bits - 1;
        addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> l2_set_bits;
        lckbase.s.lck_base = addr_tmp >> 7;
    }
    else
    {
Example #9
0
/*
 * One-time HAL setup: validate the boot descriptor handed over by the
 * bootloader, locate the boot info block, initialize the LED controller
 * (CN38XX/CN58XX only), optionally carve out the CAVIUM_RESERVE32
 * region, and optionally lock hot code paths into the L2 cache.
 */
void octeon_hal_init(void)
{
    /* Make sure we got the boot descriptor block.
       NOTE(review): the sentinel 0xEADBEEF looks like a truncated
       0xDEADBEEF — confirm against the bootloader that sets this value
       before changing it. */
    if ((octeon_boot_desc_ptr == (void *)0xEADBEEFULL))
        panic("Boot descriptor block wasn't passed properly\n");

    octeon_bootinfo = octeon_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);

    spin_lock_init(&octeon_led_lock);
#ifndef CONFIG_CAVIUM_OCTEON_SIMULATOR
    /* Only enable the LED controller if we're running on a CN38XX or CN58XX.
        The CN30XX and CN31XX don't have an LED controller */
    if ((current_cpu_data.cputype == CPU_CAVIUM_CN38XX) ||
        (current_cpu_data.cputype == CPU_CAVIUM_CN58XX))
    {
        /* Disable, zero all outputs/formats, then re-enable the LEDs */
        octeon_write_csr(OCTEON_LED_EN, 0);
        octeon_write_csr(OCTEON_LED_PRT, 0);
        octeon_write_csr(OCTEON_LED_DBG, 0);
        octeon_write_csr(OCTEON_LED_PRT_FMT, 0);
        octeon_write_csr(OCTEON_LED_UDD_CNTX(0), 32);
        octeon_write_csr(OCTEON_LED_UDD_CNTX(1), 32);
        octeon_write_csr(OCTEON_LED_UDD_DATX(0), 0);
        octeon_write_csr(OCTEON_LED_UDD_DATX(1), 0);
        octeon_write_csr(OCTEON_LED_EN, 1);
    }
#endif

/* NOTE(review): '#if' (not '#ifdef') — relies on CONFIG_CAVIUM_RESERVE32
   always being defined to a numeric value (the size in MB). */
#if CONFIG_CAVIUM_RESERVE32
    {
        cvmx_bootmem_desc_t *bootmem_desc = octeon_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
        octeon_reserve32_memory = octeon_phy_mem_named_block_alloc(bootmem_desc, CONFIG_CAVIUM_RESERVE32<<20, 0, 0, 2<<20, "CAVIUM_RESERVE32");
        if (octeon_reserve32_memory == 0)
            printk("Failed to allocate CAVIUM_RESERVE32 memory area\n");
    }
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
    /* Fuse bits 34-35 of L2D_FUS3 indicate a reduced-size L2; skip
       locking in that case to keep usable cache */
    if (octeon_read_csr(OCTEON_L2D_FUS3) & (3ull<<34))
    {
        printk("Skipping L2 locking due to reduced L2 cache size\n");
    }
    else
    {
	extern asmlinkage void octeon_handle_irq(void);
        uint64_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
        octeon_l2_lock_range(ebase, 0x100);        /* TLB refill */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
        octeon_l2_lock_range(ebase + 0x180, 0x80);  /* General exception */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
        octeon_l2_lock_range(ebase + 0x200, 0x80);  /* Interrupt handler */
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
        octeon_l2_lock_range((uint64_t)octeon_handle_irq, 0x280);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
        octeon_l2_lock_range((uint64_t)memcpy, 0x480);
#endif
    }
#endif
}
Example #10
0
/**
 * Show the performance counters to the user (/proc seq_file handler).
 * Refreshes all counters, prints per-CPU core counters, the four L2
 * counters, DRAM utilization since the previous read, and usage help,
 * then re-arms the counter configuration.
 *
 * @param m  seq_file being written to
 * @param v  iterator value from the seq_file core (unused)
 * @return 0 on success
 */
static int proc_perf_show(struct seq_file *m, void *v)
{
    int cpu;
    int i;
    uint64_t dram_clocks;
    uint64_t dram_operations;

    /* Snapshot all CPU and L2 counters before printing */
    proc_perf_update();

    seq_printf(m, "       %16s %16s\n",
               proc_perf_label[proc_perf_counter_control[0]],
               proc_perf_label[proc_perf_counter_control[1]]);
    for (cpu=0; cpu<NR_CPUS; cpu++)
    {
        if (cpu_online(cpu))
		    seq_printf(m, "CPU%2d: %16lu %16lu\n", cpu, proc_perf_counter_data[cpu][0], proc_perf_counter_data[cpu][1]);
    }

    seq_printf(m, "\n");
    for (i=0; i<4; i++)
        seq_printf(m, "%s: %lu\n", proc_perf_l2label[proc_perf_l2counter_control[i]], proc_perf_l2counter_data[i]);

    /* Compute DRAM utilization.
       NOTE(review): the '<< 32' assumes octeon_read_csr returns a 64-bit
       value; confirm its return type, otherwise the HI half is lost. */
    dram_operations = (octeon_read_csr(OCTEON_LMC_OPS_CNT_HI) << 32) | octeon_read_csr(OCTEON_LMC_OPS_CNT_LO);
    dram_clocks = (octeon_read_csr(OCTEON_LMC_DCLK_CNT_HI) << 32) | octeon_read_csr(OCTEON_LMC_DCLK_CNT_LO);
    /* Only report a delta when the clock counter moved forward (guards
       against wrap or first read) */
    if (dram_clocks > proc_perf_dram_clocks)
    {
        uint64_t delta_clocks = dram_clocks - proc_perf_dram_clocks;
        uint64_t delta_operations = dram_operations - proc_perf_dram_operations;
        /* Fixed-point percentage with two decimal places */
        uint64_t percent_x100 = 10000 * delta_operations / delta_clocks;
        seq_printf(m, "\nDRAM ops count: %lu, dclk count: %lu, utilization: %lu.%02lu%%\n",
                   delta_operations, delta_clocks, percent_x100 / 100, percent_x100 % 100);
    }
    /* Remember this snapshot as the baseline for the next read */
    proc_perf_dram_operations = dram_operations;
    proc_perf_dram_clocks = dram_clocks;

    seq_printf(m,
               "\n"
               "Configuration of the performance counters is controller by writing\n"
               "one of the following values to:\n"
               "    /sys/module/perf_counters/parameters/counter{0,1}\n"
               "    /sys/module/perf_counters/parameters/l2counter{0-3}\n"
               "\n"
               "Possible CPU counters:");
    /* List available core counter names, 8 per line */
    for (i=0; i<PROC_PERF_CORE_MAX; i++)
    {
        if ((i & 7) == 0)
            seq_printf(m, "\n    ");
        if (proc_perf_label[i])
            seq_printf(m, "%s ", proc_perf_label[i]);
    }

    seq_printf(m, "\n\nPossible L2 counters:");
    /* List available L2 counter names, 4 per line */
    for (i=0; i<PROC_PERF_L2_MAX; i++)
    {
        if ((i & 3) == 0)
            seq_printf(m, "\n    ");
        if (proc_perf_l2label[i])
            seq_printf(m, "%s ", proc_perf_l2label[i]);
    }
    seq_printf(m, "\nWarning: Counter configuration doesn't update till you access /proc/octeon_perf.\n");

    /* Re-arm counter hardware with the current configuration */
    proc_perf_setup();
    return 0;
}
Example #11
0
/**
 * Low level initialize the Octeon PCI controller: reset the PCI bus,
 * configure NPI control/status, detect PCI vs PCI-X mode, then program
 * the configuration registers (command bits, latency timer, retry and
 * flush behaviour, PCI-X split-transaction limits, and BAR read
 * commands).  Must run before any other PCI access.
 *
 * @return nothing
 */
static inline void octeon_pci_initialize(void)
{
	int64_t stat;
	octeon_pci_cfg01_t cfg01;
	octeon_npi_ctl_status_t ctl_status;
	octeon_pci_ctl_status_2_t ctl_status_2;
	octeon_pci_cfg19_t cfg19;
	octeon_pci_cfg16_t cfg16;
	octeon_pci_cfg22_t cfg22;
	octeon_pci_cfg56_t cfg56;

	/* Reset the PCI Bus */
	octeon_write_csr(OCTEON_CIU_SOFT_PRST, 0x1);
	stat = octeon_read_csr(OCTEON_CIU_SOFT_PRST);	/* read back so the reset write completes; value unused */

	_mdelay(2);		/* Hold  PCI reset for 2 ms */

	ctl_status.u64 = 0;
	ctl_status.s.max_word = 1;
	ctl_status.s.timer = 1;
	octeon_write_csr(OCTEON_NPI_CTL_STATUS, ctl_status.u64);

	/* Deassert PCI reset and advertize PCX Host Mode Device Capability (64b) */
	octeon_write_csr(OCTEON_CIU_SOFT_PRST, 0x4);
	stat = octeon_read_csr(OCTEON_CIU_SOFT_PRST);	/* read back to flush; value unused */

	_mdelay(2);		/* Wait 2 ms after deasserting PCI reset */

	ctl_status_2.u32 = 0;
	ctl_status_2.s.bar2pres = 1;   /* bar2 present */
	ctl_status_2.s.bar2_enb = 1;   /* bar2 enable  */
	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set before any PCI reads. */
	npi_write32(OCTEON_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
	_mdelay(4);		/* Wait 4 ms before doing PCI reads (NOTE(review): was documented as 2 ms; delay value and comment disagreed) */

	ctl_status_2.u32 = npi_read32(OCTEON_NPI_PCI_CTL_STATUS_2);
	printk("PCI Status: %s %s-bit\n",
	       ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
	       ctl_status_2.s.ap_64ad ? "64" : "32");

	/*
	 ** TDOMC must be set to one in PCI mode. TDOMC should be set to 4
	 ** in PCI-X mode to allow four oustanding splits. Otherwise,
	 ** should not change from its reset value. Don't write PCI_CFG19
	 ** in PCI mode (0x82000001 reset value), write it to 0x82000004
	 ** after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
	 ** MRBCM -> must be one.
	 */
	if (ctl_status_2.s.ap_pcix) {
		cfg19.u32 = 0;
		cfg19.s.tdomc = 4;	/* Target Delayed/Split request
					   outstanding maximum count. [1..31]
					   and 0=32.  NOTE: If the user
					   programs these bits beyond the
					   Designed Maximum outstanding count,
					   then the designed maximum table
					   depth will be used instead.  No
					   additional Deferred/Split
					   transactions will be accepted if
					   this outstanding maximum count is
					   reached. Furthermore, no additional
					   deferred/split transactions will be
					   accepted if the I/O delay/ I/O
					   Split Request outstanding maximum
					   is reached. */
		cfg19.s.mdrrmc = 2;	/* Master Deferred Read Request Outstanding Max
					   Count (PCI only).
					   CR4C[26:24]  Max SAC cycles   MAX DAC cycles
					   000              8                4
					   001              1                0
					   010              2                1
					   011              3                1
					   100              4                2
					   101              5                2
					   110              6                3
					   111              7                3
					   For example, if these bits are programmed to
					   100, the core can support 2 DAC cycles, 4 SAC
					   cycles or a combination of 1 DAC and 2 SAC cycles.
					   NOTE: For the PCI-X maximum outstanding split
					   transactions, refer to CRE0[22:20]  */

		cfg19.s.mrbcm = 1;	/* Master Request (Memory Read) Byte Count/Byte
					   Enable select.
					   0 = Byte Enables valid. In PCI mode, a burst
					   transaction cannot be performed using
					   Memory Read command=4?h6.
					   1 = DWORD Byte Count valid (default). In PCI
					   Mode, the memory read byte enables are
					   automatically generated by the core.
					   Note: N3 Master Request transaction sizes are
					   always determined through the
					   am_attr[<35:32>|<7:0>] field.  */
		npi_write32(OCTEON_NPI_PCI_CFG19, cfg19.u32);
	}

	cfg01.u32 = 0;
	cfg01.s.msae = 1;	/* Memory Space Access Enable */
	cfg01.s.me = 1;		/* Master Enable */
	cfg01.s.pee = 1;	/* PERR# Enable */
	cfg01.s.see = 1;	/* System Error Enable */
	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */

	npi_write32(OCTEON_NPI_PCI_CFG01, cfg01.u32);
	npi_read32(OCTEON_NPI_PCI_CFG01);	/* read back to flush the write */

#ifdef USE_OCTEON_INTERNAL_ARBITER
	/*
	 ** When OCTEON is a PCI host, most systems will use OCTEON's
	 ** internal arbiter, so must enable it before any PCI/PCI-X
	 ** traffic can occur.
	 */
	{
		octeon_npi_pci_int_arb_cfg_t pci_int_arb_cfg;

		pci_int_arb_cfg.u64 = 0;
		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
		octeon_write_csr(OCTEON_NPI_PCI_INT_ARB_CFG,
				 pci_int_arb_cfg.u64);
	}
#endif				/* USE_OCTEON_INTERNAL_ARBITER */

	/*
	 ** Preferrably written to 1 to set MLTD. [RDSATI,TRTAE,
	 ** TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
	 ** 1..7.
	 */
	cfg16.u32 = 0;
	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
	npi_write32(OCTEON_NPI_PCI_CFG16, cfg16.u32);

	/*
	 ** Should be written to 0x4ff00. MTTV -> must be zero.
	 ** FLUSH -> must be 1. MRV -> should be 0xFF.
	 */
	cfg22.u32 = 0;
	cfg22.s.mrv = 0xff;	/* Master Retry Value [1..255] and 0=infinite */
	cfg22.s.flush = 1;	/* AM_DO_FLUSH_I control NOTE: This
				   bit MUST BE ONE for proper N3K
				   operation */
	npi_write32(OCTEON_NPI_PCI_CFG22, cfg22.u32);

	/*
	 ** MOST Indicates the maximum number of outstanding splits (in -1
	 ** notation) when OCTEON is in PCI-X mode.  PCI-X performance is
	 ** affected by the MOST selection.  Should generally be written
	 ** with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
	 ** depending on the desired MOST of 3, 2, 1, or 0, respectively.
	 */
	cfg56.u32 = 0;
	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count [0=512B,1=1024B,2=2048B,3=4096B] */
	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1 .. 7=32] */

	npi_write32(OCTEON_NPI_PCI_CFG56, cfg56.u32);

	/*
	 ** Affects PCI performance when OCTEON services reads to its
	 ** BAR1/BAR2. Refer to Section 10.6.1.  The recommended values are
	 ** 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
	 ** PCI_READ_CMD_E, respectively. Note that these values differ
	 ** from their reset values.
	 */
	npi_write32(OCTEON_NPI_PCI_READ_CMD_6, 0x22);
	npi_write32(OCTEON_NPI_PCI_READ_CMD_C, 0x33);
	npi_write32(OCTEON_NPI_PCI_READ_CMD_E, 0x33);
}
static int octeon_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int combined)
{
    uint64_t data = 0;
    int i;
    int timeout = 0;
    octeon_mio_tws_sw_twsi_t temp, mio_tws_sw_twsi;
    octeon_mio_tws_sw_twsi_ext_t mio_tws_sw_twsi_ext;

    DEB2("addr: 0x%04x, len: %d, flags: 0x%x, buf[0] = %x\n", msg->addr, msg->len, msg->flags, msg->buf[0]);


    mio_tws_sw_twsi.u64 = 0x0;

    mio_tws_sw_twsi.s.v = 1;

    //ten bit address op<1> = 1
    if( msg->flags & I2C_M_TEN) mio_tws_sw_twsi.s.op |= 0x2;
    mio_tws_sw_twsi.s.a = msg->addr & 0x3ff;

    // check the msg->len  0<=len <8
    if( msg->len > 8 ){
	printk("%s %d Error len msg->len %d\n", __FILE__, __LINE__, msg->len);
	return (-1);
    }
    mio_tws_sw_twsi.s.sovr = 1;			// size override.
    if ( msg->len == 0 )	
       mio_tws_sw_twsi.s.size = 0;
    else
       mio_tws_sw_twsi.s.size = msg->len-1;	// Size: 0 = 1 byte, 1 = 2 bytes, ..., 7 = 8 bytes

    if( msg->flags & I2C_M_RD ){
	mio_tws_sw_twsi.s.r = 1;		// Enable Read bit 
    }else{	
	for(i =0; i <= mio_tws_sw_twsi.s.size; i++){
	    data = data << 8;	
	    data |= msg->buf[i];
	}

	mio_tws_sw_twsi.s.d = data;
	mio_tws_sw_twsi_ext.s.d = data >> 32;	
    }
	
#ifdef I2C_OCTEON_DEBUG
    if ( mio_tws_sw_twsi.s.r == 1 )
	printk("twsi-read  op: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
    else
        printk("twsi-write op: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
#endif

    octeon_write_csr(OCTEON_MIO_TWS_SW_TWSI_EXT, mio_tws_sw_twsi_ext.u64);
    octeon_write_csr(OCTEON_MIO_TWS_SW_TWSI, mio_tws_sw_twsi.u64);


    //Poll! wait the transfer complete and timeout (10ms).
    do{
	temp.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI);	
	udelay(1);
    }while (temp.s.v && (timeout++ < I2C_MAX_TIMEOUT));

    mio_tws_sw_twsi.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI);

    if (timeout >= I2C_MAX_TIMEOUT) {
	printk("Octeon twsi I2C Timeout!\n");
	octeon_i2c_reset();
	return -EIO;
    }

    //transfer ERROR
    if (!mio_tws_sw_twsi.s.r){
	octeon_i2c_reset();
	return -EIO;
    }

    if (msg->flags & I2C_M_RD){

	mio_tws_sw_twsi_ext.u64 = octeon_read_csr(OCTEON_MIO_TWS_SW_TWSI_EXT);
	data = ((uint64_t) mio_tws_sw_twsi_ext.s.d << 32) | mio_tws_sw_twsi.s.d;
	
#ifdef I2C_OCTEON_DEBUG
	printk("twsi-read result: data=%llx %llx len=%d\n", mio_tws_sw_twsi.u64, mio_tws_sw_twsi_ext.u64, msg->len);
#endif

	for(i = mio_tws_sw_twsi.s.size; i >= 0; i--){
		msg->buf[i] = data;
		data = data >> 8; 
	}	
    }
Example #13
0
/*
 * Dump the CN6XXX PCI window, interrupt, output/input queue, DMA, and
 * BAR1 index CSRs as human-readable text into 's'.
 *
 * @param s   destination buffer (caller must size it for the full dump)
 * @param oct device whose registers are read
 * @return number of bytes written to 's'
 *
 * Fix: the DMA_TIME_1 line previously re-read CN6XXX_DMA_PKT_INT_LEVEL(1)
 * (copy-paste from the line above) while printing the TIME register's
 * label and address; it now reads CN6XXX_DMA_TIME_INT_LEVEL(1).
 */
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	int i, len = 0;

	/* PCI  Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI  Interrupt Register */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
						CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI  Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI  Input queue registers (the inner 'u32 reg' declaration that
	 * shadowed the outer one has been removed) */
	for (i = 0; i <= 3; i++) {
		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI  DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI  Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		/* NOTE: lio_pci_readq returns a 64-bit value that is
		 * truncated into the u32 'reg' here, as in the original */
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}