/*
 * Build the QE "init Ethernet" command parameter RAM block and issue
 * the INIT_TX_RX host command to the QUICC Engine for this UCC.
 *
 * uec:       driver private state; uec->rx_glbl_pram_offset and
 *            uec->tx_glbl_pram_offset must already be set up (see
 *            uec_init_rx_parameter()/uec_init_tx_parameter()).
 * thread_tx: number of Tx thread entries to program.
 * thread_rx: number of additional Rx thread entries; the Rx loop
 *            below programs thread_rx + 1 entries (entry 0 carries
 *            offset 0, i.e. no private thread pram).
 *
 * Returns 0 on success, -ENOMEM when no QE snum (serial number used
 * to identify a virtual thread) can be obtained.
 *
 * NOTE(review): the qe_muram_alloc() results are not checked, and
 * snums taken before a failure are not returned to the pool —
 * acceptable only if callers treat this error as fatal. TODO confirm.
 */
static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec, int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t	*p_init_enet_param;
	u32			init_enet_param_offset;
	uec_info_t		*uec_info;
	int			i;
	int			snum;
	u32			init_enet_offset;
	u32			entry_val;
	u32			command;
	u32			cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate init enet command parameter in MURAM (4-byte aligned) */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct: fill the reserved-init magic words */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;

	/* No external address-lookup key is used */
	p_init_enet_param->largestexternallookupkeysize = 0;

	/* Encode the Rx/Tx thread counts into the RGF/TGF fields */
	p_init_enet_param->rgftgfrxglobal |=
		((u32)uec_info->num_threads_rx) << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |=
		((u32)uec_info->num_threads_tx) << ENET_INIT_PARAM_TGF_SHIFT;

	/*
	 * Init Rx global parameter pointer; the low bits of the same word
	 * carry the RISC engine selection (risc_rx).
	 */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->risc_rx;

	/* Init Rx threads: thread_rx + 1 entries, each tagged with a snum */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i==0) {
			/* Entry 0 gets no private Rx thread pram */
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer (low bits select risc_tx) */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->risc_tx;

	/* Init Tx threads: each gets its own thread pram allocation */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0)	{
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
						 UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	/* Ensure all pram writes are visible before issuing the QE command */
	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
						 init_enet_param_offset);

	return 0;
}
/*
 * Set up the global Tx parameter RAM page for this UCC Ethernet
 * controller: TEMODER, the send-queue descriptor (SQPTR), the TxBD
 * ring pointers, and the per-thread Tx data area (TQPTR).
 *
 * uec:            driver private state
 * num_threads_tx: number of Tx threads to reserve thread-data for
 */
static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_tx_global_pram_t *tx_pram;
	uec_info_t *info;
	u32 last_bd_addr;
	u8 bus_mode;
	int idx;

	info = uec->uec_info;

	/* Grab a MURAM page for the global Tx parameter RAM and clear it */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
			sizeof(uec_tx_global_pram_t),
			UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	tx_pram = (uec_tx_global_pram_t *)
			qe_muram_addr(uec->tx_glbl_pram_offset);
	uec->p_tx_glbl_pram = tx_pram;
	memset(tx_pram, 0, sizeof(uec_tx_global_pram_t));

	/* TEMODER: RMON statistics disabled, a single Tx queue */
	out_be16(&tx_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR: allocate the send-queue descriptor and hook it up */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
			sizeof(uec_send_queue_qd_t),
			UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
			qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&tx_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Point queue 0 at the TxBD ring; last BD = base + (len - 1) BDs */
	last_bd_addr = (u32)uec->p_tx_bd_ring +
			(info->tx_bd_ring_len - 1) * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 last_bd_addr);

	/* One Tx queue only: no scheduler, TxRMON disabled */
	out_be32(&tx_pram->schedulerbasepointer, 0);
	out_be32(&tx_pram->txrmonbaseptr, 0);

	/* TSTATE: global snooping, big endian, the CSB bus selected */
	bus_mode = BMR_INIT_VALUE;
	out_be32(&tx_pram->tstate, ((u32)(bus_mode) << BMR_SHIFT));

	/* Clear the IP header offset table */
	for (idx = 0; idx < MAX_IPH_OFFSET_ENTRY; idx++)
		out_8(&tx_pram->iphoffset[idx], 0);

	/* Clear the VLAN tag table */
	for (idx = 0; idx < UEC_TX_VTAG_TABLE_ENTRY_MAX; idx++)
		out_be32(&tx_pram->vtagtable[idx], 0);

	/*
	 * TQPTR: per-thread Tx data area; an extra 32 bytes are reserved
	 * when there is exactly one thread.
	 */
	uec->thread_dat_tx_offset = qe_muram_alloc(
			num_threads_tx * sizeof(uec_thread_data_tx_t) +
			32 * (num_threads_tx == 1),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
			qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&tx_pram->tqptr, uec->thread_dat_tx_offset);
}
/*
 * Program the global Rx parameter RAM page for this UCC Ethernet
 * controller: REMODER, per-thread data (RQPTR), the RxBD queue table
 * (RBDQPTR), frame/DMA length limits, VLAN defaults, and the PQ2-style
 * address filtering table.
 *
 * uec:            driver private state
 * num_threads_rx: number of Rx threads to reserve thread-data for
 */
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	uec_rx_global_pram_t *rx_pram;
	uec_82xx_address_filtering_pram_t *addr_filter;
	u8 bus_mode;
	int idx;

	/* Grab a MURAM page for the global Rx parameter RAM and clear it */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
			sizeof(uec_rx_global_pram_t),
			UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	rx_pram = (uec_rx_global_pram_t *)
			qe_muram_addr(uec->rx_glbl_pram_offset);
	uec->p_rx_glbl_pram = rx_pram;
	memset(rx_pram, 0, sizeof(uec_rx_global_pram_t));

	/*
	 * REMODER: extended feature mode, VLAN, lossless flow control,
	 * receive firmware statistics, extended address parsing, dynamic
	 * max/min frame length, IP checksum check and IP address alignment
	 * all disabled; one Rx queue.
	 */
	out_be32(&rx_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR: per-thread Rx data area */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
			qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&rx_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&rx_pram->typeorlen, 3072);

	/* RxRMON and interrupt coalescing are not used */
	out_be32(&rx_pram->rxrmonbaseptr, 0);
	out_be32(&rx_pram->intcoalescingptr, 0);

	/* RSTATE: global snooping, big endian, the CSB bus selected */
	bus_mode = BMR_INIT_VALUE;
	out_8(&rx_pram->rstate, bus_mode);

	/* MRBLR: maximum Rx buffer length */
	out_be16(&rx_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR: RxBD queue entry plus the prefetched-BD area */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
			sizeof(uec_rx_bd_queues_entry_t) +
			sizeof(uec_rx_prefetched_bds_t),
			UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
			qe_muram_addr(uec->rx_bd_qs_tbl_offset);
	memset(uec->p_rx_bd_qs_tbl, 0,
	       sizeof(uec_rx_bd_queues_entry_t) +
	       sizeof(uec_rx_prefetched_bds_t));
	out_be32(&rx_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* Frame and DMA length limits */
	out_be16(&rx_pram->mflr, MAX_FRAME_LEN);
	out_be16(&rx_pram->minflr, MIN_FRAME_LEN);
	out_be16(&rx_pram->maxd1, MAX_DMA1_LEN);
	out_be16(&rx_pram->maxd2, MAX_DMA2_LEN);

	/* No CAM, no L2/L3 priority queue mapping */
	out_be32(&rx_pram->ecamptr, 0);
	out_be32(&rx_pram->l2qt, 0);
	for (idx = 0; idx < 8; idx++)
		out_be32(&rx_pram->l3qt[idx], 0);

	/* VLAN_TYPE and default TCI */
	out_be16(&rx_pram->vlantype, 0x8100);
	out_be16(&rx_pram->vlantci, 0);

	/* Clear the PQ2 style address filtering hash table */
	addr_filter = (uec_82xx_address_filtering_pram_t *)
			rx_pram->addressfiltering;
	addr_filter->iaddr_h = 0;
	addr_filter->iaddr_l = 0;
	addr_filter->gaddr_h = 0;
	addr_filter->gaddr_l = 0;
}
/*
 * Initialize a UCC for Slow operation on the QUICC Engine.
 *
 * The caller must fill in us_info before calling: register base
 * (regs), BD ring lengths, max_rx_buf_length, clock routing, GUMR
 * mode bits, protocol, and init_tx/init_rx (at least one must be set,
 * as the command selection at the end assumes).
 *
 * On success, stores the newly allocated private state in *uccs_ret
 * and returns 0. On failure returns a negative errno; resources
 * acquired so far are released via kfree()/ucc_slow_free().
 */
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
{
	struct ucc_slow_private *uccs;
	u32 i;
	struct ucc_slow __iomem *us_regs;
	u32 gumr;
	struct qe_bd *bd;
	u32 id;
	u32 command;
	int ret = 0;

	if (!us_info)
		return -EINVAL;

	/* check if the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
		return -EINVAL;
	}

	/*
	 * Set mrblr
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
	 * case when QE accepts 32 bits at a time.
	 */
	if ((!us_info->rfw) &&
		(us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
		printk(KERN_ERR "max_rx_buf_length not aligned.\n");
		return -EINVAL;
	}

	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
	if (!uccs) {
		printk(KERN_ERR "%s: Cannot allocate private data\n",
			__func__);
		return -ENOMEM;
	}

	/* Fill slow UCC structure */
	uccs->us_info = us_info;
	/* Set the PHY base address */
	uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
	if (uccs->us_regs == NULL) {
		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
		kfree(uccs);
		return -ENOMEM;
	}

	uccs->saved_uccm = 0;
	uccs->p_rx_frame = 0;
	us_regs = uccs->us_regs;
	/* Cache pointers to the event (UCCE) and mask (UCCM) registers */
	uccs->p_ucce = (u16 *) & (us_regs->ucce);
	uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
	uccs->rx_frames = 0;
	uccs->tx_frames = 0;
	uccs->rx_discarded = 0;
#endif				/* STATISTICS */

	/* Get PRAM base */
	uccs->us_pram_offset =
		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
		ucc_slow_free(uccs);
		return -ENOMEM;
	}
	/* Tell the QE which MURAM page belongs to this device */
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
		     uccs->us_pram_offset);

	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);

	/* Set UCC to slow type */
	ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
	if (ret) {
		printk(KERN_ERR "%s: cannot set UCC type", __func__);
		ucc_slow_free(uccs);
		return ret;
	}

	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);

	INIT_LIST_HEAD(&uccs->confQ);

	/* Allocate BDs. */
	uccs->rx_base_offset =
		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
				QE_ALIGNMENT_OF_BD);
	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
			us_info->rx_bd_ring_len);
		/* clear the offset so ucc_slow_free() does not free it */
		uccs->rx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	uccs->tx_base_offset =
		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
			QE_ALIGNMENT_OF_BD);
	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
		uccs->tx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	/* Init Tx bds */
	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
		/* clear bd buffer */
		out_be32(&bd->buf, 0);
		/* set bd status and length */
		out_be32((u32 *) bd, 0);
		bd++;
	}
	/*
	 * for last BD set Wrap bit
	 * NOTE(review): out_be32() already performs the CPU-to-BE byte
	 * swap; cpu_to_be32(T_W) is a no-op only on big-endian hosts —
	 * TODO confirm intent.
	 */
	out_be32(&bd->buf, 0);
	out_be32((u32 *) bd, cpu_to_be32(T_W));

	/* Init Rx bds */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
		/* set bd status and length */
		out_be32((u32*)bd, 0);
		/* clear bd buffer */
		out_be32(&bd->buf, 0);
		bd++;
	}
	/* for last BD set Wrap bit */
	out_be32((u32*)bd, cpu_to_be32(R_W));
	out_be32(&bd->buf, 0);

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	out_be32(&us_regs->gumr_h, gumr);

	/* gumr_l */
	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
		us_info->diag | us_info->mode;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	out_be32(&us_regs->gumr_l, gumr);

	/* Function code registers */

	/* if the data is in cachable memory, the 'global' */
	/* in the function code should be set. */
	uccs->us_pram->tbmr = UCC_BMR_BO_BE;
	uccs->us_pram->rbmr = UCC_BMR_BO_BE;

	/* rbase, tbase are offsets from MURAM base */
	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/* Set interrupt mask register at UCC level. */
	out_be16(&us_regs->uccm, us_info->uccm_mask);

	/* First, clear anything pending at UCC level,
	 * otherwise, old garbage may come through
	 * as soon as the dam is opened. */

	/* Writing '1' clears */
	out_be16(&us_regs->ucce, 0xffff);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */

	qe_issue_cmd(command, id, us_info->protocol, 0);

	*uccs_ret = uccs;
	return 0;
}