void bfa_timer_beat(struct bfa_timer_mod_s *mod) { struct list_head *qh = &mod->timer_q; struct list_head *qe, *qe_next; struct bfa_timer_s *elem; struct list_head timedout_q; INIT_LIST_HEAD(&timedout_q); qe = bfa_q_next(qh); while (qe != qh) { qe_next = bfa_q_next(qe); elem = (struct bfa_timer_s *) qe; if (elem->timeout <= BFA_TIMER_FREQ) { elem->timeout = 0; list_del(&elem->qe); list_add_tail(&elem->qe, &timedout_q); } else { elem->timeout -= BFA_TIMER_FREQ; } qe = qe_next; /* go to next elem */ } /* * Pop all the timeout entries */ while (!list_empty(&timedout_q)) { bfa_q_deq(&timedout_q, &elem); elem->timercb(elem->arg); } }
/* * Iterate's through all the rport's in the given port to * determine the maximum operating speed. * * To be used in TRL Functionality only */ enum bfa_pport_speed bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port) { struct list_head *qh, *qe; struct bfa_fcs_rport_s *rport = NULL; struct bfa_fcs_s *fcs; enum bfa_pport_speed max_speed = 0; struct bfa_pport_attr_s pport_attr; enum bfa_pport_speed pport_speed, rport_speed; bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); if (port == NULL) return 0; fcs = port->fcs; /* * Get Physical port's current speed */ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); pport_speed = pport_attr.speed; bfa_trc(fcs, pport_speed); qh = &port->rport_q; qe = bfa_q_first(qh); while (qe != qh) { rport = (struct bfa_fcs_rport_s *) qe; if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) || (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE)) { qe = bfa_q_next(qe); continue; } rport_speed = rport->rpf.rpsc_speed; if ((trl_enabled) && (rport_speed == BFA_PPORT_SPEED_UNKNOWN)) { /* Use default ratelim speed setting */ rport_speed = bfa_fcport_get_ratelim_speed(port->fcs->bfa); } if ((rport_speed == BFA_PPORT_SPEED_8GBPS) || (rport_speed > pport_speed)) { max_speed = rport_speed; break; } else if (rport_speed > max_speed) { max_speed = rport_speed; } qe = bfa_q_next(qe); } bfa_trc(fcs, max_speed); return max_speed; }
/**
 * Return 1 if element @qe is linked on queue @q, 0 otherwise.
 *
 * Walks the circular list starting after the head; also bails out
 * defensively if a NULL next pointer is encountered mid-walk.
 */
int
bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
{
	struct list_head *cur = bfa_q_next(q);

	while (cur != q) {
		if (cur == qe)
			return 1;
		cur = bfa_q_next(cur);
		if (cur == NULL)
			break;	/* corrupt/terminated list — treat as absent */
	}
	return 0;
}
/**
 * Look up an rport on @port and return its port WWN.
 *
 * When @bwwn is true the match is by @wwn; otherwise it is the rport at
 * position @index among the non-well-known-address rports. At most
 * @nrports entries are examined.
 *
 * NOTE(review): if the scan terminates without a match, the WWN of the
 * last rport visited is returned rather than 0 — preserved here as the
 * original behavior; confirm callers rely on it before changing.
 */
wwn_t
bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
		       int nrports, bfa_boolean_t bwwn)
{
	struct bfa_fcs_rport_s *rport = NULL;
	struct list_head *qh, *qe;
	struct bfa_fcs_s *fcs;
	int pos = 0;

	if (port == NULL || nrports == 0)
		return (wwn_t) 0;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) nrports);

	qh = &port->rport_q;
	for (qe = bfa_q_first(qh); (qe != qh) && (pos < nrports);
	     qe = bfa_q_next(qe)) {
		rport = (struct bfa_fcs_rport_s *) qe;

		/* Skip well-known-address rports (PID above 0xFFF000). */
		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, pos);
			continue;
		}

		if (bwwn) {
			if (!memcmp(&wwn, &rport->pwwn, 8))
				break;
		} else if (pos == index) {
			break;
		}

		pos++;
	}

	bfa_trc(fcs, pos);
	return rport ? rport->pwwn : (wwn_t) 0;
}
/**
 * Fill the chained SG pages of an I/O request with hardware SGEs built
 * from the command's DMA-mapped scatterlist, linking each page to the
 * next and terminating the final page with a PGDLEN element.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/* Start numbering after the inline SGEs already placed in the I/O
	 * request itself; only the overflow goes into SG pages. */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	sg = scsi_sglist(cmnd);
	/* Skip the first scatterlist entry — presumably consumed by the
	 * inline SGE; TODO confirm against the inline-SGE setup path. */
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;

		/* SGEs still to emit, capped at one page's data capacity. */
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			/* NOTE(review): type-puns u64 -> union bfi_addr_u via
			 * pointer cast; relies on matching layout. */
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags: DATA within a page, DATA_CPL at a page
			 * boundary with more to come, DATA_LAST at the very
			 * last data SGE of the I/O.
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page. After the loop above,
		 * 'sge' points one past the last data SGE — i.e. at the
		 * page's link slot.
		 */
		if (sgeid == ioim->nsges) {
			/* Last page: terminate instead of linking. */
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		/* Link/terminator element carries this page's byte total. */
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
/**
 * Copy the port WWNs of the rports attached to @port into @rport_wwns.
 *
 * On entry *@nrports is the capacity of @rport_wwns; on return it holds
 * the number of WWNs actually written. Well-known-address rports
 * (PID above 0xFFF000) are skipped.
 */
void
bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[],
			int *nrports)
{
	struct bfa_fcs_rport_s *rport;
	struct list_head *qh, *qe;
	struct bfa_fcs_s *fcs;
	int count = 0;

	if (port == NULL || rport_wwns == NULL || *nrports == 0)
		return;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) *nrports);

	qh = &port->rport_q;
	for (qe = bfa_q_first(qh); (qe != qh) && (count < *nrports);
	     qe = bfa_q_next(qe)) {
		rport = (struct bfa_fcs_rport_s *) qe;

		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, count);
			continue;
		}

		rport_wwns[count] = rport->pwwn;
		count++;
	}

	bfa_trc(fcs, count);
	*nrports = count;
}
/**
 * Fill the chained SG pages of an I/O request with hardware SGEs,
 * fetching each address/length through the bfa_cb_ioim_get_* callbacks,
 * linking each page to the next and terminating the final page with a
 * PGDLEN element.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;

	/* Start numbering after the inline SGEs already placed in the I/O
	 * request itself; only the overflow goes into SG pages. */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	do {
		sge = sgpg->sgpg->sges;

		/* SGEs still to emit, capped at one page's data capacity. */
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++) {
			sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
			sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
			pgcumsz += sge->sg_len;

			/**
			 * set flags: DATA within a page, DATA_CPL at a page
			 * boundary with more to come, DATA_LAST at the very
			 * last data SGE of the I/O.
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page. After the loop above,
		 * 'sge' points one past the last data SGE — i.e. at the
		 * page's link slot.
		 */
		if (sgeid == ioim->nsges) {
			/* Last page: terminate instead of linking. */
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		/* Link/terminator element carries this page's byte total. */
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}