/*
 * omap_start_ehc - Start the TI USBHOST controller
 */
static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(1000);
    u8 tll_ch_mask = 0;
    unsigned reg = 0;
    int ret = 0;

    dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");

    /* Enable clocks for USBHOST */
    omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
    if (IS_ERR(omap->usbhost_ick)) {
        ret = PTR_ERR(omap->usbhost_ick);
        goto err_host_ick;
    }
    clk_enable(omap->usbhost_ick);

    omap->usbhost_hs_fck = clk_get(omap->dev, "hs_fck");
    if (IS_ERR(omap->usbhost_hs_fck)) {
        ret = PTR_ERR(omap->usbhost_hs_fck);
        goto err_host_120m_fck;
    }
    clk_enable(omap->usbhost_hs_fck);

    omap->usbhost_fs_fck = clk_get(omap->dev, "fs_fck");
    if (IS_ERR(omap->usbhost_fs_fck)) {
        ret = PTR_ERR(omap->usbhost_fs_fck);
        goto err_host_48m_fck;
    }
    clk_enable(omap->usbhost_fs_fck);

    if (omap->phy_reset) {
        /* Refer: ISSUE1 */
        if (gpio_is_valid(omap->reset_gpio_port[0])) {
            gpio_request(omap->reset_gpio_port[0], "USB1 PHY reset");
            gpio_direction_output(omap->reset_gpio_port[0], 0);
        }

        if (gpio_is_valid(omap->reset_gpio_port[1])) {
            gpio_request(omap->reset_gpio_port[1], "USB2 PHY reset");
            gpio_direction_output(omap->reset_gpio_port[1], 0);
        }

        /* Hold the PHY in RESET for enough time till DIR is high */
        udelay(10);
    }

    /* Configure TLL for 60MHz clock for ULPI */
    omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
    if (IS_ERR(omap->usbtll_fck)) {
        ret = PTR_ERR(omap->usbtll_fck);
        goto err_tll_fck;
    }
    clk_enable(omap->usbtll_fck);

    omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
    if (IS_ERR(omap->usbtll_ick)) {
        ret = PTR_ERR(omap->usbtll_ick);
        goto err_tll_ick;
    }
    clk_enable(omap->usbtll_ick);

    omap->omap_ehci_rev = ehci_omap_readl(omap->uhh_base, OMAP_UHH_REVISION);
    dev_dbg(omap->dev, "OMAP UHH_REVISION 0x%x\n", omap->omap_ehci_rev);

    /*
     * Enable per-port clocks as needed (newer controllers only):
     * - external ULPI clock for PHY mode
     * - internal clocks for TLL and HSIC modes (TODO)
     */
    if (is_omap_ehci_rev2(omap)) {
        switch (omap->port_mode[0]) {
        case EHCI_HCD_OMAP_MODE_PHY:
            omap->xclk60mhsp1_ck = clk_get(omap->dev, "xclk60mhsp1_ck");
            if (IS_ERR(omap->xclk60mhsp1_ck)) {
                ret = PTR_ERR(omap->xclk60mhsp1_ck);
                dev_err(omap->dev, "Unable to get Port1 ULPI clock\n");
            }

            omap->utmi_p1_fck = clk_get(omap->dev, "utmi_p1_gfclk");
            if (IS_ERR(omap->utmi_p1_fck)) {
                ret = PTR_ERR(omap->utmi_p1_fck);
                dev_err(omap->dev, "Unable to get utmi_p1_fck\n");
            }

            ret = clk_set_parent(omap->utmi_p1_fck, omap->xclk60mhsp1_ck);
            if (ret != 0)
                dev_err(omap->dev, "Unable to set P1 f-clock\n");
            break;
        case EHCI_HCD_OMAP_MODE_TLL:
            /* TODO */
        default:
            break;
        }

        switch (omap->port_mode[1]) {
        case EHCI_HCD_OMAP_MODE_PHY:
            omap->xclk60mhsp2_ck = clk_get(omap->dev, "xclk60mhsp2_ck");
            if (IS_ERR(omap->xclk60mhsp2_ck)) {
                ret = PTR_ERR(omap->xclk60mhsp2_ck);
                dev_err(omap->dev, "Unable to get Port2 ULPI clock\n");
            }

            omap->utmi_p2_fck = clk_get(omap->dev, "utmi_p2_gfclk");
            if (IS_ERR(omap->utmi_p2_fck)) {
                ret = PTR_ERR(omap->utmi_p2_fck);
                dev_err(omap->dev, "Unable to get utmi_p2_fck\n");
            }

            ret = clk_set_parent(omap->utmi_p2_fck, omap->xclk60mhsp2_ck);
            if (ret != 0)
                dev_err(omap->dev, "Unable to set P2 f-clock\n");
            break;
        case EHCI_HCD_OMAP_MODE_TLL:
            /* TODO */
        default:
            break;
        }
    }

    /* Perform TLL soft reset, and wait until reset is complete */
    ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
                     OMAP_USBTLL_SYSCONFIG_SOFTRESET);

    /* Wait for TLL reset to complete */
    while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
             & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
        cpu_relax();

        if (time_after(jiffies, timeout)) {
            dev_dbg(omap->dev, "operation timed out\n");
            ret = -EINVAL;
            goto err_sys_status;
        }
    }

    dev_dbg(omap->dev, "TLL RESET DONE\n");

    /* (1<<3) = no idle mode only for initial debugging */
    ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
                     OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
                     OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
                     OMAP_USBTLL_SYSCONFIG_CACTIVITY);

    /* Put UHH in NoIdle/NoStandby mode */
    reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
    if (is_omap_ehci_rev1(omap)) {
        reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
                | OMAP_UHH_SYSCONFIG_SIDLEMODE
                | OMAP_UHH_SYSCONFIG_CACTIVITY
                | OMAP_UHH_SYSCONFIG_MIDLEMODE);
        reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
    } else if (is_omap_ehci_rev2(omap)) {
        reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
        reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
        reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
        reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
    }
    ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);

    reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);

    /* Set up ULPI bypass and burst configurations */
    reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
            | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
            | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
    reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;

    if (is_omap_ehci_rev1(omap)) {
        if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
            reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
        if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
            reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
        if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
            reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;

        /* Bypass the TLL module for PHY mode operation */
        if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
            dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
            if (is_ehci_phy_mode(omap->port_mode[0]) ||
                is_ehci_phy_mode(omap->port_mode[1]) ||
                is_ehci_phy_mode(omap->port_mode[2]))
                reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
            else
                reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
        } else {
            dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
            if (is_ehci_phy_mode(omap->port_mode[0]))
                reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
            else if (is_ehci_tll_mode(omap->port_mode[0]))
                reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
            if (is_ehci_phy_mode(omap->port_mode[1]))
                reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
            else if (is_ehci_tll_mode(omap->port_mode[1]))
                reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
            if (is_ehci_phy_mode(omap->port_mode[2]))
                reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
            else if (is_ehci_tll_mode(omap->port_mode[2]))
                reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
        }
    } else if (is_omap_ehci_rev2(omap)) {
        /* Clear port mode fields for PHY mode */
        reg &= ~OMAP4_P1_MODE_CLEAR;
        reg &= ~OMAP4_P2_MODE_CLEAR;

        if (is_ehci_tll_mode(omap->port_mode[0]))
            reg |= OMAP4_P1_MODE_TLL;
        else if (is_ehci_hsic_mode(omap->port_mode[0]))
            reg |= OMAP4_P1_MODE_HSIC;

        if (is_ehci_tll_mode(omap->port_mode[1]))
            reg |= OMAP4_P2_MODE_TLL;
        else if (is_ehci_hsic_mode(omap->port_mode[1]))
            reg |= OMAP4_P2_MODE_HSIC;
    }
    ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
    dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);

    /*
     * An undocumented "feature" in the OMAP3 EHCI controller
     * causes suspended ports to be taken out of suspend when
     * the USBCMD.Run/Stop bit is cleared (for example when
     * we do ehci_bus_suspend). This breaks suspend-resume if
     * the root-hub is allowed to suspend. Writing 1 to this
     * undocumented register bit disables this feature and
     * restores normal behavior.
     */
    ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
                     EHCI_INSNREG04_DISABLE_UNSUSPEND);

    if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
        (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
        (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
        if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
            tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
        if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
            tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
        if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
            tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;

        /* Enable UTMI mode for required TLL channels */
        omap_usb_utmi_init(omap, tll_ch_mask, OMAP_TLL_CHANNEL_COUNT);
    }

    if (omap->phy_reset) {
        /*
         * Refer ISSUE1:
         * Hold the PHY in RESET for enough time till
         * PHY is settled and ready
         */
        udelay(10);

        if (gpio_is_valid(omap->reset_gpio_port[0]))
            gpio_set_value(omap->reset_gpio_port[0], 1);

        if (gpio_is_valid(omap->reset_gpio_port[1]))
            gpio_set_value(omap->reset_gpio_port[1], 1);
    }

    /* Soft reset the PHY using PHY reset command over ULPI */
    if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
        omap_ehci_soft_phy_reset(omap, 0);
    if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
        omap_ehci_soft_phy_reset(omap, 1);

    return 0;

err_sys_status:
    clk_disable(omap->utmi_p2_fck);
    clk_put(omap->utmi_p2_fck);
    clk_disable(omap->xclk60mhsp2_ck);
    clk_put(omap->xclk60mhsp2_ck);
    clk_disable(omap->utmi_p1_fck);
    clk_put(omap->utmi_p1_fck);
    clk_disable(omap->xclk60mhsp1_ck);
    clk_put(omap->xclk60mhsp1_ck);
    clk_disable(omap->usbtll_ick);
    clk_put(omap->usbtll_ick);

err_tll_ick:
    clk_disable(omap->usbtll_fck);
    clk_put(omap->usbtll_fck);

err_tll_fck:
    clk_disable(omap->usbhost_fs_fck);
    clk_put(omap->usbhost_fs_fck);

    if (omap->phy_reset) {
        if (gpio_is_valid(omap->reset_gpio_port[0]))
            gpio_free(omap->reset_gpio_port[0]);

        if (gpio_is_valid(omap->reset_gpio_port[1]))
            gpio_free(omap->reset_gpio_port[1]);
    }

err_host_48m_fck:
    clk_disable(omap->usbhost_hs_fck);
    clk_put(omap->usbhost_hs_fck);

err_host_120m_fck:
    clk_disable(omap->usbhost_ick);
    clk_put(omap->usbhost_ick);

err_host_ick:
    return ret;
}
/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
    struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
    struct mesh_preq_queue *preq_node;
    struct mesh_path *mpath;
    u8 ttl, target_flags = 0;
    const u8 *da;
    u32 lifetime;

    spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
    if (!ifmsh->preq_queue_len ||
        time_before(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) {
        spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
        return;
    }

    preq_node = list_first_entry(&ifmsh->preq_queue.list,
                                 struct mesh_preq_queue, list);
    list_del(&preq_node->list);
    --ifmsh->preq_queue_len;
    spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

    rcu_read_lock();
    mpath = mesh_path_lookup(sdata, preq_node->dst);
    if (!mpath)
        goto enddiscovery;

    spin_lock_bh(&mpath->state_lock);
    if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
        spin_unlock_bh(&mpath->state_lock);
        goto enddiscovery;
    }
    mpath->flags &= ~MESH_PATH_REQ_QUEUED;
    if (preq_node->flags & PREQ_Q_F_START) {
        if (mpath->flags & MESH_PATH_RESOLVING) {
            spin_unlock_bh(&mpath->state_lock);
            goto enddiscovery;
        } else {
            mpath->flags &= ~MESH_PATH_RESOLVED;
            mpath->flags |= MESH_PATH_RESOLVING;
            mpath->discovery_retries = 0;
            mpath->discovery_timeout = disc_timeout_jiff(sdata);
        }
    } else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
               mpath->flags & MESH_PATH_RESOLVED) {
        mpath->flags &= ~MESH_PATH_RESOLVING;
        spin_unlock_bh(&mpath->state_lock);
        goto enddiscovery;
    }

    ifmsh->last_preq = jiffies;

    if (time_after(jiffies, ifmsh->last_sn_update +
                            net_traversal_jiffies(sdata)) ||
        time_before(jiffies, ifmsh->last_sn_update)) {
        ++ifmsh->sn;
        sdata->u.mesh.last_sn_update = jiffies;
    }
    lifetime = default_lifetime(sdata);
    ttl = sdata->u.mesh.mshcfg.element_ttl;
    if (ttl == 0) {
        sdata->u.mesh.mshstats.dropped_frames_ttl++;
        spin_unlock_bh(&mpath->state_lock);
        goto enddiscovery;
    }

    if (preq_node->flags & PREQ_Q_F_REFRESH)
        target_flags |= IEEE80211_PREQ_TO_FLAG;
    else
        target_flags &= ~IEEE80211_PREQ_TO_FLAG;

    spin_unlock_bh(&mpath->state_lock);
    da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
    mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
                           target_flags, mpath->dst, mpath->sn, da, 0,
                           ttl, lifetime, 0, ifmsh->preq_id++, sdata);
    mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
    rcu_read_unlock();
    kfree(preq_node);
}
static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_mgmt *mgmt,
                                    const u8 *preq_elem, u32 orig_metric)
{
    struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
    struct mesh_path *mpath = NULL;
    const u8 *target_addr, *orig_addr;
    const u8 *da;
    u8 target_flags, ttl, flags;
    u32 orig_sn, target_sn, lifetime, target_metric = 0;
    bool reply = false;
    bool forward = true;
    bool root_is_gate;

    /* Update target SN, if present */
    target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
    orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
    target_sn = PREQ_IE_TARGET_SN(preq_elem);
    orig_sn = PREQ_IE_ORIG_SN(preq_elem);
    target_flags = PREQ_IE_TARGET_F(preq_elem);
    /* Proactive PREQ gate announcements */
    flags = PREQ_IE_FLAGS(preq_elem);
    root_is_gate = !!(flags & RANN_FLAG_IS_GATE);

    mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);

    if (ether_addr_equal(target_addr, sdata->vif.addr)) {
        mhwmp_dbg(sdata, "PREQ is for us\n");
        forward = false;
        reply = true;
        target_metric = 0;

        if (time_after(jiffies, ifmsh->last_sn_update +
                                net_traversal_jiffies(sdata)) ||
            time_before(jiffies, ifmsh->last_sn_update)) {
            ++ifmsh->sn;
            ifmsh->last_sn_update = jiffies;
        }
        target_sn = ifmsh->sn;
    } else if (is_broadcast_ether_addr(target_addr) &&
               (target_flags & IEEE80211_PREQ_TO_FLAG)) {
        rcu_read_lock();
        mpath = mesh_path_lookup(sdata, orig_addr);
        if (mpath) {
            if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
                reply = true;
                target_addr = sdata->vif.addr;
                target_sn = ++ifmsh->sn;
                target_metric = 0;
                ifmsh->last_sn_update = jiffies;
            }
            if (root_is_gate)
                mesh_path_add_gate(mpath);
        }
        rcu_read_unlock();
    } else {
        rcu_read_lock();
        mpath = mesh_path_lookup(sdata, target_addr);
        if (mpath) {
            if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
                SN_LT(mpath->sn, target_sn)) {
                mpath->sn = target_sn;
                mpath->flags |= MESH_PATH_SN_VALID;
            } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
                       (mpath->flags & MESH_PATH_ACTIVE)) {
                reply = true;
                target_metric = mpath->metric;
                target_sn = mpath->sn;
                /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012 */
                target_flags |= IEEE80211_PREQ_TO_FLAG;
            }
        }
        rcu_read_unlock();
    }

    if (reply) {
        lifetime = PREQ_IE_LIFETIME(preq_elem);
        ttl = ifmsh->mshcfg.element_ttl;
        if (ttl != 0) {
            mhwmp_dbg(sdata, "replying to the PREQ\n");
            mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr, orig_sn, 0,
                                   target_addr, target_sn, mgmt->sa, 0, ttl,
                                   lifetime, target_metric, 0, sdata);
        } else {
            ifmsh->mshstats.dropped_frames_ttl++;
        }
    }

    if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
        u32 preq_id;
        u8 hopcount;

        ttl = PREQ_IE_TTL(preq_elem);
        lifetime = PREQ_IE_LIFETIME(preq_elem);
        if (ttl <= 1) {
            ifmsh->mshstats.dropped_frames_ttl++;
            return;
        }
        mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
        --ttl;
        preq_id = PREQ_IE_PREQ_ID(preq_elem);
        hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
        da = (mpath && mpath->is_root) ?
                mpath->rann_snd_addr : broadcast_addr;

        if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
            target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
            target_sn = PREQ_IE_TARGET_SN(preq_elem);
        }

        mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, orig_sn,
                               target_flags, target_addr, target_sn, da,
                               hopcount, ttl, lifetime, orig_metric,
                               preq_id, sdata);
        if (!is_multicast_ether_addr(da))
            ifmsh->mshstats.fwded_unicast++;
        else
            ifmsh->mshstats.fwded_mcast++;
        ifmsh->mshstats.fwded_frames++;
    }
}
static void adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
                                        unsigned int numcmds, int wptr_ahead)
{
    int nopcount;
    unsigned int freecmds;
    unsigned int *cmds;
    uint cmds_gpu;
    unsigned long wait_time;
    unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
    unsigned long wait_time_part;
    unsigned int prev_reg_val[hang_detect_regs_count];

    memset(prev_reg_val, 0, sizeof(prev_reg_val));

    /* if wptr is ahead, fill the remainder of the ringbuffer with NOPs */
    if (wptr_ahead) {
        nopcount = rb->sizedwords - rb->wptr - 1;

        cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
        cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * rb->wptr;

        GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

        /* ensure rptr is non-zero before wrapping wptr back to 0,
         * so that rptr and wptr never become equal while the
         * ringbuffer still holds commands */
        do {
            GSL_RB_GET_READPTR(rb, &rb->rptr);
        } while (!rb->rptr);

        rb->wptr++;

        adreno_ringbuffer_submit(rb);

        rb->wptr = 0;
    }

    wait_time = jiffies + wait_timeout;
    wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);

    /* wait for space in ringbuffer */
    while (1) {
        GSL_RB_GET_READPTR(rb, &rb->rptr);

        freecmds = rb->rptr - rb->wptr;

        if (freecmds == 0 || freecmds > numcmds)
            break;

        /* check for a hang on each partial timeout, not just at the end */
        if (time_after(jiffies, wait_time_part)) {
            wait_time_part = jiffies +
                    msecs_to_jiffies(KGSL_TIMEOUT_PART);
            if (adreno_hang_detect(rb->device, prev_reg_val)) {
                KGSL_DRV_ERR(rb->device,
                    "Hang detected while waiting for freespace in "
                    "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
                    rb->rptr, rb->wptr);
                goto err;
            }
        }

        if (time_after(jiffies, wait_time)) {
            KGSL_DRV_ERR(rb->device,
                "Timed out while waiting for freespace in ringbuffer "
                "rptr: 0x%x, wptr: 0x%x\n",
                rb->rptr, rb->wptr);
            goto err;
        }

        continue;

err:
        if (!adreno_dump_and_recover(rb->device)) {
            wait_time = jiffies + wait_timeout;
        } else {
            /* GPU is hung and recovery failed */
            BUG();
        }
    }
}
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled, fence_queue_locked;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i])
            continue;

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity))
            last_activity = rdev->fence_drv[i].last_activity;

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for */
        if (i < ring)
            ring = i;
    }

    /* nothing to wait for ? */
    if (ring == RADEON_NUM_RINGS)
        return -ENOENT;

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case, timeout is somewhere before
             * last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled
             * in the last 500ms anyway we will just wait for the
             * minimum amount and then check for a lockup */
            timeout = 1;
        }

        CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i])
                radeon_irq_kms_sw_irq_get(rdev, i);
        }

        fence_queue_locked = false;
        r = 0;
        while (!(signaled = radeon_fence_any_seq_signaled(rdev,
                                                          target_seq))) {
            if (!fence_queue_locked) {
                mtx_lock(&rdev->fence_queue_mtx);
                fence_queue_locked = true;
            }
            if (intr) {
                r = cv_timedwait_sig(&rdev->fence_queue,
                                     &rdev->fence_queue_mtx, timeout);
            } else {
                r = cv_timedwait(&rdev->fence_queue,
                                 &rdev->fence_queue_mtx, timeout);
            }
            if (r == EINTR)
                r = ERESTARTSYS;
            if (r != 0) {
                if (r == EWOULDBLOCK) {
                    signaled = radeon_fence_any_seq_signaled(
                            rdev, target_seq);
                }
                break;
            }
        }
        if (fence_queue_locked)
            mtx_unlock(&rdev->fence_queue_mtx);

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i])
                radeon_irq_kms_sw_irq_put(rdev, i);
        }
        if (unlikely(r == ERESTARTSYS))
            return -r;

        CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);

        if (unlikely(!signaled)) {
#ifndef __FreeBSD__
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r)
                continue;
#endif

            sx_xlock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp))
                    tmp = rdev->fence_drv[i].last_activity;
            }

            /* test if somebody else has already decided that this
             * is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                sx_xunlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev,
                         "GPU lockup (waiting for 0x%016jx)\n",
                         (uintmax_t)target_seq[ring]);

                /* change last activity so nobody else thinks
                 * there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i)
                    rdev->fence_drv[i].last_activity = jiffies;

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                sx_xunlock(&rdev->ring_lock);
                return -EDEADLK;
            }

            sx_xunlock(&rdev->ring_lock);
        }
    }
    return 0;
}
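/*
 * Illustrative sketch, not part of the radeon driver above: the jiffies
 * comparisons used throughout this file go through time_after()/time_before()
 * (linux/jiffies.h), which are wraparound-safe because they compare via
 * signed subtraction rather than a raw ">". The radeon loop above relies on
 * this when its deadline arithmetic can no longer be trusted after a wrap
 * and it falls back to a minimal one-tick wait.
 */
static inline bool deadline_passed(unsigned long deadline)
{
    /* correct even if jiffies has wrapped since the deadline was set */
    return time_after(jiffies, deadline);
}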
/*
 * send command to firmware
 *
 * @wl: wl struct
 * @id: command id
 * @buf: buffer containing the command, must work with dma
 * @len: length of the buffer
 */
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
                    size_t res_len)
{
    struct wl1271_cmd_header *cmd;
    unsigned long timeout;
    u32 intr;
    int ret = 0;
    u16 status;
    u16 poll_count = 0;

    cmd = buf;
    cmd->id = cpu_to_le16(id);
    cmd->status = 0;

    WARN_ON(len % 4 != 0);
    WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));

    wl1271_write(wl, wl->cmd_box_addr, buf, len, false);

    wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);

    timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);

    intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
    while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
        if (time_after(jiffies, timeout)) {
            wl1271_error("command complete timeout");
            ret = -ETIMEDOUT;
            goto fail;
        }

        poll_count++;
        if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
            udelay(10);
        else
            msleep(1);

        intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
    }

    /* read back the status code of the command */
    if (res_len == 0)
        res_len = sizeof(struct wl1271_cmd_header);
    wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);

    status = le16_to_cpu(cmd->status);
    if (status != CMD_STATUS_SUCCESS) {
        wl1271_error("command execute failure %d", status);
        ret = -EIO;
        goto fail;
    }

    wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
                   WL1271_ACX_INTR_CMD_COMPLETE);
    return 0;

fail:
    WARN_ON(1);
    wl12xx_queue_recovery_work(wl);
    return ret;
}
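/*
 * Illustrative sketch, not from the wl1271 driver: the busy-wait pattern
 * used by wl1271_cmd_send() and by most functions in this file, reduced to
 * its core. A deadline is computed once with msecs_to_jiffies() and
 * re-checked with time_after(); polling starts with short udelay()s and
 * backs off to msleep(), mirroring the fast-poll loop above. The hw_ready()
 * predicate is a hypothetical stand-in for a device register read; assumes
 * linux/jiffies.h, linux/delay.h and linux/errno.h.
 */
static int poll_until_ready(bool (*hw_ready)(void), unsigned int timeout_ms)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
    unsigned int polls = 0;

    while (!hw_ready()) {
        if (time_after(jiffies, timeout))
            return -ETIMEDOUT;      /* deadline exceeded */

        /* poll fast at first, then yield the CPU between reads */
        if (polls++ < 10)
            udelay(10);
        else
            msleep(1);
    }
    return 0;
}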
static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
{
    u8 devctl;
    unsigned long timeout = jiffies + msecs_to_jiffies(1000);
    int ret = 1;

    /* HDRC controls CPEN, but beware current surges during device
     * connect. They can trigger transient overcurrent conditions
     * that must be ignored.
     */
    pm_runtime_get_sync(musb->controller);
    devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
    if (is_on) {
        if (musb->xceiv->state == OTG_STATE_A_IDLE) {
            /* start the session */
            devctl |= MUSB_DEVCTL_SESSION;
            musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
            /*
             * Wait for the musb to set as A device to enable the
             * VBUS
             */
            while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
                cpu_relax();

                if (time_after(jiffies, timeout)) {
                    dev_err(musb->controller,
                            "configured as A device timeout");
                    ret = -EINVAL;
                    break;
                }
            }

            if (ret && musb->xceiv->set_vbus)
                otg_set_vbus(musb->xceiv, 1);

            musb->xceiv->default_a = 1;
            MUSB_HST_MODE(musb);
        }
    } else {
        musb->is_active = 0;

        /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
         * jumping right to B_IDLE...
         */
        musb->xceiv->default_a = 0;
        musb->xceiv->state = OTG_STATE_B_IDLE;
        devctl &= ~MUSB_DEVCTL_SESSION;

        musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
        MUSB_DEV_MODE(musb);
    }

    dev_dbg(musb->controller, "VBUS %s, devctl %02x "
            /* otg %3x conf %08x prcm %08x */ "\n",
            otg_state_string(musb->xceiv->state),
            musb_readb(musb->mregs, MUSB_DEVCTL));

    pm_runtime_put_sync(musb->controller);
}
static int sunxi_clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
                                      unsigned long parent_rate)
{
    unsigned long reg;
    struct clk_factors_value factor_val;
    unsigned long orig_jiffies;
    struct sunxi_clk_factors *factor = to_clk_factor(hw);
    struct sunxi_clk_factors_config *config = factor->config;

    if (!factor->get_factors)
        return 0;

    factor->get_factors(rate, parent_rate, &factor_val);

    reg = factor_readl(factor, factor->reg);
    if (config->sdmwidth) {
        factor_writel(factor, config->sdmval,
                      (void __iomem *)config->sdmpat);
        reg = SET_BITS(config->sdmshift, config->sdmwidth, reg, 1);
    }

    if (config->nwidth)
        reg = SET_BITS(config->nshift, config->nwidth, reg,
                       factor_val.factorn);
    if (config->kwidth)
        reg = SET_BITS(config->kshift, config->kwidth, reg,
                       factor_val.factork);
    if (config->mwidth)
        reg = SET_BITS(config->mshift, config->mwidth, reg,
                       factor_val.factorm);
    if (config->pwidth)
        reg = SET_BITS(config->pshift, config->pwidth, reg,
                       factor_val.factorp);
    if (config->d1width)
        reg = SET_BITS(config->d1shift, config->d1width, reg,
                       factor_val.factord1);
    if (config->d2width)
        reg = SET_BITS(config->d2shift, config->d2width, reg,
                       factor_val.factord2);
    if (config->frac) {
        reg = SET_BITS(config->modeshift, 1, reg, factor_val.frac_mode);
        reg = SET_BITS(config->outshift, 1, reg, factor_val.frac_freq);
    }

    factor_writel(factor, reg, factor->reg);

#ifndef CONFIG_SUNXI_CLK_DUMMY_DEBUG
    orig_jiffies = jiffies;
    if (GET_BITS(config->enshift, 1, reg)) {
        /* if the PLL is enabled, wait for it to lock,
         * but no longer than 500 ms */
        while (1) {
            reg = factor_readl(factor, factor->lock_reg);
            if (GET_BITS(factor->lock_bit, 1, reg))
                break;

            if (time_after(jiffies,
                           orig_jiffies + msecs_to_jiffies(500))) {
#if (defined CONFIG_FPGA_V4_PLATFORM) || (defined CONFIG_FPGA_V7_PLATFORM)
                printk("clk %s wait lock timeout\n", hw->clk->name);
#else
                WARN(1, "clk %s wait lock timeout\n", hw->clk->name);
#endif
                break;
            }
        }
    }
#endif

    return 0;
}
static int arp_process(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    struct arphdr *arp;
    unsigned char *arp_ptr;
    struct rtable *rt;
    unsigned char *sha;
    __be32 sip, tip;
    u16 dev_type = dev->type;
    int addr_type;
    struct neighbour *n;
    struct net *net = dev_net(dev);

    /* arp_rcv below verifies the ARP header and verifies the device
     * is ARP'able.
     */
    if (in_dev == NULL)
        goto out;

    arp = arp_hdr(skb);

    switch (dev_type) {
    default:
        if (arp->ar_pro != htons(ETH_P_IP) ||
            htons(dev_type) != arp->ar_hrd)
            goto out;
        break;
    case ARPHRD_ETHER:
    case ARPHRD_IEEE802_TR:
    case ARPHRD_FDDI:
    case ARPHRD_IEEE802:
        /*
         * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
         * devices, according to RFC 2625) devices will accept ARP
         * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
         * This is also the case for FDDI, where RFC 1390 says that
         * FDDI devices should accept ARP hardware type 1 (Ethernet);
         * however, to be more robust, we'll accept both 1 (Ethernet)
         * and 6 (IEEE 802.2).
         */
        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP))
            goto out;
        break;
    case ARPHRD_AX25:
        if (arp->ar_pro != htons(AX25_P_IP) ||
            arp->ar_hrd != htons(ARPHRD_AX25))
            goto out;
        break;
    case ARPHRD_NETROM:
        if (arp->ar_pro != htons(AX25_P_IP) ||
            arp->ar_hrd != htons(ARPHRD_NETROM))
            goto out;
        break;
    }

    /* Understand only these message types */
    if (arp->ar_op != htons(ARPOP_REPLY) &&
        arp->ar_op != htons(ARPOP_REQUEST))
        goto out;

    /*
     * Extract fields
     */
    arp_ptr = (unsigned char *)(arp + 1);
    sha = arp_ptr;
    arp_ptr += dev->addr_len;
    memcpy(&sip, arp_ptr, 4);
    arp_ptr += 4;
    arp_ptr += dev->addr_len;
    memcpy(&tip, arp_ptr, 4);

    /*
     * Check for bad requests for 127.x.x.x and requests for multicast
     * addresses. If this is one such, delete it.
     */
    if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
        goto out;

    /*
     * Special case: We must set Frame Relay source Q.922 address
     */
    if (dev_type == ARPHRD_DLCI)
        sha = dev->broadcast;

    /*
     * Process entry. The idea here is we want to send a reply if it is a
     * request for us or if it is a request for someone else that we hold
     * a proxy for. We want to add an entry to our cache if it is a reply
     * to us or if it is a request for our address.
     * (The assumption for this last is that if someone is requesting our
     * address, they are probably intending to talk to us, so it saves time
     * if we cache their address. Their address is also probably not in
     * our cache, since ours is not in their cache.)
     *
     * Putting this another way, we only care about replies if they are to
     * us, in which case we add them to the cache. For requests, we care
     * about those for us and those for our proxies. We reply to both,
     * and in the case of requests for us we add the requester to the arp
     * cache.
     */

    /* Special case: IPv4 duplicate address detection packet (RFC2131) */
    if (sip == 0) {
        if (arp->ar_op == htons(ARPOP_REQUEST) &&
            inet_addr_type(net, tip) == RTN_LOCAL &&
            !arp_ignore(in_dev, sip, tip))
            arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                     dev->dev_addr, sha);
        goto out;
    }

    if (arp->ar_op == htons(ARPOP_REQUEST) &&
        ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {

        rt = skb_rtable(skb);
        addr_type = rt->rt_type;

        if (addr_type == RTN_LOCAL) {
            int dont_send;

            dont_send = arp_ignore(in_dev, sip, tip);
            if (!dont_send && IN_DEV_ARPFILTER(in_dev))
                dont_send = arp_filter(sip, tip, dev);
            if (!dont_send) {
                n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                if (n) {
                    arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
                             sha, dev->dev_addr, sha);
                    neigh_release(n);
                }
            }
            goto out;
        } else if (IN_DEV_FORWARD(in_dev)) {
            if (addr_type == RTN_UNICAST &&
                (arp_fwd_proxy(in_dev, dev, rt) ||
                 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
                 (rt->dst.dev != dev &&
                  pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
                n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                if (n)
                    neigh_release(n);

                if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
                    skb->pkt_type == PACKET_HOST ||
                    in_dev->arp_parms->proxy_delay == 0) {
                    arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
                             sha, dev->dev_addr, sha);
                } else {
                    pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
                    return 0;
                }
                goto out;
            }
        }
    }

    /* Update our ARP tables */

    n = __neigh_lookup(&arp_tbl, &sip, dev, 0);

    if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) {
        /* Unsolicited ARP is not accepted by default.
           It is possible, that this option should be enabled for some
           devices (strip is candidate)
         */
        if (n == NULL &&
            (arp->ar_op == htons(ARPOP_REPLY) ||
             (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
            inet_addr_type(net, sip) == RTN_UNICAST)
            n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
    }

    if (n) {
        int state = NUD_REACHABLE;
        int override;

        /* If several different ARP replies follow back-to-back,
           use the FIRST one. It is possible, if several proxy
           agents are active. Taking the first reply prevents
           arp trashing and chooses the fastest router.
         */
        override = time_after(jiffies, n->updated + n->parms->locktime);

        /* Broadcast replies and request packets
           do not assert neighbour reachability.
         */
        if (arp->ar_op != htons(ARPOP_REPLY) ||
            skb->pkt_type != PACKET_HOST)
            state = NUD_STALE;
        neigh_update(n, sha, state,
                     override ? NEIGH_UPDATE_F_OVERRIDE : 0);
        neigh_release(n);
    }

out:
    /* restored function tail: the original frees the skb and returns 0 */
    consume_skb(skb);
    return 0;
}
DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT *pDevContext,
                                u16 wMbVal)
{
#ifdef CONFIG_BRIDGE_DVFS
    struct dspbridge_platform_data *pdata =
            omap_dspbridge_dev->dev.platform_data;
    u32 opplevel = 0;
#endif
    struct CFG_HOSTRES resources;
    DSP_STATUS status = DSP_SOK;
    unsigned long timeout;
    u32 temp;

    status = CFG_GetHostResources((struct CFG_DEVNODE *)
                                  DRV_GetFirstDevExtension(), &resources);
    if (DSP_FAILED(status))
        return DSP_EFAIL;

#ifdef CONFIG_BRIDGE_DVFS
    if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION ||
        pDevContext->dwBrdState == BRD_HIBERNATION) {
        if (pdata->dsp_get_opp)
            opplevel = (*pdata->dsp_get_opp)();
        if (opplevel == 1) {
            if (pdata->dsp_set_min_opp)
                (*pdata->dsp_set_min_opp)(opplevel + 1);
        }
    }
#endif

    if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION ||
        pDevContext->dwBrdState == BRD_HIBERNATION) {
        /* Restart the IVA clock that was disabled while
         * the DSP initiated Hibernation. */
        status = CLK_Enable(SERVICESCLK_iva2_ck);
        if (DSP_FAILED(status))
            return status;

        /* Restore mailbox settings */
        /* Restart the peripheral clocks that were disabled only
         * in DSP initiated Hibernation case. */
        if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION) {
            DSP_PeripheralClocks_Enable(pDevContext, NULL);

            /* Enabling Dpll in lock mode */
            temp = (u32) *((REG_UWORD32 *)
                    ((u32) (resources.dwCmBase) + 0x34));
            temp = (temp & 0xFFFFFFFE) | 0x1;
            *((REG_UWORD32 *) ((u32) (resources.dwCmBase) + 0x34)) =
                    (u32) temp;

            temp = (u32) *((REG_UWORD32 *)
                    ((u32) (resources.dwCmBase) + 0x4));
            temp = (temp & 0xFFFFFC8) | 0x37;
            *((REG_UWORD32 *) ((u32) (resources.dwCmBase) + 0x4)) =
                    (u32) temp;
        }

        HW_MBOX_restoreSettings(resources.dwMboxBase);

        /* Access MMU SYS CONFIG register to generate a short wakeup */
        temp = (u32) *((REG_UWORD32 *)
                ((u32) (resources.dwDmmuBase) + 0x10));

        pDevContext->dwBrdState = BRD_RUNNING;
    }

    timeout = jiffies + msecs_to_jiffies(1);
    while (fifo_full((void __iomem *)resources.dwMboxBase, 0)) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR
                   "dspbridge: timed out waiting for mailbox\n");
            return WMD_E_TIMEOUT;
        }
    }

    DBG_Trace(DBG_LEVEL3, "writing %x to Mailbox\n", wMbVal);

    HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, wMbVal);
    return DSP_SOK;
}
int aac_sa_init(struct aac_dev *dev)
{
    unsigned long start;
    unsigned long status;
    int instance;
    const char *name;

    instance = dev->id;
    name = dev->name;

    if (aac_sa_ioremap(dev, dev->base_size)) {
        printk(KERN_WARNING "%s: unable to map adapter.\n", name);
        goto error_iounmap;
    }

    /*
     * Check to see if the board failed any self tests.
     */
    if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
        printk(KERN_WARNING "%s%d: adapter self-test failed.\n",
               name, instance);
        goto error_iounmap;
    }

    /*
     * Check to see if the board panic'd while booting.
     */
    if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
        printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n",
               name, instance);
        goto error_iounmap;
    }

    start = jiffies;

    /*
     * Wait for the adapter to be up and running. Wait up to 3 minutes.
     */
    while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
        if (time_after(jiffies, start + startup_timeout * HZ)) {
            status = sa_readl(dev, Mailbox7);
            printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
                   name, instance, status);
            goto error_iounmap;
        }
        msleep(1);
    }

    /*
     * Fill in the function dispatch table.
     */
    dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
    dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
    dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
    dev->a_ops.adapter_notify = aac_sa_notify_adapter;
    dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
    dev->a_ops.adapter_check_health = aac_sa_check_health;
    dev->a_ops.adapter_intr = aac_sa_intr;
    dev->a_ops.adapter_ioremap = aac_sa_ioremap;

    /*
     * First clear out all interrupts. Then enable the ones that
     * we can handle.
     */
    aac_adapter_disable_int(dev);
    aac_adapter_enable_int(dev);

    if (aac_init_adapter(dev) == NULL)
        goto error_irq;
    if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
                    IRQF_SHARED | IRQF_DISABLED,
                    "aacraid", (void *)dev) < 0) {
        printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
               name, instance);
        goto error_iounmap;
    }
    aac_adapter_enable_int(dev);

    /*
     * Tell the adapter that all is configured, and it can
     * start accepting requests
     */
    aac_sa_start_adapter(dev);
    return 0;

error_irq:
    aac_sa_disable_interrupt(dev);
    free_irq(dev->scsi_host_ptr->irq, (void *)dev);

error_iounmap:

    return -1;
}
static struct lm87_data *lm87_update_device(struct device *dev)
{
    struct i2c_client *client = to_i2c_client(dev);
    struct lm87_data *data = i2c_get_clientdata(client);

    mutex_lock(&data->update_lock);

    if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
        int i, j;

        dev_dbg(&client->dev, "Updating data.\n");

        i = (data->channel & CHAN_TEMP3) ? 1 : 0;
        j = (data->channel & CHAN_TEMP3) ? 5 : 6;
        for (; i < j; i++) {
            data->in[i] = lm87_read_value(client,
                          LM87_REG_IN(i));
            data->in_min[i] = lm87_read_value(client,
                              LM87_REG_IN_MIN(i));
            data->in_max[i] = lm87_read_value(client,
                              LM87_REG_IN_MAX(i));
        }

        for (i = 0; i < 2; i++) {
            if (data->channel & CHAN_NO_FAN(i)) {
                data->in[6+i] = lm87_read_value(client,
                                LM87_REG_AIN(i));
                data->in_max[6+i] = lm87_read_value(client,
                                    LM87_REG_AIN_MAX(i));
                data->in_min[6+i] = lm87_read_value(client,
                                    LM87_REG_AIN_MIN(i));
            } else {
                data->fan[i] = lm87_read_value(client,
                               LM87_REG_FAN(i));
                data->fan_min[i] = lm87_read_value(client,
                                   LM87_REG_FAN_MIN(i));
            }
        }

        j = (data->channel & CHAN_TEMP3) ? 3 : 2;
        for (i = 0; i < j; i++) {
            data->temp[i] = lm87_read_value(client,
                            LM87_REG_TEMP[i]);
            data->temp_high[i] = lm87_read_value(client,
                                 LM87_REG_TEMP_HIGH[i]);
            data->temp_low[i] = lm87_read_value(client,
                                LM87_REG_TEMP_LOW[i]);
        }

        i = lm87_read_value(client, LM87_REG_TEMP_HW_INT_LOCK);
        j = lm87_read_value(client, LM87_REG_TEMP_HW_INT);
        data->temp_crit_int = min(i, j);

        i = lm87_read_value(client, LM87_REG_TEMP_HW_EXT_LOCK);
        j = lm87_read_value(client, LM87_REG_TEMP_HW_EXT);
        data->temp_crit_ext = min(i, j);

        i = lm87_read_value(client, LM87_REG_VID_FAN_DIV);
        data->fan_div[0] = (i >> 4) & 0x03;
        data->fan_div[1] = (i >> 6) & 0x03;
        data->vid = (i & 0x0F)
                  | (lm87_read_value(client, LM87_REG_VID4) & 0x01) << 4;

        data->alarms = lm87_read_value(client, LM87_REG_ALARMS1)
                     | (lm87_read_value(client, LM87_REG_ALARMS2) << 8);
        data->aout = lm87_read_value(client, LM87_REG_AOUT);

        data->last_updated = jiffies;
        data->valid = 1;
    }

    /* restored function tail: release the lock and return the data */
    mutex_unlock(&data->update_lock);

    return data;
}
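/*
 * Illustrative sketch (hypothetical foo_* driver, not lm87): the hwmon
 * caching idiom that lm87_update_device() implements. Hardware reads are
 * refreshed at most once per HZ jiffies; between refreshes, callers get
 * the cached values. foo_data and foo_read_all_registers() are
 * placeholders standing in for a real driver's state and register I/O.
 */
struct foo_data {
    struct mutex update_lock;
    unsigned long last_updated;     /* in jiffies */
    char valid;
    /* ...cached register values would live here... */
};

static void foo_read_all_registers(struct foo_data *data);  /* hypothetical */

static struct foo_data *foo_update_device(struct foo_data *data)
{
    mutex_lock(&data->update_lock);
    if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
        foo_read_all_registers(data);   /* expensive bus traffic */
        data->last_updated = jiffies;
        data->valid = 1;
    }
    mutex_unlock(&data->update_lock);
    return data;
}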
unsigned long probe_irq_on(void)
{
    unsigned int i;
    irq_desc_t *desc;
    unsigned long val;
    unsigned long delay;

    down(&probe_sem);
    /*
     * something may have generated an irq long ago and we want to
     * flush such a longstanding irq before considering it as spurious.
     */
    for (i = NR_IRQS - 1; i > 0; i--) {
        desc = irq_desc + i;

        spin_lock_irq(&desc->lock);
        if (!irq_desc[i].action)
            irq_desc[i].handler->startup(i);
        spin_unlock_irq(&desc->lock);
    }

    /* Wait for longstanding interrupts to trigger. */
    for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
        /* about 20ms delay */
        synchronize_irq();

    /*
     * enable any unassigned irqs
     * (we must startup again here because if a longstanding irq
     * happened in the previous stage, it may have masked itself)
     */
    for (i = NR_IRQS - 1; i > 0; i--) {
        desc = irq_desc + i;

        spin_lock_irq(&desc->lock);
        if (!desc->action) {
            desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
            if (desc->handler->startup(i))
                desc->status |= IRQ_PENDING;
        }
        spin_unlock_irq(&desc->lock);
    }

    /*
     * Wait for spurious interrupts to trigger
     */
    for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
        /* about 100ms delay */
        synchronize_irq();

    /*
     * Now filter out any obviously spurious interrupts
     */
    val = 0;
    for (i = 0; i < NR_IRQS; i++) {
        irq_desc_t *desc = irq_desc + i;
        unsigned int status;

        spin_lock_irq(&desc->lock);
        status = desc->status;

        if (status & IRQ_AUTODETECT) {
            /* It triggered already - consider it spurious. */
            if (!(status & IRQ_WAITING)) {
                desc->status = status & ~IRQ_AUTODETECT;
                desc->handler->shutdown(i);
            } else if (i < 32)
                val |= 1 << i;
        }
        spin_unlock_irq(&desc->lock);
    }

    return val;
}
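/*
 * Illustrative usage sketch (hypothetical caller, not in this file):
 * probe_irq_on() is paired with probe_irq_off(), which consumes the mask
 * returned above and reports which probed IRQ actually fired.
 */
static int example_autoprobe(void)
{
    unsigned long mask;
    int irq;

    mask = probe_irq_on();
    /* ...make the device raise its interrupt here... */
    irq = probe_irq_off(mask);  /* >0: IRQ found, 0: none, <0: several */

    return irq;
}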
static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(100);

    dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");

    /* Reset OMAP modules for insmod/rmmod to work */
    ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
                     is_omap_ehci_rev2(omap) ?
                     OMAP4_UHH_SYSCONFIG_SOFTRESET :
                     OMAP_UHH_SYSCONFIG_SOFTRESET);
    while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
             & (1 << 0))) {
        cpu_relax();

        if (time_after(jiffies, timeout))
            dev_dbg(omap->dev, "operation timed out\n");
    }

    while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
             & (1 << 1))) {
        cpu_relax();

        if (time_after(jiffies, timeout))
            dev_dbg(omap->dev, "operation timed out\n");
    }

    while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
             & (1 << 2))) {
        cpu_relax();

        if (time_after(jiffies, timeout))
            dev_dbg(omap->dev, "operation timed out\n");
    }

    ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));

    while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
             & (1 << 0))) {
        cpu_relax();

        if (time_after(jiffies, timeout))
            dev_dbg(omap->dev, "operation timed out\n");
    }

    if (omap->usbtll_fck != NULL) {
        clk_disable(omap->usbtll_fck);
        clk_put(omap->usbtll_fck);
        omap->usbtll_fck = NULL;
    }

    if (omap->usbhost_ick != NULL) {
        clk_disable(omap->usbhost_ick);
        clk_put(omap->usbhost_ick);
        omap->usbhost_ick = NULL;
    }

    if (omap->usbhost_fs_fck != NULL) {
        clk_disable(omap->usbhost_fs_fck);
        clk_put(omap->usbhost_fs_fck);
        omap->usbhost_fs_fck = NULL;
    }

    if (omap->usbhost_hs_fck != NULL) {
        clk_disable(omap->usbhost_hs_fck);
        clk_put(omap->usbhost_hs_fck);
        omap->usbhost_hs_fck = NULL;
    }

    if (omap->usbtll_ick != NULL) {
        clk_disable(omap->usbtll_ick);
        clk_put(omap->usbtll_ick);
        omap->usbtll_ick = NULL;
    }

    if (is_omap_ehci_rev2(omap)) {
        if (omap->xclk60mhsp1_ck != NULL) {
            clk_disable(omap->xclk60mhsp1_ck);
            clk_put(omap->xclk60mhsp1_ck);
            omap->xclk60mhsp1_ck = NULL;
        }

        if (omap->utmi_p1_fck != NULL) {
            clk_disable(omap->utmi_p1_fck);
            clk_put(omap->utmi_p1_fck);
            omap->utmi_p1_fck = NULL;
        }

        if (omap->xclk60mhsp2_ck != NULL) {
            clk_disable(omap->xclk60mhsp2_ck);
            clk_put(omap->xclk60mhsp2_ck);
            omap->xclk60mhsp2_ck = NULL;
        }

        if (omap->utmi_p2_fck != NULL) {
            clk_disable(omap->utmi_p2_fck);
            clk_put(omap->utmi_p2_fck);
            omap->utmi_p2_fck = NULL;
        }
    }

    if (omap->phy_reset) {
        if (gpio_is_valid(omap->reset_gpio_port[0]))
            gpio_free(omap->reset_gpio_port[0]);

        if (gpio_is_valid(omap->reset_gpio_port[1]))
            gpio_free(omap->reset_gpio_port[1]);
    }

    dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
}
static int
ether1_init_for_open (struct net_device *dev)
{
    int i, status, addr, next, next2;
    int failures = 0;
    unsigned long timeout;

    writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);

    for (i = 0; i < 6; i++)
        init_sa.sa_addr[i] = dev->dev_addr[i];

    /* load data structures into ether1 RAM */
    ether1_writebuffer (dev, &init_scp,  SCP_ADDR,  SCP_SIZE);
    ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
    ether1_writebuffer (dev, &init_scb,  SCB_ADDR,  SCB_SIZE);
    ether1_writebuffer (dev, &init_cfg,  CFG_ADDR,  CFG_SIZE);
    ether1_writebuffer (dev, &init_sa,   SA_ADDR,   SA_SIZE);
    ether1_writebuffer (dev, &init_mc,   MC_ADDR,   MC_SIZE);
    ether1_writebuffer (dev, &init_tdr,  TDR_ADDR,  TDR_SIZE);
    ether1_writebuffer (dev, &init_nop,  NOP_ADDR,  NOP_SIZE);

    if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
        printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
                dev->name);
        return 1;
    }

    /*
     * setup circularly linked list of { rfd, rbd, buffer }, with
     * all rfds circularly linked, rbds circularly linked.
     * First rfd is linked to scp, first rbd is linked to first
     * rfd. Last rbd has a suspend command.
     */
    addr = RX_AREA_START;
    do {
        next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
        next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;

        if (next2 >= RX_AREA_END) {
            next = RX_AREA_START;
            init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
            priv(dev)->rx_tail = addr;
        } else
            init_rfd.rfd_command = 0;
        if (addr == RX_AREA_START)
            init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
        else
            init_rfd.rfd_rbdoffset = 0;
        init_rfd.rfd_link = next;
        init_rbd.rbd_link = next + RFD_SIZE;
        init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;

        ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
        ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
        addr = next;
    } while (next2 < RX_AREA_END);

    priv(dev)->tx_link = NOP_ADDR;
    priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
    priv(dev)->tx_tail = TDR_ADDR;
    priv(dev)->rx_head = RX_AREA_START;

    /* release reset & give 586 a prod */
    priv(dev)->resetting = 1;
    priv(dev)->initialising = 1;
    writeb(CTRL_RST, REG_CONTROL);
    writeb(0, REG_CONTROL);
    writeb(CTRL_CA, REG_CONTROL);

    /* 586 should now unset iscp.busy */
    timeout = jiffies + HZ/2;
    while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
        if (time_after(jiffies, timeout)) {
            printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n",
                    dev->name);
            return 1;
        }
    }

    /* check status of commands that we issued */
    timeout += HZ/10;
    while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
            & STAT_COMPLETE) == 0) {
        if (time_after(jiffies, timeout))
            break;
    }

    if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
        printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n",
                dev->name, status);
        printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n",
                dev->name,
                ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
        failures += 1;
    }

    timeout += HZ/10;
    while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
            & STAT_COMPLETE) == 0) {
        if (time_after(jiffies, timeout))
            break;
    }

    if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
        printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n",
                dev->name, status);
        printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n",
                dev->name,
                ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
        failures += 1;
    }

    timeout += HZ/10;
    while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
            & STAT_COMPLETE) == 0) {
        if (time_after(jiffies, timeout))
            break;
    }

    if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
        printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n",
                dev->name, status);
        printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n",
                dev->name,
                ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
        failures += 1;
    }

    timeout += HZ;
    while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
            & STAT_COMPLETE) == 0) {
        if (time_after(jiffies, timeout))
            break;
    }

    if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
        printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
        printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n",
                dev->name,
                ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
                ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
    } else {
        status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
        if (status & TDR_XCVRPROB)
            printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n",
                    dev->name);
        else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
#ifdef FANCY
            printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n",
                    dev->name, status & TDR_SHORT ? "short" : "open",
                    (status & TDR_TIME) / 10, (status & TDR_TIME) % 10);
#else
            printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n",
                    dev->name, status & TDR_SHORT ? "short" : "open",
                    (status & TDR_TIME));
#endif
        }
    }

    if (failures)
        ether1_reset (dev);

    return failures ? 1 : 0;
}
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
                        struct ktermios *old)
{
    struct uart_omap_port *up = (struct uart_omap_port *)port;
    unsigned char cval = 0;
    unsigned char efr = 0;
    unsigned long flags = 0;
    unsigned int baud, quot;
    unsigned long old_jiffies;

    switch (termios->c_cflag & CSIZE) {
    case CS5:
        cval = UART_LCR_WLEN5;
        break;
    case CS6:
        cval = UART_LCR_WLEN6;
        break;
    case CS7:
        cval = UART_LCR_WLEN7;
        break;
    default:
    case CS8:
        cval = UART_LCR_WLEN8;
        break;
    }

    if (termios->c_cflag & CSTOPB)
        cval |= UART_LCR_STOP;
    if (termios->c_cflag & PARENB)
        cval |= UART_LCR_PARITY;
    if (!(termios->c_cflag & PARODD))
        cval |= UART_LCR_EPAR;

    /*
     * Ask the core to calculate the divisor for us.
     */
    baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
    quot = serial_omap_get_divisor(port, baud);

    up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
              UART_FCR_ENABLE_FIFO;
    if (up->use_dma)
        up->fcr |= UART_FCR_DMA_SELECT;

    old_jiffies = jiffies;

    /*
     * Ok, we're now changing the port state. Do it with
     * interrupts disabled.
     */
    spin_lock_irqsave(&up->port.lock, flags);

    /*
     * Update the per-port timeout.
     */
    uart_update_timeout(port, termios->c_cflag, baud);

    up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
    if (termios->c_iflag & INPCK)
        up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
    if (termios->c_iflag & (BRKINT | PARMRK))
        up->port.read_status_mask |= UART_LSR_BI;

    /*
     * Characters to ignore
     */
    up->port.ignore_status_mask = 0;
    if (termios->c_iflag & IGNPAR)
        up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
    if (termios->c_iflag & IGNBRK) {
        up->port.ignore_status_mask |= UART_LSR_BI;
        /*
         * If we're ignoring parity and break indicators,
         * ignore overruns too (for real raw support).
         */
        if (termios->c_iflag & IGNPAR)
            up->port.ignore_status_mask |= UART_LSR_OE;
    }

    /*
     * ignore all characters if CREAD is not set
     */
    if ((termios->c_cflag & CREAD) == 0)
        up->port.ignore_status_mask |= UART_LSR_DR;

    /*
     * Modem status interrupts
     */
    up->ier &= ~UART_IER_MSI;
    if (UART_ENABLE_MS(&up->port, termios->c_cflag))
        up->ier |= UART_IER_MSI;
    serial_out(up, UART_IER, up->ier);
    serial_out(up, UART_LCR, cval);     /* reset DLAB */

    /* FIFOs and DMA Settings */

    /*
     * FCR can be changed only when the baud clock is not running
     * (DLL_REG and DLH_REG set to 0).
     */
    serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_DISABLE);
    serial_out(up, UART_LCR, UART_LCR_DLAB);
    serial_out(up, UART_DLL, 0);
    serial_out(up, UART_DLM, 0);
    serial_out(up, UART_LCR, 0);

    serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);

    up->efr = serial_in(up, UART_EFR);
    serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

    serial_out(up, UART_LCR, 0);
    up->mcr = serial_in(up, UART_MCR);
    serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
    /* FIFO ENABLE, DMA MODE */
    serial_out(up, UART_FCR, up->fcr);
    serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);

    if (up->use_dma) {
        if (up->uart_dma.tx_threshold) {
            serial_out(up, UART_MDR3, SET_DMA_TX_THRESHOLD);
            serial_out(up, UART_TX_DMA_THRESHOLD, TX_FIFO_THR_LVL);
        }

        serial_out(up, UART_TI752_TLR, 0);
        serial_out(up, UART_OMAP_SCR,
                   (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
    }

    serial_out(up, UART_EFR, up->efr);
    serial_out(up, UART_LCR, UART_LCR_DLAB);
    serial_out(up, UART_MCR, up->mcr);

    /* Protocol, Baud Rate, and Interrupt Settings */

    serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
    up->efr = serial_in(up, UART_EFR);
    serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

    serial_out(up, UART_LCR, 0);
    serial_out(up, UART_IER, 0);
    serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);

    serial_out(up, UART_DLL, quot & 0xff);      /* LS of divisor */
    serial_out(up, UART_DLM, quot >> 8);        /* MS of divisor */

    serial_out(up, UART_LCR, 0);
    serial_out(up, UART_IER, up->ier);
    serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);

    serial_out(up, UART_EFR, up->efr);
    serial_out(up, UART_LCR, cval);

    if (baud > 230400 && baud != 3000000)
        serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE13X);
    else
        serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE16X);

    /* Hardware Flow Control Configuration */

    if (termios->c_cflag & CRTSCTS) {
        efr |= ((up->ctsrts & UART_EFR_CTS) |
                (up->restore_autorts ? 0 : UART_EFR_RTS));
        serial_out(up, UART_LCR, UART_LCR_DLAB);

        up->mcr = serial_in(up, UART_MCR);
        serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);

        serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
        up->efr = serial_in(up, UART_EFR);
        serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

        serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
        serial_out(up, UART_EFR, efr);  /* Enable AUTORTS and AUTOCTS */
        serial_out(up, UART_LCR, UART_LCR_DLAB);
        serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
        serial_out(up, UART_LCR, cval);
    }

    serial_omap_set_mctrl(&up->port, up->port.mctrl);
    /* Software Flow Control Configuration */
    if (termios->c_iflag & (IXON | IXOFF))
        serial_omap_configure_xonxoff(up, termios);

    if (time_after(jiffies, old_jiffies + (HZ * 5)))
        printk(KERN_ERR "omap-serial: jiffies = %ld, old_jiffies = %ld\n",
               jiffies, old_jiffies);

    spin_unlock_irqrestore(&up->port.lock, flags);
    dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
}
static void sharpsl_battery_thread(void *private_)
{
    int voltage, percent, apm_status, i = 0;

    if (!sharpsl_pm.machinfo)
        return;

    sharpsl_pm.battstat.ac_status =
        (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)
         ? APM_AC_ONLINE : APM_AC_OFFLINE);

    /* Corgi cannot confirm when battery fully charged so periodically kick! */
    if (machine_is_corgi() && (sharpsl_pm.charge_mode == CHRG_ON)
        && time_after(jiffies, sharpsl_pm.charge_start_time +
                      SHARPSL_CHARGE_ON_TIME_INTERVAL))
        schedule_work(&toggle_charger);

    while (1) {
        voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);

        if (voltage > 0)
            break;
        if (i++ > 5) {
            voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
            dev_warn(sharpsl_pm.dev,
                     "Warning: Cannot read main battery!\n");
            break;
        }
    }

    voltage = sharpsl_average_value(voltage);
    apm_status = get_apm_status(voltage);
    percent = get_percentage(voltage);

    /* At low battery voltages, the voltage has a tendency to start
       creeping back up so we try to avoid this here */
    if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) ||
        (apm_status == APM_BATTERY_STATUS_HIGH) ||
        percent <= sharpsl_pm.battstat.mainbat_percent) {
        sharpsl_pm.battstat.mainbat_voltage = voltage;
        sharpsl_pm.battstat.mainbat_status = apm_status;
        sharpsl_pm.battstat.mainbat_percent = percent;
    }

    dev_dbg(sharpsl_pm.dev,
            "Battery: voltage: %d, status: %d, percentage: %d, time: %d\n",
            voltage, sharpsl_pm.battstat.mainbat_status,
            sharpsl_pm.battstat.mainbat_percent, jiffies);

    /* If battery is low, limit backlight intensity to save power. */
    if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE) &&
        ((sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_LOW) ||
         (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL))) {
        if (!(sharpsl_pm.flags & SHARPSL_BL_LIMIT)) {
            corgibl_limit_intensity(1);
            sharpsl_pm.flags |= SHARPSL_BL_LIMIT;
        }
    } else if (sharpsl_pm.flags & SHARPSL_BL_LIMIT) {
        corgibl_limit_intensity(0);
        sharpsl_pm.flags &= ~SHARPSL_BL_LIMIT;
    }

    /* Suspend if critical battery level */
    if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE) &&
        (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL) &&
        !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
        sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
        dev_err(sharpsl_pm.dev, "Fatal Off\n");
        apm_queue_event(APM_CRITICAL_SUSPEND);
    }

    schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
}
static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct ieee80211_tx_status *status)
{
    struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    struct ieee80211_sub_if_data *sdata;
    struct rc_pid_info *pinfo = priv;
    struct sta_info *sta;
    struct rc_pid_sta_info *spinfo;
    unsigned long period;

    sta = sta_info_get(local, hdr->addr1);

    if (!sta)
        return;

    /* Don't update the state if we're not controlling the rate. */
    sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
    if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
        sta->txrate = sdata->bss->max_ratectrl_rateidx;
        return;
    }

    /* Ignore all frames that were sent with a different rate than the rate
     * we currently advise mac80211 to use. */
    if (status->control.rate != &local->oper_hw_mode->rates[sta->txrate])
        goto ignore;

    spinfo = sta->rate_ctrl_priv;
    spinfo->tx_num_xmit++;

#ifdef CONFIG_MAC80211_DEBUGFS
    rate_control_pid_event_tx_status(&spinfo->events, status);
#endif

    /* We count frames that totally failed to be transmitted as two bad
     * frames, those that made it out but had some retries as one good and
     * one bad frame. */
    if (status->excessive_retries) {
        spinfo->tx_num_failed += 2;
        spinfo->tx_num_xmit++;
    } else if (status->retry_count) {
        spinfo->tx_num_failed++;
        spinfo->tx_num_xmit++;
    }

    if (status->excessive_retries) {
        sta->tx_retry_failed++;
        sta->tx_num_consecutive_failures++;
        sta->tx_num_mpdu_fail++;
    } else {
        sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
        sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
        sta->last_ack_rssi[2] = status->ack_signal;
        sta->tx_num_consecutive_failures = 0;
        sta->tx_num_mpdu_ok++;
    }
    sta->tx_retry_count += status->retry_count;
    sta->tx_num_mpdu_fail += status->retry_count;

    /* Update PID controller state. */
    period = (HZ * pinfo->sampling_period + 500) / 1000;
    if (!period)
        period = 1;
    if (time_after(jiffies, spinfo->last_sample + period))
        rate_control_pid_sample(pinfo, local, sta);

ignore:
    sta_info_put(sta);
}
static struct dps_800ab_16_d_data *
dps_800ab_16_d_update_device(struct device *dev)
{
    struct i2c_client *client = to_i2c_client(dev);
    struct dps_800ab_16_d_data *data = i2c_get_clientdata(client);

    mutex_lock(&data->update_lock);

    /* Select SWPLD PSU offset */
    i2c_cpld_write(6, SWPLD_REG, SWPLD_PSU_MUX_REG, psu_member_data);

    if (time_after(jiffies, data->last_updated)) {
        int i, status;
        u8 command;
        struct reg_data_byte regs_byte[] = {
            {0x20, &data->vout_mode},
            {0x81, &data->fan_fault}
        };
        struct reg_data_word regs_word[] = {
            {0x88, &data->v_in},
            {0x8b, &data->v_out},
            {0x89, &data->i_in},
            {0x8c, &data->i_out},
            {0x97, &data->p_in},
            {0x96, &data->p_out},
            {0x8d, &(data->temp_input[0])},
            {0x8e, &(data->temp_input[1])},
            {0x3b, &(data->fan_duty_cycle[0])},
            {0x90, &(data->fan_speed[0])},
        };

        dev_dbg(&client->dev, "start data update\n");

        /* one millisecond from now */
        data->last_updated = jiffies + HZ / 1000;

        data->v_in = 0;
        data->v_out = 0;
        data->i_in = 0;
        data->i_out = 0;
        data->p_in = 0;
        data->p_out = 0;
        data->temp_input[0] = 0;
        data->temp_input[1] = 0;
        data->fan_duty_cycle[0] = 0;
        data->fan_speed[0] = 0;
        data->mfr_model[0] = '\0';
        data->mfr_serial[0] = '\0';

        command = 0x9a;     /* PSU mfr_model */
        status = dps_800ab_16_d_read_block(client, command,
                data->mfr_model, ARRAY_SIZE(data->mfr_model) - 1);
        data->mfr_model[ARRAY_SIZE(data->mfr_model) - 1] = '\0';
        if (status < 0)
            dev_dbg(&client->dev, "reg %d, err %d\n", command, status);

        command = 0x9e;     /* PSU mfr_serial */
        status = dps_800ab_16_d_read_block(client, command,
                data->mfr_serial, ARRAY_SIZE(data->mfr_serial) - 1);
        data->mfr_serial[ARRAY_SIZE(data->mfr_serial) - 1] = '\0';
        if (status < 0)
            dev_dbg(&client->dev, "reg %d, err %d\n", command, status);

        for (i = 0; i < ARRAY_SIZE(regs_byte); i++) {
            status = dps_800ab_16_d_read_byte(client,
                                              regs_byte[i].reg);
            if (status < 0)
                dev_dbg(&client->dev, "reg %d, err %d\n",
                        regs_byte[i].reg, status);
            else
                *(regs_byte[i].value) = status;
        }

        for (i = 0; i < ARRAY_SIZE(regs_word); i++) {
            status = dps_800ab_16_d_read_word(client,
                                              regs_word[i].reg);
            if (status < 0)
                dev_dbg(&client->dev, "reg %d, err %d\n",
                        regs_word[i].reg, status);
            else
                *(regs_word[i].value) = status;
        }

        data->valid = 1;
    }

    mutex_unlock(&data->update_lock);

    return data;
}
/* * si470x_set_seek - set seek */ static int si470x_set_seek(struct si470x_device *radio, unsigned int wrap_around, unsigned int seek_upward) { int retval = 0; unsigned long timeout; bool timed_out = false; /* start seeking */ radio->registers[POWERCFG] |= POWERCFG_SEEK; if (wrap_around == 1) radio->registers[POWERCFG] &= ~POWERCFG_SKMODE; else radio->registers[POWERCFG] |= POWERCFG_SKMODE; if (seek_upward == 1) radio->registers[POWERCFG] |= POWERCFG_SEEKUP; else radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP; retval = si470x_set_register(radio, POWERCFG); if (retval < 0) goto done; /* currently only the I2C driver uses the interrupt-driven way to seek */ if (radio->stci_enabled) { INIT_COMPLETION(radio->completion); /* wait till seek operation has completed */ retval = wait_for_completion_timeout(&radio->completion, msecs_to_jiffies(seek_timeout)); if (!retval) timed_out = true; } else { /* wait till seek operation has completed */ timeout = jiffies + msecs_to_jiffies(seek_timeout); do { retval = si470x_get_register(radio, STATUSRSSI); if (retval < 0) goto stop; timed_out = time_after(jiffies, timeout); } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) && (!timed_out)); } if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) dev_warn(&radio->videodev->dev, "seek does not complete\n"); if (radio->registers[STATUSRSSI] & STATUSRSSI_SF) dev_warn(&radio->videodev->dev, "seek failed / band limit reached\n"); if (timed_out) dev_warn(&radio->videodev->dev, "seek timed out after %u ms\n", seek_timeout); stop: /* stop seeking */ radio->registers[POWERCFG] &= ~POWERCFG_SEEK; retval = si470x_set_register(radio, POWERCFG); done: /* try again, if timed out */ if ((retval == 0) && timed_out) retval = -EAGAIN; return retval; }
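/*
 * si470x_set_seek() shows the two standard ways to wait for a hardware flag:
 * block on a completion with a timeout when an interrupt will fire, or poll
 * the status register against a deadline otherwise. A standalone sketch of
 * the polling half (plain C; poll_flag and read_status are illustrative
 * stand-ins, not driver APIs):
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool read_status(void) { return true; } /* stand-in for a register read */

/* Poll until read_status() reports done or timeout_ms elapses; returns false
 * on timeout, mirroring the deadline loop in si470x_set_seek(). */
static bool poll_flag(unsigned int timeout_ms)
{
	struct timespec ts;
	long long deadline;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	deadline = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000 + timeout_ms;

	for (;;) {
		if (read_status())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (ts.tv_sec * 1000LL + ts.tv_nsec / 1000000 > deadline)
			return false;
	}
}

int main(void)
{
	printf("%s\n", poll_flag(500) ? "done" : "timed out");
	return 0;
}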
static void minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, void *priv_sta, struct ieee80211_tx_status *st) { struct ieee80211_tx_info *info = st->info; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_tx_rate *ar = info->status.rates; struct minstrel_rate_stats *rate, *rate2; struct minstrel_priv *mp = priv; bool last, update = false; int i; if (!msp->is_ht) return mac80211_minstrel.tx_status_ext(priv, sband, &msp->legacy, st); /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) { info->status.ampdu_ack_len = (info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0); info->status.ampdu_len = 1; } mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi); mi->sample_wait = 16 + 2 * avg_ampdu_len; mi->sample_tries = 1; mi->sample_count--; } if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) mi->sample_packets += info->status.ampdu_len; last = !minstrel_ht_txstat_valid(mp, &ar[0]); for (i = 0; !last; i++) { last = (i == IEEE80211_TX_MAX_RATES - 1) || !minstrel_ht_txstat_valid(mp, &ar[i + 1]); rate = minstrel_ht_get_stats(mp, mi, &ar[i]); if (last) rate->success += info->status.ampdu_ack_len; rate->attempts += ar[i].count * info->status.ampdu_len; } /* * check for sudden death of spatial multiplexing, * downgrade to a lower number of streams if necessary. */ rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); if (rate->attempts > 30 && MINSTREL_FRAC(rate->success, rate->attempts) < MINSTREL_FRAC(20, 100)) { minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); update = true; } rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); if (rate2->attempts > 30 && MINSTREL_FRAC(rate2->success, rate2->attempts) < MINSTREL_FRAC(20, 100)) { minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); update = true; } if (time_after(jiffies, mi->last_stats_update + (mp->update_interval / 2 * HZ) / 1000)) { update = true; minstrel_ht_update_stats(mp, mi); } if (update) minstrel_ht_update_rates(mp, mi); }
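/*
 * minstrel's MINSTREL_FRAC() comparisons above are integer fixed-point:
 * success/attempts is compared against 20/100 without any floating point.
 * A standalone sketch of the idea (plain C; FRAC_BITS is an assumed scale,
 * minstrel defines its own MINSTREL_SCALE, and below_pct is illustrative):
 */
#include <stdbool.h>
#include <stdio.h>

#define FRAC_BITS 12 /* assumed scale */
#define FRAC(val, div) (((unsigned int)(val) << FRAC_BITS) / (div))

/* True when success/attempts falls below pct percent, e.g. the 20%
 * "sudden death" check used to downgrade a spatial-stream rate. */
static bool below_pct(unsigned int success, unsigned int attempts,
		      unsigned int pct)
{
	if (!attempts)
		return false;
	return FRAC(success, attempts) < FRAC(pct, 100);
}

int main(void)
{
	printf("%d\n", below_pct(5, 40, 20));  /* 12.5% < 20% -> 1 */
	printf("%d\n", below_pct(15, 40, 20)); /* 37.5% -> 0 */
	return 0;
}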
static int init586(struct net_device *dev) { void *ptr; unsigned long s; int i, result = 0; struct priv *p = netdev_priv(dev); volatile struct configure_cmd_struct *cfg_cmd; volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; struct dev_mc_list *dmi = dev->mc_list; int num_addrs = dev->mc_count; ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */ cfg_cmd->cmd_status = 0; cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; cfg_cmd->cmd_link = 0xffff; cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ cfg_cmd->priority = 0x00; cfg_cmd->ifs = 0x60; cfg_cmd->time_low = 0x00; cfg_cmd->time_high = 0xf2; cfg_cmd->promisc = 0; if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) cfg_cmd->promisc = 1; cfg_cmd->carr_coll = 0x00; p->scb->cbl_offset = make16(cfg_cmd); p->scb->cmd = CUC_START; /* cmd.-unit start */ elmc_id_attn586(); s = jiffies; /* warning: only active with interrupts on !! */ while (!(cfg_cmd->cmd_status & STAT_COMPL)) { if (time_after(jiffies, s + 30*HZ/100)) break; } if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) { printk(KERN_WARNING "%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status); return 1; } /* * individual address setup */ ias_cmd = (struct iasetup_cmd_struct *) ptr; ias_cmd->cmd_status = 0; ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; ias_cmd->cmd_link = 0xffff; memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN); p->scb->cbl_offset = make16(ias_cmd); p->scb->cmd = CUC_START; /* cmd.-unit start */ elmc_id_attn586(); s = jiffies; while (!(ias_cmd->cmd_status & STAT_COMPL)) { if (time_after(jiffies, s + 30*HZ/100)) break; } if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) { printk(KERN_WARNING "%s (elmc): individual address setup command failed: %04x\n", dev->name, ias_cmd->cmd_status); return 1; } /* * TDR, wire check .. e.g. no resistor etc. */ tdr_cmd = (struct tdr_cmd_struct *) ptr; tdr_cmd->cmd_status = 0; tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; tdr_cmd->cmd_link = 0xffff; tdr_cmd->status = 0; p->scb->cbl_offset = make16(tdr_cmd); p->scb->cmd = CUC_START; /* cmd.-unit start */ elmc_attn586(); s = jiffies; while (!(tdr_cmd->cmd_status & STAT_COMPL)) { if (time_after(jiffies, s + 30*HZ/100)) { printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); result = 1; break; } } if (!result) { DELAY(2); /* wait for result */ result = tdr_cmd->status; p->scb->cmd = p->scb->status & STAT_MASK; elmc_id_attn586(); /* ack the interrupts */ if (result & TDR_LNK_OK) { /* empty */ } else if (result & TDR_XCVR_PRB) { printk(KERN_WARNING "%s: TDR: Transceiver problem!\n", dev->name); } else if (result & TDR_ET_OPN) { printk(KERN_WARNING "%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK); } else if (result & TDR_ET_SRT) { if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ printk(KERN_WARNING "%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK); } else { printk(KERN_WARNING "%s: TDR: Unknown status %04x\n", dev->name, result); } } /* * ack interrupts */ p->scb->cmd = p->scb->status & STAT_MASK; elmc_id_attn586(); /* * alloc nop/xmit-cmds */ #if (NUM_XMIT_BUFFS == 1) for (i = 0; i < 2; i++) { p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; p->nop_cmds[i]->cmd_cmd = CMD_NOP; p->nop_cmds[i]->cmd_status = 0; p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); ptr = (char *) ptr + sizeof(struct nop_cmd_struct); } p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); #else for (i = 0; i < NUM_XMIT_BUFFS; i++) { p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; p->nop_cmds[i]->cmd_cmd = CMD_NOP; p->nop_cmds[i]->cmd_status = 0; p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); ptr = (char *) ptr + sizeof(struct nop_cmd_struct); p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff i */ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); } #endif ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */ /* * Multicast setup */ if (dev->mc_count) { /* I don't understand this: do we really need memory after the init? */ int len = ((char *) p->iscp - (char *) ptr - 8) / 6; if (len <= 0) { printk(KERN_ERR "%s: Ooooops, no memory for MC-Setup!\n", dev->name); } else { if (len < num_addrs) { num_addrs = len; printk(KERN_WARNING "%s: Sorry, can only apply %d MC-Address(es).\n", dev->name, num_addrs); } mc_cmd = (struct mcsetup_cmd_struct *) ptr; mc_cmd->cmd_status = 0; mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; mc_cmd->cmd_link = 0xffff; mc_cmd->mc_cnt = num_addrs * 6; for (i = 0; i < num_addrs; i++) { memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6); dmi = dmi->next; } p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd = CUC_START; elmc_id_attn586(); s = jiffies; while (!(mc_cmd->cmd_status & STAT_COMPL)) { if (time_after(jiffies, s + 30*HZ/100)) break; } if (!(mc_cmd->cmd_status & STAT_COMPL)) { printk(KERN_WARNING "%s: Can't apply multicast-address-list.\n", dev->name); } } } /* * alloc xmit-buffs / init xmit_cmds */ for (i = 0; i < NUM_XMIT_BUFFS; i++) { p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */ ptr = (char *) ptr + XMIT_BUFF_SIZE; p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */ ptr = (char *) ptr + sizeof(struct tbd_struct); if ((void *) ptr > (void *) p->iscp) { printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", dev->name); return 1; } memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct)); memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct)); p->xmit_cmds[i]->cmd_status = STAT_COMPL; p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); p->xmit_buffs[i]->next = 0xffff; p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); } p->xmit_count = 0; p->xmit_last = 0; #ifndef NO_NOPCOMMANDS p->nop_point = 0; #endif /* * 'start transmitter' (nop-loop) */ #ifndef NO_NOPCOMMANDS p->scb->cbl_offset = make16(p->nop_cmds[0]); p->scb->cmd = CUC_START; elmc_id_attn586(); WAIT_4_SCB_CMD(); #else p->xmit_cmds[0]->cmd_link = 0xffff; p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT; #endif return 0; }
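/*
 * Besides the bounded 300 ms busy-waits (s + 30*HZ/100), init586() carves all
 * of its command blocks and buffers out of one shared-memory window by
 * walking a cursor and checking it against the end of the window (p->iscp).
 * A standalone sketch of that bump-pointer carving with a bounds check
 * (plain C; the arena size and the helper name carve are illustrative):
 */
#include <stdio.h>
#include <string.h>

#define ARENA_SIZE 4096 /* illustrative; the real window is board memory */

static unsigned char arena[ARENA_SIZE];
static unsigned char *cursor = arena;

/* Hand out the next "size" bytes of the arena, or NULL when the request
 * would run past the end, like the "(void *) ptr > (void *) p->iscp" check. */
static void *carve(size_t size)
{
	void *p;

	if (size > (size_t)(arena + ARENA_SIZE - cursor))
		return NULL;
	p = cursor;
	cursor += size;
	return memset(p, 0, size);
}

int main(void)
{
	void *cmd = carve(64);
	void *buf = carve(2048);

	printf("cmd=%p buf=%p\n", cmd, buf);
	printf("too big: %p\n", carve(4096)); /* NULL: arena exhausted */
	return 0;
}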
/** * radeon_fence_wait_seq - wait for a specific sequence number * * @rdev: radeon device pointer * @target_seq: sequence number we want to wait for * @ring: ring index the fence is associated with * @intr: use interruptible sleep * @lock_ring: whether the ring should be locked or not * * Wait for the requested sequence number to be written (all asics). * @intr selects whether to use interruptible (true) or non-interruptible * (false) sleep when waiting for the sequence number. Helper function * for radeon_fence_wait(), et al. * Returns 0 if the sequence number has passed, error for all other cases. * -EDEADLK is returned when a GPU lockup has been detected and the ring is * marked as not ready so no further jobs get scheduled until a successful * reset. */ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, unsigned ring, bool intr, bool lock_ring) { unsigned long timeout, last_activity; uint64_t seq; unsigned i; bool signaled, fence_queue_locked; int r; while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) { if (!rdev->ring[ring].ready) { return -EBUSY; } timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT; if (time_after(rdev->fence_drv[ring].last_activity, timeout)) { /* the normal case, timeout is somewhere before last_activity */ timeout = rdev->fence_drv[ring].last_activity - timeout; } else { /* either jiffies wrapped around, or no fence was signaled in the last 500ms; * either way we just wait for the minimum amount and then check for a lockup */ timeout = 1; } seq = atomic64_read(&rdev->fence_drv[ring].last_seq); /* Save the current last_activity value, used to check for GPU lockups */ last_activity = rdev->fence_drv[ring].last_activity; CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)", ring, seq); radeon_irq_kms_sw_irq_get(rdev, ring); fence_queue_locked = false; r = 0; while (!(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring))) { if (!fence_queue_locked) { mtx_lock(&rdev->fence_queue_mtx); fence_queue_locked = true; } if (intr) { r = cv_timedwait_sig(&rdev->fence_queue, &rdev->fence_queue_mtx, timeout); } else { r = cv_timedwait(&rdev->fence_queue, &rdev->fence_queue_mtx, timeout); } if (r == EINTR) r = ERESTARTSYS; if (r != 0) { if (r == EWOULDBLOCK) { signaled = radeon_fence_seq_signaled( rdev, target_seq, ring); } break; } } if (fence_queue_locked) { mtx_unlock(&rdev->fence_queue_mtx); } radeon_irq_kms_sw_irq_put(rdev, ring); if (unlikely(r == ERESTARTSYS)) { return -r; } CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)", ring, seq); if (unlikely(!signaled)) { #ifndef __FreeBSD__ /* we were interrupted for some reason and fence * isn't signaled yet, resume waiting */ if (r) { continue; } #endif /* check if sequence value has changed since last_activity */ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) { continue; } if (lock_ring) { sx_xlock(&rdev->ring_lock); } /* test if somebody else has already decided that this is a lockup */ if (last_activity != rdev->fence_drv[ring].last_activity) { if (lock_ring) { sx_xunlock(&rdev->ring_lock); } continue; } if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { /* good news, we believe it's a lockup */ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n", (uintmax_t)target_seq, (uintmax_t)seq); /* change last_activity so nobody else thinks there is a lockup */ for (i = 0; i < RADEON_NUM_RINGS; ++i) { rdev->fence_drv[i].last_activity = jiffies; } /* mark the ring as not ready any more */ rdev->ring[ring].ready = false; if (lock_ring) { sx_xunlock(&rdev->ring_lock); } return -EDEADLK; } if (lock_ring) { sx_xunlock(&rdev->ring_lock); } } } return 0; }
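/*
 * The timeout computation at the top of the wait loop above is easy to
 * misread: it derives how many ticks of the lockup window remain, i.e.
 * last_activity + RADEON_FENCE_JIFFIES_TIMEOUT - jiffies, clamped to at
 * least one tick once the window has already elapsed. A standalone sketch
 * of that arithmetic (plain C; the names and the timeout value are
 * illustrative):
 */
#include <stdio.h>

#define FENCE_TIMEOUT_TICKS 50 /* assumed; the driver uses RADEON_FENCE_JIFFIES_TIMEOUT */

/* Ticks left in the lockup window, clamped to >= 1, mirroring the
 * "timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT; ..." sequence. */
static unsigned long wait_ticks(unsigned long now, unsigned long last_activity)
{
	unsigned long cutoff = now - FENCE_TIMEOUT_TICKS;

	if ((long)(cutoff - last_activity) < 0) /* time_after(last_activity, cutoff) */
		return last_activity - cutoff;
	return 1;
}

int main(void)
{
	printf("%lu\n", wait_ticks(120, 100)); /* 30 ticks of window left */
	printf("%lu\n", wait_ticks(200, 100)); /* window elapsed -> 1 */
	return 0;
}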
static int omap_i2c_init(struct omap_i2c_dev *dev) { u16 psc = 0, scll = 0, sclh = 0, buf = 0; u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0; unsigned long fclk_rate = 12000000; unsigned long timeout; unsigned long internal_clk = 0; struct clk *fclk; if (dev->rev >= OMAP_I2C_REV_2) { /* Disable I2C controller before soft reset */ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) & ~(OMAP_I2C_CON_EN)); omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK); /* For some reason we need to set the EN bit before the * reset done bit gets set. */ timeout = jiffies + OMAP_I2C_TIMEOUT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) & SYSS_RESETDONE_MASK)) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting for controller reset\n"); return -ETIMEDOUT; } msleep(1); } /* SYSC register is cleared by the reset; rewrite it */ if (dev->rev == OMAP_I2C_REV_ON_2430) { omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_AUTOIDLE_MASK); } else if (dev->rev >= OMAP_I2C_REV_ON_3430) { dev->syscstate = SYSC_AUTOIDLE_MASK; dev->syscstate |= SYSC_ENAWAKEUP_MASK; dev->syscstate |= (SYSC_IDLEMODE_SMART << __ffs(SYSC_SIDLEMODE_MASK)); dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK << __ffs(SYSC_CLOCKACTIVITY_MASK)); omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate); /* * Enable all wakeup sources to stop the I2C module from freezing * on a WFI instruction. * REVISIT: Some wakeup sources might not be needed. */ dev->westate = OMAP_I2C_WE_ALL; omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); } } omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); if (cpu_class_is_omap1()) { /* * The I2C functional clock is the armxor_ck, so there's * no need to get "armxor_ck" separately. Now, if OMAP2420 * always returns 12MHz for the functional clock, we can * do this bit unconditionally. */ fclk = clk_get(dev->dev, "fck"); fclk_rate = clk_get_rate(fclk); clk_put(fclk); /* TRM for 5912 says the I2C clock must be prescaled to be * between 7 - 12 MHz. The XOR input clock is typically * 12, 13 or 19.2 MHz. So we should have code that produces: * * XOR MHz Divider Prescaler * 12 1 0 * 13 2 1 * 19.2 2 1 */ if (fclk_rate > 12000000) psc = fclk_rate / 12000000; } if (!(cpu_class_is_omap1() || cpu_is_omap2420())) { /* * The HSI2C controller's internal clock rate should be 19.2 MHz * for HS and for all modes on 2430. On 34xx we can use a lower * rate to get a longer filter period for better noise suppression. * The filter is one iclk (fclk for HS) period. */ if (dev->speed > 400 || cpu_is_omap2430()) internal_clk = 19200; else if (dev->speed > 100) internal_clk = 9600; else internal_clk = 4000; fclk = clk_get(dev->dev, "fck"); fclk_rate = clk_get_rate(fclk) / 1000; clk_put(fclk); /* Compute prescaler divisor */ psc = fclk_rate / internal_clk; psc = psc - 1; /* If configured for High Speed */ if (dev->speed > 400) { unsigned long scl; /* For first phase of HS mode */ scl = internal_clk / 400; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; /* For second phase of HS mode */ scl = fclk_rate / dev->speed; hsscll = scl - (scl / 3) - 7; hssclh = (scl / 3) - 5; } else if (dev->speed > 100) { unsigned long scl; /* Fast mode */ scl = internal_clk / dev->speed; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; } else { /* Standard mode */ fsscll = internal_clk / (dev->speed * 2) - 7; fssclh = internal_clk / (dev->speed * 2) - 5; } scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; } else { /* Program desired operating rate */ fclk_rate /= (psc + 1) * 1000; if (psc > 2) psc = 2; scll = fclk_rate / (dev->speed * 2) - 7 + psc; sclh = fclk_rate / (dev->speed * 2) - 7 + psc; } /* Set up the clock prescaler to obtain an approx 12MHz I2C module clock: */ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc); /* SCL low and high time values */ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll); omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh); if (dev->fifo_size) { /* Note: set up the required fifo size - 1. RTRSH and XTRSH */ buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR | (dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf); } /* Take the I2C module out of reset: */ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); dev->errata = 0; if (cpu_is_omap2430() || cpu_is_omap34xx()) dev->errata |= I2C_OMAP_ERRATA_I207; /* Enable interrupts */ dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | OMAP_I2C_IE_AL) | ((dev->fifo_size) ? (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); if (cpu_is_omap34xx()) { dev->pscstate = psc; dev->scllstate = scll; dev->sclhstate = sclh; dev->bufstate = buf; } return 0; }
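/*
 * The divisor math in omap_i2c_init() is easiest to check with concrete
 * numbers. For fast mode with an internal clock of 9600 kHz and a 400 kHz
 * bus: scl = 24, so SCLL = 24 - 24/3 - 7 = 9 and SCLH = 24/3 - 5 = 3
 * (values in module-clock cycles, per the formulas above). A standalone
 * sketch of that worked example (plain C):
 */
#include <stdio.h>

/* Fast-mode SCL divisors as computed in omap_i2c_init():
 * scll = scl - scl/3 - 7, sclh = scl/3 - 5. */
int main(void)
{
	unsigned long internal_clk = 9600; /* kHz */
	unsigned long speed = 400;         /* kHz, fast mode (100 < speed <= 400) */
	unsigned long scl = internal_clk / speed;
	unsigned long scll = scl - (scl / 3) - 7;
	unsigned long sclh = (scl / 3) - 5;

	printf("scl=%lu scll=%lu sclh=%lu\n", scl, scll, sclh); /* 24 9 3 */
	return 0;
}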
static void xpc_do_exit(enum xp_retval reason) { short partid; int active_part_count, printed_waiting_msg = 0; struct xpc_partition *part; unsigned long printmsg_time, disengage_request_timeout = 0; /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ DBUG_ON(xpc_exiting == 1); /* * Let the heartbeat checker thread and the discovery thread * (if one is running) know that they should exit. Also wake up * the heartbeat checker thread in case it's sleeping. */ xpc_exiting = 1; wake_up_interruptible(&xpc_act_IRQ_wq); /* ignore all incoming interrupts */ free_irq(SGI_XPC_ACTIVATE, NULL); /* wait for the discovery thread to exit */ wait_for_completion(&xpc_discovery_exited); /* wait for the heartbeat checker thread to exit */ wait_for_completion(&xpc_hb_checker_exited); /* sleep for 1/3 of a second or so */ (void)msleep_interruptible(300); /* wait for all partitions to become inactive */ printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); xpc_disengage_request_timedout = 0; do { active_part_count = 0; for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { part = &xpc_partitions[partid]; if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) { continue; } active_part_count++; XPC_DEACTIVATE_PARTITION(part, reason); if (part->disengage_request_timeout > disengage_request_timeout) { disengage_request_timeout = part->disengage_request_timeout; } } if (xpc_partition_engaged(-1UL)) { if (time_after(jiffies, printmsg_time)) { dev_info(xpc_part, "waiting for remote " "partitions to disengage, timeout in " "%ld seconds\n", (disengage_request_timeout - jiffies) / HZ); printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); printed_waiting_msg = 1; } } else if (active_part_count > 0) { if (printed_waiting_msg) { dev_info(xpc_part, "waiting for local partition" " to disengage\n"); printed_waiting_msg = 0; } } else { if (!xpc_disengage_request_timedout) { dev_info(xpc_part, "all partitions have " "disengaged\n"); } break; } /* sleep for 1/3 of a second or so */ (void)msleep_interruptible(300); } while (1); DBUG_ON(xpc_partition_engaged(-1UL)); /* indicate to others that our reserved page is uninitialized */ xpc_rsvd_page->vars_pa = 0; /* now it's time to eliminate our heartbeat */ del_timer_sync(&xpc_hb_timer); DBUG_ON(xpc_vars->heartbeating_to_mask != 0); if (reason == xpUnloading) { /* take ourselves off of the reboot_notifier_list */ (void)unregister_reboot_notifier(&xpc_reboot_notifier); /* take ourselves off of the die_notifier list */ (void)unregister_die_notifier(&xpc_die_notifier); } /* close down protections for IPI operations */ xpc_restrict_IPI_ops(); /* clear the interface to XPC's functions */ xpc_clear_interface(); if (xpc_sysctl) unregister_sysctl_table(xpc_sysctl); kfree(xpc_remote_copy_buffer_base);  }
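/*
 * The shutdown loop above prints its "waiting ..." message at most once per
 * XPC_DISENGAGE_PRINTMSG_INTERVAL by keeping a next-print timestamp. A
 * standalone sketch of that rate-limited logging pattern (plain C; the tick
 * values and the loop driving "now" are illustrative):
 */
#include <stdio.h>

#define PRINT_INTERVAL_TICKS 10 /* assumed; XPC uses an interval in seconds * HZ */

int main(void)
{
	unsigned long now, next_print = PRINT_INTERVAL_TICKS;

	for (now = 0; now < 35; now++) {
		/* polling work would happen here */
		if ((long)(next_print - now) < 0) { /* time_after(now, next_print) */
			printf("still waiting at tick %lu\n", now);
			next_print = now + PRINT_INTERVAL_TICKS;
		}
	}
	return 0;
}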
/* * Low level master read/write transaction. */ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); int r; u16 w; dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); if (msg->len == 0) return -EINVAL; omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Could the STB bit of I2C_CON be used with probing? */ dev->buf = msg->buf; dev->buf_len = msg->len; omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG); w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w); init_completion(&dev->cmd_complete); dev->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; /* High speed configuration */ if (dev->speed > 400) w |= OMAP_I2C_CON_OPMODE_HS; if (msg->flags & I2C_M_TEN) w |= OMAP_I2C_CON_XA; if (!(msg->flags & I2C_M_RD)) w |= OMAP_I2C_CON_TRX; if (!dev->b_hw && stop) w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); /* * Don't write stt and stp together on some hardware. */ if (dev->b_hw && stop) { unsigned long delay = jiffies + OMAP_I2C_TIMEOUT; u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); while (con & OMAP_I2C_CON_STT) { con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); /* Let the user know if i2c is in a bad state */ if (time_after(jiffies, delay)) { dev_err(dev->dev, "controller timed out " "waiting for start condition to finish\n"); return -ETIMEDOUT; } cpu_relax(); } w |= OMAP_I2C_CON_STP; w &= ~OMAP_I2C_CON_STT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } /* * REVISIT: We should abort the transfer on signals, but the bus goes * into arbitration and we're currently unable to recover from it. */ r = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); dev->buf_len = 0; if (r < 0) return r; if (r == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); return -ETIMEDOUT; } if (likely(!dev->cmd_err)) return 0; /* We have an error */ if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) { omap_i2c_init(dev); return -EIO; } if (dev->cmd_err & OMAP_I2C_STAT_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; if (stop) { w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } return -EREMOTEIO; } return -EIO; }
/** * hwmp_route_info_get - Update routing info to originator and transmitter * * @sdata: local mesh subif * @mgmt: mesh management frame * @hwmp_ie: hwmp information element (PREP or PREQ) * @action: type of hwmp ie * * This function updates the path routing information to the originator and the * transmitter of a HWMP PREQ or PREP frame. * * Returns: metric to frame originator or 0 if the frame should not be further * processed * * Notes: this function is the only place (besides user-provided info) where * path routing information is updated. */ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const u8 *hwmp_ie, enum mpath_frame_type action) { struct ieee80211_local *local = sdata->local; struct mesh_path *mpath; struct sta_info *sta; bool fresh_info; const u8 *orig_addr, *ta; u32 orig_sn, orig_metric; unsigned long orig_lifetime, exp_time; u32 last_hop_metric, new_metric; bool process = true; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta) { rcu_read_unlock(); return 0; } last_hop_metric = airtime_link_metric_get(local, sta); /* Update and check originator routing info */ fresh_info = true; switch (action) { case MPATH_PREQ: orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); orig_sn = PREQ_IE_ORIG_SN(hwmp_ie); orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); orig_metric = PREQ_IE_METRIC(hwmp_ie); break; case MPATH_PREP: /* Originator here refers to the MP that was the target in the * Path Request. We deviate from the nomenclature in the draft * so that we can easily use a single function to gather path * information from both PREQ and PREP frames. */ orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie); orig_sn = PREP_IE_TARGET_SN(hwmp_ie); orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); orig_metric = PREP_IE_METRIC(hwmp_ie); break; default: rcu_read_unlock(); return 0; } new_metric = orig_metric + last_hop_metric; if (new_metric < orig_metric) new_metric = MAX_METRIC; exp_time = TU_TO_EXP_TIME(orig_lifetime); if (ether_addr_equal(orig_addr, sdata->vif.addr)) { /* This MP is the originator, we are not interested in this * frame, except for updating transmitter's path info. */ process = false; fresh_info = false; } else { mpath = mesh_path_lookup(sdata, orig_addr); if (mpath) { spin_lock_bh(&mpath->state_lock); if (mpath->flags & MESH_PATH_FIXED) fresh_info = false; else if ((mpath->flags & MESH_PATH_ACTIVE) && (mpath->flags & MESH_PATH_SN_VALID)) { if (SN_GT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && new_metric >= mpath->metric)) { process = false; fresh_info = false; } } else if (!(mpath->flags & MESH_PATH_ACTIVE)) { bool have_sn, newer_sn, bounced; have_sn = mpath->flags & MESH_PATH_SN_VALID; newer_sn = have_sn && SN_GT(orig_sn, mpath->sn); bounced = have_sn && (SN_DELTA(orig_sn, mpath->sn) > MAX_SANE_SN_DELTA); if (!have_sn || newer_sn) { /* if SN is newer than what we had * then we can take it */; } else if (bounced) { /* if SN is way different than what * we had then assume the other side * rebooted or restarted */; } else { process = false; fresh_info = false; } } } else { mpath = mesh_path_add(sdata, orig_addr); if (IS_ERR(mpath)) { rcu_read_unlock(); return 0; } spin_lock_bh(&mpath->state_lock); } if (fresh_info) { mesh_path_assign_nexthop(mpath, sta); mpath->flags |= MESH_PATH_SN_VALID; mpath->metric = new_metric; mpath->sn = orig_sn; mpath->exp_time = time_after(mpath->exp_time, exp_time) ? mpath->exp_time : exp_time; mesh_path_activate(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); /* the draft says preq_id should be saved too, but there does * not seem to be any use for it, so skip it for now */ } else spin_unlock_bh(&mpath->state_lock); } /* Update and check transmitter routing info */ ta = mgmt->sa; if (ether_addr_equal(orig_addr, ta)) fresh_info = false; else { fresh_info = true; mpath = mesh_path_lookup(sdata, ta); if (mpath) { spin_lock_bh(&mpath->state_lock); if ((mpath->flags & MESH_PATH_FIXED) || ((mpath->flags & MESH_PATH_ACTIVE) && (last_hop_metric > mpath->metric))) fresh_info = false; } else { mpath = mesh_path_add(sdata, ta); if (IS_ERR(mpath)) { rcu_read_unlock(); return 0; } spin_lock_bh(&mpath->state_lock); } if (fresh_info) { mesh_path_assign_nexthop(mpath, sta); mpath->metric = last_hop_metric; mpath->exp_time = time_after(mpath->exp_time, exp_time) ? mpath->exp_time : exp_time; mesh_path_activate(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); } else spin_unlock_bh(&mpath->state_lock); } rcu_read_unlock(); return process ? new_metric : 0; }
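/*
 * Both this function and the RANN handler below accumulate path metrics with
 * a saturating add: if the u32 sum wraps, it is pinned to MAX_METRIC instead.
 * A standalone sketch of that overflow-safe accumulation (plain C; the
 * MAX_METRIC value and the helper name metric_add are illustrative):
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_METRIC 0xffffffffu /* assumed saturation value */

/* Unsigned wraparound is detected by the sum being smaller than an operand,
 * as in "if (new_metric < orig_metric) new_metric = MAX_METRIC;" above. */
static uint32_t metric_add(uint32_t a, uint32_t b)
{
	uint32_t sum = a + b;

	return sum < a ? MAX_METRIC : sum;
}

int main(void)
{
	printf("%u\n", metric_add(10, 20));          /* 30 */
	printf("%u\n", metric_add(0xfffffff0u, 32)); /* saturates to MAX_METRIC */
	return 0;
}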
int _mali_osk_time_after( u32 ticka, u32 tickb ) { /* do the comparison in 32 bits so tick-counter wraparound is still handled on 64-bit kernels, where promoting the u32 ticks to unsigned long would defeat time_after()'s wrap handling; on 32-bit builds this is identical to time_after((unsigned long)ticka, (unsigned long)tickb) */ return (s32)(tickb - ticka) < 0; }
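/*
 * This wrapper is a good place to illustrate what time_after() actually does:
 * the subtraction is evaluated in the counter's own width and the result is
 * interpreted as signed, so comparisons stay correct across wraparound for
 * deltas up to half the counter range. A standalone demonstration (plain C;
 * the macro name ticks_after is illustrative):
 */
#include <limits.h>
#include <stdio.h>

/* Same trick as the kernel's time_after(a, b): true when a is after b. */
#define ticks_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long before_wrap = ULONG_MAX - 15;
	unsigned long after_wrap = before_wrap + 32; /* wraps around to 16 */

	/* A naive comparison gets this wrong; the signed-difference test
	 * still sees after_wrap as the later instant. */
	printf("naive:   %d\n", after_wrap > before_wrap);             /* 0 */
	printf("wrapped: %d\n", ticks_after(after_wrap, before_wrap)); /* 1 */
	return 0;
}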
static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const struct ieee80211_rann_ie *rann) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct mesh_path *mpath; u8 ttl, flags, hopcount; const u8 *orig_addr; u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; bool root_is_gate; ttl = rann->rann_ttl; flags = rann->rann_flags; root_is_gate = !!(flags & RANN_FLAG_IS_GATE); orig_addr = rann->rann_addr; orig_sn = le32_to_cpu(rann->rann_seq); interval = le32_to_cpu(rann->rann_interval); hopcount = rann->rann_hopcount; hopcount++; orig_metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ if (ether_addr_equal(orig_addr, sdata->vif.addr)) return; mhwmp_dbg(sdata, "received RANN from %pM via neighbour %pM (is_gate=%d)\n", orig_addr, mgmt->sa, root_is_gate); rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta) { rcu_read_unlock(); return; } last_hop_metric = airtime_link_metric_get(local, sta); new_metric = orig_metric + last_hop_metric; if (new_metric < orig_metric) new_metric = MAX_METRIC; mpath = mesh_path_lookup(sdata, orig_addr); if (!mpath) { mpath = mesh_path_add(sdata, orig_addr); if (IS_ERR(mpath)) { rcu_read_unlock(); sdata->u.mesh.mshstats.dropped_frames_no_route++; return; } } if (!(SN_LT(mpath->sn, orig_sn)) && !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { rcu_read_unlock(); return; } if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) || (time_after(jiffies, mpath->last_preq_to_root + root_path_confirmation_jiffies(sdata)) || time_before(jiffies, mpath->last_preq_to_root))) && !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) { mhwmp_dbg(sdata, "time to refresh root mpath %pM\n", orig_addr); mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); mpath->last_preq_to_root = jiffies; } mpath->sn = orig_sn; mpath->rann_metric = new_metric; mpath->is_root = true; /* Record the RANN's sender address so we can send individually * addressed PREQs destined for the root mesh STA */ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); if (root_is_gate) mesh_path_add_gate(mpath); if (ttl <= 1) { ifmsh->mshstats.dropped_frames_ttl++; rcu_read_unlock(); return; } ttl--; if (ifmsh->mshcfg.dot11MeshForwarding) { mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, orig_sn, 0, NULL, 0, broadcast_addr, hopcount, ttl, interval, new_metric, 0, sdata); } rcu_read_unlock(); }
static irqreturn_t timer_interrupt (int irq, void *dev_id) { unsigned long new_itm; if (unlikely(cpu_is_offline(smp_processor_id()))) { return IRQ_HANDLED; } platform_timer_interrupt(irq, dev_id); new_itm = local_cpu_data->itm_next; if (!time_after(ia64_get_itc(), new_itm)) printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", ia64_get_itc(), new_itm); profile_tick(CPU_PROFILING); if (paravirt_do_steal_accounting(&new_itm)) goto skip_process_time_accounting; while (1) { update_process_times(user_mode(get_irq_regs())); new_itm += local_cpu_data->itm_delta; if (smp_processor_id() == time_keeper_id) { /* * Here we are in the timer irq handler. We have irqs locally * disabled, but we don't know if the timer_bh is running on * another CPU. We need to avoid an SMP race by acquiring * xtime_lock. */ write_seqlock(&xtime_lock); do_timer(1); local_cpu_data->itm_next = new_itm; write_sequnlock(&xtime_lock); } else local_cpu_data->itm_next = new_itm; if (time_after(new_itm, ia64_get_itc())) break; /* * Allow IPIs to interrupt the timer loop. */ local_irq_enable(); local_irq_disable(); } skip_process_time_accounting: do { /* * If we're too close to the next clock tick for * comfort, we increase the safety margin by * intentionally dropping the next tick(s). We do NOT * update itm.next because that would force us to call * do_timer() which in turn would let our clock run * too fast (with the potentially devastating effect * of losing the monotonicity of time). */ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2)) new_itm += local_cpu_data->itm_delta; ia64_set_itm(new_itm); /* double check, in case we got hit by a (slow) PMI: */ } while (time_after_eq(ia64_get_itc(), new_itm)); return IRQ_HANDLED; }
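/*
 * The closing do/while above embodies a "drop ticks rather than fire late"
 * policy: the next match value is pushed forward in whole itm_delta steps
 * until it is at least half a tick ahead of the current counter, then
 * re-checked in case a slow interrupt ate the margin. A standalone sketch of
 * that deadline advance (plain C; the values and the helper name
 * advance_deadline are illustrative):
 */
#include <stdio.h>

/* Advance a periodic deadline past "now + period/2" in whole-period steps,
 * mirroring the itm reprogramming loop in timer_interrupt(). */
static unsigned long advance_deadline(unsigned long deadline,
				      unsigned long now,
				      unsigned long period)
{
	while ((long)(deadline - (now + period / 2)) <= 0)
		deadline += period;
	return deadline;
}

int main(void)
{
	/* period 100; we are at 730 but the stale deadline is 500:
	 * the ticks at 600 and 700 are dropped, the next fire is 800. */
	printf("%lu\n", advance_deadline(500, 730, 100));
	return 0;
}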