int rtl92c_init_sw_vars(struct ieee80211_hw *hw) { int err; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); rtl8192ce_bt_reg_init(hw); rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_AMF | RCR_ADF | RCR_APP_MIC | RCR_APP_ICV | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | 0); rtlpci->irq_mask[0] = (u32) (IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK | IMR_MGNTDOK | IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW | 0); rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; if (!rtlpriv->psc.inactiveps) pr_info("rtl8192ce: Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) pr_info("rtl8192ce: FW Power Save off (module option)\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl92c_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* 
for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw\n"); return 1; } /* request fw */ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && !IS_92C_SERIAL(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return 0; }
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev, struct ipoib_cm_rx *p) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_init_attr attr = { .event_handler = ipoib_cm_rx_event_handler, .send_cq = priv->recv_cq, /* For drain WR */ .recv_cq = priv->recv_cq, .srq = priv->cm.srq, .cap.max_send_wr = 1, /* For drain WR */ .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */ .sq_sig_type = IB_SIGNAL_ALL_WR, .qp_type = IB_QPT_RC, .qp_context = p, }; if (!ipoib_cm_has_srq(dev)) { attr.cap.max_recv_wr = ipoib_recvq_size; attr.cap.max_recv_sge = IPOIB_CM_RX_SG; } return ib_create_qp(priv->pd, &attr); } static int ipoib_cm_modify_rx_qp(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, unsigned psn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); return ret; } ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); return ret; } qp_attr.qp_state = IB_QPS_RTR; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); return ret; } qp_attr.rq_psn = psn; ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); return ret; } /* * Current Mellanox HCA firmware won't generate completions * with error for drain WRs unless the QP has been moved to * RTS first. This work-around leaves a window where a QP has * moved to error asynchronously, but this will eventually get * fixed in firmware, so let's not error out if modify QP * fails. 
*/ qp_attr.qp_state = IB_QPS_RTS; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); return 0; } ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); return 0; } return 0; } static void ipoib_cm_init_rx_wr(struct net_device *dev, struct ib_recv_wr *wr, struct ib_sge *sge) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i; for (i = 0; i < priv->cm.num_frags; ++i) sge[i].lkey = priv->mr->lkey; sge[0].length = IPOIB_CM_HEAD_SIZE; for (i = 1; i < priv->cm.num_frags; ++i) sge[i].length = PAGE_SIZE; wr->next = NULL; wr->sg_list = sge; wr->num_sge = priv->cm.num_frags; } static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct { struct ib_recv_wr wr; struct ib_sge sge[IPOIB_CM_RX_SG]; } *t; int ret; int i; <<<<<<< HEAD rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring); =======
int rtl92c_init_sw_vars(struct ieee80211_hw *hw) { int err; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); const struct firmware *firmware; rtl8192ce_bt_reg_init(hw); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_AMF | RCR_ADF | RCR_APP_MIC | RCR_APP_ICV | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | 0); rtlpci->irq_mask[0] = (u32) (IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK | IMR_MGNTDOK | IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW | 0); rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0); /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl92c_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Can't alloc buffer for fw.\n")); return 1; } /* request fw */ err = request_firmware(&firmware, rtlpriv->cfg->fw_name, rtlpriv->io.dev); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Failed to request 
firmware!\n")); return 1; } if (firmware->size > 0x4000) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Firmware is too big!\n")); release_firmware(firmware); return 1; } memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size); rtlpriv->rtlhal.fwsize = firmware->size; release_firmware(firmware); return 0; }
int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); rtl8723be_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); rtlpriv->phy.lck_inprogress = false; mac->ht_enable = true; /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | RCR_ADF | RCR_AICV | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_RDU | IMR_ROK | 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN | 0); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl8723be_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if 
(rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /*low power: Disable 32k */ rtlpriv->psc.low_power_enable = false; rtlpriv->rtlhal.earlymode_enable = false; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); return 1; } rtlpriv->max_fw_size = 0x8000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return 0; }
int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); int err = 0; rtl8723e_bt_reg_init(hw); rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | RCR_ADF | RCR_AICV | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32) (PHIMR_ROK | PHIMR_RDU | PHIMR_VODOK | PHIMR_VIDOK | PHIMR_BEDOK | PHIMR_BKDOK | PHIMR_MGNTDOK | PHIMR_HIGHDOK | PHIMR_C2HCMD | PHIMR_HISRE_IND | PHIMR_TSF_BIT32_TOGGLE | PHIMR_TXBCNOK | PHIMR_PSTIMEOUT | 0); rtlpci->irq_mask[1] = (u32)(PHIMR_RXFOVW | 0); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; rtlpriv->cfg->mod_params->disable_watchdog = rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; rtl8723e_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = 
FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x6000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); return 1; } if (IS_VENDOR_8723_A_CUT(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin"; else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin"; rtlpriv->max_fw_size = 0x6000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return 0; }
/*
 * netvsc_init_buf - allocate and register the NetVSC receive and send
 * buffers with the host.
 *
 * For each buffer the sequence is: vzalloc (NUMA-local first, then any
 * node), establish a GPADL over the VMBus connection, send the
 * SEND_RECV_BUF / SEND_SEND_BUF message, wait for the host's completion,
 * and parse the response.  The ordering of these steps is part of the
 * NVSP protocol handshake and must not be changed.
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * allocated so far is torn down via netvsc_destroy_buf().
 */
static int netvsc_init_buf(struct hv_device *device)
{
    int ret = 0;
    struct netvsc_device *net_device;
    struct nvsp_message *init_packet;
    struct net_device *ndev;
    int node;

    net_device = get_outbound_net_device(device);
    if (!net_device)
        return -ENODEV;
    ndev = hv_get_drvdata(device);

    /* Prefer memory on the channel's target CPU's NUMA node; fall back
     * to any node if that fails. */
    node = cpu_to_node(device->channel->target_cpu);
    net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
    if (!net_device->recv_buf)
        net_device->recv_buf = vzalloc(net_device->recv_buf_size);

    if (!net_device->recv_buf) {
        netdev_err(ndev, "unable to allocate receive "
            "buffer of size %d\n", net_device->recv_buf_size);
        ret = -ENOMEM;
        goto cleanup;
    }

    /*
     * Establish the gpadl handle for this buffer on this
     * channel. Note: This call uses the vmbus connection rather
     * than the channel to establish the gpadl handle.
     */
    ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                    net_device->recv_buf_size,
                    &net_device->recv_buf_gpadl_handle);
    if (ret != 0) {
        netdev_err(ndev,
            "unable to establish receive buffer's gpadl\n");
        goto cleanup;
    }

    /* Notify the NetVsp of the gpadl handle */
    init_packet = &net_device->channel_init_pkt;

    memset(init_packet, 0, sizeof(struct nvsp_message));

    init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
    init_packet->msg.v1_msg.send_recv_buf.
        gpadl_handle = net_device->recv_buf_gpadl_handle;
    init_packet->msg.v1_msg.
        send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

    /* Send the gpadl notification request */
    ret = vmbus_sendpacket(device->channel, init_packet,
                   sizeof(struct nvsp_message),
                   (unsigned long)init_packet,
                   VM_PKT_DATA_INBAND,
                   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
    if (ret != 0) {
        netdev_err(ndev,
            "unable to send receive buffer's gpadl to netvsp\n");
        goto cleanup;
    }

    /* The response is delivered into channel_init_pkt by the channel
     * callback, which signals this completion. */
    wait_for_completion(&net_device->channel_init_wait);

    /* Check the response */
    if (init_packet->msg.v1_msg.
        send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
        netdev_err(ndev, "Unable to complete receive buffer "
               "initialization with NetVsp - status %d\n",
               init_packet->msg.v1_msg.
               send_recv_buf_complete.status);
        ret = -EINVAL;
        goto cleanup;
    }

    /* Parse the response: copy the host-provided section table */
    net_device->recv_section_cnt = init_packet->msg.
        v1_msg.send_recv_buf_complete.num_sections;

    net_device->recv_section = kmemdup(
        init_packet->msg.v1_msg.send_recv_buf_complete.sections,
        net_device->recv_section_cnt *
        sizeof(struct nvsp_1_receive_buffer_section),
        GFP_KERNEL);
    if (net_device->recv_section == NULL) {
        ret = -EINVAL;
        goto cleanup;
    }

    /*
     * For 1st release, there should only be 1 section that represents the
     * entire receive buffer
     */
    if (net_device->recv_section_cnt != 1 ||
        net_device->recv_section->offset != 0) {
        ret = -EINVAL;
        goto cleanup;
    }

    /* Now setup the send buffer. */
    net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
    if (!net_device->send_buf)
        net_device->send_buf = vzalloc(net_device->send_buf_size);
    if (!net_device->send_buf) {
        netdev_err(ndev, "unable to allocate send "
               "buffer of size %d\n", net_device->send_buf_size);
        ret = -ENOMEM;
        goto cleanup;
    }

    /* Establish the gpadl handle for this buffer on this
     * channel. Note: This call uses the vmbus connection rather
     * than the channel to establish the gpadl handle.
     */
    ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
                    net_device->send_buf_size,
                    &net_device->send_buf_gpadl_handle);
    if (ret != 0) {
        netdev_err(ndev,
               "unable to establish send buffer's gpadl\n");
        goto cleanup;
    }

    /* Notify the NetVsp of the gpadl handle */
    init_packet = &net_device->channel_init_pkt;
    memset(init_packet, 0, sizeof(struct nvsp_message));
    init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
    init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
        net_device->send_buf_gpadl_handle;
    init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

    /* Send the gpadl notification request */
    ret = vmbus_sendpacket(device->channel, init_packet,
                   sizeof(struct nvsp_message),
                   (unsigned long)init_packet,
                   VM_PKT_DATA_INBAND,
                   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
    if (ret != 0) {
        netdev_err(ndev,
               "unable to send send buffer's gpadl to netvsp\n");
        goto cleanup;
    }

    wait_for_completion(&net_device->channel_init_wait);

    /* Check the response */
    if (init_packet->msg.v1_msg.
        send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
        netdev_err(ndev, "Unable to complete send buffer "
               "initialization with NetVsp - status %d\n",
               init_packet->msg.v1_msg.
               send_send_buf_complete.status);
        ret = -EINVAL;
        goto cleanup;
    }

    /* Parse the response */
    net_device->send_section_size = init_packet->msg.
        v1_msg.send_send_buf_complete.section_size;

    /* Section count is simply the size divided by the section size.
     * NOTE(review): assumes the host returns a non-zero section_size;
     * a hostile/buggy host value of 0 would divide by zero here. */
    net_device->send_section_cnt =
        net_device->send_buf_size/net_device->send_section_size;

    dev_info(&device->device, "Send section size: %d, Section count:%d\n",
         net_device->send_section_size, net_device->send_section_cnt);

    /* Setup state for managing the send buffer: one bit per section. */
    net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
                         BITS_PER_LONG);

    net_device->send_section_map =
        kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
    if (net_device->send_section_map == NULL) {
        ret = -ENOMEM;
        goto cleanup;
    }

    goto exit;

cleanup:
    netvsc_destroy_buf(device);

exit:
    return ret;
}
static int adafruit22fb_probe(struct spi_device *spi) { struct fb_info *info; struct fbtft_par *par; int ret; fbtft_dev_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, &spi->dev, "%s()\n", __func__); if (rotate > 3) { dev_warn(&spi->dev, "argument 'rotate' illegal value: %d (0-3). Setting it to 0.\n", rotate); rotate = 0; } switch (rotate) { case 0: case 2: adafruit22_display.width = WIDTH; adafruit22_display.height = HEIGHT; break; case 1: case 3: adafruit22_display.width = HEIGHT; adafruit22_display.height = WIDTH; break; } info = fbtft_framebuffer_alloc(&adafruit22_display, &spi->dev); if (!info) return -ENOMEM; par = info->par; par->spi = spi; fbtft_debug_init(par); par->fbtftops.init_display = adafruit22fb_init_display; par->fbtftops.register_backlight = fbtft_register_backlight; par->fbtftops.write_data_command = fbtft_write_data_command8_bus9; par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; par->fbtftops.set_addr_win = adafruit22fb_set_addr_win; spi->bits_per_word=9; ret = spi->master->setup(spi); if (ret) { dev_warn(&spi->dev, "9-bit SPI not available, emulating using 8-bit.\n"); spi->bits_per_word=8; ret = spi->master->setup(spi); if (ret) goto fbreg_fail; /* allocate buffer with room for dc bits */ par->extra = vzalloc(par->txbuf.len + (par->txbuf.len / 8)); if (!par->extra) { ret = -ENOMEM; goto fbreg_fail; } par->fbtftops.write = adafruit22fb_write_emulate_9bit; } ret = fbtft_register_framebuffer(info); if (ret < 0) goto fbreg_fail; return 0; fbreg_fail: if (par->extra) vfree(par->extra); fbtft_framebuffer_release(info); return ret; }
/*InitializeVariables8812E*/ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); mac->ht_enable = true; mac->ht_cur_stbc = 0; mac->ht_stbc_cap = 0; mac->vht_cur_ldpc = 0; mac->vht_ldpc_cap = 0; mac->vht_cur_stbc = 0; mac->vht_stbc_cap = 0; rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; /*following 2 is for register 5G band, refer to _rtl_init_mac80211()*/ rtlpriv->rtlhal.bandset = BAND_ON_BOTH; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_NONQOS_VHT | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | /*This bit controls the PS-Poll packet filter.*/ RCR_ADF | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32)(IMR_PSTIMEOUT | IMR_GTINT3 | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_RDU | IMR_ROK | 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | IMR_TXFOVW | 0); rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN | 0); /* for WOWLAN */ rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET | WAKE_ON_PATTERN_MATCH; /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = 
rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl8821ae_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); return 1; } rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for wowlan fw.\n"); return 1; } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; } else { rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; } rtlpriv->max_fw_size = 0x8000; /*load normal firmware*/ pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request normal firmware!\n"); return 1; } /*load wowlan firmware*/ pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->wowlan_fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_wowlan_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed 
to request wowlan firmware!\n"); return 1; } return 0; }
int rtl88e_init_sw_vars(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 tid; char *fw_name; rtl8188ee_bt_reg_init(hw); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15); /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | RCR_ADF | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32)(IMR_PSTIMEOUT | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_RDU | IMR_ROK | 0); rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0); rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN); /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; rtlpriv->cfg->mod_params->disable_watchdog = rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); if (!rtlpriv->psc.inactiveps) pr_info("rtl8188ee: Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) pr_info("rtl8188ee: FW Power Save off (module option)\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl88e_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode 
= FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { pr_info("Can't alloc buffer for fw.\n"); return 1; } fw_name = "rtlwifi/rtl8188efw.bin"; rtlpriv->max_fw_size = 0x8000; pr_info("Using firmware %s\n", fw_name); err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { pr_info("Failed to request firmware!\n"); vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; return 1; } /* for early mode */ rtlpriv->rtlhal.earlymode_enable = false; rtlpriv->rtlhal.max_earlymode_num = 10; for (tid = 0; tid < 8; tid++) skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]); /*low power */ rtlpriv->psc.low_power_enable = false; if (rtlpriv->psc.low_power_enable) { timer_setup(&rtlpriv->works.fw_clockoff_timer, rtl88ee_fw_clk_off_timer_callback, 0); } timer_setup(&rtlpriv->works.fast_antenna_training_timer, rtl88e_dm_fast_antenna_training_callback, 0); return err; }
static int prepare_elf64_headers(struct crash_elf_data *ced, void **addr, unsigned long *sz) { Elf64_Ehdr *ehdr; Elf64_Phdr *phdr; unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz; unsigned char *buf, *bufp; unsigned int cpu; unsigned long long notes_addr; int ret; /* extra phdr for vmcoreinfo elf note */ nr_phdr = nr_cpus + 1; nr_phdr += ced->max_nr_ranges; /* * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping * area on x86_64 (ffffffff80000000 - ffffffffa0000000). * I think this is required by tools like gdb. So same physical * memory will be mapped in two elf headers. One will contain kernel * text virtual addresses and other will have __va(physical) addresses. */ nr_phdr++; elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr); elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN); buf = vzalloc(elf_sz); if (!buf) return -ENOMEM; bufp = buf; ehdr = (Elf64_Ehdr *)bufp; bufp += sizeof(Elf64_Ehdr); memcpy(ehdr->e_ident, ELFMAG, SELFMAG); ehdr->e_ident[EI_CLASS] = ELFCLASS64; ehdr->e_ident[EI_DATA] = ELFDATA2LSB; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELF_OSABI; memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); ehdr->e_type = ET_CORE; ehdr->e_machine = ELF_ARCH; ehdr->e_version = EV_CURRENT; ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); /* Prepare one phdr of type PT_NOTE for each present cpu */ for_each_present_cpu(cpu) { phdr = (Elf64_Phdr *)bufp; bufp += sizeof(Elf64_Phdr); phdr->p_type = PT_NOTE; notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); phdr->p_offset = phdr->p_paddr = notes_addr; phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t); (ehdr->e_phnum)++; } /* Prepare one PT_NOTE header for vmcoreinfo */ phdr = (Elf64_Phdr *)bufp; bufp += sizeof(Elf64_Phdr); phdr->p_type = PT_NOTE; phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note(); phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note); (ehdr->e_phnum)++; 
#ifdef CONFIG_X86_64 /* Prepare PT_LOAD type program header for kernel text region */ phdr = (Elf64_Phdr *)bufp; bufp += sizeof(Elf64_Phdr); phdr->p_type = PT_LOAD; phdr->p_flags = PF_R|PF_W|PF_X; phdr->p_vaddr = (Elf64_Addr)_text; phdr->p_filesz = phdr->p_memsz = _end - _text; phdr->p_offset = phdr->p_paddr = __pa_symbol(_text); (ehdr->e_phnum)++; #endif /* Prepare PT_LOAD headers for system ram chunks. */ ced->ehdr = ehdr; ced->bufp = bufp; ret = walk_system_ram_res(0, -1, ced, prepare_elf64_ram_headers_callback); if (ret < 0) return ret; *addr = buf; *sz = elf_sz; return 0; }
/* Prepare memory map for crash dump kernel */
/*
 * crash_setup_memmap_entries - populate the boot_params e820-style memory
 * map handed to the crash-dump kernel: the backed-up low-memory segment,
 * ACPI tables and NVS (via resource walk), the optional crashk_low_res
 * region, and the usable pieces of crashk_res with some ranges excluded.
 *
 * Returns 0 on success or a negative errno.  cmem is a scratch structure
 * used only by memmap_exclude_ranges() and is always freed before return.
 */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
    int i, ret = 0;
    unsigned long flags;
    struct e820entry ei;
    struct crash_memmap_data cmd;
    struct crash_mem *cmem;

    cmem = vzalloc(sizeof(struct crash_mem));
    if (!cmem)
        return -ENOMEM;

    memset(&cmd, 0, sizeof(struct crash_memmap_data));
    cmd.params = params;

    /* Add first 640K segment */
    ei.addr = image->arch.backup_src_start;
    ei.size = image->arch.backup_src_sz;
    ei.type = E820_RAM;
    add_e820_entry(params, &ei);

    /* Add ACPI tables */
    cmd.type = E820_ACPI;
    flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
               memmap_entry_callback);

    /* Add ACPI Non-volatile Storage */
    cmd.type = E820_NVS;
    walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
            memmap_entry_callback);

    /* Add crashk_low_res region */
    if (crashk_low_res.end) {
        ei.addr = crashk_low_res.start;
        ei.size = crashk_low_res.end - crashk_low_res.start + 1;
        ei.type = E820_RAM;
        add_e820_entry(params, &ei);
    }

    /* Exclude some ranges from crashk_res and add rest to memmap */
    ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
                    crashk_res.end);
    if (ret)
        goto out;

    for (i = 0; i < cmem->nr_ranges; i++) {
        ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

        /* If entry is less than a page, skip it */
        if (ei.size < PAGE_SIZE)
            continue;
        ei.addr = cmem->ranges[i].start;
        ei.type = E820_RAM;
        add_e820_entry(params, &ei);
    }

out:
    vfree(cmem);
    return ret;
}
/*
 * stp_uart_fifo_init - allocate the line-discipline RX staging buffer and
 * the RX kfifo, handling three kernel-API generations via preprocessor
 * checks (kfifo API changed at 2.6.33; vzalloc appeared around 2.6.37).
 *
 * Returns 0 on success; negative codes (-1..-4) identify which allocation
 * step failed.  On failure the staging buffer is released, and any
 * partially allocated kfifo state is cleaned up on its own error path.
 */
static int stp_uart_fifo_init(void)
{
    int err = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
    /* vzalloc available: allocate zeroed buffer directly */
    g_stp_uart_rx_buf = vzalloc(LDISC_RX_BUF_SIZE);
    if (!g_stp_uart_rx_buf) {
        UART_ERR_FUNC("kfifo_alloc failed (kernel version >= 2.6.37)\n");
        err = -4;
        goto fifo_init_end;
    }
#else
    /* older kernels: vmalloc + explicit memset */
    g_stp_uart_rx_buf = vmalloc(LDISC_RX_BUF_SIZE);
    if (!g_stp_uart_rx_buf) {
        UART_ERR_FUNC("kfifo_alloc failed (kernel version < 2.6.37)\n");
        err = -4;
        goto fifo_init_end;
    }
    memset(g_stp_uart_rx_buf, 0, LDISC_RX_BUF_SIZE);
#endif

    UART_LOUD_FUNC("g_stp_uart_rx_buf alloc ok(0x%p, %d)\n",
        g_stp_uart_rx_buf, LDISC_RX_BUF_SIZE);

    /*add rx fifo*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
    /* old API: kfifo_alloc() returns the fifo and takes a spinlock */
    spin_lock_init(&g_stp_uart_rx_fifo_spinlock);
    g_stp_uart_rx_fifo = kfifo_alloc(LDISC_RX_FIFO_SIZE, GFP_KERNEL,
        &g_stp_uart_rx_fifo_spinlock);
    if (NULL == g_stp_uart_rx_fifo) {
        UART_ERR_FUNC("kfifo_alloc failed (kernel version < 2.6.33)\n");
        err = -1;
        goto fifo_init_end;
    }
#else
    /* new API: caller owns struct kfifo; kfifo_alloc() fills its buffer */
    /* allocate struct kfifo first */
    g_stp_uart_rx_fifo = kzalloc(sizeof(struct kfifo), GFP_KERNEL);
    if (NULL == g_stp_uart_rx_fifo) {
        err = -2;
        UART_ERR_FUNC("kzalloc struct kfifo failed (kernel version > 2.6.33)\n");
        goto fifo_init_end;
    }

    /* allocate kfifo data buffer then */
    err = kfifo_alloc(g_stp_uart_rx_fifo, LDISC_RX_FIFO_SIZE, GFP_KERNEL);
    if (0 != err) {
        UART_ERR_FUNC("kfifo_alloc failed, err(%d)(kernel version > 2.6.33)\n", err);
        kfree(g_stp_uart_rx_fifo);
        g_stp_uart_rx_fifo = NULL;
        err = -3;
        goto fifo_init_end;
    }
#endif
    UART_LOUD_FUNC("g_stp_uart_rx_fifo alloc ok\n");

fifo_init_end:

    if (0 == err) {
        /* kfifo init ok */
        kfifo_reset(g_stp_uart_rx_fifo);
        UART_DBG_FUNC("g_stp_uart_rx_fifo init success\n");
    } else {
        /* some step failed: release the staging buffer; fifo cleanup
         * already happened on its own error path above */
        UART_ERR_FUNC("stp_uart_fifo_init() fail(%d)\n", err);
        if (g_stp_uart_rx_buf) {
            UART_ERR_FUNC("free g_stp_uart_rx_buf\n");
            vfree(g_stp_uart_rx_buf);
            g_stp_uart_rx_buf = NULL;
        }
    }

    return err;
}
int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; int i; int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) c->blocks = vzalloc(size); else #endif c->blocks = kzalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; c->blocks[i].free_size = c->sector_size; } INIT_LIST_HEAD(&c->clean_list); INIT_LIST_HEAD(&c->very_dirty_list); INIT_LIST_HEAD(&c->dirty_list); INIT_LIST_HEAD(&c->erasable_list); INIT_LIST_HEAD(&c->erasing_list); INIT_LIST_HEAD(&c->erase_checking_list); INIT_LIST_HEAD(&c->erase_pending_list); INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); INIT_LIST_HEAD(&c->erase_complete_list); INIT_LIST_HEAD(&c->free_list); INIT_LIST_HEAD(&c->bad_list); INIT_LIST_HEAD(&c->bad_used_list); c->highest_ino = 1; c->summary = NULL; ret = jffs2_sum_init(c); if (ret) goto out_free; if (jffs2_build_filesystem(c)) { dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); ret = -EIO; goto out_free; } jffs2_calc_trigger_levels(c); return 0; out_free: #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) vfree(c->blocks); else #endif kfree(c->blocks); return ret; }
/*
 * Open entry point for the ISP char device.
 *
 * Allocates a zeroed per-open context (struct isp_k_file), initialises
 * its ISR queue and locks, enables and resets the ISP module, and
 * registers its IRQ.  The context is stored in file->private_data.
 *
 * Locking: device_lock is taken before allocating and is released on
 * every error path.  NOTE(review): on the success path the semaphore is
 * still held at return — presumably the matching release() drops it;
 * confirm against the file_operations counterpart.
 */
static int isp_open (struct inode *node, struct file *file)
{
	int ret = 0;
	struct isp_k_private *isp_private = s_isp_private;//platform_get_drvdata(rot_get_platform_device())
	struct isp_k_file *fd = NULL;

	if (!file) {
		ret = -EINVAL;
		printk("isp_open: file is null error.\n");
		return ret;
	}

	if (!isp_private) {
		ret = -EFAULT;
		printk("isp_open: isp_private is null, error.\n");
		return ret;
	}

	down(&isp_private->device_lock);

	/* zeroed per-open state */
	fd = vzalloc(sizeof(*fd));
	if (!fd) {
		ret = -ENOMEM;
		up(&isp_private->device_lock);
		printk("isp_open: no memory for fd, error.\n");
		return ret;
	}

	fd->isp_private = isp_private;

	spin_lock_init(&fd->drv_private.isr_lock);
	sema_init(&fd->drv_private.isr_done_lock, 0);

	ret = isp_queue_init(&(fd->drv_private.queue));
	if (unlikely(0 != ret)) {
		ret = -EFAULT;
		vfree(fd);
		up(&isp_private->device_lock);
		printk("isp_open: isp_queue_init error.\n");
		return ret;
	}

	/* enable, then reset, then hook the interrupt — in that order */
	ret = isp_module_eb(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		printk("isp_open: isp_module_eb error.\n");
		goto open_exit;
	}

	ret = isp_module_rst(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		isp_module_dis(fd);
		printk("isp_open: isp_module_rst error.\n");
		goto open_exit;
	}

	ret = isp_register_irq(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		isp_module_dis(fd);
		printk("isp_open: isp_register_irq error.\n");
		goto open_exit;
	}

	file->private_data = fd;

	printk("isp_open: success.\n");

	return ret;

open_exit:
	vfree(fd);
	fd = NULL;
	file->private_data = NULL;
	up(&isp_private->device_lock);

	return ret;
}
/*
 * Configure (or reconfigure) a CHOKe qdisc from netlink attributes.
 *
 * Parses TCA_CHOKE_{PARMS,STAB,MAX_P}; when the requested limit implies
 * a different ring size, allocates a new skb ring, migrates the queued
 * packets to it under the qdisc tree lock, and drops whatever does not
 * fit.  RED parameters are then (re)loaded.
 *
 * Returns 0 on success or a negative errno.
 */
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	/* ring size: next power of two above limit, stored as index mask */
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		/* try kmalloc first, fall back to vmalloc for big rings */
		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			/* move live skbs into the new ring; drop overflow */
			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	/* free the superseded ring outside the lock */
	choke_free(old);
	return 0;
}
/*
 * Collect a plain-text driver state dump into adapter->drv_info_dump.
 *
 * Any previous dump buffer is freed first, then a fresh
 * MWIFIEX_DRV_INFO_SIZE_MAX buffer is allocated and filled with driver
 * version, USB/SDIO interface counters, per-interface WMM/queue state
 * and extra debug info.  adapter->drv_info_size is set to the number of
 * bytes written.  Silently returns if the buffer allocation fails.
 *
 * NOTE(review): the sprintf() chain is unbounded — it relies on
 * MWIFIEX_DRV_INFO_SIZE_MAX being large enough for the worst case;
 * confirm when adding new fields.
 */
void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
{
	void *p;
	char drv_version[64];
	struct usb_card_rec *cardp;
	struct sdio_mmc_card *sdio_card;
	struct mwifiex_private *priv;
	int i, idx;
	struct netdev_queue *txq;
	struct mwifiex_debug_info *debug_info;

	/* drop any previous dump; the pointer is reassigned just below */
	if (adapter->drv_info_dump) {
		vfree(adapter->drv_info_dump);
		adapter->drv_info_size = 0;
	}

	dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");

	adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
	if (!adapter->drv_info_dump)
		return;

	p = (char *)(adapter->drv_info_dump);
	p += sprintf(p, "driver_name = " "\"mwifiex\"\n");

	mwifiex_drv_get_driver_version(adapter, drv_version,
				       sizeof(drv_version) - 1);
	p += sprintf(p, "driver_version = %s\n", drv_version);

	if (adapter->iface_type == MWIFIEX_USB) {
		cardp = (struct usb_card_rec *)adapter->card;
		p += sprintf(p, "tx_cmd_urb_pending = %d\n",
			     atomic_read(&cardp->tx_cmd_urb_pending));
		p += sprintf(p, "tx_data_urb_pending = %d\n",
			     atomic_read(&cardp->tx_data_urb_pending));
		p += sprintf(p, "rx_cmd_urb_pending = %d\n",
			     atomic_read(&cardp->rx_cmd_urb_pending));
		p += sprintf(p, "rx_data_urb_pending = %d\n",
			     atomic_read(&cardp->rx_data_urb_pending));
	}

	p += sprintf(p, "tx_pending = %d\n",
		     atomic_read(&adapter->tx_pending));
	p += sprintf(p, "rx_pending = %d\n",
		     atomic_read(&adapter->rx_pending));

	if (adapter->iface_type == MWIFIEX_SDIO) {
		sdio_card = (struct sdio_mmc_card *)adapter->card;
		p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
			     sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port);
		p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
			     sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port);
	}

	/* per-interface state: WMM counters, link state, tx queues */
	for (i = 0; i < adapter->priv_num; i++) {
		if (!adapter->priv[i] || !adapter->priv[i]->netdev)
			continue;
		priv = adapter->priv[i];
		p += sprintf(p, "\n[interface : \"%s\"]\n",
			     priv->netdev->name);
		p += sprintf(p, "wmm_tx_pending[0] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[0]));
		p += sprintf(p, "wmm_tx_pending[1] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[1]));
		p += sprintf(p, "wmm_tx_pending[2] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[2]));
		p += sprintf(p, "wmm_tx_pending[3] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[3]));
		p += sprintf(p, "media_state=\"%s\"\n",
			     !priv->media_connected ?
			     "Disconnected" : "Connected");
		p += sprintf(p, "carrier %s\n",
			     (netif_carrier_ok(priv->netdev) ? "on" : "off"));
		for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) {
			txq = netdev_get_tx_queue(priv->netdev, idx);
			p += sprintf(p, "tx queue %d:%s ", idx,
				     netif_tx_queue_stopped(txq) ?
				     "stopped" : "started");
		}
		p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
			     priv->netdev->name, priv->num_tx_timeout);
	}

	if (adapter->iface_type == MWIFIEX_SDIO) {
		p += sprintf(p, "\n=== SDIO register DUMP===\n");
		if (adapter->if_ops.reg_dump)
			p += adapter->if_ops.reg_dump(adapter, p);
	}

	/* debug info from the first usable interface only (break below) */
	p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
	debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
	if (debug_info) {
		for (i = 0; i < adapter->priv_num; i++) {
			if (!adapter->priv[i] || !adapter->priv[i]->netdev)
				continue;
			priv = adapter->priv[i];
			mwifiex_get_debug_info(priv, debug_info);
			p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
			break;
		}
		kfree(debug_info);
	}

	adapter->drv_info_size = p - adapter->drv_info_dump;
	dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
}
/*
 * Bring the xen-netback backend online: read frontend parameters from
 * xenstore, allocate and initialise the requested queues, connect their
 * rings, and enable carrier/watches.  Failures are reported through
 * xenbus_dev_fatal(); partially initialised queues are unwound via the
 * err: path.
 */
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "multi-queue-num-queues",
			   "%u", &requested_num_queues);
	if (err < 0) {
		requested_num_queues = 1; /* Fall back to single queue */
	} else if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		/* NOTE(review): err here is the (non-negative) xenbus_scanf
		 * result, not an errno — confirm the intended value for
		 * xenbus_dev_fatal(). */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(requested_num_queues *
				  sizeof(struct xenvif_queue));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
				be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to i so that
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_rings(be, queue);
		if (err) {
			/* connect_rings() cleans up after itself on failure,
			 * but we need to clean up after xenvif_init_queue()
			 * here, and also clean up any previously initialised
			 * queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect(be->vif); /* Clean up existing queues */
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	return;
}
/*
 * Read the SCU fabric-error log out of OSHOB, format it (plus the SCU
 * extra trace) into an allocated text buffer, expose it through
 * /proc/ipanic_fabric_err, mirror it to the console as an error, then
 * ask the SCU to clear the error region.
 *
 * The log buffer stays allocated on success (the proc read handler
 * serves from it); it is freed only on the error paths.  oshob_base is
 * unmapped in all cases.
 */
static int __init intel_fw_logging_init(void)
{
	int length = 0;
	int err = 0;
	u32 scu_trace_size;
	u32 trace_size;

	oshob_base = get_oshob_addr();
	if (oshob_base == NULL)
		return -EINVAL;

	/*
	 * Calculate size of SCU extra trace buffer. Size of the buffer
	 * is given by SCU. Make sanity check in case of incorrect data.
	 * We don't want to allocate all of the memory for trace dump.
	 */
	trace_size = intel_scu_ipc_get_scu_trace_buffer_size();
	if (trace_size > MAX_SCU_EXTRA_DUMP_SIZE)
		trace_size = 0;

	/*
	 * Buffer size is in bytes. We will dump 32-bit hex values + header.
	 * Calculate number of lines and assume constant number of
	 * characters per line.
	 */
	scu_trace_size = ((trace_size / sizeof(u32)) + 2) *
		CHAR_PER_LINE_EXTRA_TRACE;

	/* Allocate buffer for traditional trace and extra trace */
	log_buffer = vzalloc(MAX_FULL_SIZE + scu_trace_size);
	if (!log_buffer) {
		err = -ENOMEM;
		goto err_nomem;
	}

	length = create_fwerr_log(log_buffer, oshob_base);
	/* NOTE(review): length == 0 frees the buffer and returns 0 (err is
	 * still 0) — success with no proc entry; presumably intentional
	 * "nothing to report", confirm. */
	if (!length)
		goto err_nolog;

	length = dump_scu_extented_trace(log_buffer, length, trace_size);

#ifdef CONFIG_PROC_FS
	ipanic_faberr = create_proc_entry("ipanic_fabric_err",
					  S_IFREG | S_IRUGO, NULL);
	if (ipanic_faberr == 0) {
		pr_err("Fail creating procfile ipanic_fabric_err\n");
		err = -ENOMEM;
		goto err_procfail;
	}

	ipanic_faberr->read_proc = intel_fw_logging_proc_read;
	ipanic_faberr->write_proc = NULL;
	ipanic_faberr->size = length;
#endif /* CONFIG_PROC_FS */

	/* Dump log as error to console */
	pr_err("%s", log_buffer);

	/* Clear fabric error region inside OSHOB if neccessary */
	intel_scu_ipc_simple_command(IPCMSG_CLEAR_FABERROR, 0);
	goto leave;

err_procfail:
err_nolog:
	vfree(log_buffer);
err_nomem:
leave:
	iounmap(oshob_base);
	return err;
}
/*
 * Probe a Hecuba/Apollo display: allocate the virtual framebuffer,
 * register it, and initialise the panel via the board hooks.
 *
 * Fix versus the previous revision: if the board init() hook fails
 * AFTER register_framebuffer() succeeded, the framebuffer must be
 * unregistered before being released; jumping straight to
 * framebuffer_release() leaked the registration.  A dedicated
 * err_dpyinit label now unwinds it.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit hecubafb_probe(struct platform_device *dev)
{
	struct fb_info *info;
	struct hecuba_board *board;
	int retval = -ENOMEM;
	int videomemorysize;
	unsigned char *videomemory;
	struct hecubafb_par *par;

	/* pick up board specific routines */
	board = dev->dev.platform_data;
	if (!board)
		return -EINVAL;

	/* try to count device specific driver, if can't, platform recalls */
	if (!try_module_get(board->owner))
		return -ENODEV;

	/* 1 bit per pixel */
	videomemorysize = (DPY_W*DPY_H)/8;

	videomemory = vzalloc(videomemorysize);
	if (!videomemory)
		goto err_videomem_alloc;

	info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
	if (!info)
		goto err_fballoc;

	info->screen_base = (char __force __iomem *)videomemory;
	info->fbops = &hecubafb_ops;

	info->var = hecubafb_var;
	info->fix = hecubafb_fix;
	info->fix.smem_len = videomemorysize;
	par = info->par;
	par->info = info;
	par->board = board;
	par->send_command = pollo_send_command;
	par->send_data = pollo_send_data;

	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;

	info->fbdefio = &hecubafb_defio;
	fb_deferred_io_init(info);

	retval = register_framebuffer(info);
	if (retval < 0)
		goto err_fbreg;
	platform_set_drvdata(dev, info);

	printk(KERN_INFO
	       "fb%d: Hecuba frame buffer device, using %dK of video memory\n",
	       info->node, videomemorysize >> 10);

	/* this inits the dpy */
	retval = par->board->init(par);
	if (retval < 0)
		goto err_dpyinit;

	return 0;

err_dpyinit:
	/* registered above — must unregister before releasing */
	unregister_framebuffer(info);
err_fbreg:
	framebuffer_release(info);
err_fballoc:
	vfree(videomemory);
err_videomem_alloc:
	module_put(board->owner);
	return retval;
}
/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev: Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and init a kernel base context.  Sub-objects are initialised
 * in order (mem pool, job dispatcher, job scheduler, events, MMU, page
 * directory, sink page, region tracker); the error labels unwind them
 * in reverse.
 *
 * Return: new kbase context, or NULL on allocation/init failure
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* zero-inited as lot of code assume it's zero'ed out on create */
	kctx = vzalloc(sizeof(*kctx));

	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	kctx->infinite_cache_active = 0;
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;

	err = kbase_mem_pool_init(&kctx->mem_pool,
				  kbdev->mem_pool_max_size_default,
				  kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto free_pool;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif

	err = kbase_mmu_init(kctx);
	if (err)
		goto free_event;

	kctx->pgd = kbase_mmu_alloc_pgd(kctx);
	if (!kctx->pgd)
		goto free_mmu;

	/* single page that aliasing regions can point unused slots at */
	kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	kctx->tgid = current->tgid;
	kctx->pid = current->pid;
	sprintf(kctx->process_name, "%s", current->group_leader->comm);
	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	return kctx;

no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
free_mmu:
	kbase_mmu_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
/*
 * Software-variable initialisation for the RTL8192DE PCI driver:
 * DM defaults, dual-MAC band/channel selection, RX/TX config words,
 * IRQ masks, power-save setup, early-mode TX queues and asynchronous
 * firmware load.
 *
 * Returns 0 on success; returns 1 (driver-private failure code, not an
 * errno) if the firmware buffer cannot be allocated or the firmware
 * request cannot be issued.
 */
static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
{
	int err;
	u8 tid;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtlpriv->dm.dm_initialgain_enable = true;
	rtlpriv->dm.dm_flag = 0;
	rtlpriv->dm.disable_framebursting = false;
	rtlpriv->dm.thermalvalue = 0;
	rtlpriv->dm.useramask = true;

	/* dual mac: default channel depends on the band this MAC owns */
	if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
		rtlpriv->phy.current_channel = 36;
	else
		rtlpriv->phy.current_channel = 1;

	if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
		rtlpriv->rtlhal.disable_amsdu_8k = true;
		/* No long RX - reduce fragmentation */
		rtlpci->rxbuffersize = 4096;
	}

	rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);

	rtlpci->receive_config = (
			RCR_APPFCS |
			RCR_AMF |
			RCR_ADF |
			RCR_APP_MIC |
			RCR_APP_ICV |
			RCR_AICV |
			RCR_ACRC32 |
			RCR_AB |
			RCR_AM |
			RCR_APM |
			RCR_APP_PHYST_RXFF |
			RCR_HTC_LOC_CTRL
	);

	rtlpci->irq_mask[0] = (u32) (
			IMR_ROK |
			IMR_VODOK |
			IMR_VIDOK |
			IMR_BEDOK |
			IMR_BKDOK |
			IMR_MGNTDOK |
			IMR_HIGHDOK |
			IMR_BDOK |
			IMR_RDU |
			IMR_RXFOVW
	);

	rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);

	/* for debug level */
	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
	/* for LPS & IPS */
	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
	if (!rtlpriv->psc.inactiveps)
		pr_info("Power Save off (module option)\n");
	if (!rtlpriv->psc.fwctrl_lps)
		pr_info("FW Power Save off (module option)\n");
	rtlpriv->psc.reg_fwctrl_lps = 3;
	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
	/* for ASPM, you can close aspm through
	 * set const_support_pciaspm = 0 */
	rtl92d_init_aspm_vars(hw);

	/* map reg_fwctrl_lps (fixed to 3 above) onto the FW PS mode */
	if (rtlpriv->psc.reg_fwctrl_lps == 1)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;

	/* for early mode */
	rtlpriv->rtlhal.earlymode_enable = true;
	for (tid = 0; tid < 8; tid++)
		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);

	/* for firmware buf */
	rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
	if (!rtlpriv->rtlhal.pfirmware) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't alloc buffer for fw\n");
		return 1;
	}

	rtlpriv->max_fw_size = 0x8000;
	pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
	pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);

	/* request fw */
	err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Failed to request firmware!\n");
		return 1;
	}

	return 0;
}
/*
 * Grow a mounted reiserfs to @block_count_new blocks (on-line shrinking
 * is refused).  If more bitmap blocks are needed, the journal bitmaps
 * and the in-core bitmap array are enlarged first and the new on-disk
 * bitmap blocks initialised; then, under a journal transaction, the old
 * last bitmap block is extended, the new last bitmap block is trimmed,
 * and the superblock counts are updated.
 *
 * Returns 0 on success or a negative errno.
 */
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size: the new last block must be readable */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
			(reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = reiserfs_bmap_count(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the
		 ** bitmap node pointers from the old journal bitmap structs,
		 ** and then transfer the new data structures into the journal
		 ** struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of
		 * bitmap block pointers */
		bitmap =
		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the
			 * memory isn't leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the
		 * journal transaction begins, and the new bitmaps don't
		 * matter if the transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			if (!bh) {
				vfree(bitmap);
				return -EIO;
			}
			memset(bh->b_data, 0, sb_blocksize(sb));
			/* bit 0 covers the bitmap block itself */
			reiserfs_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			reiserfs_write_unlock(s);
			sync_dirty_buffer(bh);
			reiserfs_write_lock(s);
			// update bitmap_info stuff
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
/*
 * Software-variable initialisation for the RTL8192SE/RTL8191SE PCI
 * driver: DM defaults, 2.4G-only band setup, RX config and IRQ masks,
 * retry limits, power-save setup and asynchronous firmware load.
 *
 * Returns 0 on success; returns 1 (driver-private failure code, not an
 * errno) if the firmware buffer cannot be allocated or the firmware
 * request cannot be issued.
 */
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int err = 0;
	u16 earlyrxthreshold = 7;
	char *fw_name = "rtlwifi/rtl8192sefw.bin";

	rtlpriv->dm.dm_initialgain_enable = true;
	rtlpriv->dm.dm_flag = 0;
	rtlpriv->dm.disable_framebursting = false;
	rtlpriv->dm.thermalvalue = 0;
	rtlpriv->dm.useramask = true;

	/* compatible 5G band 91se just 2.4G band & smsp */
	rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
	rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
	rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;

	rtlpci->transmit_config = 0;

	rtlpci->receive_config =
			RCR_APPFCS |
			RCR_APWRMGT |
			/*RCR_ADD3 |*/
			RCR_AMF |
			RCR_ADF |
			RCR_APP_MIC |
			RCR_APP_ICV |
			RCR_AICV |
			/* Accept ICV error, CRC32 Error */
			RCR_ACRC32 |
			RCR_AB |
			/* Accept Broadcast, Multicast */
			RCR_AM |
			/* Accept Physical match */
			RCR_APM |
			/* Accept Destination Address packets */
			/*RCR_AAP |*/
			RCR_APP_PHYST_STAFF |
			/* Accept PHY status */
			RCR_APP_PHYST_RXFF |
			(earlyrxthreshold << RCR_FIFO_OFFSET);

	rtlpci->irq_mask[0] = (u32)
			(IMR_ROK |
			IMR_VODOK |
			IMR_VIDOK |
			IMR_BEDOK |
			IMR_BKDOK |
			IMR_HCCADOK |
			IMR_MGNTDOK |
			IMR_COMDOK |
			IMR_HIGHDOK |
			IMR_BDOK |
			IMR_RXCMDOK |
			/*IMR_TIMEOUT0 |*/
			IMR_RDU |
			IMR_RXFOVW |
			IMR_BCNINT
			/*| IMR_TXFOVW*/
			/*| IMR_TBDOK | IMR_TBDER*/);

	rtlpci->irq_mask[1] = (u32) 0;

	rtlpci->shortretry_limit = 0x30;
	rtlpci->longretry_limit = 0x30;

	rtlpci->first_init = true;

	/* for debug level */
	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
	/* for LPS & IPS */
	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
	/* NOTE(review): self-assignment is a no-op — presumably meant to
	 * copy the module param into a psc field; compare against the
	 * other rtlwifi variants and confirm. */
	rtlpriv->cfg->mod_params->sw_crypto =
		rtlpriv->cfg->mod_params->sw_crypto;
	if (!rtlpriv->psc.inactiveps)
		pr_info("Power Save off (module option)\n");
	if (!rtlpriv->psc.fwctrl_lps)
		pr_info("FW Power Save off (module option)\n");
	rtlpriv->psc.reg_fwctrl_lps = 3;
	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
	/* for ASPM, you can close aspm through
	 * set const_support_pciaspm = 0 */
	rtl92s_init_aspm_vars(hw);

	/* map reg_fwctrl_lps (fixed to 3 above) onto the FW PS mode */
	if (rtlpriv->psc.reg_fwctrl_lps == 1)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;

	/* for firmware buf */
	rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware));
	if (!rtlpriv->rtlhal.pfirmware)
		return 1;

	rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
			       sizeof(struct fw_hdr);
	pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
		"Loading firmware %s\n", fw_name);
	/* request fw */
	err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl92se_fw_cb);
	if (err) {
		pr_err("Failed to request firmware!\n");
		return 1;
	}

	return err;
}
/*
 * Probe the i.MX3 CSI camera host: map its registers, acquire the
 * clock, apply platform defaults (data width, master clock) and
 * register the soc-camera host.
 *
 * Fix versus the previous revision: pdev->dev.platform_data was
 * dereferenced (->flags, ->mclk_10khz) without a NULL check, so probing
 * without platform data oopsed.  It now fails cleanly with -EINVAL,
 * reusing the eioremap cleanup chain (clk_put + vfree).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit mx3_camera_probe(struct platform_device *pdev)
{
	struct mx3_camera_dev *mx3_cam;
	struct resource *res;
	void __iomem *base;
	int err = 0;
	struct soc_camera_host *soc_host;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto egetres;
	}

	mx3_cam = vzalloc(sizeof(*mx3_cam));
	if (!mx3_cam) {
		dev_err(&pdev->dev, "Could not allocate mx3 camera object\n");
		err = -ENOMEM;
		goto ealloc;
	}

	mx3_cam->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mx3_cam->clk)) {
		err = PTR_ERR(mx3_cam->clk);
		goto eclkget;
	}

	mx3_cam->pdata = pdev->dev.platform_data;
	if (!mx3_cam->pdata) {
		/* fields of pdata are dereferenced below — bail out early */
		dev_err(&pdev->dev, "No platform data\n");
		err = -EINVAL;
		goto eioremap;
	}
	mx3_cam->platform_flags = mx3_cam->pdata->flags;
	if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
			MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 |
			MX3_CAMERA_DATAWIDTH_15))) {
		/*
		 * Platform hasn't set available data widths. This is bad.
		 * Warn and use a default.
		 */
		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
			 "data widths, using default 8 bit\n");
		mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
	}

	mx3_cam->mclk = mx3_cam->pdata->mclk_10khz * 10000;
	if (!mx3_cam->mclk) {
		dev_warn(&pdev->dev,
			 "mclk_10khz == 0! Please, fix your platform data. "
			 "Using default 20MHz\n");
		mx3_cam->mclk = 20000000;
	}

	/* list of video-buffers */
	INIT_LIST_HEAD(&mx3_cam->capture);
	spin_lock_init(&mx3_cam->lock);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		pr_err("Couldn't map %x@%x\n", resource_size(res), res->start);
		err = -ENOMEM;
		goto eioremap;
	}

	mx3_cam->base = base;

	soc_host		= &mx3_cam->soc_host;
	soc_host->drv_name	= MX3_CAM_DRV_NAME;
	soc_host->ops		= &mx3_soc_camera_host_ops;
	soc_host->priv		= mx3_cam;
	soc_host->v4l2_dev.dev	= &pdev->dev;
	soc_host->nr		= pdev->id;

	mx3_cam->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(mx3_cam->alloc_ctx)) {
		err = PTR_ERR(mx3_cam->alloc_ctx);
		goto eallocctx;
	}

	err = soc_camera_host_register(soc_host);
	if (err)
		goto ecamhostreg;

	/* IDMAC interface */
	dmaengine_get();

	return 0;

ecamhostreg:
	vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
eallocctx:
	iounmap(base);
eioremap:
	clk_put(mx3_cam->clk);
eclkget:
	vfree(mx3_cam);
ealloc:
egetres:
	return err;
}
static int agp_backend_initialize(struct agp_bridge_data *bridge) { int size_value, rc, got_gatt=0, got_keylist=0; bridge->max_memory_agp = agp_find_max(); bridge->version = &agp_current_version; if (bridge->driver->needs_scratch_page) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (!page) { dev_err(&bridge->dev->dev, "can't get memory for scratch page\n"); return -ENOMEM; } bridge->scratch_page_page = page; bridge->scratch_page_dma = page_to_phys(page); bridge->scratch_page = bridge->driver->mask_memory(bridge, bridge->scratch_page_dma, 0); } size_value = bridge->driver->fetch_size(); if (size_value == 0) { dev_err(&bridge->dev->dev, "can't determine aperture size\n"); rc = -EINVAL; goto err_out; } if (bridge->driver->create_gatt_table(bridge)) { dev_err(&bridge->dev->dev, "can't get memory for graphics translation table\n"); rc = -ENOMEM; goto err_out; } got_gatt = 1; bridge->key_list = vzalloc(PAGE_SIZE * 4); if (bridge->key_list == NULL) { dev_err(&bridge->dev->dev, "can't allocate memory for key lists\n"); rc = -ENOMEM; goto err_out; } got_keylist = 1; /* FIXME vmalloc'd memory not guaranteed contiguous */ if (bridge->driver->configure()) { dev_err(&bridge->dev->dev, "error configuring host chipset\n"); rc = -EINVAL; goto err_out; } INIT_LIST_HEAD(&bridge->mapped_list); spin_lock_init(&bridge->mapped_lock); return 0; err_out: if (bridge->driver->needs_scratch_page) { struct page *page = bridge->scratch_page_page; bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } if (got_gatt) bridge->driver->free_gatt_table(bridge); if (got_keylist) { vfree(bridge->key_list); bridge->key_list = NULL; } return rc; }
static int __init mpq_dmx_tspp_plugin_init(void) { int i; int j; int ret; MPQ_DVB_DBG_PRINT("%s executed\n", __func__); for (i = 0; i < TSIF_COUNT; i++) { mpq_dmx_tspp_info.tsif[i].buffer_count = TSPP_BUFFER_COUNT(tspp_out_buffer_size); if (mpq_dmx_tspp_info.tsif[i].buffer_count > MAX_BAM_DESCRIPTOR_COUNT) mpq_dmx_tspp_info.tsif[i].buffer_count = MAX_BAM_DESCRIPTOR_COUNT; mpq_dmx_tspp_info.tsif[i].aggregate_ids = vzalloc(mpq_dmx_tspp_info.tsif[i].buffer_count * sizeof(int)); if (NULL == mpq_dmx_tspp_info.tsif[i].aggregate_ids) { MPQ_DVB_ERR_PRINT( "%s: Failed to allocate memory for buffer descriptors aggregation\n", __func__); for (j = 0; j < i; j++) { kthread_stop(mpq_dmx_tspp_info.tsif[j].thread); vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids); mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex); } return -ENOMEM; } mpq_dmx_tspp_info.tsif[i].channel_ref = 0; mpq_dmx_tspp_info.tsif[i].buff_index = 0; mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL; mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL; mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0; atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0); for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) { mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1; mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0; mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1; } for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++) mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0; mpq_dmx_tspp_info.tsif[i].current_filter_count = 0; mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0; mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0; mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0; snprintf(mpq_dmx_tspp_info.tsif[i].name, TSIF_NAME_LENGTH, "dmx_tsif%d", i); init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue); mpq_dmx_tspp_info.tsif[i].thread = kthread_run( mpq_dmx_tspp_thread, (void *)i, mpq_dmx_tspp_info.tsif[i].name); if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) { vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids); for (j = 0; j < i; j++) { 
kthread_stop(mpq_dmx_tspp_info.tsif[j].thread); vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids); mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex); } MPQ_DVB_ERR_PRINT( "%s: kthread_run failed\n", __func__); return -ENOMEM; } mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex); } ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init); if (ret < 0) { MPQ_DVB_ERR_PRINT( "%s: mpq_dmx_plugin_init failed (errno=%d)\n", __func__, ret); for (i = 0; i < TSIF_COUNT; i++) { kthread_stop(mpq_dmx_tspp_info.tsif[i].thread); vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids); mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex); } } return ret; }
static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { const struct bcm_nlmsg_hdr *nlioc = data; struct net_device *ndev = NULL; struct bcm_cfg80211 *cfg; struct sk_buff *reply; void *buf = NULL, *cur; dhd_pub_t *dhd; dhd_ioctl_t ioc = { 0 }; int ret = 0, ret_len, payload, msglen; int maxmsglen = PAGE_SIZE - 0x100; int8 index; WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd)); cfg = wiphy_priv(wiphy); dhd = cfg->pub; DHD_OS_WAKE_LOCK(dhd); /* send to dongle only if we are not waiting for reload already */ if (dhd->hang_was_sent) { WL_ERR(("HANG was sent up earlier\n")); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); DHD_OS_WAKE_UNLOCK(dhd); return OSL_ERROR(BCME_DONGLE_DOWN); } len -= sizeof(struct bcm_nlmsg_hdr); ret_len = nlioc->len; if (ret_len > 0 || len > 0) { if (len > DHD_IOCTL_MAXLEN) { WL_ERR(("oversize input buffer %d\n", len)); len = DHD_IOCTL_MAXLEN; } if (ret_len > DHD_IOCTL_MAXLEN) { WL_ERR(("oversize return buffer %d\n", ret_len)); ret_len = DHD_IOCTL_MAXLEN; } payload = max(ret_len, len) + 1; buf = vzalloc(payload); if (!buf) { DHD_OS_WAKE_UNLOCK(dhd); return -ENOMEM; } memcpy(buf, (void *)nlioc + nlioc->offset, len); *(char *)(buf + len) = '\0'; } ndev = wdev_to_wlc_ndev(wdev, cfg); index = dhd_net2idx(dhd->info, ndev); if (index == DHD_BAD_IF) { WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); ret = BCME_ERROR; goto done; } ioc.cmd = nlioc->cmd; ioc.len = nlioc->len; ioc.set = nlioc->set; ioc.driver = nlioc->magic; ret = dhd_ioctl_process(dhd, index, &ioc, buf); if (ret) { WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); ret = OSL_ERROR(ret); goto done; } cur = buf; while (ret_len > 0) { msglen = nlioc->len > maxmsglen ? 
maxmsglen : ret_len; ret_len -= msglen; payload = msglen + sizeof(msglen); reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); if (!reply) { WL_ERR(("Failed to allocate reply msg\n")); ret = -ENOMEM; break; } if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) || nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) { kfree_skb(reply); ret = -ENOBUFS; break; } ret = cfg80211_vendor_cmd_reply(reply); if (ret) { WL_ERR(("testmode reply failed:%d\n", ret)); break; } cur += msglen; } done: vfree(buf); DHD_OS_WAKE_UNLOCK(dhd); return ret; }
/*
 * ldm_init() - module init for a simple RAM-backed block device.
 *
 * Allocates the gendisk, registers a block major (fixed 12, falling back
 * to a dynamic one), backs the device with vzalloc'ed memory, creates the
 * request queue and publishes the disk.  Returns 0 or a negative errno.
 */
static int __init ldm_init(void)
{
	int ret = 0;

	printk(KERN_ALERT "%s\n", __func__);

	/* 1. Allocate the gendisk; the argument is the maximum number of
	 * minors (partitions) this disk may have. */
	ldm.disk = alloc_disk(10);
	if (!ldm.disk) {
		ret = -ENOMEM;
		printk(KERN_ERR "alloc_disk failed\n");
		goto err_alloc_disk;
	}

	/* 2. Register a block major number.  Try the fixed major 12 first;
	 * if taken, ask the kernel for a free one (major 0 = dynamic).  The
	 * registration shows up in /proc/devices. */
	if (register_blkdev(12, DEVNAME) < 0) {
		ret = register_blkdev(0, DEVNAME);
		if (ret < 0) {
			printk(KERN_ERR "register_blkdev failed\n");
			goto err_register_blkdev;
		}
		ldm.disk->major = ret;
	} else {
		ldm.disk->major = 12;
	}
	printk("blkdev major num = %d\n", ldm.disk->major);

	/* 3. Fill in the gendisk: fops carries the methods partitioning
	 * tools need; capacity is counted in 512-byte sectors.  Use a
	 * bounded "%s" copy -- sprintf(dst, DEVNAME) would treat any '%'
	 * in DEVNAME as a format specifier. */
	ldm.disk->fops = &ldm.fops;
	snprintf(ldm.disk->disk_name, sizeof(ldm.disk->disk_name), "%s",
		 DEVNAME);
	set_capacity(ldm.disk, DEV_SIZE / SECTOR_SIZE);

	/* 4. Back the device with vmalloc'ed memory: virtually contiguous,
	 * physically scattered -- fine for a RAM disk. */
	ldm.addr = vzalloc(DEV_SIZE);
	if (!ldm.addr) {
		printk(KERN_ERR "vzalloc failed\n");
		ret = -ENOMEM;
		goto err_vmalloc;
	}

	/* 5. Create the request queue with the spinlock the block layer
	 * framework requires. */
	spin_lock_init(&ldm.lock);
	ldm.disk->queue = blk_init_queue(do_ldm_req, &ldm.lock);
	if (!ldm.disk->queue) {
		printk(KERN_ERR "blk_init_queue failed\n");
		ret = -ENOMEM;
		goto err_blk_init_queue;
	}

	/* 6. Make the disk visible to the system. */
	add_disk(ldm.disk);

	return 0;

err_blk_init_queue:
	vfree(ldm.addr);
err_vmalloc:
	unregister_blkdev(ldm.disk->major, DEVNAME);
err_register_blkdev:
	/* Bug fix: the disk was never add_disk()ed, so drop the reference
	 * with put_disk(); del_gendisk() is only for disks already added. */
	put_disk(ldm.disk);
err_alloc_disk:
	return ret;
}
/*
 * st7735fb_probe() - bind the ST7735 SPI framebuffer driver.
 *
 * Allocates the video memory and fb_info, fills in an RGB565 packed-pixel
 * layout, registers the deferred-io framebuffer and programs the panel.
 * Returns 0 or a negative errno; all allocations are released on failure.
 */
static int __devinit st7735fb_probe(struct spi_device *spi)
{
	int chip = spi_get_device_id(spi)->driver_data;
	struct st7735fb_platform_data *pdata = spi->dev.platform_data;
	int vmem_size = WIDTH*HEIGHT*BPP/8;
	u8 *vmem;
	struct fb_info *info;
	struct st7735fb_par *par;
	int retval = -ENOMEM;

	if (chip != ST7735_DISPLAY_AF_TFT18) {
		pr_err("%s: only the %s device is supported\n", DRVNAME,
			to_spi_driver(spi->dev.driver)->id_table->name);
		return -EINVAL;
	}

	if (!pdata) {
		pr_err("%s: platform data required for rst and dc info\n",
			DRVNAME);
		return -EINVAL;
	}

	/* 'vmem' stays the screen buffer for the whole function so the
	 * cleanup path can always free it. */
	vmem = vzalloc(vmem_size);
	if (!vmem)
		return retval;

	info = framebuffer_alloc(sizeof(struct st7735fb_par), &spi->dev);
	if (!info)
		goto fballoc_fail;

	info->screen_base = (u8 __force __iomem *)vmem;
	info->fbops = &st7735fb_ops;
	info->fix = st7735fb_fix;
	info->fix.smem_len = vmem_size;
	info->var = st7735fb_var;
	/* Choose any packed pixel format as long as it's RGB565 */
	info->var.red.offset = 11;
	info->var.red.length = 5;
	info->var.green.offset = 5;
	info->var.green.length = 6;
	info->var.blue.offset = 0;
	info->var.blue.length = 5;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;
	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
	info->fbdefio = &st7735fb_defio;
	fb_deferred_io_init(info);

	par = info->par;
	par->info = info;
	par->spi = spi;
	par->rst = pdata->rst_gpio;
	par->dc = pdata->dc_gpio;

#ifdef __LITTLE_ENDIAN
	/* Allocate swapped shadow buffer.  Bug fix: the original returned
	 * directly on failure here, leaking both 'info' and the screen
	 * buffer; route through the cleanup labels instead. */
	par->ssbuf = vzalloc(vmem_size);
	if (!par->ssbuf)
		goto fbreg_fail;
#endif

	retval = register_framebuffer(info);
	if (retval < 0)
		goto fbreg_fail;

	spi_set_drvdata(spi, info);

	retval = st7735fb_init_display(par);
	if (retval < 0)
		goto init_fail;

	printk(KERN_INFO
		"fb%d: %s frame buffer device,\n\tusing %d KiB of video memory\n",
		info->node, info->fix.id, vmem_size);

	return 0;

	/* TODO: release gpios on fail */
	/* NOTE(review): after register_framebuffer() succeeds, this path
	 * releases the fb without unregister_framebuffer() -- same as the
	 * original; confirm whether an unregister belongs here. */
init_fail:
	spi_set_drvdata(spi, NULL);

fbreg_fail:
#ifdef __LITTLE_ENDIAN
	/* par->ssbuf was assigned (possibly NULL) before any jump here,
	 * and vfree(NULL) is a no-op. */
	vfree(par->ssbuf);
#endif
	framebuffer_release(info);

fballoc_fail:
	vfree(vmem);

	return retval;
}
/*
 * mwifiex_upload_device_dump() - hand collected driver/firmware dump
 * buffers to the devcoredump facility.
 *
 * Builds a single flat text blob -- a driver-info section followed by one
 * "Start dump <name>" / "End dump" framed section per captured memory
 * type -- and passes it to dev_coredumpv().  All capture buffers (and the
 * driver-info buffer) are freed before returning, whether or not the blob
 * was built.
 */
void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
{
	u8 idx, *dump_data, *fw_dump_ptr;
	u32 dump_len;

	/* First pass: compute the exact output size -- fixed banners plus
	 * the driver-info payload... */
	dump_len = (strlen("========Start dump driverinfo========\n") +
			adapter->drv_info_size +
			strlen("\n========End dump========\n"));

	/* ...plus one framed section per memory type that was captured.
	 * Each banner length here must mirror the strcpy sequence in the
	 * second pass below. */
	for (idx = 0; idx < adapter->num_mem_types; idx++) {
		struct memory_type_mapping *entry =
				&adapter->mem_type_mapping_tbl[idx];

		if (entry->mem_ptr) {
			dump_len += (strlen("========Start dump ") +
					strlen(entry->mem_name) +
					strlen("========\n") +
					(entry->mem_size + 1) +
					strlen("\n========End dump========\n"));
		}
	}

	/* +1 keeps room for the NUL the final strcpy writes past dump_len. */
	dump_data = vzalloc(dump_len + 1);
	if (!dump_data)
		goto done;

	fw_dump_ptr = dump_data;

	/* Dump all the memory data into single file, a userspace script
	 * will be used to split all the memory data to multiple files */
	mwifiex_dbg(adapter, MSG,
		    "== mwifiex dump information to /sys/class/devcoredump start");

	/* Second pass: driver-info section first. */
	strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
	fw_dump_ptr += strlen("========Start dump driverinfo========\n");
	memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
	fw_dump_ptr += adapter->drv_info_size;
	strcpy(fw_dump_ptr, "\n========End dump========\n");
	fw_dump_ptr += strlen("\n========End dump========\n");

	/* Then each captured memory region, framed by its banners. */
	for (idx = 0; idx < adapter->num_mem_types; idx++) {
		struct memory_type_mapping *entry =
				&adapter->mem_type_mapping_tbl[idx];

		if (entry->mem_ptr) {
			strcpy(fw_dump_ptr, "========Start dump ");
			fw_dump_ptr += strlen("========Start dump ");
			strcpy(fw_dump_ptr, entry->mem_name);
			fw_dump_ptr += strlen(entry->mem_name);
			strcpy(fw_dump_ptr, "========\n");
			fw_dump_ptr += strlen("========\n");
			memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
			fw_dump_ptr += entry->mem_size;
			strcpy(fw_dump_ptr, "\n========End dump========\n");
			fw_dump_ptr += strlen("\n========End dump========\n");
		}
	}

	/* device dump data will be free in device coredump release function
	 * after 5 min */
	dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
	mwifiex_dbg(adapter, MSG,
		    "== mwifiex dump information to /sys/class/devcoredump end");

done:
	/* Release every per-region capture buffer and reset its bookkeeping
	 * so the next dump starts clean. */
	for (idx = 0; idx < adapter->num_mem_types; idx++) {
		struct memory_type_mapping *entry =
				&adapter->mem_type_mapping_tbl[idx];

		if (entry->mem_ptr) {
			vfree(entry->mem_ptr);
			entry->mem_ptr = NULL;
		}
		entry->mem_size = 0;
	}

	if (adapter->drv_info_dump) {
		vfree(adapter->drv_info_dump);
		adapter->drv_info_dump = NULL;
		adapter->drv_info_size = 0;
	}
}