static int netvsc_init_buf(struct hv_device *device) { int ret = 0; struct netvsc_device *net_device; struct nvsp_message *init_packet; struct net_device *ndev; int node; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = hv_get_drvdata(device); node = cpu_to_node(device->channel->target_cpu); net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); if (!net_device->recv_buf) net_device->recv_buf = vzalloc(net_device->recv_buf_size); if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); ret = -ENOMEM; goto cleanup; } /* * Establish the gpadl handle for this buffer on this * channel. Note: This call uses the vmbus connection rather * than the channel to establish the gpadl handle. */ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, net_device->recv_buf_size, &net_device->recv_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, "unable to establish receive buffer's gpadl\n"); goto cleanup; } /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF; init_packet->msg.v1_msg.send_recv_buf. gpadl_handle = net_device->recv_buf_gpadl_handle; init_packet->msg.v1_msg. send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; /* Send the gpadl notification request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { netdev_err(ndev, "unable to send receive buffer's gpadl to netvsp\n"); goto cleanup; } wait_for_completion(&net_device->channel_init_wait); /* Check the response */ if (init_packet->msg.v1_msg. send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "Unable to complete receive buffer " "initialization with NetVsp - status %d\n", init_packet->msg.v1_msg. send_recv_buf_complete.status); ret = -EINVAL; goto cleanup; } /* Parse the response */ net_device->recv_section_cnt = init_packet->msg. v1_msg.send_recv_buf_complete.num_sections; net_device->recv_section = kmemdup( init_packet->msg.v1_msg.send_recv_buf_complete.sections, net_device->recv_section_cnt * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL); if (net_device->recv_section == NULL) { ret = -EINVAL; goto cleanup; } /* * For 1st release, there should only be 1 section that represents the * entire receive buffer */ if (net_device->recv_section_cnt != 1 || net_device->recv_section->offset != 0) { ret = -EINVAL; goto cleanup; } /* Now setup the send buffer. */ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); if (!net_device->send_buf) net_device->send_buf = vzalloc(net_device->send_buf_size); if (!net_device->send_buf) { netdev_err(ndev, "unable to allocate send " "buffer of size %d\n", net_device->send_buf_size); ret = -ENOMEM; goto cleanup; } /* Establish the gpadl handle for this buffer on this * channel. Note: This call uses the vmbus connection rather * than the channel to establish the gpadl handle. 
*/ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, net_device->send_buf_size, &net_device->send_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, "unable to establish send buffer's gpadl\n"); goto cleanup; } /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF; init_packet->msg.v1_msg.send_send_buf.gpadl_handle = net_device->send_buf_gpadl_handle; init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID; /* Send the gpadl notification request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { netdev_err(ndev, "unable to send send buffer's gpadl to netvsp\n"); goto cleanup; } wait_for_completion(&net_device->channel_init_wait); /* Check the response */ if (init_packet->msg.v1_msg. send_send_buf_complete.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "Unable to complete send buffer " "initialization with NetVsp - status %d\n", init_packet->msg.v1_msg. send_send_buf_complete.status); ret = -EINVAL; goto cleanup; } /* Parse the response */ net_device->send_section_size = init_packet->msg. v1_msg.send_send_buf_complete.section_size; /* Section count is simply the size divided by the section size. */ net_device->send_section_cnt = net_device->send_buf_size/net_device->send_section_size; dev_info(&device->device, "Send section size: %d, Section count:%d\n", net_device->send_section_size, net_device->send_section_cnt); /* Setup state for managing the send buffer. */ net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG); net_device->send_section_map = kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); if (net_device->send_section_map == NULL) { ret = -ENOMEM; goto cleanup; } goto exit; cleanup: netvsc_destroy_buf(device); exit: return ret; }
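/*
 * Worked example for the send-buffer bookkeeping above (hypothetical but
 * typical sizes): with a 1 MB send buffer and a 6144-byte section size
 * reported by the host, send_section_cnt = 1048576 / 6144 = 170, and on a
 * 64-bit kernel map_words = DIV_ROUND_UP(170, 64) = 3, i.e. a three-long
 * bitmap (send_section_map) tracks which sections are in use.
 */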
static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps) { struct pinctrl_map *new_map; char *group = NULL; unsigned new_num = 1; unsigned long config = 0; unsigned long *pconfig; int length = strlen(np->name) + SUFFIX_LEN; bool purecfg = false; u32 val, reg; int ret, i = 0; /* Check for pin config node which has no 'reg' property */ if (of_property_read_u32(np, "reg", &reg)) purecfg = true; ret = of_property_read_u32(np, "fsl,drive-strength", &val); if (!ret) config = val | MA_PRESENT; ret = of_property_read_u32(np, "fsl,voltage", &val); if (!ret) config |= val << VOL_SHIFT | VOL_PRESENT; ret = of_property_read_u32(np, "fsl,pull-up", &val); if (!ret) config |= val << PULL_SHIFT | PULL_PRESENT; /* Check for group node which has both mux and config settings */ if (!purecfg && config) new_num = 2; new_map = kzalloc(sizeof(*new_map) * new_num, GFP_KERNEL); if (!new_map) return -ENOMEM; if (!purecfg) { new_map[i].type = PIN_MAP_TYPE_MUX_GROUP; new_map[i].data.mux.function = np->name; /* Compose group name */ group = kzalloc(length, GFP_KERNEL); if (!group) { ret = -ENOMEM; goto free; } snprintf(group, length, "%s.%d", np->name, reg); new_map[i].data.mux.group = group; i++; } if (config) { pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); if (!pconfig) { ret = -ENOMEM; goto free_group; } new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; new_map[i].data.configs.group_or_pin = purecfg ? np->name : group; new_map[i].data.configs.configs = pconfig; new_map[i].data.configs.num_configs = 1; } *map = new_map; *num_maps = new_num; return 0; free_group: if (!purecfg) kfree(group); free: kfree(new_map); return ret; }
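/*
 * Example device-tree node consumed by mxs_dt_node_to_map() (illustrative;
 * the node name and values are hypothetical). A node with a "reg" property
 * yields a mux map entry whose group is named "<node-name>.<reg>", plus a
 * second config map entry when any of the fsl,* properties are present:
 *
 *	mmc0_pins_a: mmc0@0 {
 *		reg = <0>;
 *		fsl,drive-strength = <1>;
 *		fsl,voltage = <1>;
 *		fsl,pull-up = <1>;
 *	};
 */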
static int fill_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir, unsigned short pid) { u8 *buf, **formats; unsigned int len, eid = 0; struct snd_oxfw_stream_formation dummy; int err; buf = kmalloc(AVC_GENERIC_FRAME_MAXIMUM_BYTES, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if (dir == AVC_GENERAL_PLUG_DIR_OUT) formats = oxfw->tx_stream_formats; else formats = oxfw->rx_stream_formats; /* get first entry */ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES; err = avc_stream_get_format_list(oxfw->unit, dir, 0, buf, &len, 0); if (err == -ENOSYS) { /* LIST subfunction is not implemented */ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES; err = assume_stream_formats(oxfw, dir, pid, buf, &len, formats); goto end; } else if (err < 0) { dev_err(&oxfw->unit->device, "failed to get stream format %d for isoc %s plug %d:%d\n", eid, (dir == AVC_GENERAL_PLUG_DIR_IN) ? "in" : "out", pid, err); goto end; } /* LIST subfunction is implemented */ while (eid < SND_OXFW_STREAM_FORMAT_ENTRIES) { /* The format is too short. */ if (len < 3) { err = -EIO; break; } /* parse and set stream format */ err = snd_oxfw_stream_parse_format(buf, &dummy); if (err < 0) break; formats[eid] = kmemdup(buf, len, GFP_KERNEL); if (formats[eid] == NULL) { err = -ENOMEM; break; } /* get next entry */ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES; err = avc_stream_get_format_list(oxfw->unit, dir, 0, buf, &len, ++eid); /* No entries remained. */ if (err == -EINVAL) { err = 0; break; } else if (err < 0) { dev_err(&oxfw->unit->device, "failed to get stream format %d for isoc %s plug %d:%d\n", eid, (dir == AVC_GENERAL_PLUG_DIR_IN) ? "in" : "out", pid, err); break; } } end: kfree(buf); return err; }
int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) { struct p54_common *priv = dev->priv; struct eeprom_pda_wrap *wrap; struct pda_entry *entry; unsigned int data_len, entry_len; void *tmp; int err; u8 *end = (u8 *)eeprom + len; u16 synth = 0; u16 crc16 = ~0; wrap = (struct eeprom_pda_wrap *) eeprom; entry = (void *)wrap->data + le16_to_cpu(wrap->len); /* verify that at least the entry length/code fits */ while ((u8 *)entry <= end - sizeof(*entry)) { entry_len = le16_to_cpu(entry->len); data_len = ((entry_len - 1) << 1); /* abort if entry exceeds whole structure */ if ((u8 *)entry + sizeof(*entry) + data_len > end) break; switch (le16_to_cpu(entry->code)) { case PDR_MAC_ADDRESS: if (data_len != ETH_ALEN) break; SET_IEEE80211_PERM_ADDR(dev, entry->data); break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: if (priv->output_limit) break; err = p54_convert_output_limits(dev, entry->data, data_len); if (err) goto err; break; case PDR_PRISM_PA_CAL_CURVE_DATA: { struct pda_pa_curve_data *curve_data = (struct pda_pa_curve_data *)entry->data; if (data_len < sizeof(*curve_data)) { err = -EINVAL; goto err; } switch (curve_data->cal_method_rev) { case 0: err = p54_convert_rev0(dev, curve_data); break; case 1: err = p54_convert_rev1(dev, curve_data); break; default: wiphy_err(dev->wiphy, "unknown curve data revision %d\n", curve_data->cal_method_rev); err = -ENODEV; break; } if (err) goto err; } break; case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: priv->iq_autocal = kmemdup(entry->data, data_len, GFP_KERNEL); if (!priv->iq_autocal) { err = -ENOMEM; goto err; } priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); break; case PDR_DEFAULT_COUNTRY: p54_parse_default_country(dev, entry->data, data_len); break; case PDR_INTERFACE_LIST: tmp = entry->data; while ((u8 *)tmp < entry->data + data_len) { struct exp_if *exp_if = tmp; if (exp_if->if_id == cpu_to_le16(IF_ID_ISL39000)) synth = le16_to_cpu(exp_if->variant); tmp += sizeof(*exp_if); } break; case PDR_HARDWARE_PLATFORM_COMPONENT_ID: if (data_len < 2) break; priv->version = *(u8 *)(entry->data + 1); break; case PDR_RSSI_LINEAR_APPROXIMATION: case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: p54_parse_rssical(dev, entry->data, data_len, le16_to_cpu(entry->code)); break; case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: { __le16 *src = (void *) entry->data; s16 *dst = (void *) &priv->rssical_db; int i; if (data_len != sizeof(priv->rssical_db)) { err = -EINVAL; goto err; } for (i = 0; i < sizeof(priv->rssical_db) / sizeof(*src); i++) *(dst++) = (s16) le16_to_cpu(*(src++)); } break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->output_limit || data_len < sizeof(*pda)) break; priv->output_limit = p54_convert_db(pda, data_len); } break; case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->curve_data || data_len < sizeof(*pda)) break; priv->curve_data = p54_convert_db(pda, data_len); } break; case PDR_END: crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry)); if (crc16 != le16_to_cpup((__le16 *)entry->data)) { wiphy_err(dev->wiphy, "eeprom failed checksum " "test!\n"); err = -ENOMSG; goto err; } else { goto good_eeprom; } break; default: break; } crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2); entry = (void *)entry + (entry_len + 1) * 2; } wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n"); err = -ENODATA; goto err; good_eeprom: if (!synth || 
!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { wiphy_err(dev->wiphy, "not all required entries found in eeprom!\n"); err = -EINVAL; goto err; } err = p54_generate_channel_lists(dev); if (err) goto err; priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) p54_init_xbow_synth(priv); if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) dev->wiphy->bands[IEEE80211_BAND_2GHZ] = priv->band_table[IEEE80211_BAND_2GHZ]; if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) dev->wiphy->bands[IEEE80211_BAND_5GHZ] = priv->band_table[IEEE80211_BAND_5GHZ]; if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) priv->rx_diversity_mask = 3; if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) priv->tx_diversity_mask = 3; if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { u8 perm_addr[ETH_ALEN]; wiphy_warn(dev->wiphy, "Invalid hwaddr! Using randomly generated MAC addr\n"); random_ether_addr(perm_addr); SET_IEEE80211_PERM_ADDR(dev, perm_addr); } wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", dev->wiphy->perm_addr, priv->version, p54_rf_chips[priv->rxhw]); return 0; err: kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; wiphy_err(dev->wiphy, "eeprom parse failed!\n"); return err; }
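/*
 * Worked example of the PDA walk above: entry->len counts 16-bit words and
 * excludes itself but includes the code word, so a regular entry with
 * len = 4 carries data_len = (4 - 1) * 2 = 6 payload bytes, the running
 * CRC covers (4 + 1) * 2 = 10 bytes (len word + code word + data), and
 * the cursor advances by those same 10 bytes to the next entry.
 */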
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; enum ieee80211_band band; int i, err; ASSERT_WDEV_LOCK(wdev); if (!wdev->wext.ibss.beacon_interval) wdev->wext.ibss.beacon_interval = 100; /* try to find an IBSS channel if none requested ... */ if (!wdev->wext.ibss.channel) { for (band = 0; band < IEEE80211_NUM_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_NO_IBSS) continue; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; wdev->wext.ibss.channel = chan; break; } if (wdev->wext.ibss.channel) break; } if (!wdev->wext.ibss.channel) return -EINVAL; } /* don't join -- SSID is not there */ if (!wdev->wext.ibss.ssid_len) return 0; if (!netif_running(wdev->netdev)) return 0; if (wdev->wext.keys) wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.ibss.privacy = wdev->wext.default_key != -1; if (wdev->wext.keys) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 6; i++) ck->params[i].key = ck->data[i]; } err = __cfg80211_join_ibss(rdev, wdev->netdev, &wdev->wext.ibss, ck); if (err) kfree(ck); return err; }
static int __devinit dsps_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; const struct dsps_musb_wrapper *wrp; struct dsps_glue *glue; struct resource *iomem; int ret, i; match = of_match_node(musb_dsps_of_match, np); if (!match) { dev_err(&pdev->dev, "failed to get matching of_match struct\n"); ret = -EINVAL; goto err0; } wrp = match->data; /* allocate glue */ glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "unable to allocate glue memory\n"); ret = -ENOMEM; goto err0; } /* get memory resource */ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { dev_err(&pdev->dev, "failed to get usbss mem resource\n"); ret = -ENODEV; goto err1; } glue->dev = &pdev->dev; glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL); if (!glue->wrp) { dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n"); ret = -ENOMEM; goto err1; } platform_set_drvdata(pdev, glue); /* enable the usbss clocks */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_get_sync FAILED\n"); goto err2; } /* create the child platform device for all instances of musb */ for (i = 0; i < wrp->instances; i++) { ret = dsps_create_musb_pdev(glue, i); if (ret != 0) { dev_err(&pdev->dev, "failed to create child pdev\n"); /* release resources of previously created instances */ for (i--; i >= 0; i--) dsps_delete_musb_pdev(glue, i); goto err3; } } return 0; err3: pm_runtime_put(&pdev->dev); err2: pm_runtime_disable(&pdev->dev); kfree(glue->wrp); err1: kfree(glue); err0: return ret; }
static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind) { struct nlattr *tb[TCA_IPT_MAX + 1]; struct tcf_ipt *ipt; struct tcf_common *pc; struct ipt_entry_target *td, *t; char *tname; int ret = 0, err; u32 hook = 0; u32 index = 0; if (nla == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy); if (err < 0) return err; if (tb[TCA_IPT_HOOK] == NULL) return -EINVAL; if (tb[TCA_IPT_TARG] == NULL) return -EINVAL; td = (struct ipt_entry_target *)nla_data(tb[TCA_IPT_TARG]); if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) return -EINVAL; if (tb[TCA_IPT_INDEX] != NULL) index = nla_get_u32(tb[TCA_IPT_INDEX]); pc = tcf_hash_check(index, a, bind, &ipt_hash_info); if (!pc) { pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, &ipt_idx_gen, &ipt_hash_info); if (IS_ERR(pc)) return PTR_ERR(pc); ret = ACT_P_CREATED; } else { if (!ovr) { tcf_ipt_release(to_ipt(pc), bind); return -EEXIST; } } ipt = to_ipt(pc); hook = nla_get_u32(tb[TCA_IPT_HOOK]); err = -ENOMEM; tname = kmalloc(IFNAMSIZ, GFP_KERNEL); if (unlikely(!tname)) goto err1; if (tb[TCA_IPT_TABLE] == NULL || nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ) strcpy(tname, "mangle"); t = kmemdup(td, td->u.target_size, GFP_KERNEL); if (unlikely(!t)) goto err2; if ((err = ipt_init_target(t, tname, hook)) < 0) goto err3; spin_lock_bh(&ipt->tcf_lock); if (ret != ACT_P_CREATED) { ipt_destroy_target(ipt->tcfi_t); kfree(ipt->tcfi_tname); kfree(ipt->tcfi_t); } ipt->tcfi_tname = tname; ipt->tcfi_t = t; ipt->tcfi_hook = hook; spin_unlock_bh(&ipt->tcf_lock); if (ret == ACT_P_CREATED) tcf_hash_insert(pc, &ipt_hash_info); return ret; err3: kfree(t); err2: kfree(tname); err1: kfree(pc); return err; }
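/*
 * Userspace example (illustrative) that reaches tcf_ipt_init(): attaching
 * the "ipt" action to a u32 filter. With no table attribute supplied, the
 * code above falls back to the "mangle" table:
 *
 *	# tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match u32 0 0 action ipt -j MARK --set-mark 0x1
 */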
int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result; enum ieee80211_band band; int channels, max_bitrates; bool supp_ht; static const u32 cipher_suites[] = { /* keep WEP first, it may be removed below */ WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, /* keep last -- depends on hw flags! */ WLAN_CIPHER_SUITE_AES_CMAC }; if (hw->max_report_rates == 0) hw->max_report_rates = hw->max_rates; /* * generic code guarantees at least one band, * set this very early because much code assumes * that hw.conf.channel is assigned */ channels = 0; max_bitrates = 0; supp_ht = false; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; if (!local->oper_channel) { /* init channel we're on */ local->hw.conf.channel = local->oper_channel = &sband->channels[0]; local->hw.conf.channel_type = NL80211_CHAN_NO_HT; } channels += sband->n_channels; /* * Since ieee80211_disable_40mhz_24ghz is global, we can * modify the sband's ht data even if the driver uses a * global structure for that. */ if (ieee80211_disable_40mhz_24ghz && band == IEEE80211_BAND_2GHZ && sband->ht_cap.ht_supported) { sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; } if (max_bitrates < sband->n_bitrates) max_bitrates = sband->n_bitrates; supp_ht = supp_ht || sband->ht_cap.ht_supported; } local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + sizeof(void *) * channels, GFP_KERNEL); if (!local->int_scan_req) return -ENOMEM; /* if low-level driver supports AP, we also support VLAN */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); /* mac80211 always supports monitor */ local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); #endif /* mac80211 supports control port protocol changing */ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK), "U-APSD not supported with HW_PS_NULLFUNC_STACK\n"); /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; } /* * If the driver supports any scan IEs, then assume the * limit includes the IEs mac80211 will add, otherwise * leave it at zero and let the driver sort it out; we * still pass our IEs to the driver but userspace will * not be allowed to in that case. 
*/ if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; /* Set up cipher suites unless driver already did */ if (!local->hw.wiphy->cipher_suites) { local->hw.wiphy->cipher_suites = cipher_suites; local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) local->hw.wiphy->n_cipher_suites--; } if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) { if (local->hw.wiphy->cipher_suites == cipher_suites) { local->hw.wiphy->cipher_suites += 2; local->hw.wiphy->n_cipher_suites -= 2; } else { u32 *suites; int r, w = 0; /* Filter out WEP */ suites = kmemdup( local->hw.wiphy->cipher_suites, sizeof(u32) * local->hw.wiphy->n_cipher_suites, GFP_KERNEL); if (!suites) return -ENOMEM; for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { u32 suite = local->hw.wiphy->cipher_suites[r]; if (suite == WLAN_CIPHER_SUITE_WEP40 || suite == WLAN_CIPHER_SUITE_WEP104) continue; suites[w++] = suite; } local->hw.wiphy->cipher_suites = suites; local->hw.wiphy->n_cipher_suites = w; local->wiphy_ciphers_allocated = true; } } if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; result = wiphy_register(local->hw.wiphy); if (result < 0) goto fail_wiphy_register; /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. */ if (hw->queues > IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; local->workqueue = alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0); if (!local->workqueue) { result = -ENOMEM; goto fail_workqueue; } /* * The hardware needs headroom for sending the frame, * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. */ BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM != sizeof(struct ieee80211_tx_status_rtap_hdr)); local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, sizeof(struct ieee80211_tx_status_rtap_hdr)); debugfs_hw_add(local); /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default */ if (local->hw.max_listen_interval == 0) local->hw.max_listen_interval = 5; local->hw.conf.listen_interval = local->hw.max_listen_interval; local->dynamic_ps_forced_timeout = -1; result = sta_info_start(local); if (result < 0) goto fail_sta_info; result = ieee80211_wep_init(local); if (result < 0) wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", result); rtnl_lock(); result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); goto fail_rate; } /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { result = ieee80211_if_add(local, "wlan%d", NULL, NL80211_IFTYPE_STATION, NULL); if (result) wiphy_warn(local->hw.wiphy, "Failed to add default virtual iface\n"); } rtnl_unlock(); ieee80211_led_init(local); local->network_latency_notifier.notifier_call = ieee80211_max_network_latency; result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, &local->network_latency_notifier); if (result) { rtnl_lock(); goto fail_pm_qos; } #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); if (result) goto fail_ifa; #endif netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll, local->hw.napi_weight); return 0; #ifdef CONFIG_INET fail_ifa: 
pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, &local->network_latency_notifier); rtnl_lock(); #endif fail_pm_qos: ieee80211_led_exit(local); ieee80211_remove_interfaces(local); fail_rate: rtnl_unlock(); ieee80211_wep_free(local); sta_info_stop(local); fail_sta_info: destroy_workqueue(local->workqueue); fail_workqueue: wiphy_unregister(local->hw.wiphy); fail_wiphy_register: if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); kfree(local->int_scan_req); return result; }
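/*
 * Worked example of the scan_ies_len computation above (assuming the
 * usual 26-byte struct ieee80211_ht_cap): a band with max_bitrates = 12
 * and HT support reserves 4 + 12 (supported/extended rates) + 3 (DS
 * Params) + 2 + 26 (HT capabilities) = 47 bytes, which is then subtracted
 * from the driver's max_scan_ie_len budget.
 */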
static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) { /* * Before processing AH: * [IPv6][Ext1][Ext2][AH][Dest][Payload] * |<-------------->| hdr_len * * To erase AH: * Keep a copy of the headers to be cleared. After AH processing, * advance skb->network_header by the AH header length using skb_pull, * then copy the saved hdr_len bytes back. If a destination header * following AH exists, copy it in after [Ext2]. * * |<>|[IPv6][Ext1][Ext2][Dest][Payload] * An AH-sized offset remains in front of the IPv6 header afterwards. */ struct ip_auth_hdr *ah; struct ipv6hdr *ip6h; struct ah_data *ahp; unsigned char *tmp_hdr = NULL; u16 hdr_len; u16 ah_hlen; int nexthdr; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) goto out; /* We are going to _remove_ AH header to keep sockets happy, * so... Later this can change. */ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto out; skb->ip_summed = CHECKSUM_NONE; hdr_len = skb->data - skb_network_header(skb); ah = (struct ip_auth_hdr *)skb->data; ahp = x->data; nexthdr = ah->nexthdr; ah_hlen = (ah->hdrlen + 2) << 2; if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) && ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len)) goto out; if (!pskb_may_pull(skb, ah_hlen)) goto out; tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); if (!tmp_hdr) goto out; ip6h = ipv6_hdr(skb); if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) goto free_out; ip6h->priority = 0; ip6h->flow_lbl[0] = 0; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; ip6h->hop_limit = 0; spin_lock(&x->lock); { u8 auth_data[MAX_AH_AUTH_LEN]; memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); memset(ah->auth_data, 0, ahp->icv_trunc_len); skb_push(skb, hdr_len); err = ah_mac_digest(ahp, skb, ah->auth_data); if (err) goto unlock; if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) err = -EBADMSG; } unlock: spin_unlock(&x->lock); if (err) goto free_out; skb->network_header += ah_hlen; memcpy(skb_network_header(skb), tmp_hdr, hdr_len); skb->transport_header = skb->network_header; __skb_pull(skb, ah_hlen + hdr_len); kfree(tmp_hdr); return nexthdr; free_out: kfree(tmp_hdr); out: return err; }
static void if_usb_receive_fwload(struct urb *urb) { struct if_usb_card *cardp = urb->context; struct sk_buff *skb = cardp->rx_skb; struct fwsyncheader *syncfwheader; struct bootcmdresp bootcmdresp; if (urb->status) { lbs_deb_usbd(&cardp->udev->dev, "URB failed during fw load\n"); kfree_skb(skb); return; } if (cardp->fwdnldover) { __le32 *tmp = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET); if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) { pr_info("Firmware ready event received\n"); wake_up(&cardp->fw_wq); } else { lbs_deb_usb("Waiting for confirmation; got %x %x\n", le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1])); if_usb_submit_rx_urb_fwload(cardp); } kfree_skb(skb); return; } if (cardp->bootcmdresp <= 0) { memcpy(&bootcmdresp, skb->data + IPFIELD_ALIGN_OFFSET, sizeof(bootcmdresp)); if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { kfree_skb(skb); if_usb_submit_rx_urb_fwload(cardp); cardp->bootcmdresp = BOOT_CMD_RESP_OK; lbs_deb_usbd(&cardp->udev->dev, "Received valid boot command response\n"); return; } if (bootcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) { if (bootcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) || bootcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || bootcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) { if (!cardp->bootcmdresp) pr_info("Firmware already seems alive; resetting\n"); cardp->bootcmdresp = -1; } else { pr_info("boot cmd response wrong magic number (0x%x)\n", le32_to_cpu(bootcmdresp.magic)); } } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) && (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) && (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) { pr_info("boot cmd response cmd_tag error (%d)\n", bootcmdresp.cmd); } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) { pr_info("boot cmd response result error (%d)\n", bootcmdresp.result); } else { cardp->bootcmdresp = 1; lbs_deb_usbd(&cardp->udev->dev, "Received valid boot command response\n"); } kfree_skb(skb); if_usb_submit_rx_urb_fwload(cardp); return; } syncfwheader = kmemdup(skb->data + IPFIELD_ALIGN_OFFSET, sizeof(struct fwsyncheader), GFP_ATOMIC); if (!syncfwheader) { lbs_deb_usbd(&cardp->udev->dev, "Failed to allocate syncfwheader\n"); kfree_skb(skb); return; } if (!syncfwheader->cmd) { lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", le32_to_cpu(syncfwheader->seqnum)); cardp->CRC_OK = 1; } else { lbs_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n"); cardp->CRC_OK = 0; } kfree_skb(skb); /* Give the device 5s to either write firmware to its RAM or eeprom */ mod_timer(&cardp->fw_timeout, jiffies + (HZ*5)); if (cardp->fwfinalblk) { cardp->fwdnldover = 1; goto exit; } if_usb_send_fw_pkt(cardp); exit: if_usb_submit_rx_urb_fwload(cardp); kfree(syncfwheader); }
static void smssdio_work_thread(struct work_struct *arg) { int ret, isr; struct smscore_buffer_t *cb; struct SmsMsgHdr_S *hdr; size_t size; struct smssdio_device *smsdev = container_of(arg, struct smssdio_device, work_thread); struct sdio_func *sdfunc = smsdev->func; /* * The interrupt register has no defined meaning. It is just * a way of turning off the level-triggered interrupt. */ sdio_claim_host(smsdev->func); isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret); if (ret) { sms_err("Got error reading interrupt status=%d, isr=%d\n", ret, isr); isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret); if (ret) { sms_err("Second read also failed, try to recover\n"); sdio_release_host(smsdev->func); sdfunc = kmemdup(smsdev->func, sizeof(struct sdio_func), GFP_KERNEL); if (!sdfunc) { sms_err("Out of memory!!!"); return; } sdfunc->num = 0; sdio_claim_host(sdfunc); sdio_writeb(sdfunc, 2, SMSSDIO_CCCR, &ret); sms_err("Read ISR status (write returned) %d\n", ret); isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret); sms_err("Read returned ret=%d, isr=%d\n", ret, isr); sdio_writeb(sdfunc, 0, SMSSDIO_CCCR, &ret); sdio_release_host(sdfunc); kfree(sdfunc); sms_err("Recovered, but this transaction is lost."); return; } sms_err("Second read succeeded, status=%d, isr=%d (continue)\n", ret, isr); } if (smsdev->split_cb == NULL) { cb = smscore_getbuffer(smsdev->coredev); if (!cb) { sms_err("Unable to allocate data buffer!\n"); sdio_release_host(smsdev->func); return; } ret = sdio_memcpy_fromio(smsdev->func, cb->p, SMSSDIO_DATA, SMSSDIO_BLOCK_SIZE); if (ret) { sms_warn("Error %d reading initial block, " "continue with sequence.\n", ret); } hdr = cb->p; if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) { smsdev->split_cb = cb; sdio_release_host(smsdev->func); return; } if (hdr->msgLength > smsdev->func->cur_blksize) size = hdr->msgLength - smsdev->func->cur_blksize; else size = 0; } else { cb = smsdev->split_cb; hdr = cb->p; size = hdr->msgLength - sizeof(struct SmsMsgHdr_S); smsdev->split_cb = NULL; } if (size) { void *buffer; buffer = cb->p + (hdr->msgLength - size); size = ALIGN(size, SMSSDIO_BLOCK_SIZE); BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE); /* * First attempt to transfer all of it in one go... */ ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, size); if (ret && ret != -EINVAL) { smscore_putbuffer(smsdev->coredev, cb); sms_err("Error %d reading data from card!\n", ret); sdio_release_host(smsdev->func); return; } /* * ..then fall back to one block at a time if that is * not possible... * * (we have to do this manually because of the * problem with the "increase address" bit) */ if (ret == -EINVAL) { while (size) { ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, smsdev->func->cur_blksize); if (ret) { smscore_putbuffer(smsdev->coredev, cb); sms_err("Error %d reading " "data from card!\n", ret); sdio_release_host(smsdev->func); return; } buffer += smsdev->func->cur_blksize; if (size > smsdev->func->cur_blksize) size -= smsdev->func->cur_blksize; else size = 0; } } } sdio_release_host(smsdev->func); cb->size = hdr->msgLength; cb->offset = 0; smscore_onresponse(smsdev->coredev, cb); }
static int us122l_create_usbmidi(struct snd_card *card) { static struct snd_usb_midi_endpoint_info quirk_data = { .out_ep = 4, .in_ep = 3, .out_cables = 0x001, .in_cables = 0x001 }; static struct snd_usb_audio_quirk quirk = { .vendor_name = "US122L", .product_name = NAME_ALLCAPS, .ifnum = 1, .type = QUIRK_MIDI_US122L, .data = &quirk_data }; struct usb_device *dev = US122L(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 1); return snd_usbmidi_create(card, iface, &US122L(card)->midi_list, &quirk); } static int us144_create_usbmidi(struct snd_card *card) { static struct snd_usb_midi_endpoint_info quirk_data = { .out_ep = 4, .in_ep = 3, .out_cables = 0x001, .in_cables = 0x001 }; static struct snd_usb_audio_quirk quirk = { .vendor_name = "US144", .product_name = NAME_ALLCAPS, .ifnum = 0, .type = QUIRK_MIDI_US122L, .data = &quirk_data }; struct usb_device *dev = US122L(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 0); return snd_usbmidi_create(card, iface, &US122L(card)->midi_list, &quirk); } /* * Wrapper for usb_control_msg(). * Allocates a temp buffer to prevent dmaing from/to the stack. */ static int us122l_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int err; void *buf = NULL; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } err = usb_control_msg(dev, pipe, request, requesttype, value, index, buf, size, timeout); if (size > 0) { memcpy(data, buf, size); kfree(buf); } return err; } static void pt_info_set(struct usb_device *dev, u8 v) { int ret; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 'I', USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, v, 0, NULL, 0, 1000); snd_printdd(KERN_DEBUG "%i\n", ret); } static void usb_stream_hwdep_vm_open(struct vm_area_struct *area) { struct us122l *us122l = area->vm_private_data; atomic_inc(&us122l->mmap_count); snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); } static int usb_stream_hwdep_vm_fault(struct vm_area_struct *area, struct vm_fault *vmf) { unsigned long offset; struct page *page; void *vaddr; struct us122l *us122l = area->vm_private_data; struct usb_stream *s; mutex_lock(&us122l->mutex); s = us122l->sk.s; if (!s) goto unlock; offset = vmf->pgoff << PAGE_SHIFT; if (offset < PAGE_ALIGN(s->read_size)) vaddr = (char *)s + offset; else { offset -= PAGE_ALIGN(s->read_size); if (offset >= PAGE_ALIGN(s->write_size)) goto unlock; vaddr = us122l->sk.write_page + offset; } page = virt_to_page(vaddr); get_page(page); mutex_unlock(&us122l->mutex); vmf->page = page; return 0; unlock: mutex_unlock(&us122l->mutex); return VM_FAULT_SIGBUS; } static void usb_stream_hwdep_vm_close(struct vm_area_struct *area) { struct us122l *us122l = area->vm_private_data; atomic_dec(&us122l->mmap_count); snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); } static const struct vm_operations_struct usb_stream_hwdep_vm_ops = { .open = usb_stream_hwdep_vm_open, .fault = usb_stream_hwdep_vm_fault, .close = usb_stream_hwdep_vm_close, }; static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file) { struct us122l *us122l = hw->private_data; struct usb_interface *iface; snd_printdd(KERN_DEBUG "%p %p\n", hw, file); if (hw->used >= 2) return -EBUSY; if (!us122l->first) us122l->first = file; if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { iface = usb_ifnum_to_if(us122l->dev, 0); 
usb_autopm_get_interface(iface); } iface = usb_ifnum_to_if(us122l->dev, 1); usb_autopm_get_interface(iface); return 0; } static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file) { struct us122l *us122l = hw->private_data; struct usb_interface *iface; snd_printdd(KERN_DEBUG "%p %p\n", hw, file); if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { iface = usb_ifnum_to_if(us122l->dev, 0); usb_autopm_put_interface(iface); } iface = usb_ifnum_to_if(us122l->dev, 1); usb_autopm_put_interface(iface); if (us122l->first == file) us122l->first = NULL; mutex_lock(&us122l->mutex); if (us122l->master == file) us122l->master = us122l->slave; us122l->slave = NULL; mutex_unlock(&us122l->mutex); return 0; } static int usb_stream_hwdep_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area) { unsigned long size = area->vm_end - area->vm_start; struct us122l *us122l = hw->private_data; unsigned long offset; struct usb_stream *s; int err = 0; bool read; offset = area->vm_pgoff << PAGE_SHIFT; mutex_lock(&us122l->mutex); s = us122l->sk.s; read = offset < s->read_size; if (read && area->vm_flags & VM_WRITE) { err = -EPERM; goto out; } snd_printdd(KERN_DEBUG "%lu %u\n", size, read ? s->read_size : s->write_size); /* if userspace tries to mmap beyond end of our buffer, fail */ if (size > PAGE_ALIGN(read ? s->read_size : s->write_size)) { snd_printk(KERN_WARNING "%lu > %u\n", size, read ? s->read_size : s->write_size); err = -EINVAL; goto out; } area->vm_ops = &usb_stream_hwdep_vm_ops; area->vm_flags |= VM_RESERVED; area->vm_private_data = us122l; atomic_inc(&us122l->mmap_count); out: mutex_unlock(&us122l->mutex); return err; }
static void sess_auth_kerberos(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; __u32 capabilities; __u16 bytes_remaining; struct key *spnego_key = NULL; struct cifs_spnego_msg *msg; u16 blob_len; /* extended security */ /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, pSMB); spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities = cpu_to_le32(capabilities); sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) { *bcc_ptr = 0; bcc_ptr++; } unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp); } else { /* BB: is this right? */ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_put_spnego_key; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? 
*/ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_put_spnego_key; } bcc_ptr += blob_len; bytes_remaining -= blob_len; /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree(ses->auth_key.response); ses->auth_key.response = NULL; }
#include <linux/err.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/hash.h> #include <linux/key-type.h> #include <keys/ceph-type.h> #include <linux/ceph/decode.h> #include "crypto.h" int ceph_crypto_key_clone(struct ceph_crypto_key *dst, const struct ceph_crypto_key *src) { memcpy(dst, src, sizeof(struct ceph_crypto_key)); dst->key = kmemdup(src->key, src->len, GFP_NOFS); if (!dst->key) return -ENOMEM; return 0; }
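/*
 * Minimal usage sketch (hypothetical caller, not from the source): the
 * clone owns a private copy of the key material, so it must be released
 * independently of src.
 */
static int example_clone_key(const struct ceph_crypto_key *src)
{
	struct ceph_crypto_key copy;
	int ret = ceph_crypto_key_clone(&copy, src);

	if (ret)
		return ret;
	/* ... use copy ... */
	kfree(copy.key);	/* release the kmemdup()ed buffer */
	return 0;
}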
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *ax25; /* * Take the default packet length for the device if zero is * specified. */ if (paclen == 0) { if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return NULL; paclen = ax25_dev->values[AX25_VALUES_PACLEN]; } /* * Look for an existing connection. */ if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) { ax25_output(ax25, paclen, skb); return ax25; /* It already existed */ } if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return NULL; if ((ax25 = ax25_create_cb()) == NULL) return NULL; ax25_fillin_cb(ax25, ax25_dev); ax25->source_addr = *src; ax25->dest_addr = *dest; if (digi != NULL) { ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC); if (ax25->digipeat == NULL) { ax25_cb_put(ax25); return NULL; } } switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: ax25_std_establish_data_link(ax25); break; #ifdef CONFIG_AX25_DAMA_SLAVE case AX25_PROTO_DAMA_SLAVE: if (ax25_dev->dama.slave) ax25_ds_establish_data_link(ax25); else ax25_std_establish_data_link(ax25); break; #endif } /* * There is one ref for the state machine; a caller needs * one more to put it back, just like with the existing one. */ ax25_cb_hold(ax25); ax25_cb_add(ax25); ax25->state = AX25_STATE_1; ax25_start_heartbeat(ax25); ax25_output(ax25, paclen, skb); return ax25; /* We had to create it */ }
/* samsung_bl_set - Set board specific data (if any) provided by user for * PWM Backlight control and register specific PWM and backlight device. * @gpio_info: structure containing GPIO info for PWM timer * @bl_data: structure containing Backlight control data */ void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, struct platform_pwm_backlight_data *bl_data) { int ret = 0; struct platform_device *samsung_bl_device; struct platform_pwm_backlight_data *samsung_bl_data; samsung_bl_device = kmemdup(&samsung_dfl_bl_device, sizeof(struct platform_device), GFP_KERNEL); if (!samsung_bl_device) { printk(KERN_ERR "%s: no memory for platform dev\n", __func__); return; } samsung_bl_data = s3c_set_platdata(&samsung_dfl_bl_data, sizeof(struct platform_pwm_backlight_data), samsung_bl_device); if (!samsung_bl_data) { printk(KERN_ERR "%s: no memory for platform dev\n", __func__); goto err_data; } /* Copy board specific data provided by user */ samsung_bl_data->pwm_id = bl_data->pwm_id; samsung_bl_device->dev.parent = &s3c_device_timer[samsung_bl_data->pwm_id].dev; if (bl_data->max_brightness) samsung_bl_data->max_brightness = bl_data->max_brightness; if (bl_data->dft_brightness) samsung_bl_data->dft_brightness = bl_data->dft_brightness; if (bl_data->lth_brightness) samsung_bl_data->lth_brightness = bl_data->lth_brightness; if (bl_data->pwm_period_ns) samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns; if (bl_data->init) samsung_bl_data->init = bl_data->init; if (bl_data->notify) samsung_bl_data->notify = bl_data->notify; if (bl_data->exit) samsung_bl_data->exit = bl_data->exit; if (bl_data->check_fb) samsung_bl_data->check_fb = bl_data->check_fb; /* Keep the GPIO info for future use */ s3c_device_timer[samsung_bl_data->pwm_id].dev.platform_data = gpio_info; /* Register the specific PWM timer dev for Backlight control */ ret = platform_device_register( &s3c_device_timer[samsung_bl_data->pwm_id]); if (ret) { printk(KERN_ERR "failed to register pwm timer for backlight: %d\n", ret); goto err_plat_reg1; } /* Register the Backlight dev */ ret = platform_device_register(samsung_bl_device); if (ret) { printk(KERN_ERR "failed to register backlight device: %d\n", ret); goto err_plat_reg2; } return; err_plat_reg2: platform_device_unregister(&s3c_device_timer[samsung_bl_data->pwm_id]); err_plat_reg1: kfree(samsung_bl_data); err_data: kfree(samsung_bl_device); return; }
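/*
 * Board-level usage sketch (hypothetical names and values): machine init
 * code passes its PWM timer GPIO setup and backlight tuning through
 * samsung_bl_set(); as seen above, only non-zero fields override the
 * defaults.
 */
static struct samsung_bl_gpio_info smdk_bl_gpio_info;	/* filled in with the board's PWM timer GPIO details */

static struct platform_pwm_backlight_data smdk_bl_data = {
	.pwm_id = 1,
	.max_brightness = 255,
	.dft_brightness = 128,
	.pwm_period_ns = 1000000,	/* hypothetical 1 kHz PWM period */
};

static void smdk_backlight_init(void)
{
	samsung_bl_set(&smdk_bl_gpio_info, &smdk_bl_data);
}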
int copy_thread_tls(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); struct task_struct *tsk; int err; p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(p->flags & PF_KTHREAD)) { /* kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); p->thread.ip = (unsigned long) ret_from_kernel_thread; task_user_gs(p) = __KERNEL_STACK_CANARY; childregs->ds = __USER_DS; childregs->es = __USER_DS; childregs->fs = __KERNEL_PERCPU; childregs->bx = sp; /* function */ childregs->bp = arg; childregs->orig_ax = -1; childregs->cs = __KERNEL_CS | get_kernel_rpl(); childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; p->thread.io_bitmap_ptr = NULL; return 0; } *childregs = *current_pt_regs(); childregs->ax = 0; if (sp) childregs->sp = sp; p->thread.ip = (unsigned long) ret_from_fork; task_user_gs(p) = get_user_gs(current_pt_regs()); p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } err = 0; /* * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) err = do_set_thread_area(p, -1, (struct user_desc __user *)tls, 0); if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; }
/* * Main function to be called from the MTD mapping driver/device to * obtain the partitioning information. At this point the command line * arguments will actually be parsed and turned to struct mtd_partition * information. It returns partitions for the requested mtd device, or * the first one in the chain if a NULL mtd_id is passed in. */ static int parse_cmdline_partitions(struct mtd_info *master, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { unsigned long long offset; int i, err; struct cmdline_mtd_partition *part; const char *mtd_id = master->name; /* parse command line */ if (!cmdline_parsed) { err = mtdpart_setup_real(cmdline); if (err) return err; } /* * Search for the partition definition matching master->name. * If master->name is not set, stop at first partition definition. */ for (part = partitions; part; part = part->next) { if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) break; } if (!part) return 0; for (i = 0, offset = 0; i < part->num_parts; i++) { if (part->parts[i].offset == OFFSET_CONTINUOUS) part->parts[i].offset = offset; else offset = part->parts[i].offset; if (part->parts[i].size == SIZE_REMAINING) part->parts[i].size = master->size - offset; if (offset + part->parts[i].size > master->size) { pr_warn("%s: partitioning exceeds flash size, truncating\n", part->mtd_id); part->parts[i].size = master->size - offset; } offset += part->parts[i].size; if (part->parts[i].size == 0) { pr_warn("%s: skipping zero sized partition\n", part->mtd_id); part->num_parts--; memmove(&part->parts[i], &part->parts[i + 1], sizeof(*part->parts) * (part->num_parts - i)); i--; } } *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts, GFP_KERNEL); if (!*pparts) return -ENOMEM; return part->num_parts; }
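/*
 * Example command line parsed into the partitions list used above
 * (illustrative sizes; the general syntax is
 * mtdparts=<mtd-id>:<size>[@<offset>][(<name>)],...):
 *
 *	mtdparts=physmap-flash.0:512k(bootloader),4m(kernel),-(rootfs)
 *
 * where "-" requests SIZE_REMAINING and an omitted offset continues from
 * the end of the previous partition (OFFSET_CONTINUOUS).
 */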
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) { acpi_status status; struct acpi_object_list input; union acpi_object in_params[4]; union acpi_object *out_obj; u8 uuid[16]; u32 errors; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; if (!context) return AE_ERROR; if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid))) return AE_ERROR; context->ret.length = ACPI_ALLOCATE_BUFFER; context->ret.pointer = NULL; /* Setting up input parameters */ input.count = 4; input.pointer = in_params; in_params[0].type = ACPI_TYPE_BUFFER; in_params[0].buffer.length = 16; in_params[0].buffer.pointer = uuid; in_params[1].type = ACPI_TYPE_INTEGER; in_params[1].integer.value = context->rev; in_params[2].type = ACPI_TYPE_INTEGER; in_params[2].integer.value = context->cap.length/sizeof(u32); in_params[3].type = ACPI_TYPE_BUFFER; in_params[3].buffer.length = context->cap.length; in_params[3].buffer.pointer = context->cap.pointer; status = acpi_evaluate_object(handle, "_OSC", &input, &output); if (ACPI_FAILURE(status)) return status; if (!output.length) return AE_NULL_OBJECT; out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER || out_obj->buffer.length != context->cap.length) { acpi_print_osc_error(handle, context, "_OSC evaluation returned wrong type"); status = AE_TYPE; goto out_kfree; } /* Need to ignore the bit0 in result code */ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); if (errors) { if (errors & OSC_REQUEST_ERROR) acpi_print_osc_error(handle, context, "_OSC request failed"); if (errors & OSC_INVALID_UUID_ERROR) acpi_print_osc_error(handle, context, "_OSC invalid UUID"); if (errors & OSC_INVALID_REVISION_ERROR) acpi_print_osc_error(handle, context, "_OSC invalid revision"); if (errors & OSC_CAPABILITIES_MASK_ERROR) { if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD] & OSC_QUERY_ENABLE) goto out_success; status = AE_SUPPORT; goto out_kfree; } status = AE_ERROR; goto out_kfree; } out_success: context->ret.length = out_obj->buffer.length; context->ret.pointer = kmemdup(out_obj->buffer.pointer, context->ret.length, GFP_KERNEL); if (!context->ret.pointer) { status = AE_NO_MEMORY; goto out_kfree; } status = AE_OK; out_kfree: kfree(output.pointer); if (status != AE_OK) context->ret.pointer = NULL; return status; }
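/*
 * Usage sketch (hypothetical capability words; the UUID shown is assumed
 * to be the platform-wide _OSC UUID): callers fill an acpi_osc_context,
 * and on success own and must free context.ret.pointer.
 */
static acpi_status example_osc_query(acpi_handle handle)
{
	u32 caps[2] = { OSC_QUERY_ENABLE, 0 };	/* DWORD0: query, DWORD1: support bits */
	struct acpi_osc_context context = {
		.uuid_str = "0811b06e-4a27-44f9-8d60-3cbbc22e7b48",
		.rev = 1,
		.cap.length = sizeof(caps),
		.cap.pointer = caps,
	};
	acpi_status status = acpi_run_osc(handle, &context);

	if (ACPI_SUCCESS(status))
		kfree(context.ret.pointer);	/* caller frees the returned buffer */
	return status;
}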
static int uda134x_soc_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec; struct uda134x_priv *uda134x; void *codec_setup_data = socdev->codec_data; int ret = -ENOMEM; struct uda134x_platform_data *pd; printk(KERN_INFO "UDA134X SoC Audio Codec\n"); if (!codec_setup_data) { printk(KERN_ERR "UDA134X SoC codec: " "missing L3 bitbang function\n"); return -ENODEV; } pd = codec_setup_data; switch (pd->model) { case UDA134X_UDA1340: case UDA134X_UDA1341: case UDA134X_UDA1344: case UDA134X_UDA1345: break; default: printk(KERN_ERR "UDA134X SoC codec: " "unsupported model %d\n", pd->model); return -EINVAL; } socdev->card->codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL); if (socdev->card->codec == NULL) return ret; codec = socdev->card->codec; uda134x = kzalloc(sizeof(struct uda134x_priv), GFP_KERNEL); if (uda134x == NULL) goto priv_err; snd_soc_codec_set_drvdata(codec, uda134x); codec->reg_cache = kmemdup(uda134x_reg, sizeof(uda134x_reg), GFP_KERNEL); if (codec->reg_cache == NULL) goto reg_err; mutex_init(&codec->mutex); codec->reg_cache_size = sizeof(uda134x_reg); codec->reg_cache_step = 1; codec->name = "UDA134X"; codec->owner = THIS_MODULE; codec->dai = &uda134x_dai; codec->num_dai = 1; codec->read = uda134x_read_reg_cache; codec->write = uda134x_write; INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); codec->control_data = codec_setup_data; if (pd->power) pd->power(1); uda134x_reset(codec); if (pd->is_powered_on_standby) { codec->set_bias_level = NULL; uda134x_set_bias_level(codec, SND_SOC_BIAS_ON); } else { codec->set_bias_level = uda134x_set_bias_level; uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); } /* register pcms */ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { printk(KERN_ERR "UDA134X: failed to register pcms\n"); goto pcm_err; } switch (pd->model) { case UDA134X_UDA1340: case UDA134X_UDA1344: ret = snd_soc_add_controls(codec, uda1340_snd_controls, ARRAY_SIZE(uda1340_snd_controls)); break; case UDA134X_UDA1341: ret = snd_soc_add_controls(codec, uda1341_snd_controls, ARRAY_SIZE(uda1341_snd_controls)); break; case UDA134X_UDA1345: ret = snd_soc_add_controls(codec, uda1345_snd_controls, ARRAY_SIZE(uda1345_snd_controls)); break; default: printk(KERN_ERR "%s unknown codec type: %d", __func__, pd->model); return -EINVAL; } if (ret < 0) { printk(KERN_ERR "UDA134X: failed to register controls\n"); goto pcm_err; } return 0; pcm_err: kfree(codec->reg_cache); reg_err: kfree(snd_soc_codec_get_drvdata(codec)); priv_err: kfree(codec); return ret; }
static int wl1271_boot_upload_nvs(struct wl1271 *wl) { size_t nvs_len, burst_len; int i; u32 dest_addr, val; u8 *nvs_ptr, *nvs, *nvs_aligned; nvs = wl->nvs; if (nvs == NULL) return -ENODEV; nvs_ptr = nvs; nvs_len = wl->nvs_len; /* Update the device MAC address into the nvs */ nvs[11] = wl->mac_addr[0]; nvs[10] = wl->mac_addr[1]; nvs[6] = wl->mac_addr[2]; nvs[5] = wl->mac_addr[3]; nvs[4] = wl->mac_addr[4]; nvs[3] = wl->mac_addr[5]; /* * Layout before the actual NVS tables: * 1 byte : burst length. * 2 bytes: destination address. * n bytes: data to burst copy. * * This is ended by a 0 length, then the NVS tables. */ /* FIXME: Do we need to check here whether the LSB is 1? */ while (nvs_ptr[0]) { burst_len = nvs_ptr[0]; dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); /* FIXME: Due to our new wl1271_translate_reg_addr function, we need to add the REGISTER_BASE to the destination */ dest_addr += REGISTERS_BASE; /* We move our pointer to the data */ nvs_ptr += 3; for (i = 0; i < burst_len; i++) { val = (nvs_ptr[0] | (nvs_ptr[1] << 8) | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); wl1271_debug(DEBUG_BOOT, "nvs burst write 0x%x: 0x%x", dest_addr, val); wl1271_spi_write32(wl, dest_addr, val); nvs_ptr += 4; dest_addr += 4; } } /* * We've reached the first zero length, the first NVS table * is 7 bytes further. */ nvs_ptr += 7; nvs_len -= nvs_ptr - nvs; nvs_len = ALIGN(nvs_len, 4); /* FIXME: The driver sets the partition here, but this is not needed, since it sets to the same one as currently in use */ /* Now we must set the partition correctly */ wl1271_set_partition(wl, &part_table[PART_WORK]); /* Copy the NVS tables to a new block to ensure alignment */ nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); if (!nvs_aligned) return -ENOMEM; /* And finally we upload the NVS tables */ /* FIXME: In wl1271, we upload everything at once. No endianness handling needed here?! The ref driver doesn't do anything about it at this point */ wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false); kfree(nvs_aligned); return 0; }
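/*
 * Worked example of the burst header decode above (hypothetical bytes):
 * nvs_ptr[] = { 0x02, 0x1c, 0x30, ... } means burst_len = 2 32-bit words,
 * dest_addr = (0x1c & 0xfe) | (0x30 << 8) = 0x301c (plus REGISTERS_BASE),
 * followed by 8 little-endian data bytes; a 0x00 length byte then ends the
 * burst section and the NVS tables follow.
 */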
int * ip_vs_create_timeout_table(int *table, int size) { return kmemdup(table, size, GFP_ATOMIC); }
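Since the helper is a thin kmemdup() wrapper, each caller receives a private, writable copy of a (typically static) timeout table, and GFP_ATOMIC keeps it callable from atomic context. A hedged usage sketch with made-up values:

/* Illustrative caller: tune a private copy without touching the shared
 * original table. Values are hypothetical. */
static const int base_timeouts[] = { 2, 60, 120 };

static int *dup_and_tune(void)
{
	int *t = ip_vs_create_timeout_table((int *)base_timeouts,
					    sizeof(base_timeouts));

	if (t)
		t[1] = 90;	/* safe: this is our own copy */
	return t;
}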
int amlogic_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps) { struct pinctrl_map *new_map = NULL; unsigned new_num = 1; unsigned long config = 0; unsigned long *pconfig; const char *pinctrl_set = "amlogic,setmask"; const char *pinctrl_clr = "amlogic,clrmask"; bool purecfg = false; u32 val, reg; int ret, i = 0; /* A node with neither setmask nor clrmask is a pure config node */ if (of_property_read_u32(np, pinctrl_set, &reg) && of_property_read_u32(np, pinctrl_clr, &val)) purecfg = true; ret = of_property_read_u32(np, "amlogic,pullup", &val); if (!ret) config = AML_PINCONF_PACK_PULL(AML_PCON_PULLUP, val); ret = of_property_read_u32(np, "amlogic,pullupen", &val); if (!ret) config |= AML_PINCONF_PACK_PULLEN(AML_PCON_PULLUP, val); ret = of_property_read_u32(np, "amlogic,enable-output", &val); if (!ret) config |= AML_PINCONF_PACK_ENOUT(AML_PCON_ENOUT, val); /* Check for group node which has both mux and config settings */ if (!purecfg && config) new_num = 2; new_map = kzalloc(sizeof(*new_map) * new_num, GFP_KERNEL); if (!new_map) { pr_err("failed to allocate pinctrl map\n"); return -ENOMEM; } if (config) { pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); if (!pconfig) { ret = -ENOMEM; goto free_group; } new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; new_map[i].data.configs.group_or_pin = np->name; new_map[i].data.configs.configs = pconfig; new_map[i].data.configs.num_configs = 1; i++; } if (!purecfg) { new_map[i].type = PIN_MAP_TYPE_MUX_GROUP; new_map[i].data.mux.function = np->name; new_map[i].data.mux.group = np->name; } *map = new_map; *num_maps = new_num; return 0; free_group: kfree(new_map); return ret; }
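When a node carries both config and mux data, the function returns a two-entry map: entry 0 is PIN_MAP_TYPE_CONFIGS_GROUP pointing at the kmemdup()ed packed config word, entry 1 is PIN_MAP_TYPE_MUX_GROUP with function and group both set to the node name. A hedged consumer-side sketch that dumps such a map, using the field names from include/linux/pinctrl/machine.h:

/* Illustrative dump of a map produced above; assumes only the two
 * map types this function can emit. */
static void dump_amlogic_map(const struct pinctrl_map *map, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
			pr_info("cfg %s: %u config word(s)\n",
				map[i].data.configs.group_or_pin,
				map[i].data.configs.num_configs);
		else if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
			pr_info("mux %s -> %s\n",
				map[i].data.mux.group,
				map[i].data.mux.function);
	}
}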
/* * Reads external NVM from a file into mvm->nvm_sections * * HOW TO CREATE THE NVM FILE FORMAT: * ------------------------------ * 1. create hex file, format: * 3800 -> header * 0000 -> header * 5a40 -> data * * rev - 6 bit (word1) * len - 10 bit (word1) * id - 4 bit (word2) * rsv - 12 bit (word2) * * 2. flip 8bits with 8 bits per line to get the right NVM file format * * 3. create binary file from the hex file * * 4. save as "iNVM_xxx.bin" under /lib/firmware */ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) { int ret, section_size; u16 section_id; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof, *temp; #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) #define NVM_WORD2_ID(x) (x >> 12) IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); /* * Obtain NVM image via request_firmware. Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. */ ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, mvm->trans->dev); if (ret) { IWL_ERR(mvm, "ERROR: %s isn't available %d\n", iwlwifi_mod_params.nvm_file, ret); return ret; } IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", iwlwifi_mod_params.nvm_file, fw_entry->size); if (fw_entry->size < sizeof(*file_sec)) { IWL_ERR(mvm, "NVM file too small\n"); ret = -EINVAL; goto out; } if (fw_entry->size > MAX_NVM_FILE_LEN) { IWL_ERR(mvm, "NVM file too large\n"); ret = -EINVAL; goto out; } eof = fw_entry->data + fw_entry->size; file_sec = (void *)fw_entry->data; while (true) { if (file_sec->data > eof) { IWL_ERR(mvm, "ERROR - NVM file too short for section header\n"); ret = -EINVAL; break; } /* check for EOF marker */ if (!file_sec->word1 && !file_sec->word2) { ret = 0; break; } section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); if (section_size > IWL_MAX_NVM_SECTION_SIZE) { IWL_ERR(mvm, "ERROR - section too large (%d)\n", section_size); ret = -EINVAL; break; } if (!section_size) { IWL_ERR(mvm, "ERROR - section empty\n"); ret = -EINVAL; break; } if (file_sec->data + section_size > eof) { IWL_ERR(mvm, "ERROR - NVM file too short for section (%d bytes)\n", section_size); ret = -EINVAL; break; } /* validate the section ID before copying, so a bad ID cannot leak the copy */ if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) { IWL_ERR(mvm, "Invalid NVM section ID\n"); ret = -EINVAL; break; } temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } mvm->nvm_sections[section_id].data = temp; mvm->nvm_sections[section_id].length = section_size; /* advance to the next section */ file_sec = (void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); return ret; }
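The two little-endian header words pack a 10-bit length field and a 4-bit section id; with the macros above, the byte count works out to sixteen times the raw length field. A worked instance with illustrative header values:

#include <stdint.h>
#include <stdio.h>

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)

int main(void)
{
	uint16_t word1 = 0x0038, word2 = 0x1000;	/* illustrative header */

	/* section_size = 2 * 8 * 0x38 = 896 bytes, section_id = 1 */
	printf("section %d, %d bytes\n",
	       NVM_WORD2_ID(word2), 2 * NVM_WORD1_LEN(word1));
	return 0;
}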
struct clk *rockchip_clk_register_cpuclk(const char *name, const char *const *parent_names, u8 num_parents, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock) { struct rockchip_cpuclk *cpuclk; struct clk_init_data init; struct clk *clk, *cclk; int ret; if (num_parents < 2) { pr_err("%s: needs at least two parent clocks\n", __func__); return ERR_PTR(-EINVAL); } cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL); if (!cpuclk) return ERR_PTR(-ENOMEM); init.name = name; init.parent_names = &parent_names[reg_data->mux_core_main]; init.num_parents = 1; init.ops = &rockchip_cpuclk_ops; /* only allow rate changes when we have a rate table */ init.flags = (nrates > 0) ? CLK_SET_RATE_PARENT : 0; /* disallow automatic parent changes by ccf */ init.flags |= CLK_SET_RATE_NO_REPARENT; init.flags |= CLK_GET_RATE_NOCACHE; cpuclk->reg_base = reg_base; cpuclk->lock = lock; cpuclk->reg_data = reg_data; cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb; cpuclk->hw.init = &init; cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]); if (!cpuclk->alt_parent) { pr_err("%s: could not lookup alternate parent: (%d)\n", __func__, reg_data->mux_core_alt); ret = -EINVAL; goto free_cpuclk; } ret = clk_prepare_enable(cpuclk->alt_parent); if (ret) { pr_err("%s: could not enable alternate parent\n", __func__); goto free_cpuclk; } clk = __clk_lookup(parent_names[reg_data->mux_core_main]); if (!clk) { pr_err("%s: could not lookup parent clock: (%d) %s\n", __func__, reg_data->mux_core_main, parent_names[reg_data->mux_core_main]); ret = -EINVAL; goto free_alt_parent; } ret = clk_notifier_register(clk, &cpuclk->clk_nb); if (ret) { pr_err("%s: failed to register clock notifier for %s\n", __func__, name); goto free_alt_parent; } if (nrates > 0) { cpuclk->rate_count = nrates; cpuclk->rate_table = kmemdup(rates, sizeof(*rates) * nrates, GFP_KERNEL); if (!cpuclk->rate_table) { pr_err("%s: could not allocate memory for cpuclk rates\n", __func__); ret = -ENOMEM; goto unregister_notifier; } } cclk = clk_register(NULL, &cpuclk->hw); if (IS_ERR(cclk)) { pr_err("%s: could not register cpuclk %s\n", __func__, name); ret = PTR_ERR(cclk); goto free_rate_table; } return cclk; free_rate_table: kfree(cpuclk->rate_table); unregister_notifier: clk_notifier_unregister(clk, &cpuclk->clk_nb); free_alt_parent: clk_disable_unprepare(cpuclk->alt_parent); free_cpuclk: kfree(cpuclk); return ERR_PTR(ret); }
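Because the rate table is kmemdup()ed, the caller's copy does not need to outlive registration: board code can hand in an array from init memory and let it be discarded. A hedged caller sketch; the table contents, the .prate-only initializers, and the surrounding names are made up, and the struct layout is assumed from the driver's clk.h:

/* Hypothetical board code: the source table may live in init memory,
 * since rockchip_clk_register_cpuclk() keeps its own copy. */
static const struct rockchip_cpuclk_rate_table board_rates[] __initconst = {
	{ .prate = 1008000000 },
	{ .prate =  816000000 },
};

static struct clk * __init register_board_cpuclk(const char *const *parents,
					const struct rockchip_cpuclk_reg_data *rd,
					void __iomem *base, spinlock_t *lock)
{
	return rockchip_clk_register_cpuclk("armclk", parents, 2, rd,
					    board_rates,
					    ARRAY_SIZE(board_rates),
					    base, lock);
}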
int iwl_nvm_init(struct iwl_mvm *mvm) { int ret, i, section; u8 *nvm_buffer, *temp; /* load external NVM if configured */ if (iwlwifi_mod_params.nvm_file) { /* move to External NVM flow */ ret = iwl_mvm_read_external_nvm(mvm); if (ret) return ret; } else { /* Read From FW NVM */ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); /* TODO: find correct NVM max size for a section */ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL); if (!nvm_buffer) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) { section = nvm_to_read[i]; /* we override the constness for initial read */ ret = iwl_nvm_read_section(mvm, section, nvm_buffer); if (ret < 0) break; temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; #ifdef CONFIG_IWLWIFI_DEBUGFS switch (section) { case NVM_SECTION_TYPE_HW: mvm->nvm_hw_blob.data = temp; mvm->nvm_hw_blob.size = ret; break; case NVM_SECTION_TYPE_SW: mvm->nvm_sw_blob.data = temp; mvm->nvm_sw_blob.size = ret; break; case NVM_SECTION_TYPE_CALIBRATION: mvm->nvm_calib_blob.data = temp; mvm->nvm_calib_blob.size = ret; break; case NVM_SECTION_TYPE_PRODUCTION: mvm->nvm_prod_blob.data = temp; mvm->nvm_prod_blob.size = ret; break; default: WARN(1, "section: %d", section); } #endif } kfree(nvm_buffer); if (ret < 0) return ret; } mvm->nvm_data = iwl_parse_nvm_sections(mvm); if (!mvm->nvm_data) return -ENODATA; return 0; }
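Each section is read into a single max-sized scratch buffer and then trimmed to its true length with kmemdup(), so only ret bytes stay allocated per section while the scratch buffer is reused. A hedged distillation of that pattern; read_section() is a hypothetical stand-in for iwl_nvm_read_section():

#include <linux/slab.h>

/* read_section() is a hypothetical reader returning bytes read or <0. */
static int keep_section(u8 *scratch, int scratch_len, u8 **out, u16 *out_len)
{
	int n = read_section(scratch, scratch_len);
	u8 *copy;

	if (n < 0)
		return n;
	copy = kmemdup(scratch, n, GFP_KERNEL);	/* right-sized, long-lived copy */
	if (!copy)
		return -ENOMEM;
	*out = copy;
	*out_len = n;
	return 0;
}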
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; int hroom, troom; __be32 frag_id = 0; int ptr, offset = 0, err = 0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb is not generated by a local socket. */ if (unlikely(!skb->local_df && skb->len > mtu) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { if (skb->sk && dst_allfrag(skb_dst(skb))) sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (np && np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } mtu -= hlen + sizeof(struct frag_hdr); if (skb_has_frag_list(skb)) { int first_len = skb_pagelen(skb); struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path_clean; /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; } skb->truesize -= frag->truesize; } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } __skb_pull(skb, hlen); fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(fh, rt); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); dst_hold(&rt->dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. */ if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); ip6_rt_put(rt); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); ip6_rt_put(rt); return err; slow_path_clean: skb_walk_frags(skb, frag2) { if (frag2 == frag) break; frag2->sk = NULL; frag2->destructor = NULL; skb->truesize += frag2->truesize; } } slow_path: if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb)) goto fail; left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; hroom = LL_RESERVED_SPACE(rt->dst.dev); troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. */ while (left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending up to and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. */ if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + hroom + troom, GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, hroom); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); frag->transport_header = (frag->network_header + hlen + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(fh, rt); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. */ err = output(frag); if (err) goto fail; IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGCREATES); } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); consume_skb(skb); return err; fail: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return err; }
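Both the fast path and the slow path keep every non-final fragment a multiple of eight bytes (len &= ~7), which is what lets the code store the byte offset directly in frag_off: the field's upper 13 bits carry offset/8 and its lowest bit is the more-fragments flag, and an 8-aligned byte offset already has its low three bits clear. A hedged sketch of that encoding:

#include <stdint.h>
#include <arpa/inet.h>

#define IP6_MF 0x0001

/* byte_off must be a multiple of 8, as enforced above by len &= ~7. */
static uint16_t encode_frag_off(unsigned int byte_off, int more_fragments)
{
	return htons((uint16_t)(byte_off & ~7u) |
		     (more_fragments ? IP6_MF : 0));
}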
static int gpio_regulator_probe(struct platform_device *pdev) { struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev); struct device_node *np = pdev->dev.of_node; struct gpio_regulator_data *drvdata; struct regulator_config cfg = { }; int ptr, ret, state; drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), GFP_KERNEL); if (drvdata == NULL) return -ENOMEM; if (np) { config = of_get_gpio_regulator_config(&pdev->dev, np, &drvdata->desc); if (IS_ERR(config)) return PTR_ERR(config); } drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { dev_err(&pdev->dev, "Failed to allocate supply name\n"); ret = -ENOMEM; goto err; } if (config->nr_gpios != 0) { drvdata->gpios = kmemdup(config->gpios, config->nr_gpios * sizeof(struct gpio), GFP_KERNEL); if (drvdata->gpios == NULL) { dev_err(&pdev->dev, "Failed to allocate gpio data\n"); ret = -ENOMEM; goto err_name; } drvdata->nr_gpios = config->nr_gpios; ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); if (ret) { dev_err(&pdev->dev, "Could not obtain regulator setting GPIOs: %d\n", ret); goto err_memstate; } } drvdata->states = kmemdup(config->states, config->nr_states * sizeof(struct gpio_regulator_state), GFP_KERNEL); if (drvdata->states == NULL) { dev_err(&pdev->dev, "Failed to allocate state data\n"); ret = -ENOMEM; goto err_memgpio; } drvdata->nr_states = config->nr_states; drvdata->desc.owner = THIS_MODULE; drvdata->desc.enable_time = config->startup_delay; /* handle regulator type*/ switch (config->type) { case REGULATOR_VOLTAGE: drvdata->desc.type = REGULATOR_VOLTAGE; drvdata->desc.ops = &gpio_regulator_voltage_ops; drvdata->desc.n_voltages = config->nr_states;
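The probe duplicates both platform-data arrays (config->gpios and config->states) so the driver owns copies that outlive any init-time data the board passed in; note the element-count times element-size arithmetic fed to kmemdup(). A hedged one-liner capturing that pattern, with an illustrative macro name:

#include <linux/slab.h>

/* Illustrative wrapper: duplicate an n-element array the way the probe
 * above copies config->gpios and config->states. */
#define dup_array(src, n) kmemdup(src, (n) * sizeof(*(src)), GFP_KERNEL)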
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; }
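The (src->hdrlen + 1) * 8 size follows RFC 2460: an extension header's hdrlen field counts 8-octet units beyond the first eight bytes. A worked check of the arithmetic:

/* hdrlen = 0 -> 8 bytes total; hdrlen = 2 -> 24 bytes, which is what
 * ip6_opt_dup() would copy for such a header. */
static size_t opt_hdr_total_len(const struct ipv6_opt_hdr *h)
{
	return (h->hdrlen + 1) * 8;
}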
static int usbduxsigma_firmware_upload(struct comedi_device *dev, const u8 *data, size_t size, unsigned long context) { struct usb_device *usb = comedi_to_usb_dev(dev); uint8_t *buf; uint8_t *tmp; int ret; if (!data) return 0; if (size > FIRMWARE_MAX_LEN) { dev_err(dev->class_dev, "firmware binary too large for FX2\n"); return -ENOMEM; } /* we generate a local buffer for the firmware */ buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; /* we need a malloc'ed buffer for usb_control_msg() */ tmp = kmalloc(1, GFP_KERNEL); if (!tmp) { kfree(buf); return -ENOMEM; } /* stop the current firmware on the device */ *tmp = 1; /* 7f92 to one */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, USBDUXSUB_CPUCS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "can not stop firmware\n"); goto done; } /* upload the new firmware to the device */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, 0, 0x0000, buf, size, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "firmware upload failed\n"); goto done; } /* start the new firmware on the device */ *tmp = 0; /* 7f92 to zero */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, USBDUXSUB_CPUCS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret < 0) dev_err(dev->class_dev, "can not start firmware\n"); done: kfree(tmp); kfree(buf); return ret; }
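Both buffers above are heap copies because the data handed to usb_control_msg() must be DMA-capable: neither stack memory nor the const firmware blob may be passed to the USB core, hence the kmemdup() of the image and the one-byte kmalloc() for the CPUCS writes. A hedged sketch of that rule; the request/value parameters and the 1000 ms timeout are illustrative, not the driver's constants:

#include <linux/slab.h>
#include <linux/usb.h>

/* Send one byte via a vendor control transfer: the byte must live in
 * kmalloc'ed memory, never on the stack. */
static int send_one_byte(struct usb_device *usb, u8 req, u16 val, u8 byte)
{
	u8 *tmp = kmalloc(1, GFP_KERNEL);
	int ret;

	if (!tmp)
		return -ENOMEM;
	*tmp = byte;
	ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), req,
			      USB_TYPE_VENDOR | USB_DIR_OUT, val, 0,
			      tmp, 1, 1000);
	kfree(tmp);
	return ret;
}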