int read_default_json_desc(void * root,void * record) { int ret; void * data; void * struct_template; DB_RECORD * db_record=record; void * temp_node; if(db_record->head.type <=0) return -EINVAL; struct_template=memdb_get_template(db_record->head.type,db_record->head.subtype); if(struct_template==NULL) return -EINVAL; ret=Galloc0(&data,struct_size(struct_template)); if(ret<0) return ret; ret=json_2_struct(root,data,struct_template); // namelist->elem_no=json_get_elemno(temp_node); db_record->record=data; ret=memdb_comp_uuid(db_record); if(ret<0) return ret; ret=memdb_store_record(db_record); return ret; }
/*-------------------------------------------------------------------------*/
void
struct_free (struct_t *pStruct)

/* Free the struct <pStruct> and all referenced data.
 */

{
    struct_type_t *saved_type;
    unsigned short idx;

#ifdef DEBUG
    if (!pStruct)
        fatal("NULL pointer passed to struct_free().\n");
    if (!pStruct->user)
        fatal("No wizlist pointer for struct in struct_free().");
    if (pStruct->ref != 0)
        fatal("Struct with %"PRIdPINT" refs passed to struct_free().\n"
             , pStruct->ref);
#endif

    /* Release every member value, last to first. */
    idx = struct_size(pStruct);
    while (idx > 0)
    {
        idx--;
        free_svalue(&pStruct->member[idx]);
    }

    /* struct_free_empty() needs a valid .type, so remember the type
     * before the struct memory itself goes away, then drop our
     * reference on it.
     */
    saved_type = pStruct->type;
    struct_free_empty(pStruct);
    free_struct_type(saved_type);
} /* struct_free() */
/*-------------------------------------------------------------------------*/
void
count_struct_ref (struct_t * pStruct)

/* Count all references held by struct <pStruct>.
 */

{
    pStruct->ref++;

    /* Only descend once per memory block. */
    if (!test_memory_reference(pStruct))
        return;

    note_malloced_block_ref(pStruct);
    count_struct_type_ref(pStruct->type);

    if (struct_size(pStruct))
        count_ref_in_vector(pStruct->member, struct_size(pStruct));
} /* count_struct_ref() */
/*-------------------------------------------------------------------------*/
void
clear_struct_ref (struct_t * pStruct)

/* Clear all references held by struct <pStruct>.
 */

{
    if (pStruct->ref == 0)
        return;  /* already cleared */

    clear_memory_reference(pStruct);
    pStruct->ref = 0;
    clear_struct_type_ref(pStruct->type);

    if (struct_size(pStruct) != 0)
        clear_ref_in_vector(pStruct->member, struct_size(pStruct));
} /* clear_struct_ref() */
/* Code generator: emits layout constants (size and field offsets) for
 * GMP's MP_INT structure — presumably via the struct_size/struct_field
 * printing macros, whose definitions are not visible here (TODO confirm).
 * The generated output is a C header; hence the "do not edit" banner.
 */
int main(int argc, char *argv[])
{
    printf("/* This file is created automatically. Do not edit by hand.*/\n\n");

    struct_size(MP_INT);
    struct_field(MP_INT,_mp_alloc);
    struct_field(MP_INT,_mp_size);
    struct_field(MP_INT,_mp_d);

    return 0;
}
/* Open up an existing flattened-device-tree blob to be examined or
 * modified through <cxt>.
 *
 * Returns 0 on success, -1 if the blob's version is below 16 or the
 * node-table allocation fails.  NOTE(review): on the allocation-failure
 * path <cxt> has already been zeroed — callers must not assume it is
 * untouched after an error.
 */
int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
		unsigned int max_find_device,
		void *(*realloc_fn) (void *, unsigned long))
{
	struct boot_param_header *bph = blob;

	/* can't cope with version < 16 */
	if (be32_to_cpu(bph->version) < 16)
		return -1;

	/* clear the cxt */
	memset(cxt, 0, sizeof(*cxt));

	/* alloc node_tbl to track node ptrs returned by ft_find_device */
	++max_find_device;
	cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
	if (!cxt->node_tbl)
		return -1;
	memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
	cxt->node_max = max_find_device;
	cxt->nodes_used = 1;	/* don't use idx 0 b/c looks like NULL */

	cxt->bph = bph;
	cxt->max_size = max_size;
	cxt->realloc = realloc_fn;

	/* Locate the three regions (reserve map, structure block, strings
	 * block) from the big-endian offsets in the blob header. */
	cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
	cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
	cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
	cxt->rgn[FT_STRUCT].size = struct_size(cxt);
	cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
	cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);

	/* Leave as '0' to force first ft_make_space call to do a
	 * ft_reorder and move dt to an area allocated by realloc.
	cxt->isordered = ft_ordered(cxt);
	*/

	cxt->p = cxt->rgn[FT_STRUCT].start;
	cxt->str_anchor = cxt->rgn[FT_STRINGS].start;

	return 0;
}
static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap) { struct clk_hw_onecell_data *clk_data; struct da8xx_usb0_clk48 *usb0; struct da8xx_usb1_clk48 *usb1; clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, 2), GFP_KERNEL); if (!clk_data) return -ENOMEM; clk_data->num = 2; usb0 = da8xx_cfgchip_register_usb0_clk48(dev, regmap); if (IS_ERR(usb0)) { if (PTR_ERR(usb0) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_warn(dev, "Failed to register usb0_clk48 (%ld)\n", PTR_ERR(usb0)); clk_data->hws[0] = ERR_PTR(-ENOENT); } else { clk_data->hws[0] = &usb0->hw; } usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); if (IS_ERR(usb1)) { if (PTR_ERR(usb1) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", PTR_ERR(usb1)); clk_data->hws[1] = ERR_PTR(-ENOENT); } else { clk_data->hws[1] = &usb1->hw; } return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data); }
int ocfs2_init_slot_info(struct ocfs2_super *osb) { int status; struct inode *inode = NULL; struct ocfs2_slot_info *si; si = kzalloc(struct_size(si, si_slots, osb->max_slots), GFP_KERNEL); if (!si) { status = -ENOMEM; mlog_errno(status); return status; } si->si_extended = ocfs2_uses_extended_slot_map(osb); si->si_num_slots = osb->max_slots; inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!inode) { status = -EINVAL; mlog_errno(status); goto bail; } si->si_inode = inode; status = ocfs2_map_slot_buffers(osb, si); if (status < 0) { mlog_errno(status); goto bail; } osb->slot_info = (struct ocfs2_slot_info *)si; bail: if (status < 0) __ocfs2_free_slot_info(si); return status; }
/*-------------------------------------------------------------------------*/
void
struct_check_for_destr ( struct_t * pStruct )

/* Remove all references to destructed objects from <pStruct>.
 */

{
    int ix;
    int count = struct_size(pStruct);

    for (ix = 0; ix < count; ix++)
    {
        svalue_t *entry = &pStruct->member[ix];

        if (destructed_object_ref(entry))
        {
            /* Replace the dangling reference by the number 0. */
            free_svalue(entry);
            put_number(entry, 0);
        }
    }
} /* struct_check_for_destr() */
/* Parse one raw configuration descriptor blob from the device into
 * <config>: validate the config descriptor itself, scan all following
 * descriptors to count interfaces and altsettings, allocate the
 * interface caches, then hand each interface/altsetting run to
 * usb_parse_interface().  Returns 0 or a negative errno.  Device
 * firmware is untrusted input, hence the defensive length checks and
 * the "warn and keep going" policy throughout.
 */
static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
    struct usb_host_config *config, unsigned char *buffer, int size)
{
	struct device *ddev = &dev->dev;
	unsigned char *buffer0 = buffer;
	int cfgno;
	int nintf, nintf_orig;
	int i, j, n;
	struct usb_interface_cache *intfc;
	unsigned char *buffer2;
	int size2;
	struct usb_descriptor_header *header;
	int retval;
	u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];
	unsigned iad_num = 0;

	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
	nintf = nintf_orig = config->desc.bNumInterfaces;
	config->desc.bNumInterfaces = 0;	// Adjusted later

	/* Reject a config descriptor with the wrong type or an
	 * implausible length before trusting any of its fields. */
	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
	    config->desc.bLength > size) {
		dev_err(ddev, "invalid descriptor for config index %d: "
		    "type = 0x%X, length = %d\n", cfgidx,
		    config->desc.bDescriptorType, config->desc.bLength);
		return -EINVAL;
	}
	cfgno = config->desc.bConfigurationValue;

	buffer += config->desc.bLength;
	size -= config->desc.bLength;

	if (nintf > USB_MAXINTERFACES) {
		dev_warn(ddev, "config %d has too many interfaces: %d, "
		    "using maximum allowed: %d\n",
		    cfgno, nintf, USB_MAXINTERFACES);
		nintf = USB_MAXINTERFACES;
	}

	/* Go through the descriptors, checking their length and counting the
	 * number of altsettings for each interface */
	n = 0;
	for ((buffer2 = buffer, size2 = size);
	     size2 > 0;
	     (buffer2 += header->bLength, size2 -= header->bLength)) {

		if (size2 < sizeof(struct usb_descriptor_header)) {
			dev_warn(ddev, "config %d descriptor has %d excess "
			    "byte%s, ignoring\n",
			    cfgno, size2, plural(size2));
			break;
		}

		header = (struct usb_descriptor_header *) buffer2;
		/* bLength < 2 would make the loop stop advancing. */
		if ((header->bLength > size2) || (header->bLength < 2)) {
			dev_warn(ddev, "config %d has an invalid descriptor "
			    "of length %d, skipping remainder of the config\n",
			    cfgno, header->bLength);
			break;
		}

		if (header->bDescriptorType == USB_DT_INTERFACE) {
			struct usb_interface_descriptor *d;
			int inum;

			d = (struct usb_interface_descriptor *) header;
			if (d->bLength < USB_DT_INTERFACE_SIZE) {
				dev_warn(ddev, "config %d has an invalid "
				    "interface descriptor of length %d, "
				    "skipping\n", cfgno, d->bLength);
				continue;
			}

			inum = d->bInterfaceNumber;

			if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
			    n >= nintf_orig) {
				dev_warn(ddev, "config %d has more interface "
				    "descriptors, than it declares in "
				    "bNumInterfaces, ignoring interface "
				    "number: %d\n", cfgno, inum);
				continue;
			}

			if (inum >= nintf_orig)
				dev_warn(ddev, "config %d has an invalid "
				    "interface number: %d but max is %d\n",
				    cfgno, inum, nintf_orig - 1);

			/* Have we already encountered this interface?
			 * Count its altsettings */
			for (i = 0; i < n; ++i) {
				if (inums[i] == inum)
					break;
			}
			if (i < n) {
				/* nalts[] is u8; saturate at 255. */
				if (nalts[i] < 255)
					++nalts[i];
			} else if (n < USB_MAXINTERFACES) {
				inums[n] = inum;
				nalts[n] = 1;
				++n;
			}

		} else if (header->bDescriptorType ==
				USB_DT_INTERFACE_ASSOCIATION) {
			struct usb_interface_assoc_descriptor *d;

			d = (struct usb_interface_assoc_descriptor *)header;
			if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
				dev_warn(ddev,
					 "config %d has an invalid interface association descriptor of length %d, skipping\n",
					 cfgno, d->bLength);
				continue;
			}

			if (iad_num == USB_MAXIADS) {
				dev_warn(ddev, "found more Interface "
					       "Association Descriptors "
					       "than allocated for in "
					       "configuration %d\n", cfgno);
			} else {
				config->intf_assoc[iad_num] = d;
				iad_num++;
			}

		} else if (header->bDescriptorType == USB_DT_DEVICE ||
			    header->bDescriptorType == USB_DT_CONFIG)
			dev_warn(ddev, "config %d contains an unexpected "
			    "descriptor of type 0x%X, skipping\n",
			    cfgno, header->bDescriptorType);

	}	/* for ((buffer2 = buffer, size2 = size); ...) */

	/* Trim the effective size to what was actually walked. */
	size = buffer2 - buffer;
	config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);

	if (n != nintf)
		dev_warn(ddev, "config %d has %d interface%s, different from "
		    "the descriptor's value: %d\n",
		    cfgno, n, plural(n), nintf_orig);
	else if (n == 0)
		dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
	config->desc.bNumInterfaces = nintf = n;

	/* Check for missing interface numbers */
	for (i = 0; i < nintf; ++i) {
		for (j = 0; j < nintf; ++j) {
			if (inums[j] == i)
				break;
		}
		if (j >= nintf)
			dev_warn(ddev, "config %d has no interface number "
			    "%d\n", cfgno, i);
	}

	/* Allocate the usb_interface_caches and altsetting arrays */
	for (i = 0; i < nintf; ++i) {
		j = nalts[i];
		if (j > USB_MAXALTSETTING) {
			dev_warn(ddev, "too many alternate settings for "
			    "config %d interface %d: %d, "
			    "using maximum allowed: %d\n",
			    cfgno, inums[i], j, USB_MAXALTSETTING);
			nalts[i] = j = USB_MAXALTSETTING;
		}

		intfc = kzalloc(struct_size(intfc, altsetting, j), GFP_KERNEL);
		config->intf_cache[i] = intfc;
		if (!intfc)
			return -ENOMEM;
		kref_init(&intfc->ref);
	}

	/* FIXME: parse the BOS descriptor */

	/* Skip over any Class Specific or Vendor Specific descriptors;
	 * find the first interface descriptor */
	config->extra = buffer;
	i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
	    USB_DT_INTERFACE, &n);
	config->extralen = i;
	if (n > 0)
		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
		    n, plural(n), "configuration");
	buffer += i;
	size -= i;

	/* Parse all the interface/altsetting descriptors */
	while (size > 0) {
		retval = usb_parse_interface(ddev, cfgno, config,
		    buffer, size, inums, nalts);
		if (retval < 0)
			return retval;

		buffer += retval;
		size -= retval;
	}

	/* Check for missing altsettings */
	for (i = 0; i < nintf; ++i) {
		intfc = config->intf_cache[i];
		for (j = 0; j < intfc->num_altsetting; ++j) {
			for (n = 0; n < intfc->num_altsetting; ++n) {
				if (intfc->altsetting[n].desc.
				    bAlternateSetting == j)
					break;
			}
			if (n >= intfc->num_altsetting)
				dev_warn(ddev, "config %d interface %d has no "
				    "altsetting %d\n", cfgno, inums[i], j);
		}
	}

	return 0;
}
static int tsens_probe(struct platform_device *pdev) { int ret, i; struct device *dev; struct device_node *np; struct tsens_priv *priv; const struct tsens_plat_data *data; const struct of_device_id *id; u32 num_sensors; if (pdev->dev.of_node) dev = &pdev->dev; else dev = pdev->dev.parent; np = dev->of_node; id = of_match_node(tsens_table, np); if (id) data = id->data; else data = &data_8960; num_sensors = data->num_sensors; if (np) of_property_read_u32(np, "#qcom,sensors", &num_sensors); if (num_sensors <= 0) { dev_err(dev, "invalid number of sensors\n"); return -EINVAL; } priv = devm_kzalloc(dev, struct_size(priv, sensor, num_sensors), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; priv->num_sensors = num_sensors; priv->ops = data->ops; for (i = 0; i < priv->num_sensors; i++) { if (data->hw_ids) priv->sensor[i].hw_id = data->hw_ids[i]; else priv->sensor[i].hw_id = i; } priv->feat = data->feat; priv->fields = data->fields; if (!priv->ops || !priv->ops->init || !priv->ops->get_temp) return -EINVAL; ret = priv->ops->init(priv); if (ret < 0) { dev_err(dev, "tsens init failed\n"); return ret; } if (priv->ops->calibrate) { ret = priv->ops->calibrate(priv); if (ret < 0) { if (ret != -EPROBE_DEFER) dev_err(dev, "tsens calibration failed\n"); return ret; } } ret = tsens_register(priv); platform_set_drvdata(pdev, priv); return ret; }
/*
 * One-shot clock setup for the Berlin BG2 SoC: registers the simple
 * PLLs, the AV PLL VCOs and their channels, bypass and selection
 * muxes, divider and gate cells, then exposes everything as a onecell
 * clk provider for <np>.
 *
 * NOTE(review): the early returns and the bg2_fail path do not free
 * clk_data (and the !gbase path leaves it allocated with nothing
 * mapped).  As __init one-shot code this is presumably tolerated —
 * confirm before reusing the pattern elsewhere.
 */
static void __init berlin2_clock_setup(struct device_node *np)
{
	struct device_node *parent_np = of_get_parent(np);
	const char *parent_names[9];
	struct clk *clk;
	struct clk_hw *hw;
	struct clk_hw **hws;
	u8 avpll_flags = 0;
	int n, ret;

	clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
	if (!clk_data)
		return;
	clk_data->num = MAX_CLKS;
	hws = clk_data->hws;

	gbase = of_iomap(parent_np, 0);
	if (!gbase)
		return;

	/* overwrite default clock names with DT provided ones */
	clk = of_clk_get_by_name(np, clk_names[REFCLK]);
	if (!IS_ERR(clk)) {
		clk_names[REFCLK] = __clk_get_name(clk);
		clk_put(clk);
	}

	clk = of_clk_get_by_name(np, clk_names[VIDEO_EXT0]);
	if (!IS_ERR(clk)) {
		clk_names[VIDEO_EXT0] = __clk_get_name(clk);
		clk_put(clk);
	}

	/* simple register PLLs */
	ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_SYSPLLCTL0,
				   clk_names[SYSPLL], clk_names[REFCLK], 0);
	if (ret)
		goto bg2_fail;

	ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_MEMPLLCTL0,
				   clk_names[MEMPLL], clk_names[REFCLK], 0);
	if (ret)
		goto bg2_fail;

	ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_CPUPLLCTL0,
				   clk_names[CPUPLL], clk_names[REFCLK], 0);
	if (ret)
		goto bg2_fail;

	if (of_device_is_compatible(np, "marvell,berlin2-global-register"))
		avpll_flags |= BERLIN2_AVPLL_SCRAMBLE_QUIRK;

	/* audio/video VCOs */
	ret = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL0, "avpll_vcoA",
					 clk_names[REFCLK], avpll_flags, 0);
	if (ret)
		goto bg2_fail;

	/* 8 channels per VCO */
	for (n = 0; n < 8; n++) {
		ret = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL0,
				clk_names[AVPLL_A1 + n], n, "avpll_vcoA",
				avpll_flags, 0);
		if (ret)
			goto bg2_fail;
	}

	ret = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL31, "avpll_vcoB",
					 clk_names[REFCLK],
					 BERLIN2_AVPLL_BIT_QUIRK |
					 avpll_flags, 0);
	if (ret)
		goto bg2_fail;

	for (n = 0; n < 8; n++) {
		ret = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL31,
				clk_names[AVPLL_B1 + n], n, "avpll_vcoB",
				BERLIN2_AVPLL_BIT_QUIRK | avpll_flags, 0);
		if (ret)
			goto bg2_fail;
	}

	/* reference clock bypass switches */
	parent_names[0] = clk_names[SYSPLL];
	parent_names[1] = clk_names[REFCLK];
	hw = clk_hw_register_mux(NULL, "syspll_byp", parent_names, 2,
				 0, gbase + REG_CLKSWITCH0, 0, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;
	clk_names[SYSPLL] = clk_hw_get_name(hw);

	parent_names[0] = clk_names[MEMPLL];
	parent_names[1] = clk_names[REFCLK];
	hw = clk_hw_register_mux(NULL, "mempll_byp", parent_names, 2,
				 0, gbase + REG_CLKSWITCH0, 1, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;
	clk_names[MEMPLL] = clk_hw_get_name(hw);

	parent_names[0] = clk_names[CPUPLL];
	parent_names[1] = clk_names[REFCLK];
	hw = clk_hw_register_mux(NULL, "cpupll_byp", parent_names, 2,
				 0, gbase + REG_CLKSWITCH0, 2, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;
	clk_names[CPUPLL] = clk_hw_get_name(hw);

	/* clock muxes */
	parent_names[0] = clk_names[AVPLL_B3];
	parent_names[1] = clk_names[AVPLL_A3];
	hw = clk_hw_register_mux(NULL, clk_names[AUDIO1_PLL], parent_names, 2,
				 0, gbase + REG_CLKSELECT2, 29, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	parent_names[0] = clk_names[VIDEO0_PLL];
	parent_names[1] = clk_names[VIDEO_EXT0];
	hw = clk_hw_register_mux(NULL, clk_names[VIDEO0_IN], parent_names, 2,
				 0, gbase + REG_CLKSELECT3, 4, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	parent_names[0] = clk_names[VIDEO1_PLL];
	parent_names[1] = clk_names[VIDEO_EXT0];
	hw = clk_hw_register_mux(NULL, clk_names[VIDEO1_IN], parent_names, 2,
				 0, gbase + REG_CLKSELECT3, 6, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	parent_names[0] = clk_names[AVPLL_A2];
	parent_names[1] = clk_names[AVPLL_B2];
	hw = clk_hw_register_mux(NULL, clk_names[VIDEO1_PLL], parent_names, 2,
				 0, gbase + REG_CLKSELECT3, 7, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	parent_names[0] = clk_names[VIDEO2_PLL];
	parent_names[1] = clk_names[VIDEO_EXT0];
	hw = clk_hw_register_mux(NULL, clk_names[VIDEO2_IN], parent_names, 2,
				 0, gbase + REG_CLKSELECT3, 9, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	parent_names[0] = clk_names[AVPLL_B1];
	parent_names[1] = clk_names[AVPLL_A5];
	hw = clk_hw_register_mux(NULL, clk_names[VIDEO2_PLL], parent_names, 2,
				 0, gbase + REG_CLKSELECT3, 10, 1, 0, &lock);
	if (IS_ERR(hw))
		goto bg2_fail;

	/* clock divider cells */
	for (n = 0; n < ARRAY_SIZE(bg2_divs); n++) {
		const struct berlin2_div_data *dd = &bg2_divs[n];
		int k;

		for (k = 0; k < dd->num_parents; k++)
			parent_names[k] = clk_names[dd->parent_ids[k]];

		hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
				dd->name, dd->div_flags, parent_names,
				dd->num_parents, dd->flags, &lock);
	}

	/* clock gate cells */
	for (n = 0; n < ARRAY_SIZE(bg2_gates); n++) {
		const struct berlin2_gate_data *gd = &bg2_gates[n];

		hws[CLKID_GETH0 + n] = clk_hw_register_gate(NULL, gd->name,
			    gd->parent_name, gd->flags, gbase + REG_CLKENABLE,
			    gd->bit_idx, 0, &lock);
	}

	/* twdclk is derived from cpu/3 */
	hws[CLKID_TWD] =
		clk_hw_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);

	/* check for errors on leaf clocks */
	for (n = 0; n < MAX_CLKS; n++) {
		if (!IS_ERR(hws[n]))
			continue;

		pr_err("%pOF: Unable to register leaf clock %d\n", np, n);
		goto bg2_fail;
	}

	/* register clk-provider */
	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);

	return;

bg2_fail:
	iounmap(gbase);
}
/*
 * Finalize the current TX message header
 *
 * Sets the message header to be at the proper location depending on
 * how many descriptors we have (check documentation at the file's
 * header for more info on that).
 *
 * Appends padding bytes to make sure the whole TX message (counting
 * from the 'relocated' message header) is aligned to
 * tx_block_size. We assume the _append() code has left enough space
 * in the FIFO for that. If there are no payloads, just pass, as it
 * won't be transferred.
 *
 * The amount of padding bytes depends on how many payloads are in the
 * TX message, as the "msg header and payload descriptors" will be
 * shifted up in the buffer.
 */
static
void i2400m_tx_close(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
	struct i2400m_msg_hdr *tx_msg_moved;
	size_t aligned_size, padding, hdr_size;
	void *pad_buf;
	unsigned num_pls;

	if (tx_msg->size & I2400M_TX_SKIP)	/* a skipper? nothing to do */
		goto out;
	num_pls = le16_to_cpu(tx_msg->num_pls);
	/* We can get this situation when a new message was started
	 * and there was no space to add payloads before hitting the
	 * tail (and taking padding into consideration). */
	if (num_pls == 0) {
		tx_msg->size |= I2400M_TX_SKIP;
		goto out;
	}
	/* Relocate the message header
	 *
	 * Find the current header size, align it to 16 and if we need
	 * to move it so the tail is next to the payloads, move it and
	 * set the offset.
	 *
	 * If it moved, this header is good only for transmission; the
	 * original one (it is kept if we moved) is still used to
	 * figure out where the next TX message starts (and where the
	 * offset to the moved header is).
	 */
	/* Header = fixed part + one payload descriptor per payload. */
	hdr_size = struct_size(tx_msg, pld, le16_to_cpu(tx_msg->num_pls));
	hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
	tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	memmove(tx_msg_moved, tx_msg, hdr_size);
	/* The moved copy no longer covers the skipped gap. */
	tx_msg_moved->size -= tx_msg->offset;
	/*
	 * Now figure out how much we have to add to the (moved!)
	 * message so the size is a multiple of
	 * i2400m->bus_tx_block_size.
	 */
	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
	padding = aligned_size - tx_msg_moved->size;
	if (padding > 0) {
		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
		if (unlikely(WARN_ON(pad_buf == NULL
				     || pad_buf == TAIL_FULL))) {
			/* This should not happen -- append should verify
			 * there is always space left at least to append
			 * tx_block_size */
			dev_err(dev,
				"SW BUG! Possible data leakage from memory the "
				"device should not read for padding - "
				"size %lu aligned_size %zu tx_buf %p in "
				"%zu out %zu\n",
				(unsigned long) tx_msg_moved->size,
				aligned_size, i2400m->tx_buf, i2400m->tx_in,
				i2400m->tx_out);
		} else
			/* 0xad fill makes stray padding easy to spot. */
			memset(pad_buf, 0xad, padding);
	}
	tx_msg_moved->padding = cpu_to_le16(padding);
	tx_msg_moved->size += padding;
	/* Keep the original header's size consistent as well, since it
	 * is what delimits the next message in the FIFO. */
	if (tx_msg != tx_msg_moved)
		tx_msg->size += padding;
out:
	i2400m->tx_msg = NULL;
}
NL80211_PMSR_ATTR_PEERS); if (!peers) return -EINVAL; count = 0; nla_for_each_nested(peer, peers, rem) { count++; if (count > rdev->wiphy.pmsr_capa->max_peers) { NL_SET_ERR_MSG_ATTR(info->extack, peer, "Too many peers used"); return -EINVAL; } } req = kzalloc(struct_size(req, peers, count), GFP_KERNEL); if (!req) return -ENOMEM; if (info->attrs[NL80211_ATTR_TIMEOUT]) req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]); if (info->attrs[NL80211_ATTR_MAC]) { if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[NL80211_ATTR_MAC], "device cannot randomize MAC address"); err = -EINVAL; goto out_err; }
/*
 * Wire one SCMI-discovered clock into the common clock framework and,
 * on successful registration, propagate the firmware-reported rate
 * range to the clk core.
 */
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
	int ret;
	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = 0,
		.ops = &scmi_clk_ops,
		.name = sclk->info->name,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (!ret)
		clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
				      sclk->info->range.max_rate);
	return ret;
}

/*
 * Probe: enumerate the clocks exposed by the SCMI handle and register
 * each one as a clk_hw.  Entries with missing info or failed
 * registration leave a NULL slot in the onecell table rather than
 * aborting the whole probe.
 */
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;

	if (!handle || !handle->clk_ops)
		return -ENODEV;

	count = handle->clk_ops->count_get(handle);
	if (count < 0) {
		dev_err(dev, "%s: invalid clock output count\n", np->name);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = handle->clk_ops->info_get(handle, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			/* slot stays NULL — devm_kzalloc zeroed the table */
			continue;
		}

		sclk->id = idx;
		sclk->handle = handle;

		err = scmi_clk_ops_init(dev, sclk);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s\n",
				sclk->info->name);
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}
/*
 * Code generator for GHC's derived constants: prints header sizes and
 * block geometry, then field offsets/sizes of the RTS structures the
 * code generator needs — presumably via the field_offset/struct_field/
 * struct_size/closure_* printing macros, whose definitions are not
 * visible here (TODO confirm).  The output is generated source; hence
 * the "do not edit" banner.
 */
int main(int argc, char *argv[])
{
#ifndef GEN_HASKELL
    printf("/* This file is created automatically. Do not edit by hand.*/\n\n");

    printf("#define STD_HDR_SIZE %" FMT_SizeT "\n",
           (size_t)sizeofW(StgHeader) - sizeofW(StgProfHeader));
    /* grrr.. PROFILING is on so we need to subtract sizeofW(StgProfHeader) */
    printf("#define PROF_HDR_SIZE %" FMT_SizeT "\n",
           (size_t)sizeofW(StgProfHeader));

    printf("#define BLOCK_SIZE %u\n", BLOCK_SIZE);
    printf("#define MBLOCK_SIZE %u\n", MBLOCK_SIZE);
    printf("#define BLOCKS_PER_MBLOCK %" FMT_SizeT "\n",
           (lnat)BLOCKS_PER_MBLOCK);
    // could be derived, but better to save doing the calculation twice

    printf("\n\n");
#endif

    /* STG machine register table */
    field_offset(StgRegTable, rR1);
    field_offset(StgRegTable, rR2);
    field_offset(StgRegTable, rR3);
    field_offset(StgRegTable, rR4);
    field_offset(StgRegTable, rR5);
    field_offset(StgRegTable, rR6);
    field_offset(StgRegTable, rR7);
    field_offset(StgRegTable, rR8);
    field_offset(StgRegTable, rR9);
    field_offset(StgRegTable, rR10);
    field_offset(StgRegTable, rF1);
    field_offset(StgRegTable, rF2);
    field_offset(StgRegTable, rF3);
    field_offset(StgRegTable, rF4);
    field_offset(StgRegTable, rD1);
    field_offset(StgRegTable, rD2);
    field_offset(StgRegTable, rL1);
    field_offset(StgRegTable, rSp);
    field_offset(StgRegTable, rSpLim);
    field_offset(StgRegTable, rHp);
    field_offset(StgRegTable, rHpLim);
    field_offset(StgRegTable, rCCCS);
    field_offset(StgRegTable, rCurrentTSO);
    field_offset(StgRegTable, rCurrentNursery);
    field_offset(StgRegTable, rHpAlloc);
    struct_field(StgRegTable, rRet);
    struct_field(StgRegTable, rNursery);

    def_offset("stgEagerBlackholeInfo", FUN_OFFSET(stgEagerBlackholeInfo));
    def_offset("stgGCEnter1", FUN_OFFSET(stgGCEnter1));
    def_offset("stgGCFun", FUN_OFFSET(stgGCFun));

    /* capabilities and block descriptors */
    field_offset(Capability, r);
    field_offset(Capability, lock);
    struct_field(Capability, no);
    struct_field(Capability, mut_lists);
    struct_field(Capability, context_switch);
    struct_field(Capability, interrupt);
    struct_field(Capability, sparks);

    struct_field(bdescr, start);
    struct_field(bdescr, free);
    struct_field(bdescr, blocks);
    struct_field(bdescr, gen_no);
    struct_field(bdescr, link);

    struct_size(generation);
    struct_field(generation, n_new_large_words);

    /* profiling cost centres */
    struct_size(CostCentreStack);
    struct_field(CostCentreStack, ccsID);
    struct_field(CostCentreStack, mem_alloc);
    struct_field(CostCentreStack, scc_count);
    struct_field(CostCentreStack, prevStack);

    struct_field(CostCentre, ccID);
    struct_field(CostCentre, link);

    struct_field(StgHeader, info);
    struct_field_("StgHeader_ccs", StgHeader, prof.ccs);
    struct_field_("StgHeader_ldvw", StgHeader, prof.hp.ldvw);

    struct_size(StgSMPThunkHeader);

    closure_payload(StgClosure,payload);

    struct_field(StgEntCounter, allocs);
    struct_field(StgEntCounter, registeredp);
    struct_field(StgEntCounter, link);
    struct_field(StgEntCounter, entry_count);

    /* closure layouts */
    closure_size(StgUpdateFrame);
    closure_size(StgCatchFrame);
    closure_size(StgStopFrame);

    closure_size(StgMutArrPtrs);
    closure_field(StgMutArrPtrs, ptrs);
    closure_field(StgMutArrPtrs, size);

    closure_size(StgArrWords);
    closure_field(StgArrWords, bytes);
    closure_payload(StgArrWords, payload);

    closure_field(StgTSO, _link);
    closure_field(StgTSO, global_link);
    closure_field(StgTSO, what_next);
    closure_field(StgTSO, why_blocked);
    closure_field(StgTSO, block_info);
    closure_field(StgTSO, blocked_exceptions);
    closure_field(StgTSO, id);
    closure_field(StgTSO, cap);
    closure_field(StgTSO, saved_errno);
    closure_field(StgTSO, trec);
    closure_field(StgTSO, flags);
    closure_field(StgTSO, dirty);
    closure_field(StgTSO, bq);
    closure_field_("StgTSO_cccs", StgTSO, prof.cccs);
    closure_field(StgTSO, stackobj);

    closure_field(StgStack, sp);
    closure_field_offset(StgStack, stack);
    closure_field(StgStack, stack_size);
    closure_field(StgStack, dirty);

    struct_size(StgTSOProfInfo);

    opt_struct_size(StgTSOProfInfo,PROFILING);

    closure_field(StgUpdateFrame, updatee);

    closure_field(StgCatchFrame, handler);
    closure_field(StgCatchFrame, exceptions_blocked);

    closure_size(StgPAP);
    closure_field(StgPAP, n_args);
    closure_field_gcptr(StgPAP, fun);
    closure_field(StgPAP, arity);
    closure_payload(StgPAP, payload);

    thunk_size(StgAP);
    closure_field(StgAP, n_args);
    closure_field_gcptr(StgAP, fun);
    closure_payload(StgAP, payload);

    thunk_size(StgAP_STACK);
    closure_field(StgAP_STACK, size);
    closure_field_gcptr(StgAP_STACK, fun);
    closure_payload(StgAP_STACK, payload);

    thunk_size(StgSelector);

    closure_field_gcptr(StgInd, indirectee);

    closure_size(StgMutVar);
    closure_field(StgMutVar, var);

    /* STM frames and queues */
    closure_size(StgAtomicallyFrame);
    closure_field(StgAtomicallyFrame, code);
    closure_field(StgAtomicallyFrame, next_invariant_to_check);
    closure_field(StgAtomicallyFrame, result);

    closure_field(StgInvariantCheckQueue, invariant);
    closure_field(StgInvariantCheckQueue, my_execution);
    closure_field(StgInvariantCheckQueue, next_queue_entry);

    closure_field(StgAtomicInvariant, code);

    closure_field(StgTRecHeader, enclosing_trec);

    closure_size(StgCatchSTMFrame);
    closure_field(StgCatchSTMFrame, handler);
    closure_field(StgCatchSTMFrame, code);

    closure_size(StgCatchRetryFrame);
    closure_field(StgCatchRetryFrame, running_alt_code);
    closure_field(StgCatchRetryFrame, first_code);
    closure_field(StgCatchRetryFrame, alt_code);

    closure_field(StgTVarWatchQueue, closure);
    closure_field(StgTVarWatchQueue, next_queue_entry);
    closure_field(StgTVarWatchQueue, prev_queue_entry);

    closure_field(StgTVar, current_value);

    closure_size(StgWeak);
    closure_field(StgWeak,link);
    closure_field(StgWeak,key);
    closure_field(StgWeak,value);
    closure_field(StgWeak,finalizer);
    closure_field(StgWeak,cfinalizer);

    closure_size(StgDeadWeak);
    closure_field(StgDeadWeak,link);

    closure_size(StgMVar);
    closure_field(StgMVar,head);
    closure_field(StgMVar,tail);
    closure_field(StgMVar,value);

    closure_size(StgMVarTSOQueue);
    closure_field(StgMVarTSOQueue, link);
    closure_field(StgMVarTSOQueue, tso);

    closure_size(StgBCO);
    closure_field(StgBCO, instrs);
    closure_field(StgBCO, literals);
    closure_field(StgBCO, ptrs);
    closure_field(StgBCO, arity);
    closure_field(StgBCO, size);
    closure_payload(StgBCO, bitmap);

    closure_size(StgStableName);
    closure_field(StgStableName,sn);

    closure_size(StgBlockingQueue);
    closure_field(StgBlockingQueue, bh);
    closure_field(StgBlockingQueue, owner);
    closure_field(StgBlockingQueue, queue);
    closure_field(StgBlockingQueue, link);

    closure_size(MessageBlackHole);
    closure_field(MessageBlackHole, link);
    closure_field(MessageBlackHole, tso);
    closure_field(MessageBlackHole, bh);

    /* selected RTS flags */
    struct_field_("RtsFlags_ProfFlags_showCCSOnException",
		  RTS_FLAGS, ProfFlags.showCCSOnException);
    struct_field_("RtsFlags_DebugFlags_apply",
		  RTS_FLAGS, DebugFlags.apply);
    struct_field_("RtsFlags_DebugFlags_sanity",
		  RTS_FLAGS, DebugFlags.sanity);
    struct_field_("RtsFlags_DebugFlags_weak",
		  RTS_FLAGS, DebugFlags.weak);
    struct_field_("RtsFlags_GcFlags_initialStkSize",
		  RTS_FLAGS, GcFlags.initialStkSize);
    struct_field_("RtsFlags_MiscFlags_tickInterval",
		  RTS_FLAGS, MiscFlags.tickInterval);

    struct_size(StgFunInfoExtraFwd);
    struct_field(StgFunInfoExtraFwd, slow_apply);
    struct_field(StgFunInfoExtraFwd, fun_type);
    struct_field(StgFunInfoExtraFwd, arity);
    struct_field_("StgFunInfoExtraFwd_bitmap", StgFunInfoExtraFwd, b.bitmap);

    struct_size(StgFunInfoExtraRev);
    struct_field(StgFunInfoExtraRev, slow_apply_offset);
    struct_field(StgFunInfoExtraRev, fun_type);
    struct_field(StgFunInfoExtraRev, arity);
    struct_field_("StgFunInfoExtraRev_bitmap", StgFunInfoExtraRev, b.bitmap);

    struct_field(StgLargeBitmap, size);
    field_offset(StgLargeBitmap, bitmap);

    struct_size(snEntry);
    struct_field(snEntry,sn_obj);
    struct_field(snEntry,addr);

#ifdef mingw32_HOST_OS
    /* async I/O results exist only on the Windows RTS */
    struct_size(StgAsyncIOResult);
    struct_field(StgAsyncIOResult, reqID);
    struct_field(StgAsyncIOResult, len);
    struct_field(StgAsyncIOResult, errCode);
#endif

    return 0;
}
/*
 * Build a server list from a VLDB record.
 *
 * Only servers whose fs_mask entry matches <type_mask> are included.
 * The list is kept sorted by server UUID via insertion sort, with
 * duplicate servers collapsed.  Returns the new list (usage count 1)
 * or an ERR_PTR().
 */
struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
					      struct key *key,
					      struct afs_vldb_entry *vldb,
					      u8 type_mask)
{
	struct afs_server_list *slist;
	struct afs_server *server;
	int ret = -ENOMEM, nr_servers = 0, i, j;

	/* Upper bound on how many entries we might need. */
	for (i = 0; i < vldb->nr_servers; i++)
		if (vldb->fs_mask[i] & type_mask)
			nr_servers++;

	slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
	if (!slist)
		goto error;

	refcount_set(&slist->usage, 1);
	rwlock_init(&slist->lock);

	/* Make sure a records exists for each server in the list. */
	for (i = 0; i < vldb->nr_servers; i++) {
		if (!(vldb->fs_mask[i] & type_mask))
			continue;

		server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
		if (IS_ERR(server)) {
			ret = PTR_ERR(server);
			/* Missing/unusable servers are skipped, not fatal. */
			if (ret == -ENOENT || ret == -ENOMEDIUM)
				continue;
			goto error_2;
		}

		/* Insertion-sort by UUID */
		for (j = 0; j < slist->nr_servers; j++)
			if (memcmp(&slist->servers[j].server->uuid,
				   &server->uuid,
				   sizeof(server->uuid)) >= 0)
				break;

		if (j < slist->nr_servers) {
			if (slist->servers[j].server == server) {
				/* Duplicate: release the server we got back
				 * rather than listing it twice. */
				afs_put_server(cell->net, server);
				continue;
			}

			/* Shift the tail up to open slot j. */
			memmove(slist->servers + j + 1,
				slist->servers + j,
				(slist->nr_servers - j) *
				sizeof(struct afs_server_entry));
		}

		slist->servers[j].server = server;
		slist->nr_servers++;
	}

	if (slist->nr_servers == 0) {
		ret = -EDESTADDRREQ;
		goto error_2;
	}

	return slist;

error_2:
	afs_put_serverlist(cell->net, slist);
error:
	return ERR_PTR(ret);
}
/*
 * Component bind callback for the TCON TOP block.
 *
 * Maps the register space, acquires the reset line and bus clock,
 * clears stale register state, registers the TCON channel gate clocks
 * (TV0 always; TV1 and DSI only when the quirks say the hardware has
 * them) and exposes them through a one-cell clk provider.
 *
 * Returns 0 on success or a negative errno.  On failure every gate
 * that was registered is unregistered, the bus clock is disabled and
 * the reset line is re-asserted.
 */
static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
			       void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct clk_hw_onecell_data *clk_data;
	struct sun8i_tcon_top *tcon_top;
	const struct sun8i_tcon_top_quirks *quirks;
	struct resource *res;
	void __iomem *regs;
	int ret, i;

	quirks = of_device_get_match_data(&pdev->dev);

	tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
	if (!tcon_top)
		return -ENOMEM;

	/* Zeroed hws[] entries are benign: IS_ERR(NULL) is false below. */
	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_NUM),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	tcon_top->clk_data = clk_data;

	spin_lock_init(&tcon_top->reg_lock);

	tcon_top->rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(tcon_top->rst)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(tcon_top->rst);
	}

	tcon_top->bus = devm_clk_get(dev, "bus");
	if (IS_ERR(tcon_top->bus)) {
		dev_err(dev, "Couldn't get the bus clock\n");
		return PTR_ERR(tcon_top->bus);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	tcon_top->regs = regs;
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	ret = reset_control_deassert(tcon_top->rst);
	if (ret) {
		dev_err(dev, "Could not deassert ctrl reset control\n");
		return ret;
	}

	ret = clk_prepare_enable(tcon_top->bus);
	if (ret) {
		dev_err(dev, "Could not enable bus clock\n");
		goto err_assert_reset;
	}

	/*
	 * At least on H6, some registers have some bits set by default
	 * which may cause issues. Clear them here.
	 */
	writel(0, regs + TCON_TOP_PORT_SEL_REG);
	writel(0, regs + TCON_TOP_GATE_SRC_REG);

	/*
	 * TCON TOP has two muxes, which select parent clock for each TCON TV
	 * channel clock. Parent could be either TCON TV or TVE clock. For now
	 * we leave this fixed to TCON TV, since TVE driver for R40 is not yet
	 * implemented. Once it is, graph needs to be traversed to determine
	 * if TVE is active on each TCON TV. If it is, mux should be switched
	 * to TVE clock parent.
	 */
	clk_data->hws[CLK_TCON_TOP_TV0] =
		sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
					     &tcon_top->reg_lock,
					     TCON_TOP_TCON_TV0_GATE, 0);

	if (quirks->has_tcon_tv1)
		clk_data->hws[CLK_TCON_TOP_TV1] =
			sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
						     &tcon_top->reg_lock,
						     TCON_TOP_TCON_TV1_GATE, 1);

	if (quirks->has_dsi)
		clk_data->hws[CLK_TCON_TOP_DSI] =
			sun8i_tcon_top_register_gate(dev, "dsi", regs,
						     &tcon_top->reg_lock,
						     TCON_TOP_TCON_DSI_GATE, 2);

	/* Any registration failure aborts; NULL (absent) entries pass. */
	for (i = 0; i < CLK_NUM; i++)
		if (IS_ERR(clk_data->hws[i])) {
			ret = PTR_ERR(clk_data->hws[i]);
			goto err_unregister_gates;
		}

	clk_data->num = CLK_NUM;

	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
				     clk_data);
	if (ret)
		goto err_unregister_gates;

	dev_set_drvdata(dev, tcon_top);

	return 0;

err_unregister_gates:
	for (i = 0; i < CLK_NUM; i++)
		if (!IS_ERR_OR_NULL(clk_data->hws[i]))
			clk_hw_unregister_gate(clk_data->hws[i]);
	clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
	reset_control_assert(tcon_top->rst);

	return ret;
}
/*-------------------------------------------------------------------------*/
svalue_t *
x_map_struct (svalue_t *sp, int num_arg)

/* EFUN map() on structs
 *
 *   mixed * map(struct arg, string func, string|object ob, mixed extra...)
 *   mixed * map(struct arg, closure cl, mixed extra...)
 *   mixed * map(struct arr, mapping map [, int col])
 *
 * Map the elements of <arr> through a filter defined by the other
 * arguments, and return an array of the elements returned by the filter.
 *
 * The filter can be a function call:
 *
 *    <obj>-><fun>(elem, <extra>...)
 *
 * or a mapping query:
 *
 *    <map>[elem[,idx]]
 *
 * In the mapping case, if <map>[elem[,idx]] does not exist, the original
 * value is returned in the result.
 * [Note: argument type and range checking for idx is done in v_map()]
 *
 * <obj> can both be an object reference or a filename. If <ob> is
 * omitted, or neither an object nor a string, then this_object() is used.
 *
 * As a bonus, all references to destructed objects in <arr> are replaced
 * by proper 0es.
 *
 * <sp> is the interpreter stack pointer on entry; <num_arg> the number
 * of arguments on the stack.  Returns the new stack pointer, with the
 * result struct replacing the argument struct in place.
 */

{
    struct_t *st;       /* the input struct */
    struct_t *res;      /* the result struct being built */
    svalue_t *arg;      /* first argument on the stack (the struct) */
    svalue_t *v, *w, *x;
    mp_int    cnt;      /* number of members left to map */

    inter_sp = sp;

    arg = sp - num_arg + 1;

    st = arg->u.strct;
    cnt = (mp_int)struct_size(st);

    if (arg[1].type == T_MAPPING)
    {
        /* --- Map through mapping --- */

        mapping_t *m;
        p_int column = 0; /* mapping column to use */

        m = arg[1].u.map;
        if (num_arg > 2)
            column = arg[2].u.number;

        res = struct_new(st->type);
        if (!res)
            errorf("(map_struct) Out of memory: struct[%"PRIdMPINT"] for result\n", cnt);
        push_struct(inter_sp, res); /* In case of errors */

        for (w = st->member, x = res->member; --cnt >= 0; w++, x++)
        {
            /* Replace references to destructed objects by 0. */
            if (destructed_object_ref(w))
                assign_svalue(w, &const0);

            /* Missing key: keep the original member value. */
            v = get_map_value(m, w);
            if (v == &const0)
                assign_svalue_no_free(x, w);
            else
                assign_svalue_no_free(x, v + column);
        }

        if (num_arg > 2)
            free_svalue(arg+2);
        free_svalue(arg+1); /* the mapping */
        sp = arg;
    }
    else
    {
        /* --- Map through function call --- */

        callback_t cb;
        int error_index;

        error_index = setup_efun_callback(&cb, arg+1, num_arg-1);
        if (error_index >= 0)
        {
            vefun_bad_arg(error_index+2, arg);
            /* NOTREACHED */
            return arg;
        }
        /* The callback absorbed the extra args; stack now holds just
         * the struct and the callback. */
        inter_sp = sp = arg+1;
        put_callback(sp, &cb);
        num_arg = 2;

        res = struct_new(st->type);
        if (!res)
            errorf("(map_struct) Out of memory: struct[%"PRIdMPINT"] for result\n", cnt);
        push_struct(inter_sp, res); /* In case of errors */

        /* Loop through arr and res, mapping the values from arr */
        for (w = st->member, x = res->member; --cnt >= 0; w++, x++)
        {
            if (current_object->flags & O_DESTRUCTED)
                continue;

            if (destructed_object_ref(w))
                assign_svalue(w, &const0);

            if (!callback_object(&cb))
                errorf("object used by map_array destructed");

            push_svalue(w);

            v = apply_callback(&cb, 1);
            if (v)
            {
                /* Steal the callback's result into the new struct. */
                transfer_svalue_no_free(x, v);
                v->type = T_INVALID;
            }
        }

        free_callback(&cb);
    }

    /* The arguments have been removed already, now just replace
     * the struct on the stack with the result.
     */
    free_struct(st);
    arg->u.strct = res; /* Keep svalue type T_STRUCT */

    return arg;
} /* x_map_struct () */
/*
 * Parse the sound card description from the device tree: card name,
 * optional DAPM routing, and one DAI link per child node (each child
 * must have "cpu" and "codec" subnodes and a "link-name" property).
 *
 * Returns the allocated private data on success or an ERR_PTR().
 *
 * NOTE(review): this chunk appears truncated — the closing
 * "return data;" of the function is not visible here.
 * NOTE(review): the error returns inside the loop leave the np/cpu/codec
 * of_node references unreleased (no of_node_put on those paths) — TODO
 * confirm against the upstream driver.
 */
static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
{
	struct device *dev = card->dev;
	struct snd_soc_dai_link *link;
	struct device_node *np, *codec, *cpu, *node = dev->of_node;
	struct apq8016_sbc_data *data;
	int ret, num_links;

	ret = snd_soc_of_parse_card_name(card, "qcom,model");
	if (ret) {
		dev_err(dev, "Error parsing card name: %d\n", ret);
		return ERR_PTR(ret);
	}

	/* DAPM routes */
	if (of_property_read_bool(node, "qcom,audio-routing")) {
		ret = snd_soc_of_parse_audio_routing(card, "qcom,audio-routing");
		if (ret)
			return ERR_PTR(ret);
	}

	/* Populate links */
	num_links = of_get_child_count(node);

	/* Allocate the private data and the DAI link array */
	data = devm_kzalloc(dev, struct_size(data, dai_link, num_links),
			    GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	card->dai_link = &data->dai_link[0];
	card->num_links = num_links;

	link = data->dai_link;

	for_each_child_of_node(node, np) {
		cpu = of_get_child_by_name(np, "cpu");
		codec = of_get_child_by_name(np, "codec");

		if (!cpu || !codec) {
			dev_err(dev, "Can't find cpu/codec DT node\n");
			return ERR_PTR(-EINVAL);
		}

		link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
		if (!link->cpu_of_node) {
			dev_err(card->dev, "error getting cpu phandle\n");
			return ERR_PTR(-EINVAL);
		}

		ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
		if (ret) {
			dev_err(card->dev, "error getting cpu dai name\n");
			return ERR_PTR(ret);
		}

		ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
		if (ret < 0) {
			dev_err(card->dev, "error getting codec dai name\n");
			return ERR_PTR(ret);
		}

		/* CPU node doubles as the platform (DMA) node. */
		link->platform_of_node = link->cpu_of_node;
		ret = of_property_read_string(np, "link-name", &link->name);
		if (ret) {
			dev_err(card->dev, "error getting codec dai_link name\n");
			return ERR_PTR(ret);
		}

		link->stream_name = link->name;
		link->init = apq8016_sbc_dai_init;
		link++;
	}
/*-------------------------------------------------------------------------*/
static size_t
svalue_size (svalue_t *v, mp_int * pTotal)

/* Compute the memory usage of *<v> (modified to reflect data sharing),
 * calling svalue_size() recursively if necessary, and return it.
 * The size of *v itself is not included.
 *
 * *<pTotal> is set to the gross (unshared) size of the data; the
 * return value is that size divided by the refcount, i.e. this
 * svalue's proportional share of the shared data.  Already-visited
 * shared data (registered in <ptable>) counts as 0.
 */

{
    mp_int i, composite, total, overhead;

    assert_stack_gap();

    *pTotal = 0;
    total = overhead = composite = 0;

    switch(v->type)
    {
    case T_OBJECT:
    case T_NUMBER:
    case T_FLOAT:
        /* Stored inline in the svalue - nothing extra to count. */
        return 0;

    case T_STRING:
    case T_SYMBOL:
        // If ref==0 the string is probably shared a lot, but we can't estimate
        // the correct number, so we return 0 as memory consumption for the
        // string.
        if (v->u.str->info.ref)
        {
            *pTotal = mstr_mem_size(v->u.str);
            return *pTotal / v->u.str->info.ref;
        }
        else
            return 0;

    case T_MAPPING:
    {
        struct svalue_size_locals locals;
        /* Count each shared mapping only once. */
        if (NULL == register_pointer(ptable, v->u.map) )
            return 0;
        if (v->u.map->ref)
        {
            overhead = (mp_uint)mapping_overhead(v->u.map);
            locals.total = 0;
            locals.composite = 0;
            locals.num_values = v->u.map->num_values;
            /* Sum up all keys and values via the filter callback. */
            walk_mapping(v->u.map, svalue_size_map_filter, &locals);
            *pTotal = locals.total + overhead;
            return (overhead + locals.composite) / v->u.map->ref;
        }
        else
            return 0;
    }

    case T_POINTER:
    case T_QUOTED_ARRAY:
    {
        if (v->u.vec == &null_vector)
            return 0;
        /* Count each shared vector only once. */
        if (NULL == register_pointer(ptable, v->u.vec) )
            return 0;
        if (v->u.vec->ref)
        {
            overhead = sizeof *v->u.vec - sizeof v->u.vec->item
                     + sizeof(svalue_t) * v->u.vec->size + sizeof(char *);
            for (i=0; i < (mp_int)VEC_SIZE(v->u.vec); i++)
            {
                composite += svalue_size(&v->u.vec->item[i], &total);
                *pTotal += total;
            }
            *pTotal += overhead;
            return (overhead + composite) / v->u.vec->ref;
        }
        else
            return 0;
    }

    case T_STRUCT:
    {
        struct_t *st = v->u.strct;
        /* Count each shared struct only once. */
        if (NULL == register_pointer(ptable, st) )
            return 0;
        if (st->ref)
        {
            overhead = sizeof *st - sizeof st->member
                     + sizeof(svalue_t) * struct_size(st);
            for (i=0; i < (mp_int)struct_size(st); i++)
            {
                composite += svalue_size(&st->member[i], &total);
                *pTotal += total;
            }
            *pTotal += overhead;
            return (overhead + composite) / st->ref;
        }
        else
            return 0;
    }

    case T_CLOSURE:
    {
        int num_values;
        svalue_t *svp;
        lambda_t *l;

        if (!CLOSURE_MALLOCED(v->x.closure_type)) return 0;
        if (!CLOSURE_REFERENCES_CODE(v->x.closure_type))
        {
            /* Lfun or identifier closure: just the lambda structure. */
            if (v->x.closure_type == CLOSURE_LFUN)
                composite = SIZEOF_LAMBDA(v->u.lambda->function.lfun.context_size);
            else /* CLOSURE_IDENTIFIER || CLOSURE_PRELIMINARY */
                composite = sizeof *v->u.lambda;
            composite += sizeof(char *);
            *pTotal = composite;
            return composite / v->u.lambda->ref;
        }

        /* CLOSURE_LAMBDA */
        composite = overhead = 0;
        l = v->u.lambda;
        if (v->x.closure_type == CLOSURE_BOUND_LAMBDA)
        {
            /* Count the binding wrapper, then descend to the
             * underlying lambda. */
            total = sizeof *l - sizeof l->function + sizeof l->function.lambda;
            *pTotal += total;
            composite += total / l->ref;
            l = l->function.lambda;
        }
        num_values = l->function.code.num_values;
        /* The constant values precede the lambda structure in memory. */
        svp = (svalue_t *)l - num_values;
        if (NULL == register_pointer(ptable, svp)) return 0;
        overhead = sizeof(svalue_t) * num_values + sizeof (char *);
        {
            /* Scan the bytecode to its terminating return to measure
             * the program size. */
            bytecode_p p = l->function.code.program;
            do {
                switch(GET_CODE(p++)) {
                case F_RETURN:
                case F_RETURN0:
                    break;
                default:
                    continue;
                }
                break;
            } while (1);
            /* Round the code size up to bytecode pointer alignment. */
            overhead +=   (p - (bytecode_p)l + (sizeof(bytecode_p) - 1))
                        & ~(sizeof(bytecode_p) - 1);
        }
        while (--num_values >= 0)
        {
            composite += svalue_size(svp++, &total);
            *pTotal += total;
        }
        *pTotal += overhead;
        if (l->ref)
            return (overhead + composite) / l->ref;
        else
            return 0;
    }

    default:
        fatal("Illegal type: %d\n", v->type);
    }

    /*NOTREACHED*/
    return 0;
}
void * dispatch_read_policy(void * policy_node) { void * match_policy_node; void * route_policy_node; void * match_rule_node; void * route_rule_node; void * temp_node; char buffer[1024]; char * temp_str; int ret; MATCH_RULE * temp_match_rule; ROUTE_RULE * temp_route_rule; DISPATCH_POLICY * policy=dispatch_policy_create(); if(policy==NULL) return -EINVAL; temp_node=json_find_elem("sender",policy_node); if(temp_node!=NULL) { ret=json_node_getvalue(temp_node,buffer,1024); if(ret<0) return ret; if(ret>DIGEST_SIZE) return -EINVAL; Memcpy(policy->sender,buffer,ret); } // get the match policy json node match_policy_node=json_find_elem("MATCH_RULES",policy_node); if(match_policy_node==NULL) return -EINVAL; // get the match policy json node route_policy_node=json_find_elem("ROUTE_RULES",policy_node); if(route_policy_node==NULL) return -EINVAL; // read the match rule match_rule_node=json_get_first_child(match_policy_node); while(match_rule_node!=NULL) { void * record_template; ret=Galloc0(&temp_match_rule,sizeof(MATCH_RULE)); if(ret<0) return NULL; ret=json_2_struct(match_rule_node,temp_match_rule,match_rule_template); if(ret<0) return -EINVAL; temp_node=json_find_elem("value",match_rule_node); if(temp_node!=NULL) { void * record_desc; void * value_struct; record_template=memdb_get_template(temp_match_rule->type,temp_match_rule->subtype); if(record_template==NULL) return NULL; record_template=clone_struct_template(record_template); if(record_template==NULL) return NULL; temp_match_rule->match_template=record_template; ret=json_marked_struct(temp_node,record_template,match_flag); if(ret<0) return NULL; ret=Galloc0(&value_struct,struct_size(record_template)); if(ret<0) { free_struct_template(record_template); return NULL; } ret=json_2_part_struct(temp_node,value_struct,temp_match_rule->match_template,match_flag); if(ret<0) return NULL; temp_match_rule->value=value_struct; ret=dispatch_policy_addmatchrule(policy,temp_match_rule); if(ret<0) return NULL; } // 
__route_policy_add(policy->match_list,temp_match_rule); match_rule_node=json_get_next_child(match_policy_node); } // read the route policy // first,read the main route policy route_rule_node=json_get_first_child(route_policy_node); while(route_rule_node!=NULL) { ret=Galloc0(&temp_route_rule,sizeof(ROUTE_RULE)); if(ret<0) return NULL; temp_route_rule=malloc(sizeof(ROUTE_RULE)); ret=json_2_struct(route_rule_node,temp_route_rule,route_rule_template); if(ret<0) return -EINVAL; ret=dispatch_policy_addrouterule(policy,temp_route_rule); if(ret<0) return NULL; route_rule_node=json_get_next_child(route_policy_node); } return policy; }
/*
 * Refresh the cached attributes for one port of <device>: the P_Key
 * table, LMC, port state and subnet prefix.
 *
 * A fresh P_Key table is built in a private allocation and swapped in
 * under device->cache.lock, so concurrent readers always observe a
 * consistent table.  On any query or allocation failure the existing
 * cache is left untouched.
 */
static void ib_cache_update(struct ib_device *device,
			    u8                port,
			    bool              enforce_security)
{
	struct ib_port_attr *attr;
	struct ib_pkey_cache *new_pkeys = NULL;
	struct ib_pkey_cache *old_pkeys;
	int port_idx;
	int idx;
	int err;

	if (!rdma_is_port_valid(device, port))
		return;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return;

	err = ib_query_port(device, port, attr);
	if (err) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", err);
		goto out_free;
	}

	/* Non-RoCE ports also refresh their GID cache here. */
	if (!rdma_protocol_roce(device, port)) {
		err = config_non_roce_gid_cache(device, port,
						attr->gid_tbl_len);
		if (err)
			goto out_free;
	}

	new_pkeys = kmalloc(struct_size(new_pkeys, table, attr->pkey_tbl_len),
			    GFP_KERNEL);
	if (!new_pkeys)
		goto out_free;

	new_pkeys->table_len = attr->pkey_tbl_len;

	for (idx = 0; idx < new_pkeys->table_len; ++idx) {
		err = ib_query_pkey(device, port, idx, &new_pkeys->table[idx]);
		if (err) {
			dev_warn(&device->dev,
				 "ib_query_pkey failed (%d) for index %d\n",
				 err, idx);
			goto out_free;
		}
	}

	port_idx = port - rdma_start_port(device);

	/* Publish the new table and attributes to readers atomically. */
	write_lock_irq(&device->cache.lock);
	old_pkeys = device->cache.ports[port_idx].pkey;
	device->cache.ports[port_idx].pkey = new_pkeys;
	device->cache.ports[port_idx].lmc = attr->lmc;
	device->cache.ports[port_idx].port_state = attr->state;
	device->cache.ports[port_idx].subnet_prefix = attr->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device, port, attr->subnet_prefix);

	kfree(old_pkeys);
	kfree(attr);
	return;

out_free:
	kfree(new_pkeys);
	kfree(attr);
}
int json_2_message(char * json_str,void ** message) { void * root_node; void * head_node; void * tag_node; void * record_node; void * curr_record; void * expand_node; void * curr_expand; void * record_value; void * expand_value; struct message_box * msg_box; MSG_HEAD * msg_head; MSG_EXPAND * msg_expand; int record_no; int expand_no; void * precord; void * pexpand; int i; int ret; char buffer[DIGEST_SIZE]; int offset; int type; int subtype; offset=json_solve_str(&root_node,json_str); if(offset<0) return offset; // get json node's head head_node=json_find_elem("HEAD",root_node); if(head_node==NULL) head_node=json_find_elem("head",root_node); if(head_node==NULL) return -EINVAL; tag_node=json_find_elem("tag",head_node); if(tag_node!=NULL) // default tag value is "MESG" { ret=json_node_getvalue(tag_node,buffer,10); if(ret!=4) return -EINVAL; if(Memcmp(buffer,"MESG",ret)!=0) return -EINVAL; } msg_box=message_init(); msg_head=message_get_head(msg_box); json_2_struct(head_node,msg_head,msg_box->head_template); // get json node's record // init message box ret=message_record_init(msg_box); if(ret<0) return ret; record_node=json_find_elem("RECORD",root_node); if(record_node==NULL) record_node=json_find_elem("record",root_node); if(record_node==NULL) return -EINVAL; curr_record=json_get_first_child(record_node); if(curr_record==NULL) return -EINVAL; char node_name[DIGEST_SIZE*2]; ret=json_node_getname(curr_record,node_name); if(!strcmp(node_name,"BIN_FORMAT")) { BYTE * radix64_string; radix64_string=malloc(4096); if(radix64_string==NULL) return -ENOMEM; ret=json_node_getvalue(curr_record,radix64_string,4096); if(ret<0) return -EINVAL; int radix64_len=strnlen(radix64_string,4096); msg_head->record_size=radix_to_bin_len(radix64_len); msg_box->blob=malloc(msg_head->record_size); if(msg_box->blob==NULL) return -ENOMEM; ret=radix64_to_bin(msg_box->blob,radix64_len,radix64_string); } else { for(i=0;i<msg_head->record_num;i++) { if(curr_record==NULL) return -EINVAL; 
ret=Galloc0(&precord,struct_size(msg_box->record_template)); if(ret<=0) return -EINVAL; json_2_struct(curr_record,precord,msg_box->record_template); message_add_record(msg_box,precord); curr_record=json_get_next_child(record_node); } } // get json_node's expand expand_no=msg_head->expand_num; msg_head->expand_num=0; expand_node=json_find_elem("EXPAND",root_node); if(expand_node==NULL) expand_node=json_find_elem("expand",root_node); if(expand_node!=NULL) { char buf[20]; void * curr_expand_template; curr_expand=json_get_first_child(expand_node); for(i=0;i<expand_no;i++) { if(curr_expand==NULL) return -EINVAL; ret=Galloc0(&msg_expand,struct_size(message_get_expand_template())); if(ret<0) return -ENOMEM; ret=json_2_struct(curr_expand,&msg_expand,message_get_expand_template()); if(ret<0) return ret; void * tempnode; if((tempnode=json_find_elem(curr_expand,"BIN_DATA"))==NULL) { curr_expand_template=memdb_get_template(msg_expand->type,msg_expand->subtype); if(curr_expand_template==NULL) return -EINVAL; struct_free(msg_expand,message_get_expand_template()); ret=Galloc(&msg_expand,struct_size(curr_expand_template)); if(ret<0) return -ENOMEM; ret=json_2_struct(curr_expand,msg_expand,curr_expand_template); if(ret<0) return ret; } message_add_expand(msg_box,msg_expand); curr_expand=json_get_next_child(expand_node); } } *message=msg_box; msg_box->box_state = MSG_BOX_RECOVER; return offset; }