/* Iterate over the FW section of one layer, looking for entry ssid == *sid.
 *
 * On a hit: returns the item, advances *sid to the next entry's val (or -1
 * if at layer end) and, if fw_iter is given, stores the layer-relative index.
 * On overshoot (first entry > ssid): returns NULL with *sid set to that
 * entry's val so the caller can resynchronize.
 * On exhaustion: returns NULL with *sid = -1 (u32 sentinel).
 *
 * fw_iter may be NULL, in which case iteration starts at the layer head.
 */
const void *fw_fetch(u32 layer, u32 *sid, u32 *fw_iter)
{
        const toc_e *e = dex_section[FW].toc;
        u32 ssid = *sid;
        u32 base = dex_header.fw_layers[layer].idx;
        u32 max = dex_header.fw_layers[layer + 1].idx;
        u32 i = base + (fw_iter ? *fw_iter : 0);

        for (; i < max; i++){
                if (e[i].val == ssid){
                        if (fw_iter)
                                *fw_iter = i - base;
                        if (i + 1 < max)
                                *sid = e[i + 1].val;
                        else
                                *sid = -1;
                        return fetch_item(FW, i);
                }else if (e[i].val > ssid){
                        /* toc is sorted; ssid is not in this layer */
                        if (fw_iter)
                                *fw_iter = i - base;
                        *sid = e[i].val;
                        return NULL;
                }
        }
        *sid = -1;
        /* BUG FIX: fw_iter was dereferenced unconditionally here although it
         * is optional (NULL-checked) on every other path above. */
        if (fw_iter)
                *fw_iter = i - base;
        return NULL;
}
/* Binary-search one FW layer for the entry whose val == sid.
 * Returns the fetched item on success, NULL if the layer is empty or the
 * sid is not present. */
const void *fw_find(u32 layer, u32 sid)
{
        int first = dex_header.fw_layers[layer].idx;
        int count = dex_header.fw_layers[layer + 1].idx - first;

        /* layer starts past the section end -> nothing to search */
        if (first >= dex_section[FW].nof_entries)
                return NULL;

        const toc_e *base = &dex_section[FW].toc[first];
        const toc_e *hit = toc_search(sid, base, count);
        if (!hit)
                return NULL;
        return fetch_item(FW, (hit - base) + first);
}
static int encode_layer(const Pvoid_t ix) { uint sid, no = 0; growing_glist *gg; GGLIST_INIT(gg, 100); for (sid = 0; sid < dex_section[FW].nof_entries; sid++){ const void *d = fetch_item(FW, sid); u32 offs = 0; u32 xid, tst; gg->lst.len = 0; DEX_FOREACH_VAL(d, &offs, xid) J1T(tst, ix, xid); if (tst){ GGLIST_APPEND(gg, xid); } DEX_FOREACH_VAL_END if (gg->lst.len){ toc_e toc; toc.val = sid; //dex_section[FW].toc[i].val; toc.offs = ftello64(data_f); fwrite(&toc, sizeof(toc_e), 1, toc_f); int i; for (i = 0; i < gg->lst.len; i++){ if (!gg->lst.lst[i]) dub_die("FOUND"); } encode_segment(&gg->lst); ++no; } } free(gg); dub_msg("%u / %u sids matched", no, dex_section[FW].nof_entries); return no; }
/* Look up xid in the INVA section.
 *
 * Returns the toc index of the entry, or -1 if the xid is unknown.
 * If data is non-NULL it receives the fetched inva_e (or NULL on miss). */
int inva_find(u32 xid, const inva_e **data)
{
        const toc_e *e = toc_search(xid, dex_section[INVA].toc,
                                    dex_section[INVA].nof_entries);
        if (!e){
                if (data)
                        *data = NULL;
                return -1;
        }
        int idx = e - dex_section[INVA].toc;
        if (data)
                *data = (const inva_e*)fetch_item(INVA, idx);
        return idx;
}
/* Decode one position segment into a dense document buffer.
 *
 * The returned buffer maps position (1-based in the encoding, 0-based in
 * the buffer) to xid; unset positions stay 0. Returns NULL for an empty
 * segment. The buffer is a shared static scratch area, valid until the
 * next call — callers must not free it or hold it across calls. */
const u32 *decode_segment(u32 sid, uint segment_size)
{
        static u32 *doc;
        static uint doc_size;

        /* BUG FIX: the buffer was sized by the FIRST call's segment_size
         * only; a later, larger segment overflowed it. Grow on demand. */
        if (!doc || segment_size > doc_size){
                free(doc);
                doc = xmalloc(segment_size * sizeof(u32));
                doc_size = segment_size;
        }
        memset(doc, 0, segment_size * sizeof(u32));

        const void *xidlst = fetch_item(POS, sid);
        /* position lists start at toc-recorded byte offset within the item
         * (was arithmetic on void*, a GNU extension) */
        const void *poslst = (const char*)xidlst + dex_section[POS].toc[sid].val;
        u32 xid, poffs;
        u32 offs = xid = poffs = 0;
        /* token count is stored +1 so that 0 can be encoded */
        u32 tlen = elias_gamma_read(xidlst, &offs) - 1;
        if (!tlen)
                return NULL;
        /* Rice parameters for the xid-delta and offset-delta streams */
        u32 rice_f1 = rice_read(xidlst, &offs, 2);
        u32 rice_f2 = rice_read(xidlst, &offs, 2);
        while (tlen--){
                xid += rice_read(xidlst, &offs, rice_f1);
                poffs += rice_read(xidlst, &offs, rice_f2);
                u32 pos;
                u32 tmp = poffs;
                DEX_FOREACH_VAL(poslst, &tmp, pos)
                        /* NOTE(review): pos is 1-based; no bounds check here —
                         * assumes the encoder never emits pos > segment_size */
                        doc[pos - 1] = xid;
                DEX_FOREACH_VAL_END
        }
        return doc;
}
static Pvoid_t ixemes_freq_range(int p, int min_f, int max_f) { Pvoid_t ix = NULL; Word_t xid; int i, tst; for (i = 0; i < dex_section[INVA].nof_entries; i++){ xid = dex_section[INVA].toc[i].val; if (xid >= XID_META_FREQUENT_F && xid <= XID_META_FREQUENT_L) continue; if (xid <= XID_TOKEN_FREQUENT_L && xid >= XID_TOKEN_FREQUENT_F) continue; const inva_e *e = (const inva_e*)fetch_item(INVA, i); if (e->len > min_f && e->len <= max_f){ J1S(tst, ix, xid); } } xid = 0; J1F(tst, ix, xid); fw_layers[p].min_xid = xid; xid = -1; J1L(tst, ix, xid); fw_layers[p].max_xid = xid; J1C(tst, ix, 0, -1); dub_msg("Layer %u: number of xids %u min %u max %u", p, tst, fw_layers[p].min_xid, fw_layers[p].max_xid); return ix; }
/*
 * Parse a HID report descriptor into a freshly allocated hid_device.
 *
 * start/size describe the raw descriptor; a private copy is kept in
 * device->rdesc. Returns the device on success, NULL on any allocation or
 * parse failure (everything allocated here is released on failure).
 */
struct hid_device *hid_parse_report(__u8 *start, unsigned size)
{
	struct hid_device *device;
	struct hid_parser *parser;
	struct hid_item item;
	__u8 *end;
	__u8 *pos;
	unsigned i;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	if (!(device = kzalloc(sizeof(struct hid_device), GFP_KERNEL)))
		return NULL;

	if (!(device->collection = kzalloc(sizeof(struct hid_collection) *
					   HID_DEFAULT_NUM_COLLECTIONS,
					   GFP_KERNEL)))
		goto err_free_device;
	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;

	for (i = 0; i < HID_REPORT_TYPES; i++)
		INIT_LIST_HEAD(&device->report_enum[i].report_list);

	if (!(device->rdesc = kmalloc(size, GFP_KERNEL)))
		goto err_free_collection;
	memcpy(device->rdesc, start, size);
	device->rsize = size;

	/* parser state is large; vmalloc avoids high-order page pressure */
	if (!(parser = vmalloc(sizeof(struct hid_parser))))
		goto err_free_rdesc;
	memset(parser, 0, sizeof(struct hid_parser));
	parser->device = device;

	end = start + size;
	pos = start;
	while ((start = fetch_item(start, end, &item)) != NULL) {
		if (item.format != HID_ITEM_FORMAT_SHORT) {
			dbg_hid("unexpected long global item\n");
			goto err_free_all;
		}
		/* item.type is 2 bits wide -> always indexes the table */
		if (dispatch_type[item.type](parser, &item)) {
			dbg_hid("item %u %u %u %u parsing failed\n",
				item.format, (unsigned)item.size,
				(unsigned)item.type, (unsigned)item.tag);
			goto err_free_all;
		}
		if (start == end) {
			if (parser->collection_stack_ptr) {
				dbg_hid("unbalanced collection at end of report description\n");
				goto err_free_all;
			}
			if (parser->local.delimiter_depth) {
				dbg_hid("unbalanced delimiter at end of report description\n");
				goto err_free_all;
			}
			vfree(parser);
			return device;
		}
		pos = start;
	}

	/* BUG FIX: the offset was computed as (end - start), but start is
	 * NULL when fetch_item fails; use the last good position instead. */
	dbg_hid("item fetching failed at offset %d\n", (int)(end - pos));

err_free_all:
	hid_free_device(device);
	vfree(parser);
	return NULL;

	/* pre-parser failures: unwind in reverse allocation order */
err_free_rdesc:
	kfree(device->rdesc);
err_free_collection:
	kfree(device->collection);
err_free_device:
	kfree(device);
	return NULL;
}
/* Look up document did in the INFO section.
 * Returns the doc_e on success, NULL if the did is not present.
 * (Was: duplicate const qualifier, and the toc_search result was used for
 * pointer arithmetic without a NULL check — undefined behavior on miss.) */
const doc_e *info_find(u32 did)
{
        const toc_e *e = toc_search(did, dex_section[INFO].toc, DEX_NOF_SEG);
        if (!e)
                return NULL;
        return (const doc_e*)fetch_item(INFO, e - dex_section[INFO].toc);
}