/** * ovs_vport_find_upcall_portid - find the upcall portid to send upcall. * * @vport: vport from which the missed packet is received. * @skb: skb that the missed packet was received. * * Uses the skb_get_hash() to select the upcall portid to send the * upcall. * * Returns the portid of the target socket. Must be called with rcu_read_lock. */ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb) { struct vport_portids *ids; u32 ids_index; u32 hash; ids = rcu_dereference(vport->upcall_portids); if (ids->n_ids == 1 && ids->ids[0] == 0) return 0; hash = skb_get_hash(skb); ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids); return ids->ids[ids_index]; }
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) { u64 ticks; len += q->packet_overhead; if (q->cell_size) { u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); if (len > cells * q->cell_size) /* extra cell needed for remainder */ cells++; len = cells * (q->cell_size + q->cell_overhead); } ticks = (u64)len * NSEC_PER_SEC; do_div(ticks, q->rate); return PSCHED_NS2TICKS(ticks); }
/* Map a flex_array element index to the number of the part (page) that
 * holds it: element_nr / elems_per_part, via the cached reciprocal.
 */
static int fa_element_to_part_nr(struct flex_array *fa, unsigned int element_nr)
{
	return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @fentry: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. @skb is the data we are * filtering, @filter is the array of filter instructions. * Because all jumps are guaranteed to be before last instruction, * and last instruction guaranteed to be a RET, we dont need to check * flen. (We used to pass to this function the length of filter) */ unsigned int sk_run_filter(const struct sk_buff *skb, const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ u32 tmp; int k; /* * Process array of filter instructions. */ for (;; fentry++) { #if defined(CONFIG_X86_32) #define K (fentry->k) #else const u32 K = fentry->k; #endif switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: A += K; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: A -= K; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: A *= K; continue; case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; case BPF_S_ALU_DIV_K: A = reciprocal_divide(A, K); continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: A &= K; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: A |= K; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: A <<= K; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: A >>= K; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: fentry += K; continue; case BPF_S_JMP_JGT_K: fentry += (A > K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: fentry += (A >= K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: fentry += (A == K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: fentry += (A & K) ? 
fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: fentry += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: fentry += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: fentry += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: fentry += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: k = K; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } return 0; case BPF_S_LD_H_ABS: k = K; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } return 0; case BPF_S_LD_B_ABS: k = K; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } return 0; case BPF_S_LD_W_LEN: A = skb->len; continue; case BPF_S_LDX_W_LEN: X = skb->len; continue; case BPF_S_LD_W_IND: k = X + K; goto load_w; case BPF_S_LD_H_IND: k = X + K; goto load_h; case BPF_S_LD_B_IND: k = X + K; goto load_b; case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, K, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: A = K; continue; case BPF_S_LDX_IMM: X = K; continue; case BPF_S_LD_MEM: A = mem[K]; continue; case BPF_S_LDX_MEM: X = mem[K]; continue; case BPF_S_MISC_TAX: X = A; continue; case BPF_S_MISC_TXA: A = X; continue; case BPF_S_RET_K: return K; case BPF_S_RET_A: return A; case BPF_S_ST: mem[K] = A; continue; case BPF_S_STX: mem[K] = X; continue; case BPF_S_ANC_PROTOCOL: A = ntohs(skb->protocol); continue; case BPF_S_ANC_PKTTYPE: A = skb->pkt_type; continue; case BPF_S_ANC_IFINDEX: if (!skb->dev) return 0; A = skb->dev->ifindex; continue; case BPF_S_ANC_MARK: A = skb->mark; continue; case BPF_S_ANC_QUEUE: A = skb->queue_mapping; continue; case BPF_S_ANC_HATYPE: if (!skb->dev) return 0; A = skb->dev->type; continue; case BPF_S_ANC_RXHASH: A = skb->rxhash; continue; case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; case BPF_S_ANC_NLATTR: { struct 
nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case BPF_S_ANC_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } #ifdef CONFIG_SECCOMP_FILTER case BPF_S_ANC_SECCOMP_LD_W: A = seccomp_bpf_load(fentry->k); continue; #endif default: WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", fentry->code, fentry->jt, fentry->jf, fentry->k); return 0; } } return 0; }
/* Erase @erase->len bytes starting at @erase->addr, skipping blocks the
 * driver's bad-block map (nflash->map, one entry per erase block, non-zero
 * means bad) has flagged.  The requested address is remapped forward past
 * bad blocks within the enclosing MTD partition before erasing.
 * Returns 0 on success or a negative errno; also reports completion via
 * erase->state and erase->callback.
 */
static int nflash_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct nflash_mtd *nflash = (struct nflash_mtd *) mtd->priv;
	struct mtd_partition *part = NULL;
	int i, ret = 0;
	uint addr, len, blocksize;
	uint part_start_blk, part_end_blk;
	uint blknum, new_addr, erase_blknum;
	uint reciprocal_blocksize;

	addr = erase->addr;
	len = erase->len;
	blocksize = mtd->erasesize;
	/* Cached reciprocal so the repeated divides below avoid hardware
	 * division.  NOTE(review): reciprocal_divide() is only guaranteed
	 * exact for the argument ranges reciprocal_value() was designed
	 * for — confirm blocksize/address ranges stay within them.
	 */
	reciprocal_blocksize = reciprocal_value(blocksize);

	/* Check address range */
	if (!len)
		return 0;
	if ((addr + len) > mtd->size)
		return -EINVAL;
	/* Erase must start on an erase-block boundary. */
	if (addr & (blocksize - 1))
		return -EINVAL;

	/* Locate the part: the request must lie wholly inside one
	 * partition of the nflash_parts table.
	 */
	for (i = 0; nflash_parts[i].name; i++) {
		if (addr >= nflash_parts[i].offset &&
		    ((addr + len) <= (nflash_parts[i].offset + nflash_parts[i].size))) {
			part = &nflash_parts[i];
			break;
		}
	}
	if (!part)
		return -EINVAL;

	NFLASH_LOCK(nflash);

	/* Find the effective start block address to erase */
	part_start_blk = reciprocal_divide(part->offset & ~(blocksize-1),
		reciprocal_blocksize);
	/* End block rounds the partition end up to a block boundary. */
	part_end_blk = reciprocal_divide(((part->offset + part->size) + (blocksize-1)),
		reciprocal_blocksize);
	new_addr = part_start_blk * blocksize;
	/* The block number to be skipped relative to the start address of
	 * the MTD partition
	 */
	blknum = reciprocal_divide(addr - new_addr, reciprocal_blocksize);
	/* Walk forward from the partition start, advancing new_addr one
	 * block per iteration; only good blocks consume the requested
	 * offset (blknum), so bad blocks are skipped over.
	 */
	for (i = part_start_blk; (i < part_end_blk) && (blknum > 0); i++) {
		if (nflash->map[i] != 0) {
			new_addr += blocksize;
		} else {
			new_addr += blocksize;
			blknum--;
		}
	}
	/* Erase the blocks from the new block address; count rounds len up
	 * to whole blocks.
	 */
	erase_blknum = reciprocal_divide(len + (blocksize-1), reciprocal_blocksize);
	if ((new_addr + (erase_blknum * blocksize)) > (part->offset + part->size)) {
		ret = -EINVAL;
		goto done;
	}
	/* NOTE(review): this loop only terminates when erase_blknum good
	 * blocks have been erased; a run of bad blocks advances i without
	 * decrementing the count, so i can walk past the partition end if
	 * too many blocks are bad — confirm that is acceptable here.
	 */
	for (i = new_addr; erase_blknum; i += blocksize) {
		/* Skip bad block erase */
		uint j = reciprocal_divide(i, reciprocal_blocksize);
		if (nflash->map[j] != 0) {
			continue;
		}
		if ((ret = hndnand_erase(nflash->nfl, i)) < 0) {
			/* Erase failed: mark the block bad and retry on the
			 * next block (erase_blknum is not decremented).
			 */
			hndnand_mark_badb(nflash->nfl, i);
			nflash->map[i / blocksize] = 1;
		} else {
			erase_blknum--;
		}
	}

done:
	/* Set erase status */
	if (ret)
		erase->state = MTD_ERASE_FAILED;
	else
		erase->state = MTD_ERASE_DONE;
	NFLASH_UNLOCK(nflash);

	/* Call erase callback */
	if (erase->callback)
		erase->callback(erase);
	return ret;
}
/* Write @len bytes from @buf to flash offset @to, one erase block at a
 * time: read-modify-write via a holding buffer for partial blocks, erase,
 * then program.  Blocks flagged bad in nflash->map are skipped, and blocks
 * that fail erase/program are marked bad on the fly and the same data is
 * retried on the next good block (skip_bytes tracks the accumulated
 * physical offset shift).  *retlen reports bytes actually written.
 * Returns 0 on success or a negative errno.
 */
static int nflash_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct nflash_mtd *nflash = (struct nflash_mtd *) mtd->priv;
	int bytes, ret = 0;
	struct mtd_partition *part = NULL;
	u_char *block = NULL;
	u_char *ptr = (u_char *)buf;
	uint offset, blocksize, mask, blk_offset, off;
	uint skip_bytes = 0, good_bytes = 0;
	int blk_idx, i;
	int read_len, write_len, copy_len = 0;
	loff_t from = to;
	u_char *write_ptr;
	/* docopy == 0 means the previous block went bad mid-write and the
	 * holding buffer still contains the data to retry; skip re-filling it.
	 */
	int docopy = 1;
	uint r_blocksize, part_blk_start, part_blk_end;

	/* Locate the part: first partition containing @to (the last table
	 * entry also matches addresses beyond its recorded size).
	 */
	for (i = 0; nflash_parts[i].name; i++) {
		if (to >= nflash_parts[i].offset &&
		    ((nflash_parts[i+1].name == NULL) ||
		     (to < (nflash_parts[i].offset + nflash_parts[i].size)))) {
			part = &nflash_parts[i];
			break;
		}
	}
	if (!part)
		return -EINVAL;
	/* Check address range */
	if (!len)
		return 0;
	if ((to + len) > (part->offset + part->size))
		return -EINVAL;

	offset = to;
	blocksize = mtd->erasesize;
	/* Cached reciprocal for the block-index divides below. */
	r_blocksize = reciprocal_value(blocksize);

	/* Holding buffer for one erase block (read-modify-write). */
	if (!(block = kmalloc(blocksize, GFP_KERNEL)))
		return -ENOMEM;

	NFLASH_LOCK(nflash);

	mask = blocksize - 1;
	/* Check and skip bad blocks: walk the partition's blocks, counting
	 * skipped (bad) bytes, until the good-block byte count reaches the
	 * logical start block of this write.
	 */
	blk_offset = offset & ~mask;
	good_bytes = part->offset & ~mask;
	part_blk_start = reciprocal_divide(good_bytes, r_blocksize);
	part_blk_end = reciprocal_divide(part->offset + part->size, r_blocksize);
	for (blk_idx = part_blk_start; blk_idx < part_blk_end; blk_idx++) {
		if (nflash->map[blk_idx] != 0) {
			skip_bytes += blocksize;
		} else {
			if (good_bytes == blk_offset)
				break;
			good_bytes += blocksize;
		}
	}
	/* Ran off the end of the partition without finding the block. */
	if (blk_idx == part_blk_end) {
		ret = -EINVAL;
		goto done;
	}
	blk_offset = blocksize * blk_idx;
	/* Backup and erase one block at a time */
	*retlen = 0;
	while (len) {
		if (docopy) {
			/* Align offset */
			from = offset & ~mask;
			/* Copy existing data into holding block if necessary
			 * (partial-block write at the head or tail).
			 */
			if (((offset & (blocksize-1)) != 0) || (len < blocksize)) {
				ret = _nflash_mtd_read(mtd, part, from, blocksize,
					&read_len, block);
				if (ret)
					goto done;
				if (read_len != blocksize) {
					ret = -EINVAL;
					goto done;
				}
			}
			/* Copy input data into holding block */
			copy_len = min(len, blocksize - (offset & mask));
			memcpy(block + (offset & mask), ptr, copy_len);
		}
		/* Physical address = logical block address + bad-block shift. */
		off = (uint) from + skip_bytes;
		/* Erase block */
		if ((ret = hndnand_erase(nflash->nfl, off)) < 0) {
			/* Erase failed: mark bad, shift subsequent writes one
			 * block forward, and retry the same holding buffer.
			 */
			hndnand_mark_badb(nflash->nfl, off);
			nflash->map[blk_idx] = 1;
			skip_bytes += blocksize;
			docopy = 0;
		} else {
			/* Write holding block */
			write_ptr = block;
			write_len = blocksize;
			while (write_len) {
				if ((bytes = hndnand_write(nflash->nfl,
					from + skip_bytes, (uint) write_len,
					(uchar *) write_ptr)) < 0) {
					/* Program failed mid-block: mark bad
					 * and retry the whole block.
					 * NOTE(review): 'from' has already
					 * advanced by earlier partial writes
					 * here — confirm the retry intends to
					 * resume at this shifted address.
					 */
					hndnand_mark_badb(nflash->nfl, off);
					nflash->map[blk_idx] = 1;
					skip_bytes += blocksize;
					docopy = 0;
					break;
				}
				from += bytes;
				write_len -= bytes;
				write_ptr += bytes;
				docopy = 1;
			}
			/* Block fully written: consume the copied input. */
			if (docopy) {
				offset += copy_len;
				len -= copy_len;
				ptr += copy_len;
				*retlen += copy_len;
			}
		}
		/* Check and skip bad blocks */
		if (len) {
			blk_offset += blocksize;
			blk_idx++;
			while ((nflash->map[blk_idx] != 0) &&
			       (blk_offset < (part->offset+part->size))) {
				skip_bytes += blocksize;
				blk_offset += blocksize;
				blk_idx++;
			}
			if (blk_offset >= (part->offset+part->size)) {
				ret = -EINVAL;
				goto done;
			}
		}
	}
done:
	NFLASH_UNLOCK(nflash);
	/* kfree(NULL) is a no-op, so the guard is redundant but harmless. */
	if (block)
		kfree(block);
	return ret;
}