/* * si470x_get_all_registers - read entire registers */ static int si470x_get_all_registers(struct si470x_device *radio) { int retval; unsigned char regnr; radio->usb_buf[0] = ENTIRE_REPORT; retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE); if (retval >= 0) for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++) radio->registers[regnr] = get_unaligned_be16( &radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]); return (retval < 0) ? -EINVAL : 0; }
/** * ldm_validate_vmdb - Read the VMDB and validate it * @state: Partition check state including device holding the LDM Database * @base: Offset, into @bdev, of the database * @ldb: Cache of the database structures * * Find the vmdb of the LDM Database stored on @bdev and return the parsed * information in @ldb. * * Return: 'true' @ldb contains validated VBDB info * 'false' @ldb contents are undefined */ static bool ldm_validate_vmdb(struct parsed_partitions *state, unsigned long base, struct ldmdb *ldb) { Sector sect; u8 *data; bool result = false; struct vmdb *vm; struct tocblock *toc; BUG_ON (!state || !ldb); vm = &ldb->vm; toc = &ldb->toc; data = read_part_sector(state, base + OFF_VMDB, §); if (!data) { ldm_crit ("Disk read failed."); return false; } if (!ldm_parse_vmdb (data, vm)) goto out; /* Already logged */ /* Are there uncommitted transactions? */ if (get_unaligned_be16(data + 0x10) != 0x01) { ldm_crit ("Database is not in a consistent state. Aborting."); goto out; } if (vm->vblk_offset != 512) ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset); /* * The last_vblkd_seq can be before the end of the vmdb, just make sure * it is not out of bounds. */ if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) { ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. " "Database is corrupt. Aborting."); goto out; } result = true; out: put_dev_sector (sect); return result; }
/*
 * em_cmp_match() - extended match: compare a fixed-size value at a packet
 * offset against cmp->val.
 *
 * Loads an 8/16/32-bit value at cmp->off within the layer selected by
 * cmp->layer, optionally masks it, then applies the EQ/LT/GT operand.
 * Returns 1 on match, 0 on mismatch or when the offset is not valid for
 * this skb.
 */
static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
			struct tcf_pkt_info *info)
{
	struct tcf_em_cmp *cmp = (struct tcf_em_cmp *) em->data;
	unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
	u32 val = 0;

	if (!tcf_valid_offset(skb, ptr, cmp->align))
		return 0;

	switch (cmp->align) {
	case TCF_EM_ALIGN_U8:
		val = *ptr;
		break;

	case TCF_EM_ALIGN_U16:
		val = get_unaligned_be16(ptr);
		/* NOTE(review): get_unaligned_be16() already returns host
		 * order; the extra be16_to_cpu() swaps again on little-endian.
		 * Looks like a double conversion -- confirm the intended
		 * semantics of the transformation flag against upstream. */
		if (cmp_needs_transformation(cmp))
			val = be16_to_cpu(val);
		break;

	case TCF_EM_ALIGN_U32:
		val = get_unaligned_be32(ptr);
		if (cmp_needs_transformation(cmp))
			val = be32_to_cpu(val);
		break;

	default:
		return 0;
	}

	if (cmp->mask)
		val &= cmp->mask;

	switch (cmp->opnd) {
	case TCF_EM_OPND_EQ:
		return val == cmp->val;
	case TCF_EM_OPND_LT:
		return val < cmp->val;
	case TCF_EM_OPND_GT:
		return val > cmp->val;
	}

	return 0;
}
/*
 * Stuff received packets to associated sockets.
 * On error, returns non-zero and releases the skb.
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pkttype,
			struct net_device *orig_dev)
{
	struct phonethdr *ph;
	struct sock *sk;
	struct sockaddr_pn sa;
	u16 len;

	/* Phonet is only handled in the initial network namespace here */
	if (dev_net(dev) != &init_net)
		goto out;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	/* pn_length apparently includes two bytes of header -- the payload
	 * that remains after the pull is len - 2 */
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);
	if (pn_sockaddr_get_addr(&sa) == 0)
		goto out; /* currently, we cannot be device 0 */

	sk = pn_find_sock_by_sa(&sa);
	if (sk == NULL) {
		/* nobody listening: optionally tell the sender */
		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
		goto out;
	}

	/* Push data to the socket (or other sockets connected to it).
	 * sk_receive_skb() consumes the skb on every path. */
	return sk_receive_skb(sk, skb, 0);

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Stuff received packets to associated sockets.
 * On error, returns non-zero and releases the skb.
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pkttype,
			struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct phonethdr *ph;
	struct sockaddr_pn sa;
	u16 len;

	/* Phonet is only handled in the initial network namespace */
	if (!net_eq(net, &init_net))
		goto out;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	/* pn_length apparently includes two bytes of header */
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);

	/* check if we are the destination */
	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
		/* Phonet packet input */
		struct sock *sk = pn_find_sock_by_sa(net, &sa);

		if (sk)
			/* sk_receive_skb() consumes the skb on every path */
			return sk_receive_skb(sk, skb, 0);

		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
	}

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/* ethtool function - set WOL (Wake on LAN) settings. * Only for magic packet detection mode. */ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; u32 reg; if (!device_can_wakeup(kdev)) return -ENOTSUPP; if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (wol->wolopts & WAKE_MAGICSECURE) { bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), UMAC_MPD_PW_MS); bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), UMAC_MPD_PW_LS); reg |= MPD_PW_EN; } else { reg &= ~MPD_PW_EN; } bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { device_set_wakeup_enable(kdev, 1); /* Avoid unbalanced enable_irq_wake calls */ if (priv->wol_irq_disabled) enable_irq_wake(priv->wol_irq); priv->wol_irq_disabled = false; } else { device_set_wakeup_enable(kdev, 0); /* Avoid unbalanced disable_irq_wake calls */ if (!priv->wol_irq_disabled) disable_irq_wake(priv->wol_irq); priv->wol_irq_disabled = true; } priv->wolopts = wol->wolopts; return 0; }
u16 rtw_recv_select_queue23a(struct sk_buff *skb) { struct iphdr *piphdr; struct ethhdr *eth = (struct ethhdr *)skb->data; unsigned int dscp; u16 eth_type = get_unaligned_be16(ð->h_proto); u32 priority; u8 *pdata = skb->data; switch (eth_type) { case ETH_P_IP: piphdr = (struct iphdr *)(pdata + ETH_HLEN); dscp = piphdr->tos & 0xfc; priority = dscp >> 5; break; default: priority = 0; } return rtw_1d_to_queue[priority]; }
/*
 * dynamic_filter - accept or drop a packet based on a port-lookup hook
 *
 * When the portExists callback is registered, IPv4 packets (ethertype
 * 0x0800 at offset 12) are accepted (r_size) only if the callback
 * reports the src/dst port pair as known; everything else is dropped
 * (0).  When no callback is registered, all packets are accepted.
 */
unsigned int dynamic_filter(const struct sk_buff *skb, u32 r_size)
{
	struct packetInfo src_pi;
	struct packetInfo dst_pi;
	u32 scratch;
	void *p;

	/* no hook registered: pass everything through */
	if (portExists == NULL)
		return r_size;

	/* ethertype lives at offset 12 of the Ethernet header */
	p = load_pointer(skb, 12, 2, &scratch);
	if (p == NULL)
		return 0;

	if (get_unaligned_be16(p) != 0x800)
		return 0;	/* not IPv4 */

	if (getPacketInfo(skb, &src_pi, &dst_pi) != 0)
		return 0;	/* could not extract address/port info */

	return (portExists(&src_pi, &dst_pi) == 1) ? r_size : 0;
}
static int wrn_set_mac_address(struct net_device *dev, void* vaddr) { struct wrn_ep *ep = netdev_priv(dev); struct sockaddr *addr = vaddr; u32 val; //netdev_dbg(dev, "%s\n", __func__); if (!is_valid_ether_addr(addr->sa_data)) { //netdev_dbg(dev, "%s: invalid\n", __func__); return -EADDRNOTAVAIL; } memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); /* MACH gets the first two bytes, MACL the rest */ val = get_unaligned_be16(dev->dev_addr); writel(val, &ep->ep_regs->MACH); val = get_unaligned_be32(dev->dev_addr+2); writel(val, &ep->ep_regs->MACL); return 0; }
/*
 * flush_keys() - evict every key currently loaded in the TPM
 *
 * Reads the TPM's key-handle list via TPM_CAP_HANDLE and issues a flush
 * for each 32-bit handle.  Handles under owner control
 * (TPM_KEY_OWNER_CONTROL) cannot be flushed and are skipped.
 *
 * Return: 0 on success, -1 if the capability read fails, or the TPM
 * error code of a failed flush.
 */
int flush_keys(void)
{
	u16 key_count;
	u8 buf[288];
	u8 *ptr;
	u32 err;
	uint i;

	/* fetch list of already loaded keys in the TPM */
	err = tpm_get_capability(TPM_CAP_HANDLE, TPM_RT_KEY, buf, sizeof(buf));
	if (err)
		return -1;
	key_count = get_unaligned_be16(buf);

	/*
	 * key_count is device-provided data; clamp it so the 4-byte handle
	 * walk below cannot read past the end of buf[].
	 */
	if (key_count > (sizeof(buf) - 2) / 4)
		key_count = (sizeof(buf) - 2) / 4;

	ptr = buf + 2;
	for (i = 0; i < key_count; ++i, ptr += 4) {
		err = tpm_flush_specific(get_unaligned_be32(ptr), TPM_RT_KEY);
		if (err && err != TPM_KEY_OWNER_CONTROL)
			return err;
	}
	return 0;
}
/*
 * tpm_find_key_sha1() - locate a loaded TPM key by its public-key digest
 * @dev:           TPM device
 * @auth:          20-byte OIAP usage auth secret to try against each key
 * @pubkey_digest: 20-byte SHA-1 of the public key we are looking for
 * @handle:        out: handle of the matching key
 *
 * Return: 0 if a matching key was found (*handle set), 1 if none of the
 * accessible keys matches, -1 (as u32) on communication/auth errors.
 */
u32 tpm_find_key_sha1(struct udevice *dev, const u8 auth[20],
		      const u8 pubkey_digest[20], u32 *handle)
{
	u16 key_count;
	u32 key_handles[10];
	u8 buf[288];
	u8 *ptr;
	u32 err;
	u8 digest[20];
	size_t buf_len;
	unsigned int i;

	/* fetch list of already loaded keys in the TPM */
	err = tpm_get_capability(dev, TPM_CAP_HANDLE, TPM_RT_KEY, buf,
				 sizeof(buf));
	if (err)
		return -1;
	key_count = get_unaligned_be16(buf);

	/*
	 * key_count is device-provided data; clamp it so we neither
	 * overflow key_handles[] (stack buffer overflow) nor read past
	 * the end of buf[].
	 */
	if (key_count > ARRAY_SIZE(key_handles))
		key_count = ARRAY_SIZE(key_handles);

	ptr = buf + 2;
	for (i = 0; i < key_count; ++i, ptr += 4)
		key_handles[i] = get_unaligned_be32(ptr);

	/* now search a(/ the) key which we can access with the given auth */
	for (i = 0; i < key_count; ++i) {
		buf_len = sizeof(buf);
		err = tpm_get_pub_key_oiap(key_handles[i], auth, buf, &buf_len);
		if (err && err != TPM_AUTHFAIL)
			return -1;
		if (err)
			continue;	/* auth does not fit this key */
		sha1_csum(buf, buf_len, digest);
		if (!memcmp(digest, pubkey_digest, 20)) {
			*handle = key_handles[i];
			return 0;
		}
	}
	return 1;	/* no matching key found */
}
/*
 * bq27425_read - read a gauge register over I2C
 * @reg:      register address to read
 * @rt_value: out: register contents (one byte, or big-endian word)
 * @b_single: non-zero to read a single byte, zero for a 16-bit word
 * @di:       device info holding the i2c client
 *
 * Performs a register-address write followed by a 1- or 2-byte read.
 * Returns 0 on success or the negative i2c_transfer() error code.
 */
static int bq27425_read(u8 reg, int *rt_value, int b_single,
			struct bq275xx_device_info *di)
{
	struct i2c_client *client = di->client;
	struct i2c_msg msg[1];
	unsigned char data[2];
	int ret;

	if (!client->adapter)
		return -ENODEV;

	/* phase 1: write the register address */
	data[0] = reg;
	msg->addr = client->addr;
	msg->flags = 0;
	msg->len = 1;
	msg->buf = data;
	ret = i2c_transfer(client->adapter, msg, 1);
	if (ret < 0)
		return ret;

	/* phase 2: read back one byte or one big-endian word */
	msg->flags = I2C_M_RD;
	msg->len = b_single ? 1 : 2;
	ret = i2c_transfer(client->adapter, msg, 1);
	if (ret < 0)
		return ret;

	*rt_value = b_single ? data[0] : get_unaligned_be16(data);
	return 0;
}
/*
 * cb_write_lba() - handle a rockusb bulk command requesting an LBA write
 *
 * Decodes the CDB embedded in the command wrapper: CDB[2..5] carry the
 * 32-bit start LBA and CDB[7..8] the 16-bit sector count (both
 * big-endian).  Sets up the download state and chains the data phase to
 * rx_handler_dl_image(); a zero-length request is answered immediately
 * with a failing CSW.
 */
static void cb_write_lba(struct usb_ep *ep, struct usb_request *req)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct fsg_bulk_cb_wrap, cbw,
				 sizeof(struct fsg_bulk_cb_wrap));
	struct f_rockusb *f_rkusb = get_rkusb();
	int sector_count;

	memcpy((char *)cbw, req->buf, USB_BULK_CB_WRAP_LEN);
	sector_count = (int)get_unaligned_be16(&cbw->CDB[7]);
	f_rkusb->lba = get_unaligned_be32(&cbw->CDB[2]);
	/* NOTE(review): assumes 512-byte sectors -- confirm for this target */
	f_rkusb->dl_size = sector_count * 512;
	f_rkusb->dl_bytes = 0;
	f_rkusb->tag = cbw->tag;
	debug("require write %x bytes, %x sectors to lba %x\n",
	      f_rkusb->dl_size, sector_count, f_rkusb->lba);

	if (f_rkusb->dl_size == 0) {
		/* nothing to transfer: fail the command right away */
		rockusb_tx_write_csw(cbw->tag, cbw->data_transfer_length,
				     CSW_FAIL, USB_BULK_CS_WRAP_LEN);
	} else {
		req->complete = rx_handler_dl_image;
		req->length = rx_bytes_expected(ep);
	}
}
/*
 * parse_header() - validate and measure an LZOP stream header
 * @input: start of the compressed stream
 * @skip:  out: number of header bytes preceding the compressed data
 *
 * Checks the 9-byte lzop magic, then walks the version-dependent header
 * fields (versions, method, level, flags, filter info, mode, mtimes,
 * file name, checksum) without interpreting most of them.  Returns 1
 * and sets *skip on success, 0 if the magic does not match.
 */
STATIC inline int INIT parse_header(u8 *input, u8 *skip)
{
	int l;
	u8 *parse = input;
	u8 level = 0;	/* read for >= 0x0940 headers; otherwise unused here */
	u16 version;

	/* read magic: 9 first bytes */
	for (l = 0; l < 9; l++) {
		if (*parse++ != lzop_magic[l])
			return 0;
	}
	/* get version (2bytes), skip library version (2),
	 * 'need to be extracted' version (2) and
	 * method (1) */
	version = get_unaligned_be16(parse);
	parse += 7;
	if (version >= 0x0940)
		level = *parse++;
	if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
		parse += 8; /* flags + filter info */
	else
		parse += 4; /* flags */

	/* skip mode and mtime_low */
	parse += 8;
	if (version >= 0x0940)
		parse += 4;	/* skip mtime_high */

	/* name is a length-prefixed string; don't care about the file
	 * name, and skip checksum (4 bytes) */
	l = *parse++;
	parse += l + 4;

	*skip = parse - input;
	return 1;
}
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: array of filter instructions to apply
 *
 *	Decode and apply filter instructions to the skb->data.
 *	Return length to keep, 0 for none. @skb is the data we are
 *	filtering, @fentry is the array of filter instructions.
 *	Because all jumps are guaranteed to be before last instruction,
 *	and last instruction guaranteed to be a RET, we dont need to check
 *	flen. (We used to pass to this function the length of filter)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	unsigned long memvalid = 0;	/* bitmap of mem[] slots actually written */
	u32 tmp;
	int k;

	/* memvalid is a per-slot bitmap, so mem[] must fit in one long */
	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;	/* division by zero aborts */
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A /= K;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			/* load failed: fall through to the ancillary switch */
			break;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			/* reading a never-written slot yields 0, not garbage */
			A = (memvalid & (1UL << K)) ? mem[K] : 0;
			continue;
		case BPF_S_LDX_MEM:
			X = (memvalid & (1UL << K)) ? mem[K] : 0;
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			memvalid |= 1UL << K;
			mem[K] = A;
			continue;
		case BPF_S_STX:
			memvalid |= 1UL << K;
			mem[K] = X;
			continue;
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}

		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_MARK:
			A = skb->mark;
			continue;
		case SKF_AD_QUEUE:
			A = skb->queue_mapping;
			continue;
		case SKF_AD_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
#if 0
		case SKF_AD_RXHASH:
			A = skb->rxhash;
			continue;
#endif
		case SKF_AD_CPU:
			A = raw_smp_processor_id();
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* NOTE(review): 'nla->nla_len > A - skb->len' looks
			 * inverted (A - skb->len wraps for A < len); confirm
			 * against upstream net/core/filter.c. */
			if (nla->nla_len > A - skb->len)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
/*
 * iblock_execute_unmap() - process a SCSI UNMAP parameter list and
 * discard the described LBA ranges on the backing block device.
 *
 * The payload starts with an 8-byte header (data length at bytes 0-1,
 * block descriptor data length at bytes 2-3) followed by 16-byte block
 * descriptors: 8-byte LBA, 4-byte range, 4 reserved bytes.
 */
static int iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ibd = dev->dev_ptr;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	int ret = 0;
	int dl, bd_dl;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}

	/* NOTE(review): transport_kmap_data_sg() result is dereferenced
	 * below without a NULL check -- confirm it cannot fail here. */
	buf = transport_kmap_data_sg(cmd);

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	/* trust the smaller of the transfer length and the advertised
	 * block-descriptor length */
	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		ret = -EINVAL;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			ret = -EINVAL;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
			ret = -EINVAL;
			goto err;
		}

		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
			       ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 * @h: attached device handler data
 * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
		     int wait_for_transition)
{
	struct scsi_sense_hdr sense_hdr;
	int len, k, off, valid_states = 0;
	unsigned char *ucp;
	/* NOTE(review): 'err' is only assigned on the two sense-key branches
	 * below but is read unconditionally afterwards -- possibly used
	 * uninitialized. Confirm against upstream scsi_dh_alua. */
	unsigned err, retval;
	unsigned long expiry, interval = 0;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;

	/* overall deadline for the retry loop */
	if (!h->transition_tmo)
		expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
	else
		expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);

 retry:
	retval = submit_rtpg(sdev, h);
	if (retval) {
		if (!scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					  &sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			if (driver_byte(retval) == DRIVER_BUSY)
				return SCSI_DH_DEV_TEMP_BUSY;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			h->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}

		/*
		 * Retry on ALUA state transition or if any
		 * UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
			err = SCSI_DH_RETRY;
		else if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY && time_before(jiffies, expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			goto retry;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		return SCSI_DH_IO;
	}

	/* first 4 bytes of the response hold the descriptor data length */
	len = get_unaligned_be32(&h->buff[0]) + 4;

	if (len > h->bufflen) {
		/* Resubmit with the correct length */
		if (realloc_buffer(h, len)) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",__func__);
			/* Temporary failure, bypass */
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = h->transition_tmo;
	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
		h->transition_tmo = h->buff[5];
	else
		h->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, h->transition_tmo);
		expiry = jiffies + h->transition_tmo * HZ;
	}

	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	/* walk the target port group descriptors looking for our group id */
	for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, ucp += off) {
		if (h->group_id == get_unaligned_be16(&ucp[2])) {
			h->state = ucp[0] & 0x0f;
			h->pref = ucp[0] >> 7;
			valid_states = ucp[1];
		}
		off = 8 + (ucp[7] * 4);
	}
/*
 * si470x_usb_driver_probe - probe for the device
 *
 * Allocates and initializes driver state, locates the interrupt-in
 * endpoint, distinguishes the si470x from the look-alike 'Thanko's
 * Raremono' receiver, registers the v4l2 device/controls, reads chip
 * and firmware versions, sets up the RDS buffer, starts the USB
 * machinery and registers the radio video device.  All failure paths
 * unwind through the err_* labels in reverse acquisition order.
 */
static int si470x_usb_driver_probe(struct usb_interface *intf,
				   const struct usb_device_id *id)
{
	struct si470x_device *radio;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int i, int_end_size, retval = 0;
	unsigned char version_warning = 0;

	/* private data allocation and initialization */
	radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
	if (!radio) {
		retval = -ENOMEM;
		goto err_initial;
	}
	radio->usb_buf = kmalloc(MAX_REPORT_SIZE, GFP_KERNEL);
	if (radio->usb_buf == NULL) {
		retval = -ENOMEM;
		goto err_radio;
	}
	radio->usbdev = interface_to_usbdev(intf);
	radio->intf = intf;
	radio->band = 1; /* Default to 76 - 108 MHz */
	mutex_init(&radio->lock);
	init_completion(&radio->completion);

	iface_desc = intf->cur_altsetting;

	/* Set up interrupt endpoint information. */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_int_in(endpoint))
			radio->int_in_endpoint = endpoint;
	}
	if (!radio->int_in_endpoint) {
		dev_info(&intf->dev, "could not find interrupt in endpoint\n");
		retval = -EIO;
		goto err_usbbuf;
	}

	int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize);

	radio->int_in_buffer = kmalloc(int_end_size, GFP_KERNEL);
	if (!radio->int_in_buffer) {
		dev_info(&intf->dev, "could not allocate int_in_buffer");
		retval = -ENOMEM;
		goto err_usbbuf;
	}

	radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!radio->int_in_urb) {
		retval = -ENOMEM;
		goto err_intbuffer;
	}

	radio->v4l2_dev.release = si470x_usb_release;

	/*
	 * The si470x SiLabs reference design uses the same USB IDs as
	 * 'Thanko's Raremono' si4734 based receiver. So check here which we
	 * have: attempt to read the device ID from the si470x: the lower 12
	 * bits should be 0x0242 for the si470x.
	 *
	 * We use this check to determine which device we are dealing with.
	 */
	if (id->idVendor == 0x10c4 && id->idProduct == 0x818a) {
		retval = usb_control_msg(radio->usbdev,
				usb_rcvctrlpipe(radio->usbdev, 0),
				HID_REQ_GET_REPORT,
				USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
				1, 2,
				radio->usb_buf, 3, 500);
		if (retval != 3 ||
		    (get_unaligned_be16(&radio->usb_buf[1]) & 0xfff) != 0x0242) {
			dev_info(&intf->dev, "this is not a si470x device.\n");
			retval = -ENODEV;
			goto err_urb;
		}
	}

	retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
	if (retval < 0) {
		dev_err(&intf->dev, "couldn't register v4l2_device\n");
		goto err_urb;
	}

	v4l2_ctrl_handler_init(&radio->hdl, 2);
	v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
			  V4L2_CID_AUDIO_VOLUME, 0, 15, 1, 15);
	if (radio->hdl.error) {
		retval = radio->hdl.error;
		dev_err(&intf->dev, "couldn't register control\n");
		goto err_dev;
	}
	radio->videodev = si470x_viddev_template;
	radio->videodev.ctrl_handler = &radio->hdl;
	radio->videodev.lock = &radio->lock;
	radio->videodev.v4l2_dev = &radio->v4l2_dev;
	radio->videodev.release = video_device_release_empty;
	video_set_drvdata(&radio->videodev, radio);

	/* get device and chip versions */
	if (si470x_get_all_registers(radio) < 0) {
		retval = -EIO;
		goto err_ctrl;
	}
	dev_info(&intf->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
		 radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
	if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
		dev_warn(&intf->dev,
			"This driver is known to work with firmware version %hu,\n",
			RADIO_FW_VERSION);
		dev_warn(&intf->dev,
			"but the device has firmware version %hu.\n",
			radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
		version_warning = 1;
	}

	/* get software and hardware versions */
	if (si470x_get_scratch_page_versions(radio) < 0) {
		retval = -EIO;
		goto err_ctrl;
	}
	dev_info(&intf->dev, "software version %d, hardware version %d\n",
		 radio->software_version, radio->hardware_version);
	if (radio->hardware_version < RADIO_HW_VERSION) {
		dev_warn(&intf->dev,
			"This driver is known to work with hardware version %hu,\n",
			RADIO_HW_VERSION);
		dev_warn(&intf->dev,
			"but the device has hardware version %hu.\n",
			radio->hardware_version);
		version_warning = 1;
	}

	/* give out version warning */
	if (version_warning == 1) {
		dev_warn(&intf->dev,
			"If you have some trouble using this driver,\n");
		dev_warn(&intf->dev,
			"please report to V4L ML at [email protected]\n");
	}

	/* set led to connect state */
	si470x_set_led_state(radio, BLINK_GREEN_LED);

	/* rds buffer allocation */
	radio->buf_size = rds_buf * 3;
	radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
	if (!radio->buffer) {
		retval = -EIO;
		goto err_ctrl;
	}

	/* rds buffer configuration */
	radio->wr_index = 0;
	radio->rd_index = 0;
	init_waitqueue_head(&radio->read_queue);
	usb_set_intfdata(intf, radio);

	/* start radio */
	retval = si470x_start_usb(radio);
	if (retval < 0)
		goto err_all;

	/* set initial frequency */
	si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */

	/* register video device */
	retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
				       radio_nr);
	if (retval) {
		dev_err(&intf->dev, "Could not register video device\n");
		goto err_all;
	}

	return 0;
err_all:
	kfree(radio->buffer);
err_ctrl:
	v4l2_ctrl_handler_free(&radio->hdl);
err_dev:
	v4l2_device_unregister(&radio->v4l2_dev);
err_urb:
	usb_free_urb(radio->int_in_urb);
err_intbuffer:
	kfree(radio->int_in_buffer);
err_usbbuf:
	kfree(radio->usb_buf);
err_radio:
	kfree(radio);
err_initial:
	return retval;
}
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 *	Decode and apply filter instructions to the skb->data.
 *	Return length to keep, 0 for none. skb is the data we are
 *	filtering, filter is the array of filter instructions, and
 *	len is the number of filter blocks in the array.
 */
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter,
			   int flen)
{
	struct sock_filter *fentry;	/* We walk down these */
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;
	int pc;

	/*
	 * Process array of filter instructions.
	 */
	for (pc = 0; pc < flen; pc++) {
		fentry = &filter[pc];

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += fentry->k;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= fentry->k;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= fentry->k;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;	/* division by zero aborts */
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A /= fentry->k;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= fentry->k;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= fentry->k;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= fentry->k;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= fentry->k;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			pc += fentry->k;
			continue;
		case BPF_S_JMP_JGT_K:
			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = fentry->k;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			/* load failed: fall through to the ancillary switch */
			break;
		case BPF_S_LD_H_ABS:
			k = fentry->k;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_S_LD_B_ABS:
			k = fentry->k;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + fentry->k;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + fentry->k;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + fentry->k;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, fentry->k, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = fentry->k;
			continue;
		case BPF_S_LDX_IMM:
			X = fentry->k;
			continue;
		case BPF_S_LD_MEM:
			A = mem[fentry->k];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[fentry->k];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return fentry->k;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[fentry->k] = A;
			continue;
		case BPF_S_STX:
			mem[fentry->k] = X;
			continue;
		default:
			WARN_ON(1);
			return 0;
		}

		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_MARK:
			A = skb->mark;
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* NOTE(review): 'nla->nla_len > A - skb->len' looks
			 * inverted (A - skb->len wraps for A < len); confirm
			 * against upstream net/core/filter.c. */
			if (nla->nla_len > A - skb->len)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
/*
 * Stuff received packets to associated sockets.
 * On error, returns non-zero and releases the skb.
 *
 * This variant also handles multicast/broadcast delivery, resource
 * routing and forwarding of packets whose destination address is not
 * local.
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pkttype,
			struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct phonethdr *ph;
	struct sockaddr_pn sa;
	u16 len;
	int i;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	/* pn_length apparently includes two bytes of header */
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);

	/* debug dump of header fields and payload bytes */
	PN_PRINTK("PN rcv: hdr rdev %x sdev %x res %x robj %x sobj %x dev=%s\n",
		ph->pn_rdev, ph->pn_sdev, ph->pn_res,
		ph->pn_robj, ph->pn_sobj, dev->name);
	PN_DATA_PRINTK("PHONET : skb data = %d\nPHONET :", skb->len);
	for (i = 1; i <= skb->len; i++) {
		PN_DATA_PRINTK(" %02x", skb->data[i-1]);
		if ((i%8) == 0)
			PN_DATA_PRINTK("\n");
	}

	/* check if this is multicasted */
	if (pn_sockaddr_get_object(&sa) == PNOBJECT_MULTICAST) {
		pn_deliver_sock_broadcast(net, skb);
		goto out;
	}

	/* check if this is broadcasted */
	if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
		pn_deliver_sock_broadcast(net, skb);
		goto out;
	}

	/* resource routing */
	if (pn_sockaddr_get_object(&sa) == 0) {
		struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
		if (sk) {
			printk(KERN_DEBUG "phonet new resource routing!\n");
			return sk_receive_skb(sk, skb, 0);
		}
	}

	/* check if we are the destination */
	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
		/* Phonet packet input */
		/*!*/ struct sock *sk = pn_find_sock_by_sa_and_skb(net, &sa, skb);
		/*struct sock *sk = pn_find_sock_by_sa(net, &sa);*/

		if (sk)
			return sk_receive_skb(sk, skb, 0);

		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
	} else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto out; /* Race between address deletion and loopback */
	else {
		/* Phonet packet routing */
		struct net_device *out_dev;

		out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
		if (!out_dev) {
			LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n",
				pn_sockaddr_get_addr(&sa));
			goto out;
		}
		/* re-expose the Phonet header before forwarding */
		__skb_push(skb, sizeof(struct phonethdr));
		skb->dev = out_dev;
		if (out_dev == dev) {
			LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
				pn_sockaddr_get_addr(&sa), dev->name);
			goto out_dev;
		}
		/* Some drivers (e.g. TUN) do not allocate HW header space */
		if (skb_cow_head(skb, out_dev->hard_header_len))
			goto out_dev;

		if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
					skb->len) < 0)
			goto out_dev;
		dev_queue_xmit(skb);
		dev_put(out_dev);
		return NET_RX_SUCCESS;
out_dev:
		dev_put(out_dev);
	}

out:
	kfree_skb(skb);
	printk(KERN_DEBUG "phonet_rcv Drop message!\n");
	return NET_RX_DROP;
}
/*
 * si470x_int_in_callback - rds callback and processing function
 *
 * URB completion handler for the interrupt-in endpoint: updates the cached
 * STATUSRSSI register, signals seek/tune completion, and (when RDS is
 * enabled) unpacks the four RDS blocks into the driver's ring buffer in the
 * 3-byte V4L2 RDS block format, then resubmits the URB.
 *
 * Runs in interrupt (atomic) context — hence GFP_ATOMIC on resubmit.
 *
 * TODO: do we need to use mutex locks in some sections?
 */
static void si470x_int_in_callback(struct urb *urb)
{
	struct si470x_device *radio = urb->context;
	int retval;
	unsigned char regnr;
	unsigned char blocknum;
	unsigned short bler; /* rds block errors */
	unsigned short rds;
	unsigned char tmpbuf[3];

	if (urb->status) {
		if (urb->status == -ENOENT ||
				urb->status == -ECONNRESET ||
				urb->status == -ESHUTDOWN) {
			/* URB was killed/device gone: do not resubmit */
			return;
		} else {
			dev_warn(&radio->intf->dev,
			 "non-zero urb status (%d)\n", urb->status);
			goto resubmit; /* Maybe we can recover. */
		}
	}

	/* Sometimes the device returns len 0 packets */
	if (urb->actual_length != RDS_REPORT_SIZE)
		goto resubmit;

	/* buffer[0] is the report ID; registers start at offset 1 */
	radio->registers[STATUSRSSI] =
		get_unaligned_be16(&radio->int_in_buffer[1]);
	if (radio->registers[STATUSRSSI] & STATUSRSSI_STC)
		complete(&radio->completion);

	if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) {
		/* Update RDS registers with URB data */
		for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++)
			radio->registers[STATUSRSSI + regnr] =
				get_unaligned_be16(&radio->int_in_buffer[
					regnr * RADIO_REGISTER_SIZE + 1]);
		/* get rds blocks */
		if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) {
			/* No RDS group ready, better luck next time */
			goto resubmit;
		}
		if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSS) == 0) {
			/* RDS decoder not synchronized */
			goto resubmit;
		}
		for (blocknum = 0; blocknum < 4; blocknum++) {
			/*
			 * Pick per-block error count and data register;
			 * shift amounts select the BLER bit fields —
			 * presumably per the Si470x datasheet, confirm there.
			 */
			switch (blocknum) {
			default:
				bler = (radio->registers[STATUSRSSI] &
						STATUSRSSI_BLERA) >> 9;
				rds = radio->registers[RDSA];
				break;
			case 1:
				bler = (radio->registers[READCHAN] &
						READCHAN_BLERB) >> 14;
				rds = radio->registers[RDSB];
				break;
			case 2:
				bler = (radio->registers[READCHAN] &
						READCHAN_BLERC) >> 12;
				rds = radio->registers[RDSC];
				break;
			case 3:
				bler = (radio->registers[READCHAN] &
						READCHAN_BLERD) >> 10;
				rds = radio->registers[RDSD];
				break;
			}

			/* Fill the V4L2 RDS buffer */
			put_unaligned_le16(rds, &tmpbuf);
			tmpbuf[2] = blocknum;		/* offset name */
			tmpbuf[2] |= blocknum << 3;	/* received offset */
			if (bler > max_rds_errors)
				tmpbuf[2] |= 0x80; /* uncorrectable errors */
			else if (bler > 0)
				tmpbuf[2] |= 0x40; /* corrected error(s) */

			/* copy RDS block to internal buffer */
			memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
			radio->wr_index += 3;

			/* wrap write pointer */
			if (radio->wr_index >= radio->buf_size)
				radio->wr_index = 0;

			/* check for overflow: on ring overrun, drop the
			 * oldest block by advancing the read pointer */
			if (radio->wr_index == radio->rd_index) {
				/* increment and wrap read pointer */
				radio->rd_index += 3;
				if (radio->rd_index >= radio->buf_size)
					radio->rd_index = 0;
			}
		}
		if (radio->wr_index != radio->rd_index)
			wake_up_interruptible(&radio->read_queue);
	}

resubmit:
	/* Resubmit if we're still running. */
	if (radio->int_in_running && radio->usbdev) {
		retval = usb_submit_urb(radio->int_in_urb, GFP_ATOMIC);
		if (retval) {
			dev_warn(&radio->intf->dev,
			       "resubmitting urb failed (%d)", retval);
			radio->int_in_running = 0;
		}
	}
	radio->status_rssi_auto_update = radio->int_in_running;
}
/*
 * iscsi_parse_pr_out_transport_id - parse an iSCSI PR OUT TransportID buffer.
 *
 * Validates the format code, optionally computes the padded TransportID
 * length into @out_tid_len, and for format 01b splits off the ",i,0x"
 * ISID suffix (NUL-terminating the iSCSI name in place and returning the
 * lower-cased ISID via @port_nexus_ptr).
 *
 * Returns a pointer to the iSCSI name inside @buf, or NULL on a bad
 * format code / missing separator.  Note: @buf is modified in place.
 */
static char *iscsi_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	char *p;
	u32 tid_len, padding;
	int i;
	u16 add_len;
	u8 format_code = (buf[0] & 0xc0);
	/*
	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
	 *
	 * TransportID for initiator ports using SCSI over iSCSI,
	 * from Table 388 -- iSCSI TransportID formats.
	 *
	 * 00b     Initiator port is identified using the world wide unique
	 *         SCSI device name of the iSCSI initiator
	 *         device containing the initiator port (see table 389).
	 * 01b     Initiator port is identified using the world wide unique
	 *         initiator port identifier (see table 390).10b to 11b
	 *         Reserved
	 */
	if ((format_code != 0x00) && (format_code != 0x40)) {
		pr_err("Illegal format code: 0x%02x for iSCSI"
			" Initiator Transport ID\n", format_code);
		return NULL;
	}
	/*
	 * If the caller wants the TransportID Length, we set that value for the
	 * entire iSCSI Transport ID now.
	 */
	if (out_tid_len) {
		/* The shift works thanks to integer promotion rules */
		add_len = get_unaligned_be16(&buf[2]);

		tid_len = strlen(&buf[4]);
		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
		tid_len += 1; /* Add one byte for NULL terminator */
		/* round up to the next 4-byte boundary */
		padding = ((-tid_len) & 3);
		if (padding != 0)
			tid_len += padding;

		/* Prefer our computed length if the on-wire field disagrees */
		if ((add_len + 4) != tid_len) {
			pr_debug("LIO-Target Extracted add_len: %hu "
				"does not match calculated tid_len: %u,"
				" using tid_len instead\n", add_len+4, tid_len);
			*out_tid_len = tid_len;
		} else
			*out_tid_len = (add_len + 4);
	}
	/*
	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
	 * format.
	 */
	if (format_code == 0x40) {
		p = strstr(&buf[4], ",i,0x");
		if (!p) {
			pr_err("Unable to locate \",i,0x\" separator"
				" for Initiator port identifier: %s\n",
				&buf[4]);
			return NULL;
		}
		*p = '\0'; /* Terminate iSCSI Name */
		p += 5; /* Skip over ",i,0x" separator */

		*port_nexus_ptr = p;
		/*
		 * Go ahead and do the lower case conversion of the received
		 * 12 ASCII characters representing the ISID in the TransportID
		 * for comparison against the running iSCSI session's ISID from
		 * iscsi_target.c:lio_sess_get_initiator_sid()
		 */
		for (i = 0; i < 12; i++) {
			if (isdigit(*p)) {
				p++;
				continue;
			}
			*p = tolower(*p);
			p++;
		}
	}
	return &buf[4];
}
static void redrat3_process_ir_data(struct redrat3_dev *rr3) { DEFINE_IR_RAW_EVENT(rawir); struct device *dev; unsigned i, trailer = 0; unsigned sig_size, single_len, offset, val; unsigned long delay; u32 mod_freq; if (!rr3) { pr_err("%s called with no context!\n", __func__); return; } rr3_ftr(rr3->dev, "Entered %s\n", __func__); dev = rr3->dev; /* Make sure we reset the IR kfifo after a bit of inactivity */ delay = usecs_to_jiffies(rr3->hw_timeout); mod_timer(&rr3->rx_timeout, jiffies + delay); mod_freq = redrat3_val_to_mod_freq(&rr3->irdata); rr3_dbg(dev, "Got mod_freq of %u\n", mod_freq); /* process each rr3 encoded byte into an int */ sig_size = be16_to_cpu(rr3->irdata.sig_size); for (i = 0; i < sig_size; i++) { offset = rr3->irdata.sigdata[i]; val = get_unaligned_be16(&rr3->irdata.lens[offset]); single_len = redrat3_len_to_us(val); /* we should always get pulse/space/pulse/space samples */ if (i % 2) rawir.pulse = false; else rawir.pulse = true; rawir.duration = US_TO_NS(single_len); /* Save initial pulse length to fudge trailer */ if (i == 0) trailer = rawir.duration; /* cap the value to IR_MAX_DURATION */ rawir.duration &= IR_MAX_DURATION; rr3_dbg(dev, "storing %s with duration %d (i: %d)\n", rawir.pulse ? "pulse" : "space", rawir.duration, i); ir_raw_event_store_with_filter(rr3->rc, &rawir); } /* add a trailing space, if need be */ if (i % 2) { rawir.pulse = false; /* this duration is made up, and may not be ideal... */ if (trailer < US_TO_NS(1000)) rawir.duration = US_TO_NS(2800); else rawir.duration = trailer; rr3_dbg(dev, "storing trailing space with duration %d\n", rawir.duration); ir_raw_event_store_with_filter(rr3->rc, &rawir); } rr3_dbg(dev, "calling ir_raw_event_handle\n"); ir_raw_event_handle(rr3->rc); }
/*
 * brcmf_c_host_event - validate and dispatch a Broadcom firmware event packet.
 *
 * Checks the BRCM OUI and event subtype, copies the (possibly unaligned)
 * event message into @event, points *data_ptr at the event payload, resolves
 * *ifidx from the interface name, and performs per-type handling (interface
 * add/delete for BRCMF_E_IF, NDIS link type rewrite, etc.).
 *
 * Returns 0 on success or -EBADE when the packet is not a BRCM event.
 */
int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
		       struct brcmf_event_msg *event, void **data_ptr)
{
	/* check whether packet is a BRCM event pkt */
	struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
	char *event_data;
	u32 type, status;
	u16 flags;
	int evlen;

	if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
		BRCMF_ERROR(("%s: mismatched OUI, bailing\n", __func__));
		return -EBADE;
	}

	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
	if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
	    BCMILCP_BCM_SUBTYPE_EVENT) {
		BRCMF_ERROR(("%s: mismatched subtype, bailing\n", __func__));
		return -EBADE;
	}

	/* payload starts immediately after the event structure */
	*data_ptr = &pvt_data[1];
	event_data = *data_ptr;

	/* memcpy since BRCM event pkt may be unaligned. */
	memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));

	type = get_unaligned_be32(&event->event_type);
	flags = get_unaligned_be16(&event->flags);
	status = get_unaligned_be32(&event->status);
	evlen = get_unaligned_be32(&event->datalen) +
		sizeof(struct brcmf_event);

	switch (type) {
	case BRCMF_E_IF:
		{
			struct brcmf_if_event *ifevent =
					(struct brcmf_if_event *) event_data;
			BRCMF_TRACE(("%s: if event\n", __func__));

			if (ifevent->ifidx > 0 &&
				 ifevent->ifidx < BRCMF_MAX_IFS) {
				if (ifevent->action == BRCMF_E_IF_ADD)
					brcmf_add_if(drvr_priv, ifevent->ifidx,
						     NULL, event->ifname,
						     pvt_data->eth.h_dest,
						     ifevent->flags,
						     ifevent->bssidx);
				else
					brcmf_del_if(drvr_priv, ifevent->ifidx);
			} else {
				BRCMF_ERROR(("%s: Invalid ifidx %d for %s\n",
					     __func__, ifevent->ifidx,
					     event->ifname));
			}
		}
		/* send up the if event: btamp user needs it */
		*ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
		break;

	/* These are what external supplicant/authenticator wants */
	case BRCMF_E_LINK:
	case BRCMF_E_ASSOC_IND:
	case BRCMF_E_REASSOC_IND:
	case BRCMF_E_DISASSOC_IND:
	case BRCMF_E_MIC_ERROR:
	default:
		/* Fall through: this should get _everything_  */

		*ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
		BRCMF_TRACE(("%s: MAC event %d, flags %x, status %x\n",
			     __func__, type, flags, status));

		/* put it back to BRCMF_E_NDIS_LINK */
		if (type == BRCMF_E_NDIS_LINK) {
			u32 temp;

			temp = get_unaligned_be32(&event->event_type);
			BRCMF_TRACE(("Converted to WLC_E_LINK type %d\n",
				     temp));

			/* rewrite the on-wire event type in place */
			temp = be32_to_cpu(BRCMF_E_NDIS_LINK);
			memcpy((void *)(&pvt_data->msg.event_type), &temp,
			       sizeof(pvt_data->msg.event_type));
		}
		break;
	}

#ifdef SHOW_EVENTS
	brcmf_c_show_host_event(event, event_data);
#endif				/* SHOW_EVENTS */

	return 0;
}
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @fentry: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. @skb is the data we are * filtering, @filter is the array of filter instructions. * Because all jumps are guaranteed to be before last instruction, * and last instruction guaranteed to be a RET, we dont need to check * flen. (We used to pass to this function the length of filter) */ unsigned int sk_run_filter(const struct sk_buff *skb, const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ u32 tmp; int k; /* * Process array of filter instructions. */ for (;; fentry++) { #if defined(CONFIG_X86_32) #define K (fentry->k) #else const u32 K = fentry->k; #endif switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: A += K; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: A -= K; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: A *= K; continue; case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; case BPF_S_ALU_DIV_K: A = reciprocal_divide(A, K); continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: A &= K; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: A |= K; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: A <<= K; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: A >>= K; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: fentry += K; continue; case BPF_S_JMP_JGT_K: fentry += (A > K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: fentry += (A >= K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: fentry += (A == K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: fentry += (A & K) ? 
fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: fentry += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: fentry += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: fentry += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: fentry += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: k = K; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } return 0; case BPF_S_LD_H_ABS: k = K; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } return 0; case BPF_S_LD_B_ABS: k = K; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } return 0; case BPF_S_LD_W_LEN: A = skb->len; continue; case BPF_S_LDX_W_LEN: X = skb->len; continue; case BPF_S_LD_W_IND: k = X + K; goto load_w; case BPF_S_LD_H_IND: k = X + K; goto load_h; case BPF_S_LD_B_IND: k = X + K; goto load_b; case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, K, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: A = K; continue; case BPF_S_LDX_IMM: X = K; continue; case BPF_S_LD_MEM: A = mem[K]; continue; case BPF_S_LDX_MEM: X = mem[K]; continue; case BPF_S_MISC_TAX: X = A; continue; case BPF_S_MISC_TXA: A = X; continue; case BPF_S_RET_K: return K; case BPF_S_RET_A: return A; case BPF_S_ST: mem[K] = A; continue; case BPF_S_STX: mem[K] = X; continue; case BPF_S_ANC_PROTOCOL: A = ntohs(skb->protocol); continue; case BPF_S_ANC_PKTTYPE: A = skb->pkt_type; continue; case BPF_S_ANC_IFINDEX: if (!skb->dev) return 0; A = skb->dev->ifindex; continue; case BPF_S_ANC_MARK: A = skb->mark; continue; case BPF_S_ANC_QUEUE: A = skb->queue_mapping; continue; case BPF_S_ANC_HATYPE: if (!skb->dev) return 0; A = skb->dev->type; continue; case BPF_S_ANC_RXHASH: A = skb->rxhash; continue; case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; case BPF_S_ANC_NLATTR: { struct 
nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case BPF_S_ANC_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } #ifdef CONFIG_SECCOMP_FILTER case BPF_S_ANC_SECCOMP_LD_W: A = seccomp_bpf_load(fentry->k); continue; #endif default: WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", fentry->code, fentry->jt, fentry->jf, fentry->k); return 0; } } return 0; }
bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, const struct tcphdr *th, struct synproxy_options *opts) { int length = (th->doff * 4) - sizeof(*th); u8 buf[40], *ptr; ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); if (ptr == NULL) return false; opts->options = 0; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return true; case TCPOPT_NOP: length--; continue; default: opsize = *ptr++; if (opsize < 2) return true; if (opsize > length) return true; switch (opcode) { case TCPOPT_MSS: if (opsize == TCPOLEN_MSS) { opts->mss = get_unaligned_be16(ptr); opts->options |= XT_SYNPROXY_OPT_MSS; } break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW) { opts->wscale = *ptr; if (opts->wscale > 14) opts->wscale = 14; opts->options |= XT_SYNPROXY_OPT_WSCALE; } break; case TCPOPT_TIMESTAMP: if (opsize == TCPOLEN_TIMESTAMP) { opts->tsval = get_unaligned_be32(ptr); opts->tsecr = get_unaligned_be32(ptr + 4); opts->options |= XT_SYNPROXY_OPT_TIMESTAMP; } break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM) opts->options |= XT_SYNPROXY_OPT_SACK_PERM; break; } ptr += opsize - 2; length -= opsize; } } return true; }
/*
 * set_device_configuration_extension - apply the Device Configuration
 * Extension mode subpage (MODE SELECT) to the emulated tape drive.
 *
 * @cmd: the MODE SELECT command being processed.
 * @p:   start of the mode (sub)page data within the parameter list.
 *
 * Validates the subpage length and write mode, updates the Programmable
 * Early Warning size and append-only mode if the drive personality
 * supports them, and mirrors the write mode into the cached mode page.
 *
 * Returns SAM_STAT_GOOD, or SAM_STAT_CHECK_CONDITION with sense set.
 */
static uint8_t set_device_configuration_extension(struct scsi_cmd *cmd,
						uint8_t *p)
{
	uint8_t *sam_stat = &cmd->dbuf_p->sam_stat;
	struct lu_phy_attr *lu = cmd->lu;
	struct priv_lu_ssc *lu_priv = cmd->lu->lu_private;
	struct ssc_personality_template *pm;
	struct mode *mp;
	int page_code_len;
	int write_mode;
	int pews;	/* Programable Early Warning Size */

	pm = lu_priv->pm;
	mp = lookup_pcode(&lu->mode_pg, MODE_DEVICE_CONFIGURATION, 1);

	/* Code error
	 * Any device supporting this should have this mode page defined */
	if (!mp) {
		mkSenseBuf(HARDWARE_ERROR, E_INTERNAL_TARGET_FAILURE, sam_stat);
		return SAM_STAT_CHECK_CONDITION;
	}

	/* subpage length field — must be 0x1c for this subpage */
	page_code_len = get_unaligned_be16(&p[2]);
	if (page_code_len != 0x1c) {
		MHVTL_LOG("Unexpected page code length.. Unexpected results");
		mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_FIELD_IN_PARMS, sam_stat);
		return SAM_STAT_CHECK_CONDITION;
	}

	/* WRITE MODE field: 0 = write-anywhere, 1 = append-only */
	write_mode = (p[5] & 0xf0) >> 4;
	if (write_mode > 1) {
		mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_FIELD_IN_PARMS, sam_stat);
		return SAM_STAT_CHECK_CONDITION;
	}

	MHVTL_DBG(2, "%s mode", write_mode ?
				"Append-only" : "Write-anywhere");

	pews = get_unaligned_be16(&p[6]);
	if (pm->drive_supports_prog_early_warning) {
		MHVTL_DBG(2, "Set Programable Early Warning Size: %d", pews);
		lu_priv->prog_early_warning_sz = pews;
		update_prog_early_warning(lu);
	} else {
		MHVTL_DBG(2, "Programable Early Warning Size not supported"
				" by this device");
	}

	MHVTL_DBG(2, "Volume containing encrypted logical blocks "
			"requires encryption: %d", p[8] & 0x01);

	if (pm->drive_supports_append_only_mode) {
		/* Can't reset append-only mode via mode page ssc4 8.3.8 */
		if (lu_priv->append_only_mode && write_mode == 0) {
			MHVTL_LOG("Can't reset append only mode via mode page");
			mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_FIELD_IN_PARMS,
					sam_stat);
			return SAM_STAT_CHECK_CONDITION;
		}
		if (write_mode) {
			lu_priv->append_only_mode = write_mode;
			lu_priv->allow_overwrite = FALSE;
		}
	}

	/* Now update our copy of this mode page */
	mp->pcodePointer[5] &= 0x0f;
	mp->pcodePointer[5] |= write_mode << 4;

	return SAM_STAT_GOOD;
}
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @filter: filter to apply * @flen: length of filter * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. skb is the data we are * filtering, filter is the array of filter instructions, and * len is the number of filter blocks in the array. */ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ unsigned long memvalid = 0; u32 tmp; int k; int pc; BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *fentry = &filter[pc]; u32 f_k = fentry->k; switch (fentry->code) { case BPF_ALU|BPF_ADD|BPF_X: A += X; continue; case BPF_ALU|BPF_ADD|BPF_K: A += f_k; continue; case BPF_ALU|BPF_SUB|BPF_X: A -= X; continue; case BPF_ALU|BPF_SUB|BPF_K: A -= f_k; continue; case BPF_ALU|BPF_MUL|BPF_X: A *= X; continue; case BPF_ALU|BPF_MUL|BPF_K: A *= f_k; continue; case BPF_ALU|BPF_DIV|BPF_X: if (X == 0) return 0; A /= X; continue; case BPF_ALU|BPF_DIV|BPF_K: A /= f_k; continue; case BPF_ALU|BPF_AND|BPF_X: A &= X; continue; case BPF_ALU|BPF_AND|BPF_K: A &= f_k; continue; case BPF_ALU|BPF_OR|BPF_X: A |= X; continue; case BPF_ALU|BPF_OR|BPF_K: A |= f_k; continue; case BPF_ALU|BPF_LSH|BPF_X: A <<= X; continue; case BPF_ALU|BPF_LSH|BPF_K: A <<= f_k; continue; case BPF_ALU|BPF_RSH|BPF_X: A >>= X; continue; case BPF_ALU|BPF_RSH|BPF_K: A >>= f_k; continue; case BPF_ALU|BPF_NEG: A = -A; continue; case BPF_JMP|BPF_JA: pc += f_k; continue; case BPF_JMP|BPF_JGT|BPF_K: pc += (A > f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGE|BPF_K: pc += (A >= f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JEQ|BPF_K: pc += (A == f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JSET|BPF_K: pc += (A & f_k) ? 
fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGT|BPF_X: pc += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGE|BPF_X: pc += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JEQ|BPF_X: pc += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JSET|BPF_X: pc += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_LD|BPF_W|BPF_ABS: k = f_k; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } break; case BPF_LD|BPF_H|BPF_ABS: k = f_k; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } break; case BPF_LD|BPF_B|BPF_ABS: k = f_k; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } break; case BPF_LD|BPF_W|BPF_LEN: A = skb->len; continue; case BPF_LDX|BPF_W|BPF_LEN: X = skb->len; continue; case BPF_LD|BPF_W|BPF_IND: k = X + f_k; goto load_w; case BPF_LD|BPF_H|BPF_IND: k = X + f_k; goto load_h; case BPF_LD|BPF_B|BPF_IND: k = X + f_k; goto load_b; case BPF_LDX|BPF_B|BPF_MSH: ptr = load_pointer(skb, f_k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_LD|BPF_IMM: A = f_k; continue; case BPF_LDX|BPF_IMM: X = f_k; continue; case BPF_LD|BPF_MEM: A = (memvalid & (1UL << f_k)) ? mem[f_k] : 0; continue; case BPF_LDX|BPF_MEM: X = (memvalid & (1UL << f_k)) ? mem[f_k] : 0; continue; case BPF_MISC|BPF_TAX: X = A; continue; case BPF_MISC|BPF_TXA: A = X; continue; case BPF_RET|BPF_K: return f_k; case BPF_RET|BPF_A: return A; case BPF_ST: memvalid |= 1UL << f_k; mem[f_k] = A; continue; case BPF_STX: memvalid |= 1UL << f_k; mem[f_k] = X; continue; default: WARN_ON(1); return 0; } /* * Handle ancillary data, which are impossible * (or very difficult) to get parsing packet contents. 
*/ switch (k-SKF_AD_OFF) { case SKF_AD_PROTOCOL: A = ntohs(skb->protocol); continue; case SKF_AD_PKTTYPE: A = skb->pkt_type; continue; case SKF_AD_IFINDEX: A = skb->dev->ifindex; continue; case SKF_AD_NLATTR: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case SKF_AD_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } default: return 0; } } return 0; }
/*
 * phonet_rcv - receive one Phonet packet from a network device.
 *
 * Validates the Phonet header and advertised length, then either delivers
 * the skb to a local socket (broadcast or unicast) or routes it out through
 * another interface.
 * On error, returns non-zero and releases the skb; on successful delivery
 * skb ownership passes to sk_receive_skb()/dev_queue_xmit().
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pkttype,
		      struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct phonethdr *ph;
	struct sockaddr_pn sa;
	u16 len;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	/* pn_length includes two header bytes that were already pulled */
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);

	/* check if this is broadcasted */
	if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
		pn_deliver_sock_broadcast(net, skb);
		goto out;
	}

	/* check if we are the destination */
	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
		/* Phonet packet input */
		struct sock *sk = pn_find_sock_by_sa(net, &sa);

		if (sk)
			return sk_receive_skb(sk, skb, 0);

		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
	} else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto out; /* Race between address deletion and loopback */
	else {
		/* Phonet packet routing */
		struct net_device *out_dev;

		out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
		if (!out_dev) {
			LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n",
					pn_sockaddr_get_addr(&sa));
			goto out;
		}

		/* restore the header before handing to the output device */
		__skb_push(skb, sizeof(struct phonethdr));
		skb->dev = out_dev;
		if (out_dev == dev) {
			LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
					pn_sockaddr_get_addr(&sa), dev->name);
			goto out_dev;
		}
		/* Some drivers (e.g. TUN) do not allocate HW header space */
		if (skb_cow_head(skb, out_dev->hard_header_len))
			goto out_dev;

		if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
					skb->len) < 0)
			goto out_dev;
		dev_queue_xmit(skb);
		dev_put(out_dev);	/* drop ref taken by phonet_route_output() */
		return NET_RX_SUCCESS;
out_dev:
		dev_put(out_dev);
	}

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * ssc_mode_select - process the MODE SELECT (6/10) command for the
 * emulated tape drive.
 *
 * Pulls the parameter list from the initiator, validates the Page Format
 * bit, copies any block descriptor, and — when the Save Page bit is set —
 * walks the mode pages, applying compression and device-configuration
 * (plus its extension subpage) changes.
 *
 * Returns SAM_STAT_GOOD, or SAM_STAT_CHECK_CONDITION with sense set.
 */
uint8_t ssc_mode_select(struct scsi_cmd *cmd)
{
	uint8_t *sam_stat = &cmd->dbuf_p->sam_stat;
	uint8_t *buf = cmd->dbuf_p->data;
	int block_descriptor_sz;
	int page_len;
	uint8_t *bdb = NULL;
	int i;
	int long_lba = 0;
	int count;
	int save_page;

	save_page = cmd->scb[1] & 0x01;

	/* expected parameter-list length depends on the CDB variant */
	switch (cmd->scb[0]) {
	case MODE_SELECT:
		cmd->dbuf_p->sz = cmd->scb[4];
		break;
	case MODE_SELECT_10:
		cmd->dbuf_p->sz = get_unaligned_be16(&cmd->scb[7]);
		break;
	default:
		cmd->dbuf_p->sz = 0;
	}

	count = retrieve_CDB_data(cmd->cdev, cmd->dbuf_p);

	MHVTL_DBG(1, "MODE SELECT (%ld) **", (long)cmd->dbuf_p->serialNo);

	if (!(cmd->scb[1] & 0x10)) { /* Page Format: 1 - SPC, 0 - vendor uniq */
		mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_FIELD_IN_CDB, sam_stat);
		return SAM_STAT_CHECK_CONDITION;
	}

	/* locate the block descriptor (if any) and first mode page offset */
	switch (cmd->scb[0]) {
	case MODE_SELECT:
		block_descriptor_sz = buf[3];
		if (block_descriptor_sz)
			bdb = &buf[4];
		i = 4 + block_descriptor_sz;
		break;
	case MODE_SELECT_10:
		block_descriptor_sz = get_unaligned_be16(&buf[6]);
		long_lba = buf[4] & 1;
		if (block_descriptor_sz)
			bdb = &buf[8];
		i = 8 + block_descriptor_sz;
		break;
	default:
		mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_OP_CODE, sam_stat);
		return SAM_STAT_CHECK_CONDITION;
	}

	if (bdb) {
		if (long_lba) {
			mkSenseBuf(ILLEGAL_REQUEST, E_INVALID_FIELD_IN_CDB,
					sam_stat);
			MHVTL_DBG(1, "Warning can not "
				"handle long descriptor block (long_lba bit)");
			return SAM_STAT_CHECK_CONDITION;
		}
		memcpy(modeBlockDescriptor, bdb, block_descriptor_sz);
	}

	/* Ignore mode pages if 'save page' bit not set */
	if (!save_page) {
		MHVTL_DBG(1, "Save page bit not set. Ignoring page data");
		return SAM_STAT_GOOD;
	}

#ifdef MHVTL_DEBUG
	if (debug)
		hex_dump(buf, cmd->dbuf_p->sz);
#endif

	MHVTL_DBG(3, "count: %d, i: %d", count, i);

	if (i == 4) {
		MHVTL_DBG(3, "Offset 0: %02x %02x %02x %02x",
			buf[0], buf[1], buf[2], buf[3]);
	} else {
		MHVTL_DBG(3, "Offset 0: %02x %02x %02x %02x %02x %02x %02x %02x",
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7]);
	}

	/*
	 * NOTE(review): "count -= i" shortens the loop bound while "i" keeps
	 * its header offset, so the last "i" bytes of page data appear to be
	 * skipped — confirm against upstream mhvtl whether this is intended.
	 */
	count -= i;
	while (i < count) {
		/* debug dump of the next 32 bytes of page data */
		MHVTL_DBG(3, " %02d: %02x %02x %02x %02x %02x %02x %02x %02x",
			i,
			buf[i+0], buf[i+1], buf[i+2], buf[i+3],
			buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
		MHVTL_DBG(3, " %02d: %02x %02x %02x %02x %02x %02x %02x %02x",
			i+8,
			buf[i+8], buf[i+9], buf[i+10], buf[i+11],
			buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
		MHVTL_DBG(3, " %02d: %02x %02x %02x %02x %02x %02x %02x %02x",
			i+16,
			buf[i+16], buf[i+17], buf[i+18], buf[i+19],
			buf[i+20], buf[i+21], buf[i+22], buf[i+23]);
		MHVTL_DBG(3, " %02d: %02x %02x %02x %02x %02x %02x %02x %02x",
			i+24,
			buf[i+24], buf[i+25], buf[i+26], buf[i+27],
			buf[i+28], buf[i+29], buf[i+30], buf[i+31]);

		/* Default page len is, override if sub-pages */
		page_len = buf[i + 1];

		switch (buf[i]) {
		case MODE_DATA_COMPRESSION:
			set_mode_compression(cmd, &buf[i]);
			break;
		case MODE_DEVICE_CONFIGURATION:
			/* If this is '01' it's a subpage value
			 * i.e. DEVICE CONFIGURATION EXTENSION
			 * If it's 0x0e, it indicates a page length
			 * for MODE DEVICE CONFIGURATION
			 */
			if (buf[i + 1] == 0x01) {
				if (set_device_configuration_extension(cmd,
								&buf[i]))
					return SAM_STAT_CHECK_CONDITION;
				/* Subpage 1 - override default page length */
				page_len = get_unaligned_be16(&buf[i + 2]);
			} else
				set_device_configuration(cmd, &buf[i]);
			break;
		default:
			MHVTL_DBG_PRT_CDB(1, cmd);
			MHVTL_DBG(1, "Mode page 0x%02x not handled", buf[i]);
			break;
		}
		if (page_len == 0) { /* Something wrong with data structure */
			page_len = cmd->dbuf_p->sz;
			MHVTL_LOG("Problem with mode select data structure");
		}
		i += page_len;	/* Next mode page */
	}

	return SAM_STAT_GOOD;
}