int hci_h4p_bc4_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue) { static const u8 nokia_oui[3] = {0x00, 0x19, 0x4F}; struct sk_buff *skb; unsigned int offset; int retries, count, i, not_valid; unsigned long flags; info->fw_error = 0; BT_DBG("Sending firmware"); skb = skb_dequeue(fw_queue); if (!skb) return -ENOMSG; /* Check if this is bd_address packet */ if (skb->data[15] == 0x01 && skb->data[16] == 0x00) { offset = 21; skb->data[offset + 1] = 0x00; skb->data[offset + 5] = 0x00; not_valid = 1; for (i = 0; i < 6; i++) { if (info->bd_addr[i] != 0x00) { not_valid = 0; break; } } if (not_valid) { dev_info(info->dev, "Valid bluetooth address not found, setting some random\n"); /* When address is not valid, use some random */ memcpy(info->bd_addr, nokia_oui, 3); get_random_bytes(info->bd_addr + 3, 3); } skb->data[offset + 7] = info->bd_addr[0]; skb->data[offset + 6] = info->bd_addr[1]; skb->data[offset + 4] = info->bd_addr[2]; skb->data[offset + 0] = info->bd_addr[3]; skb->data[offset + 3] = info->bd_addr[4]; skb->data[offset + 2] = info->bd_addr[5]; } for (count = 1; ; count++) { BT_DBG("Sending firmware command %d", count); init_completion(&info->fw_completion); skb_queue_tail(&info->txq, skb); spin_lock_irqsave(&info->lock, flags); hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_THRI); spin_unlock_irqrestore(&info->lock, flags); skb = skb_dequeue(fw_queue); if (!skb) break; if (!wait_for_completion_timeout(&info->fw_completion, msecs_to_jiffies(1000))) { dev_err(info->dev, "No reply to fw command\n"); return -ETIMEDOUT; } if (info->fw_error) { dev_err(info->dev, "FW error\n"); return -EPROTO; } }; /* Wait for chip warm reset */ retries = 100; while ((!skb_queue_empty(&info->txq) || !(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) && retries--) { msleep(10); } if (!retries) { dev_err(info->dev, "Transmitter not empty\n"); return -ETIMEDOUT; } hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); if (hci_h4p_wait_for_cts(info, 1, 100)) { 
dev_err(info->dev, "cts didn't deassert after final speed\n"); return -ETIMEDOUT; } retries = 100; do { init_completion(&info->init_completion); hci_h4p_send_alive_packet(info); retries--; } while (!wait_for_completion_timeout(&info->init_completion, 100) && retries > 0); if (!retries) { dev_err(info->dev, "No alive reply after speed change\n"); return -ETIMEDOUT; } return 0; }
static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct iphdr *top_iph; struct ip_esp_hdr *esph; struct crypto_blkcipher *tfm; struct blkcipher_desc desc; struct esp_data *esp; struct sk_buff *trailer; int blksize; int clen; int alen; int nfrags; /* Strip IP+ESP header. */ __skb_pull(skb, skb->h.raw - skb->data); /* Now skb is pure payload to encrypt */ err = -ENOMEM; /* Round to block size */ clen = skb->len; esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; desc.tfm = tfm; desc.flags = 0; blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) goto error; /* Fill padding... */ do { int i; for (i=0; i<clen-skb->len - 2; i++) *(u8*)(trailer->tail + i) = i+1; } while (0); *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); __skb_push(skb, skb->data - skb->nh.raw); top_iph = skb->nh.iph; esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4); top_iph->tot_len = htons(skb->len + alen); *(u8*)(trailer->tail - 1) = top_iph->protocol; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; struct udphdr *uh; __be32 *udpdata32; uh = (struct udphdr *)esph; uh->source = encap->encap_sport; uh->dest = encap->encap_dport; uh->len = htons(skb->len + alen - top_iph->ihl*4); uh->check = 0; switch (encap->encap_type) { default: case UDP_ENCAP_ESPINUDP: esph = (struct ip_esp_hdr *)(uh + 1); break; case UDP_ENCAP_ESPINUDP_NON_IKE: udpdata32 = (__be32 *)(uh + 1); udpdata32[0] = udpdata32[1] = 0; esph = (struct ip_esp_hdr *)(udpdata32 + 2); break; } top_iph->protocol = IPPROTO_UDP; } else top_iph->protocol = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); xfrm_aevent_doreplay(x); if (esp->conf.ivlen) { if (unlikely(!esp->conf.ivinitted)) { 
get_random_bytes(esp->conf.ivec, esp->conf.ivlen); esp->conf.ivinitted = 1; } crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); } do { struct scatterlist *sg = &esp->sgbuf[0]; if (unlikely(nfrags > ESP_NUM_FAST_SG)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); } while (0); if (unlikely(err)) goto error; if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen); } if (esp->auth.icv_full_len) { err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, sizeof(*esph) + esp->conf.ivlen + clen); memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); } ip_send_check(top_iph); error: return err; }
/*
 * ext2_fill_super - read the on-disk superblock and set up an ext2 mount
 * @sb:     VFS superblock to fill in
 * @data:   mount options string (also carries the "sb=" block via
 *          get_sb_block())
 * @silent: suppress "can't find filesystem" style messages when non-zero
 *
 * Reads and validates the superblock (re-reading it if the real block
 * size differs from the initial probe size), applies default and
 * user-supplied mount options, loads the group descriptors, initializes
 * the reservation window tree and per-cpu counters, and finally reads
 * the root inode.  On failure the goto labels unwind exactly what has
 * been set up so far.  Returns 0 on success or a negative errno in @ret.
 */
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk ("EXT2-fs: unable to set blocksize\n");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		printk ("EXT2-fs: unable to read superblock\n");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif

	/* Error behaviour defaults come from the superblock */
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

	set_opt(sbi->s_mount_opt, RESERVATION);

	/* ext3301 extension state: reset before parse_options() fills it */
	ext3301_enc_key = 0;
	ext3301_no_encrypt = 0;

	if (!parse_options ((char *) data, sbi))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		printk("EXT2-fs warning: feature flags set on rev 0 fs, "
		       "running e2fsck is recommended\n");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		printk("EXT2-fs: %s: couldn't mount because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		printk("EXT2-fs: %s: couldn't mount RDWR because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	/* execute-in-place only works when fs block == page size */
	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			printk("XIP: Unsupported blocksize\n");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			printk("EXT2-fs: Couldn't read superblock on "
			       "2nd try.\n");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			printk ("EXT2-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

	/* Inode size/first-ino are fixed on rev 0, read from disk otherwise */
	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			printk ("EXT2-fs: unsupported inode size: %d\n",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
		le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
		sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
		sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			printk ("VFS: Unsupported blocksize on dev "
				"%s.\n", sb->s_id);
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	/* Each group's bitmaps must fit in a single block (8 bits/byte) */
	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #blocks per group too big: %lu\n",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #fragments per group too big: %lu\n",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #inodes per group too big: %lu\n",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
				le32_to_cpu(es->s_first_data_block) - 1)
					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount_group_desc;
	}
	/* Read every group descriptor block; unwind partial reads on error */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			printk ("EXT2-fs: unable to read group descriptors\n");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		printk ("EXT2-fs: group descriptors corrupted!\n");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		printk(KERN_ERR "EXT2-fs: insufficient memory\n");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;
	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
		goto failed_mount3;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: get root inode failed\n");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_warning(sb, __func__,
			"mounting ext3 filesystem as ext2");
	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
	return 0;

cantfind_ext2:
	if (!silent)
		printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
		       sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}
/**
 * __dtc_single_mode - dma test case one-thread from memory to memory
 *
 * Allocates two DMA-coherent buffers, fills the source with random data,
 * then copies it to the destination in DTC_ONE_LEN-sized chunks on a
 * single-mode DMA channel, enqueueing chunks both from this thread and
 * (via the shared g_acur_cnt counter) from the queue-done callback.
 * Finally verifies the destination matches the source.
 *
 * Returns 0 if success, the err line number if failed.
 */
u32 __dtc_single_mode(void)
{
	u32 uret = 0;
	u32 uqueued_normal = 0;	/* chunks enqueued from this thread */
	void *src_vaddr = NULL, *dst_vaddr = NULL;
	u32 src_paddr = 0, dst_paddr = 0;
	dm_hdl_t dma_hdl = (dm_hdl_t)NULL;
	struct dma_cb_t done_cb;
	struct dma_op_cb_t op_cb;
	struct dma_config_t dma_config;

	pr_info("%s enter\n", __func__);

	/* prepare the buffer and data */
	src_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&src_paddr, GFP_KERNEL);
	if(NULL == src_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: src_vaddr 0x%08x, src_paddr 0x%08x\n", __func__, (u32)src_vaddr, src_paddr);
	dst_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&dst_paddr, GFP_KERNEL);
	if(NULL == dst_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: dst_vaddr 0x%08x, dst_paddr 0x%08x\n", __func__, (u32)dst_vaddr, dst_paddr);

	/* randomize source, poison destination so a missed copy is visible */
	get_random_bytes(src_vaddr, DTC_TOTAL_LEN);
	memset(dst_vaddr, 0x54, DTC_TOTAL_LEN);

	/* publish progress state shared with the DMA callbacks */
	atomic_set(&g_acur_cnt, 0);
	g_src_addr = src_paddr;
	g_dst_addr = dst_paddr;

	/* request a memory-to-memory channel in single mode */
	dma_hdl = sw_dma_request("m2m_dma", DMA_WORK_MODE_SINGLE);
	if(NULL == dma_hdl) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_request success, dma_hdl 0x%08x\n", __func__, (u32)dma_hdl);

	/* set callback: done_cb struct is reused for QD/FD/HD registration */
	memset(&done_cb, 0, sizeof(done_cb));
	memset(&op_cb, 0, sizeof(op_cb));
	done_cb.func = __cb_qd_single_mode;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_QD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set queuedone_cb success\n", __func__);
	done_cb.func = __cb_fd_single_mode;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_FD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set fulldone_cb success\n", __func__);
	done_cb.func = __cb_hd_single_mode;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_HD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set halfdone_cb success\n", __func__);
	op_cb.func = __cb_op_single_mode;
	op_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_OP_CB, (void *)&op_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set op_cb success\n", __func__);

	/* configure first chunk: linear-address transfer with all IRQs on */
	memset(&dma_config, 0, sizeof(dma_config));
	dma_config.xfer_type = DMAXFER_D_BWORD_S_BWORD;
	dma_config.address_type = DMAADDRT_D_LN_S_LN;
	dma_config.para = 0;
	dma_config.irq_spt = CHAN_IRQ_HD | CHAN_IRQ_FD | CHAN_IRQ_QD;
	dma_config.src_addr = src_paddr;
	dma_config.dst_addr = dst_paddr;
	dma_config.byte_cnt = DTC_ONE_LEN;
	//dma_config.conti_mode = 1;
	dma_config.bconti_mode = false;
	dma_config.src_drq_type = DRQSRC_SDRAM;
	dma_config.dst_drq_type = DRQDST_SDRAM;
	if(0 != sw_dma_config(dma_hdl, &dma_config, ENQUE_PHASE_NORMAL)) {
		uret = __LINE__;
		goto end;
	}
	uqueued_normal++;
	pr_info("%s: sw_dma_config success\n", __func__);
	sw_dma_dump_chan(dma_hdl);

	g_qd_cnt = 0;
	/* start dma */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_START, NULL)) {
		uret = __LINE__;
		goto end;
	}

	/* normal enqueue and callback enqueue simutanously */
	{
		u32 ucur_cnt = 0, ucur_saddr = 0, ucur_daddr = 0;
		u32 uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;

		/* g_acur_cnt is also advanced by the queue-done callback */
		while((ucur_cnt = atomic_add_return(1, &g_acur_cnt)) < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_NORMAL))
				printk("%s err, line %d\n", __func__, __LINE__);
			uqueued_normal++;
		}
	}

	if(0 != __waitdone_single_mode()) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: __waitdone_single_mode sucess\n", __func__);

	/* NOTE: must sleep here, see the analysis in __cb_qd_single_mode, 2012-11-14 */
	msleep(2000);

	/* check if data ok */
	if(0 == memcmp(src_vaddr, dst_vaddr, DTC_TOTAL_LEN)) {
		//pr_info("%s: data check ok! g_qd_cnt %d\n", __func__, g_qd_cnt);
		printk("%s: data check ok! g_qd_cnt %d, normal queued %d\n", __func__, g_qd_cnt, uqueued_normal);
	} else {
		pr_err("%s: data check err!\n", __func__);
		uret = __LINE__; /* return err */
		goto end;
	}

	/* stop and free dma channel */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_stop success\n", __func__);
	if(0 != sw_dma_release(dma_hdl)) {
		uret = __LINE__;
		goto end;
	}
	/* mark released so the error path below does not release twice */
	dma_hdl = (dm_hdl_t)NULL;
	pr_info("%s: sw_dma_release success\n", __func__);

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	else
		pr_info("%s, success!\n", __func__);

	/* error path: make sure the channel is stopped and released */
	if((dm_hdl_t)NULL != dma_hdl) {
		pr_err("%s, stop and release dma handle now!\n", __func__);
		if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		if(0 != sw_dma_release(dma_hdl))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
	}
	pr_err("%s, line %d!\n", __func__, __LINE__);
	if(NULL != src_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, src_vaddr, src_paddr);
	if(NULL != dst_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, dst_vaddr, dst_paddr);
	pr_err("%s, end!\n", __func__);

	return uret;
}
static int splat_taskq_test10(struct file *file, void *arg) { taskq_t *tq; splat_taskq_arg_t **tqas; atomic_t count; int i, j, rc = 0; int minalloc = 1; int maxalloc = 10; int nr_tasks = 100; int canceled = 0; int completed = 0; int blocked = 0; clock_t start, cancel; tqas = vmalloc(sizeof(*tqas) * nr_tasks); if (tqas == NULL) return -ENOMEM; memset(tqas, 0, sizeof(*tqas) * nr_tasks); splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n", SPLAT_TASKQ_TEST10_NAME, "delay", minalloc, maxalloc, nr_tasks); if ((tq = taskq_create(SPLAT_TASKQ_TEST10_NAME, 3, maxclsyspri, minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) { splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME); rc = -EINVAL; goto out_free; } atomic_set(&count, 0); for (i = 0; i < nr_tasks; i++) { splat_taskq_arg_t *tq_arg; uint32_t rnd; /* A random timeout in jiffies of at most 5 seconds */ get_random_bytes((void *)&rnd, 4); rnd = rnd % (5 * HZ); tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP); tq_arg->file = file; tq_arg->name = SPLAT_TASKQ_TEST10_NAME; tq_arg->count = &count; tqas[i] = tq_arg; /* * Dispatch every 1/3 one immediately to mix it up, the cancel * code is inherently racy and we want to try and provoke any * subtle concurrently issues. */ if ((i % 3) == 0) { tq_arg->expire = ddi_get_lbolt(); tq_arg->id = taskq_dispatch(tq, splat_taskq_test10_func, tq_arg, TQ_SLEEP); } else { tq_arg->expire = ddi_get_lbolt() + rnd; tq_arg->id = taskq_dispatch_delay(tq, splat_taskq_test10_func, tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd); } if (tq_arg->id == 0) { splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' dispatch failed\n", SPLAT_TASKQ_TEST10_NAME); kmem_free(tq_arg, sizeof(splat_taskq_arg_t)); taskq_wait(tq); rc = -EINVAL; goto out; } else { splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' dispatch %lu in %lu jiffies\n", SPLAT_TASKQ_TEST10_NAME, (unsigned long)tq_arg->id, !(i % 3) ? 
0 : tq_arg->expire - ddi_get_lbolt()); } } /* * Start randomly canceling tasks for the duration of the test. We * happen to know the valid task id's will be in the range 1..nr_tasks * because the taskq is private and was just created. However, we * have no idea of a particular task has already executed or not. */ splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' randomly " "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME); start = ddi_get_lbolt(); i = 0; while (ddi_time_before(ddi_get_lbolt(), start + 5 * HZ)) { taskqid_t id; uint32_t rnd; i++; cancel = ddi_get_lbolt(); get_random_bytes((void *)&rnd, 4); id = 1 + (rnd % nr_tasks); rc = taskq_cancel_id(tq, id); /* * Keep track of the results of the random cancels. */ if (rc == 0) { canceled++; } else if (rc == ENOENT) { completed++; } else if (rc == EBUSY) { blocked++; } else { rc = -EINVAL; break; } /* * Verify we never get blocked to long in taskq_cancel_id(). * The worst case is 10ms if we happen to cancel the task * which is currently executing. We allow a factor of 2x. */ if (ddi_get_lbolt() - cancel > HZ / 50) { splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' cancel for %lu took %lu\n", SPLAT_TASKQ_TEST10_NAME, (unsigned long)id, ddi_get_lbolt() - cancel); rc = -ETIMEDOUT; break; } get_random_bytes((void *)&rnd, 4); msleep(1 + (rnd % 100)); rc = 0; } taskq_wait(tq); /* * Cross check the results of taskq_cancel_id() with the number of * times the dispatched function actually ran successfully. 
*/ if ((rc == 0) && (nr_tasks - canceled != atomic_read(&count))) rc = -EDOM; splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' %d attempts, " "%d canceled, %d completed, %d blocked, %d/%d tasks run\n", SPLAT_TASKQ_TEST10_NAME, i, canceled, completed, blocked, atomic_read(&count), nr_tasks); splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' destroying %d\n", SPLAT_TASKQ_TEST10_NAME, rc); out: taskq_destroy(tq); out_free: for (j = 0; j < nr_tasks && tqas[j] != NULL; j++) kmem_free(tqas[j], sizeof(splat_taskq_arg_t)); vfree(tqas); return rc; }
/*
 * f2fs_fill_super - read the F2FS superblock and set up the mount
 * @sb:     VFS superblock to fill in
 * @data:   mount options string
 * @silent: unused here; conventional fill_super parameter
 *
 * Validates the raw superblock, parses mount options, initializes the
 * sb_info locks/queues, loads the checkpoint, builds the segment/node
 * managers, reads the root inode, exposes proc/sysfs entries and runs
 * roll-forward recovery.  On any failure the goto labels unwind exactly
 * what has been set up so far, and the whole sequence is retried once
 * (the "retry" flag) after shrinking the dcache.  Returns 0 on success
 * or a negative errno.
 */
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	/* reset per-attempt state; the retry path jumps back here */
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	/* cache block/inode accounting from the checkpoint */
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			/* set on the retry pass triggered below */
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
int sadb_msg_getspi_parse(struct sock *sk, struct sadb_msg* msg, struct sadb_msg **reply) { struct sadb_ext *ext_msgs[SADB_EXT_MAX+1], *reply_ext_msgs[SADB_EXT_MAX+1]; int error = 0, found_avail = 0; __u32 newspi = 0; __u32 spi_max = 0, spi_min = 0; struct ipsec_sa sadb_entry; struct sadb_address *src, *dst; if (!msg) { PFKEY_DEBUG("msg==null\n"); error = -EINVAL; goto err; } memset(ext_msgs, 0, sizeof(ext_msgs)); memset(reply_ext_msgs, 0, sizeof(reply_ext_msgs)); error = sadb_msg_detect_ext(msg, ext_msgs); if (error) { PFKEY_DEBUG("error in sadb_msg_detect_ext\n"); goto err; } memset(reply_ext_msgs, 0, sizeof(reply_ext_msgs)); if (ext_msgs[SADB_EXT_ADDRESS_SRC] && ext_msgs[SADB_EXT_ADDRESS_DST] && ext_msgs[SADB_EXT_SPIRANGE]) { src = (struct sadb_address*)ext_msgs[SADB_EXT_ADDRESS_SRC]; dst = (struct sadb_address*)ext_msgs[SADB_EXT_ADDRESS_DST]; memset(&sadb_entry, 0, sizeof(struct ipsec_sa)); error = sadb_address_to_sockaddr(src, (struct sockaddr*)&sadb_entry.src); if (error) { PFKEY_DEBUG("error in translate src address\n"); goto err; } sadb_entry.prefixlen_s = src->sadb_address_prefixlen; error = sadb_address_to_sockaddr(dst, (struct sockaddr*)&sadb_entry.dst); if (error) { PFKEY_DEBUG("error in translate dst address\n"); } sadb_entry.prefixlen_d = dst->sadb_address_prefixlen; spi_min = ((struct sadb_spirange*)ext_msgs[SADB_EXT_SPIRANGE])->sadb_spirange_min; spi_max = ((struct sadb_spirange*)ext_msgs[SADB_EXT_SPIRANGE])->sadb_spirange_max; /* SPI which is under 255 is reserved by IANA. * Additionally, 256 and 257 reserved ofr internal use. 
*/ if (spi_min < 258) { PFKEY_DEBUG("SPI value is reserved.(SPI<258)\n"); goto err; } if (spi_min == spi_max) { PFKEY_DEBUG("spi_min and spi_max are equal\n"); error = sadb_find_by_address_proto_spi((struct sockaddr*)&sadb_entry.src, sadb_entry.prefixlen_s, (struct sockaddr*)&sadb_entry.dst, sadb_entry.prefixlen_d, spi_min, msg->sadb_msg_type, NULL /*only check*/); if (error == -ESRCH) { newspi = spi_min; found_avail = 1; } else { PFKEY_DEBUG("sadb_find_by_address_proto_spi return %d\n", error); goto err; } } else if (ntohl(spi_min) < ntohl(spi_max)) { /* This codes are derived from FreeS/WAN */ int i = 0; __u32 rand_val; __u32 spi_diff; PFKEY_DEBUG("spi_min and spi_max are defference\n"); while ( ( i < (spi_diff = (ntohl(spi_max) - ntohl(spi_min)))) && !found_avail ) { get_random_bytes((void*) &rand_val, /* sizeof(extr->tdb->tdb_said.spi) */ ( (spi_diff < (2^8)) ? 1 : ( (spi_diff < (2^16)) ? 2 : ( (spi_diff < (2^24)) ? 3 : 4 ) ) ) ); newspi = htonl(ntohl(spi_min) + (rand_val % (spi_diff + 1))); PFKEY_DEBUG("new spi is %d\n", ntohl(newspi)); i++; error = sadb_find_by_address_proto_spi( (struct sockaddr*)&sadb_entry.src, sadb_entry.prefixlen_s, (struct sockaddr*)&sadb_entry.dst, sadb_entry.prefixlen_d, newspi, msg->sadb_msg_type, NULL /* only check */); if (error == -ESRCH) { found_avail = 1; break; } else { PFKEY_DEBUG("sadb_find_by_address_proto_spi return %d\n", error); goto err; } } } else { PFKEY_DEBUG("invalid spi range\n"); error = -EINVAL; goto err; } if (found_avail) { sadb_entry.spi = newspi; sadb_entry.state = SADB_SASTATE_LARVAL; error = sadb_append(&sadb_entry); if (error) { PFKEY_DEBUG("sadb_append return %d\n", error); goto err; } } else { PFKEY_DEBUG("could not find available spi\n"); goto err; } } else { PFKEY_DEBUG("necessary ext messages are not available\n"); error = -EINVAL; goto err; } error = pfkey_sa_build(&reply_ext_msgs[SADB_EXT_SA], SADB_EXT_SA, newspi, 0, 0, 0, 0, SADB_SAFLAGS_PFS); if (error) { PFKEY_DEBUG("pfkey_address_build faild\n"); 
goto err; } reply_ext_msgs[0] = (struct sadb_ext*) msg; reply_ext_msgs[SADB_EXT_ADDRESS_SRC] = ext_msgs[SADB_EXT_ADDRESS_SRC]; reply_ext_msgs[SADB_EXT_ADDRESS_DST] = ext_msgs[SADB_EXT_ADDRESS_DST]; error = pfkey_msg_build(reply, reply_ext_msgs, EXT_BITS_OUT); err: return error; }
/** * Clear log and detect resize of log device. * * @wdev walb dev. * @ctl ioctl data. * RETURN: * 0 in success, or -EFAULT. */ static int ioctl_wdev_clear_log(struct walb_dev *wdev, struct walb_ctl *ctl) { u64 new_ldev_size, old_ldev_size; u8 new_uuid[UUID_SIZE], old_uuid[UUID_SIZE]; unsigned int pbs = wdev->physical_bs; bool is_grown = false; struct walb_super_sector *super; u64 lsid0_off; struct lsid_set lsids; u64 old_ring_buffer_size; u32 new_salt; ASSERT(ctl->command == WALB_IOCTL_CLEAR_LOG); LOGn("WALB_IOCTL_CLEAR_LOG.\n"); /* Freeze iocore and checkpointing. */ iocore_freeze(wdev); stop_checkpointing(&wdev->cpd); /* Get old/new log device size. */ old_ldev_size = wdev->ldev_size; new_ldev_size = wdev->ldev->bd_part->nr_sects; if (old_ldev_size > new_ldev_size) { LOGe("Log device shrink not supported.\n"); goto error0; } /* Backup variables. */ old_ring_buffer_size = wdev->ring_buffer_size; backup_lsid_set(wdev, &lsids); /* Initialize lsid(s). */ spin_lock(&wdev->lsid_lock); wdev->lsids.latest = 0; wdev->lsids.flush = 0; wdev->lsids.completed = 0; wdev->lsids.permanent = 0; wdev->lsids.written = 0; wdev->lsids.prev_written = 0; wdev->lsids.oldest = 0; spin_unlock(&wdev->lsid_lock); /* Grow the walblog device. */ if (old_ldev_size < new_ldev_size) { LOGn("Detect log device size change.\n"); /* Grow the disk. */ is_grown = true; if (!resize_disk(wdev->log_gd, new_ldev_size)) { LOGe("grow disk failed.\n"); iocore_set_readonly(wdev); goto error1; } LOGn("Grown log device size from %"PRIu64" to %"PRIu64".\n", old_ldev_size, new_ldev_size); wdev->ldev_size = new_ldev_size; /* Recalculate ring buffer size. */ wdev->ring_buffer_size = addr_pb(pbs, new_ldev_size) - get_ring_buffer_offset(pbs); } /* Generate new uuid and salt. */ get_random_bytes(new_uuid, 16); get_random_bytes(&new_salt, sizeof(new_salt)); wdev->log_checksum_salt = new_salt; /* Update superblock image. 
*/ spin_lock(&wdev->lsuper0_lock); super = get_super_sector(wdev->lsuper0); memcpy(old_uuid, super->uuid, UUID_SIZE); memcpy(super->uuid, new_uuid, UUID_SIZE); super->ring_buffer_size = wdev->ring_buffer_size; super->log_checksum_salt = new_salt; /* super->metadata_size; */ lsid0_off = get_offset_of_lsid_2(super, 0); spin_unlock(&wdev->lsuper0_lock); /* Sync super sector. */ if (!walb_sync_super_block(wdev)) { LOGe("sync superblock failed.\n"); iocore_set_readonly(wdev); goto error2; } /* Invalidate first logpack */ if (!invalidate_lsid(wdev, 0)) { LOGe("invalidate lsid 0 failed.\n"); iocore_set_readonly(wdev); goto error2; } /* Clear log overflow. */ iocore_clear_log_overflow(wdev); /* Melt iocore and checkpointing. */ start_checkpointing(&wdev->cpd); iocore_melt(wdev); return 0; error2: restore_lsid_set(wdev, &lsids); wdev->ring_buffer_size = old_ring_buffer_size; #if 0 wdev->ldev_size = old_ldev_size; if (!resize_disk(wdev->log_gd, old_ldev_size)) { LOGe("resize_disk to shrink failed.\n"); } #endif error1: start_checkpointing(&wdev->cpd); iocore_melt(wdev); error0: return -EFAULT; }
/*----------------------------------------------------------------
* prism2mgmt_scan
*
* Initiate a scan for BSSs.
*
* This function corresponds to MLME-scan.request and part of
* MLME-scan.confirm. As far as I can tell in the standard, there
* are no restrictions on when a scan.request may be issued. We have
* to handle in whatever state the driver/MAC happen to be.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
{
	int result = 0;
	hfa384x_t *hw = wlandev->priv;
	p80211msg_dot11req_scan_t *msg = msgp;
	u16 roamingmode, word;
	int i, timeout;
	int istmpenable = 0;	/* set when we temporarily enable the MAC port */

	hfa384x_HostScanRequest_data_t scanreq;

	/* gatekeeper check: HostScan needs firmware >= 1.3.2 */
	if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
				     hw->ident_sta_fw.minor,
				     hw->ident_sta_fw.variant) <
	    HFA384x_FIRMWARE_VERSION(1, 3, 2)) {
		printk(KERN_ERR "HostScan not supported with current firmware (<1.3.2).\n");
		result = 1;
		msg->resultcode.data = P80211ENUM_resultcode_not_supported;
		goto exit;
	}

	memset(&scanreq, 0, sizeof(scanreq));

	/* save current roaming mode so it can be restored after the scan */
	result = hfa384x_drvr_getconfig16(hw,
					  HFA384x_RID_CNFROAMINGMODE, &roamingmode);
	if (result) {
		printk(KERN_ERR "getconfig(ROAMMODE) failed. result=%d\n",
		       result);
		msg->resultcode.data =
		    P80211ENUM_resultcode_implementation_failure;
		goto exit;
	}

	/* drop into mode 3 for the scan */
	result = hfa384x_drvr_setconfig16(hw,
					  HFA384x_RID_CNFROAMINGMODE,
					  HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
	if (result) {
		printk(KERN_ERR "setconfig(ROAMINGMODE) failed. result=%d\n",
		       result);
		msg->resultcode.data =
		    P80211ENUM_resultcode_implementation_failure;
		goto exit;
	}

	/* active or passive? Passive-scan control only exists on fw > 1.5.0 */
	if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
				     hw->ident_sta_fw.minor,
				     hw->ident_sta_fw.variant) >
	    HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
		if (msg->scantype.data != P80211ENUM_scantype_active)
			word = cpu_to_le16(msg->maxchanneltime.data);
		else
			word = 0;	/* 0 disables passive-scan dwell */

		result = hfa384x_drvr_setconfig16(hw,
						  HFA384x_RID_CNFPASSIVESCANCTRL,
						  word);
		if (result) {
			/* non-fatal: just warn and carry on */
			printk(KERN_WARNING "Passive scan not supported with "
			       "current firmware. (<1.5.1)\n");
		}
	}

	/* set up the txrate to be 2MBPS. Should be fastest basicrate... */
	word = HFA384x_RATEBIT_2;
	scanreq.txRate = cpu_to_le16(word);

	/* set up the channel list as a bitmask */
	word = 0;
	for (i = 0; i < msg->channellist.data.len; i++) {
		u8 channel = msg->channellist.data.data[i];
		if (channel > 14)
			continue;
		/* channel 1 is BIT 0 ... channel 14 is BIT 13
		 * NOTE(review): a channel value of 0 would shift by -1
		 * (undefined behavior) -- assumes caller never passes 0. */
		word |= (1 << (channel - 1));
	}
	scanreq.channelList = cpu_to_le16(word);

	/* set up the ssid, if present.
	 * NOTE(review): assumes msg->ssid.data.len fits scanreq.ssid.data --
	 * no bounds check here; confirm against the msg validation path. */
	scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
	memcpy(scanreq.ssid.data, msg->ssid.data.data, msg->ssid.data.len);

	/* Enable the MAC port if it's not already enabled */
	result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_PORTSTATUS, &word);
	if (result) {
		printk(KERN_ERR "getconfig(PORTSTATUS) failed. "
		       "result=%d\n", result);
		msg->resultcode.data =
		    P80211ENUM_resultcode_implementation_failure;
		goto exit;
	}
	if (word == HFA384x_PORTSTATUS_DISABLED) {
		u16 wordbuf[17];

		result = hfa384x_drvr_setconfig16(hw,
						  HFA384x_RID_CNFROAMINGMODE,
						  HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
		if (result) {
			printk(KERN_ERR "setconfig(ROAMINGMODE) failed. result=%d\n",
			       result);
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		/* Construct a bogus SSID and assign it to OwnSSID and
		 * DesiredSSID so the temporary IBSS does not join anything
		 * real; wordbuf[0] is the length prefix, the rest is random. */
		wordbuf[0] = cpu_to_le16(WLAN_SSID_MAXLEN);
		get_random_bytes(&wordbuf[1], WLAN_SSID_MAXLEN);
		result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFOWNSSID,
						wordbuf,
						HFA384x_RID_CNFOWNSSID_LEN);
		if (result) {
			printk(KERN_ERR "Failed to set OwnSSID.\n");
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
						wordbuf,
						HFA384x_RID_CNFDESIREDSSID_LEN);
		if (result) {
			printk(KERN_ERR "Failed to set DesiredSSID.\n");
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		/* bsstype */
		result = hfa384x_drvr_setconfig16(hw,
						  HFA384x_RID_CNFPORTTYPE,
						  HFA384x_PORTTYPE_IBSS);
		if (result) {
			printk(KERN_ERR "Failed to set CNFPORTTYPE.\n");
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		/* ibss options */
		result = hfa384x_drvr_setconfig16(hw,
						  HFA384x_RID_CREATEIBSS,
						  HFA384x_CREATEIBSS_JOINCREATEIBSS);
		if (result) {
			printk(KERN_ERR "Failed to set CREATEIBSS.\n");
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		result = hfa384x_drvr_enable(hw, 0);
		if (result) {
			printk(KERN_ERR "drvr_enable(0) failed. "
			       "result=%d\n", result);
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
		istmpenable = 1;	/* remember to disable it again below */
	}

	/* Figure out our timeout first Kus, then HZ */
	timeout = msg->channellist.data.len * msg->maxchanneltime.data;
	timeout = (timeout * HZ) / 1000;

	/* Issue the scan request */
	hw->scanflag = 0;

	result = hfa384x_drvr_setconfig(hw,
					HFA384x_RID_HOSTSCAN, &scanreq,
					sizeof(hfa384x_HostScanRequest_data_t));
	if (result) {
		printk(KERN_ERR "setconfig(SCANREQUEST) failed. result=%d\n",
		       result);
		msg->resultcode.data =
		    P80211ENUM_resultcode_implementation_failure;
		goto exit;
	}

	/* sleep until info frame arrives (scanflag set by the IRQ path) */
	wait_event_interruptible_timeout(hw->cmdq, hw->scanflag, timeout);

	msg->numbss.status = P80211ENUM_msgitem_status_data_ok;
	if (hw->scanflag == -1)	/* -1 marks "scan aborted/failed" */
		hw->scanflag = 0;

	msg->numbss.data = hw->scanflag;

	hw->scanflag = 0;

	/* Disable port if we temporarily enabled it. */
	if (istmpenable) {
		result = hfa384x_drvr_disable(hw, 0);
		if (result) {
			printk(KERN_ERR "drvr_disable(0) failed. "
			       "result=%d\n", result);
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
			goto exit;
		}
	}

	/* restore original roaming mode */
	result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFROAMINGMODE,
					  roamingmode);
	if (result) {
		printk(KERN_ERR "setconfig(ROAMMODE) failed. result=%d\n",
		       result);
		msg->resultcode.data =
		    P80211ENUM_resultcode_implementation_failure;
		goto exit;
	}

	result = 0;
	msg->resultcode.data = P80211ENUM_resultcode_success;

exit:
	msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;

	return result;
}
/*
 * Orlov allocator: pick a block group for a new directory inode.
 *
 * For top-level directories (children of the root, or parents flagged
 * EXT3_TOPDIR_FL) it spreads directories across groups, starting from a
 * random group and picking the group with the fewest directories that
 * still has above-average free inodes and blocks.  For other
 * directories it scans from the parent's group for a group meeting
 * per-group debt/usage thresholds.  Falls back to any group with a free
 * inode.  Returns a group number >= 0, or -1 if nothing is available.
 */
static int find_group_orlov(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT3_I(parent)->i_block_group;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	int ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext3_fsblk_t freeb, avefreeb;
	ext3_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext3_grpblk_t min_blocks;
	int group = -1, i;
	struct ext3_group_desc *desc;

	/* Filesystem-wide averages (counters are approximate). */
	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb / ngroups;
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
		/* Top-level directory: start at a random group and choose
		 * the least-crowded group that is above average on both
		 * free inodes and free blocks. */
		int best_ndir = inodes_per_group;
		int best_group = -1;

		get_random_bytes(&group, sizeof(group));
		parent_group = (unsigned)group % ngroups;
		for (i = 0; i < ngroups; i++) {
			group = (parent_group + i) % ngroups;
			desc = ext3_get_group_desc (sb, group, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			best_group = group;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (best_group >= 0)
			return best_group;
		goto fallback;
	}

	/* NOTE(review): ndirs comes from an approximate counter; assumes
	 * it is nonzero here (at least the root dir) -- confirm. */
	blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;

	/* Per-group admission thresholds for a regular directory. */
	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;

	/* "Debt" caps how many directories a group may absorb relative
	 * to the average blocks consumed per directory. */
	max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext3_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return group;
	}

fallback:
	/* Any group with at least avefreei free inodes will do. */
	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext3_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return group;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
HPT_U32 os_get_stamp(void) { HPT_U32 stamp; get_random_bytes(&stamp, sizeof(stamp)); return stamp; }
int dhd_write_rdwr_korics_macaddr(struct dhd_info *dhd, struct ether_addr *mac) { struct file *fp = NULL; char macbuffer[18] = {0}; mm_segment_t oldfs = {0}; char randommac[3] = {0}; char buf[18] = {0}; char *filepath_efs = MACINFO_EFS; int is_zeromac = 0; int ret = 0; /* MAC address copied from efs/wifi.mac.info */ fp = filp_open(filepath_efs, O_RDONLY, 0); if (IS_ERR(fp)) { /* File Doesn't Exist. Create and write mac addr. */ fp = filp_open(filepath_efs, O_RDWR | O_CREAT, 0666); if (IS_ERR(fp)) { DHD_ERROR(("[WIFI] %s: File open error\n", filepath_efs)); return -1; } oldfs = get_fs(); set_fs(get_ds()); /* Generating the Random Bytes for * 3 last octects of the MAC address */ get_random_bytes(randommac, 3); sprintf(macbuffer, "%02X:%02X:%02X:%02X:%02X:%02X\n", 0x60, 0xd0, 0xa9, randommac[0], randommac[1], randommac[2]); DHD_ERROR(("[WIFI] The Random Generated MAC ID : %s\n", macbuffer)); if (fp->f_mode & FMODE_WRITE) { ret = fp->f_op->write(fp, (const char *)macbuffer, sizeof(macbuffer), &fp->f_pos); if (ret < 0) DHD_ERROR(("[WIFI] Mac address [%s]" " Failed to write into File:" " %s\n", macbuffer, filepath_efs)); else DHD_ERROR(("[WIFI] Mac address [%s]" " written into File: %s\n", macbuffer, filepath_efs)); } set_fs(oldfs); } else { /* Reading the MAC Address from .mac.info file * (the existed file or just created file) */ ret = kernel_read(fp, 0, buf, 18); /* to prevent abnormal string display when mac address * is displayed on the screen. 
*/ buf[17] = '\0'; /* Remove security log */ /* DHD_ERROR(("Read MAC : [%s] [%d] \r\n", buf, * strncmp(buf, "00:00:00:00:00:00", 17))); */ if ((buf[0] == '\0') || (strncmp(buf, "00:00:00:00:00:00", 17) == 0)) { is_zeromac = 1; } } if (ret) sscanf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", (unsigned int *)&(mac->octet[0]), (unsigned int *)&(mac->octet[1]), (unsigned int *)&(mac->octet[2]), (unsigned int *)&(mac->octet[3]), (unsigned int *)&(mac->octet[4]), (unsigned int *)&(mac->octet[5])); else DHD_INFO(("dhd_bus_start: Reading from the" " '%s' returns 0 bytes\n", filepath_efs)); if (fp) filp_close(fp, NULL); if (!is_zeromac) { /* Writing Newly generated MAC ID to the Dongle */ if (_dhd_set_mac_address(dhd, 0, mac) == 0) DHD_INFO(("dhd_bus_start: MACID is overwritten\n")); else DHD_ERROR(("dhd_bus_start: _dhd_set_mac_address() " "failed\n")); } else { DHD_ERROR(("dhd_bus_start:Is ZeroMAC BypassWrite.mac.info!\n")); } return 0; }
/*
 * dhd_check_rdwr_macaddr - decide where the MAC address comes from and
 * program it into the dongle.
 *
 * Two top-level cases:
 *  - .nvmac.info absent: read the OTP MAC from the chip, then compare
 *    it against MACINFO and MACINFO_EFS files (two near-identical
 *    passes) to decide whether to keep, overwrite, or randomize the
 *    address.  Result is recorded in the global g_imac_flag.
 *  - .nvmac.info present (COB type): use the address from that file,
 *    or randomize if it is empty/all-zero.
 *
 * Any *_RANDOM flag value triggers generation of a 60:d0:a9-prefixed
 * random address at the end.  Returns 0, or -1 if the OTP read fails.
 *
 * NOTE(review): the sscanf calls below write unsigned ints through
 * (unsigned int *)&mac->octet[i] casts (octet is a byte array) -- this
 * over-writes adjacent bytes on each conversion; flagged, left as-is.
 * NOTE(review): randommac is plain char; %02X can sign-extend values
 * >= 0x80 and overflow macbuffer[18] in the sprintf -- confirm.
 */
int dhd_check_rdwr_macaddr(struct dhd_info *dhd, dhd_pub_t *dhdp,
	struct ether_addr *mac)
{
	struct file *fp_mac = NULL;
	struct file *fp_nvm = NULL;
	char macbuffer[18] = {0};
	char randommac[3] = {0};
	char buf[18] = {0};
	char *filepath_data = MACINFO;
	char *filepath_efs = MACINFO_EFS;
#ifdef CONFIG_TARGET_LOCALE_NA
	char *nvfilepath = "/data/misc/wifi/.nvmac.info";
#else
	char *nvfilepath = "/efs/wifi/.nvmac.info";
#endif
	char cur_mac[128] = {0};
	char dummy_mac[ETHER_ADDR_LEN] = {0x00, 0x90, 0x4C, 0xC5, 0x12, 0x38};
	char cur_macbuffer[18] = {0};
	int ret = -1;

	g_imac_flag = MACADDR_NONE;

	fp_nvm = filp_open(nvfilepath, O_RDONLY, 0);
	if (IS_ERR(fp_nvm)) { /* file does not exist */

		/* read MAC Address from the chip (OTP) */
		strcpy(cur_mac, "cur_etheraddr");
		ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, cur_mac,
			sizeof(cur_mac), 0, 0);
		if (ret < 0) {
			DHD_ERROR(("Current READ MAC error \r\n"));
			memset(cur_mac, 0, ETHER_ADDR_LEN);
			return -1;
		} else {
			DHD_ERROR(("MAC (OTP) : "
			"[%02X:%02X:%02X:%02X:%02X:%02X] \r\n",
			cur_mac[0], cur_mac[1], cur_mac[2], cur_mac[3],
			cur_mac[4], cur_mac[5]));
		}

		sprintf(cur_macbuffer, "%02X:%02X:%02X:%02X:%02X:%02X\n",
			cur_mac[0], cur_mac[1], cur_mac[2],
			cur_mac[3], cur_mac[4], cur_mac[5]);

		/* First pass: compare against MACINFO (data partition). */
		fp_mac = filp_open(filepath_data, O_RDONLY, 0);
		if (IS_ERR(fp_mac)) { /* file does not exist */

			/* read mac is the dummy mac (00:90:4C:C5:12:38) */
			if (memcmp(cur_mac, dummy_mac, ETHER_ADDR_LEN) == 0)
				g_imac_flag = MACADDR_MOD_RANDOM;
			else if (strncmp(buf, "00:00:00:00:00:00", 17) == 0)
				g_imac_flag = MACADDR_MOD_RANDOM;
			else
				g_imac_flag = MACADDR_MOD;
		} else {
			int is_zeromac;

			ret = kernel_read(fp_mac, 0, buf, 18);
			filp_close(fp_mac, NULL);
			buf[17] = '\0';

			is_zeromac = strncmp(buf, "00:00:00:00:00:00", 17);
			DHD_ERROR(("MAC (FILE): [%s] [%d] \r\n",
				buf, is_zeromac));

			if (is_zeromac == 0) {
				DHD_ERROR(("Zero MAC detected."
					" Trying Random MAC.\n"));
				g_imac_flag = MACADDR_MOD_RANDOM;
			} else {
				sscanf(buf, "%02X:%02X:%02X:%02X:%02X:%02X",
					(unsigned int *)&(mac->octet[0]),
					(unsigned int *)&(mac->octet[1]),
					(unsigned int *)&(mac->octet[2]),
					(unsigned int *)&(mac->octet[3]),
					(unsigned int *)&(mac->octet[4]),
					(unsigned int *)&(mac->octet[5]));
				/* current MAC address is same as previous one */
				if (memcmp(cur_mac, mac->octet,
					ETHER_ADDR_LEN) == 0) {
					g_imac_flag = MACADDR_NONE;
				} else { /* change MAC address */
					if (_dhd_set_mac_address(dhd, 0,
						mac) == 0) {
						DHD_INFO(("%s: MACID is"
						" overwritten\n",
						__FUNCTION__));
						g_imac_flag = MACADDR_MOD;
					} else {
						DHD_ERROR(("%s: "
						"_dhd_set_mac_address()"
						" failed\n", __FUNCTION__));
						g_imac_flag = MACADDR_NONE;
					}
				}
			}
		}

		/* Second pass: same logic against MACINFO_EFS. */
		fp_mac = filp_open(filepath_efs, O_RDONLY, 0);
		if (IS_ERR(fp_mac)) { /* file does not exist */

			/* read mac is the dummy mac (00:90:4C:C5:12:38) */
			if (memcmp(cur_mac, dummy_mac, ETHER_ADDR_LEN) == 0)
				g_imac_flag = MACADDR_MOD_RANDOM;
			else if (strncmp(buf, "00:00:00:00:00:00", 17) == 0)
				g_imac_flag = MACADDR_MOD_RANDOM;
			else
				g_imac_flag = MACADDR_MOD;
		} else {
			int is_zeromac;

			ret = kernel_read(fp_mac, 0, buf, 18);
			filp_close(fp_mac, NULL);
			buf[17] = '\0';

			is_zeromac = strncmp(buf, "00:00:00:00:00:00", 17);
			DHD_ERROR(("MAC (FILE): [%s] [%d] \r\n",
				buf, is_zeromac));

			if (is_zeromac == 0) {
				DHD_ERROR(("Zero MAC detected."
					" Trying Random MAC.\n"));
				g_imac_flag = MACADDR_MOD_RANDOM;
			} else {
				sscanf(buf, "%02X:%02X:%02X:%02X:%02X:%02X",
					(unsigned int *)&(mac->octet[0]),
					(unsigned int *)&(mac->octet[1]),
					(unsigned int *)&(mac->octet[2]),
					(unsigned int *)&(mac->octet[3]),
					(unsigned int *)&(mac->octet[4]),
					(unsigned int *)&(mac->octet[5]));
				/* current MAC address is same as previous one */
				if (memcmp(cur_mac, mac->octet,
					ETHER_ADDR_LEN) == 0) {
					g_imac_flag = MACADDR_NONE;
				} else { /* change MAC address */
					if (_dhd_set_mac_address(dhd, 0,
						mac) == 0) {
						DHD_INFO(("%s: MACID is"
						" overwritten\n",
						__FUNCTION__));
						g_imac_flag = MACADDR_MOD;
					} else {
						DHD_ERROR(("%s: "
						"_dhd_set_mac_address()"
						" failed\n", __FUNCTION__));
						g_imac_flag = MACADDR_NONE;
					}
				}
			}
		}
	} else {
		/* COB type. only COB. */
		/* Reading the MAC Address from .nvmac.info file
		 * (the existed file or just created file)
		 */
		ret = kernel_read(fp_nvm, 0, buf, 18);

		/* to prevent abnormal string display when mac address
		 * is displayed on the screen.
		 */
		buf[17] = '\0';
		DHD_ERROR(("Read MAC : [%s] [%d] \r\n", buf,
			strncmp(buf, "00:00:00:00:00:00", 17)));
		if ((buf[0] == '\0') ||
			(strncmp(buf, "00:00:00:00:00:00", 17) == 0)) {
			g_imac_flag = MACADDR_COB_RANDOM;
		} else {
			sscanf(buf, "%02X:%02X:%02X:%02X:%02X:%02X",
				(unsigned int *)&(mac->octet[0]),
				(unsigned int *)&(mac->octet[1]),
				(unsigned int *)&(mac->octet[2]),
				(unsigned int *)&(mac->octet[3]),
				(unsigned int *)&(mac->octet[4]),
				(unsigned int *)&(mac->octet[5]));
			/* Writing Newly generated MAC ID to the Dongle */
			if (_dhd_set_mac_address(dhd, 0, mac) == 0) {
				DHD_INFO(("%s: MACID is overwritten\n",
					__FUNCTION__));
				g_imac_flag = MACADDR_COB;
			} else {
				DHD_ERROR(("%s: _dhd_set_mac_address()"
					" failed\n", __FUNCTION__));
			}
		}
		filp_close(fp_nvm, NULL);
	}

	/* Randomization requested by either path above. */
	if ((g_imac_flag == MACADDR_COB_RANDOM) ||
	    (g_imac_flag == MACADDR_MOD_RANDOM)) {
		get_random_bytes(randommac, 3);
		sprintf(macbuffer, "%02X:%02X:%02X:%02X:%02X:%02X\n",
			0x60, 0xd0, 0xa9, randommac[0], randommac[1],
			randommac[2]);
		DHD_ERROR(("[WIFI] The Random Generated MAC ID : %s\n",
			macbuffer));
		sscanf(macbuffer, "%02X:%02X:%02X:%02X:%02X:%02X",
			(unsigned int *)&(mac->octet[0]),
			(unsigned int *)&(mac->octet[1]),
			(unsigned int *)&(mac->octet[2]),
			(unsigned int *)&(mac->octet[3]),
			(unsigned int *)&(mac->octet[4]),
			(unsigned int *)&(mac->octet[5]));
		if (_dhd_set_mac_address(dhd, 0, mac) == 0) {
			DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
			g_imac_flag = MACADDR_COB;
		} else {
			DHD_ERROR(("%s: _dhd_set_mac_address() failed\n",
				__FUNCTION__));
		}
	}

	return 0;
}
/*
 * esp_init_state - initialize ESP private data for an xfrm state.
 *
 * Allocates the esp_data, sets up the optional authentication
 * transform (HMAC) and the mandatory encryption transform, allocates
 * the IV buffer, and computes header/trailer lengths.  On any failure
 * it jumps to the common error path, which hands the (possibly NULL)
 * esp to esp_destroy() for teardown and returns -EINVAL.
 */
static int esp_init_state(struct xfrm_state *x, void *args)
{
	struct esp_data *esp = NULL;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;	/* esp still NULL here; error path
					 * assumes esp_destroy copes -- see
					 * NOTE(review) below */
	}
	if (x->ealg == NULL)
		goto error;

	esp = kmalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	memset(esp, 0, sizeof(*esp));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		/* Auth key/tfm; key memory stays owned by x->aalg. */
		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
		if (esp->auth.tfm == NULL)
			goto error;
		esp->auth.icv = esp_hmac_digest;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* Sanity: the negotiated ICV width must match what the
		 * crypto layer produces for this algorithm. */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
			NETDEBUG(printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_tfm_alg_digestsize(esp->auth.tfm),
				 aalg_desc->uinfo.auth.icv_fullbits/8));
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}

	/* Encryption transform: ECB for the null cipher, CBC otherwise. */
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	if (x->props.ealgo == SADB_EALG_NULL)
		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
	else
		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
	if (esp->conf.tfm == NULL)
		goto error;
	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		/* Seed the IV from the entropy pool. */
		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
	}
	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
		goto error;

	/* Outer header: ESP header + IV, plus outer IP header in tunnel
	 * mode, plus UDP encapsulation overhead if configured. */
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode)
		x->props.header_len += sizeof(struct iphdr);
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;	/* unknown encapsulation type */
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
	return 0;

error:
	/* NOTE(review): esp may be NULL on the two earliest failures;
	 * assumes esp_destroy() tolerates x->data == NULL -- confirm. */
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
int setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc; int baselen; unsigned int tilen; struct ntlmv2_resp *buf; char ntlmv2_hash[16]; unsigned char *tiblob = NULL; /* target info blob */ if (ses->server->secType == RawNTLMSSP) { if (!ses->domainName) { rc = find_domain_name(ses, nls_cp); if (rc) { cERROR(1, "error %d finding domain name", rc); goto setup_ntlmv2_rsp_ret; } } } else { rc = build_avpair_blob(ses, nls_cp); if (rc) { cERROR(1, "error %d building av pair blob", rc); goto setup_ntlmv2_rsp_ret; } } baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); tilen = ses->auth_key.len; tiblob = ses->auth_key.response; ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); if (!ses->auth_key.response) { rc = ENOMEM; ses->auth_key.len = 0; cERROR(1, "%s: Can't allocate auth blob", __func__); goto setup_ntlmv2_rsp_ret; } ses->auth_key.len += baselen; buf = (struct ntlmv2_resp *) (ses->auth_key.response + CIFS_SESS_KEY_SIZE); buf->blob_signature = cpu_to_le32(0x00000101); buf->reserved = 0; buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); buf->reserved2 = 0; memcpy(ses->auth_key.response + baselen, tiblob, tilen); /* calculate ntlmv2_hash */ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp); if (rc) { cERROR(1, "could not get v2 hash rc %d", rc); goto setup_ntlmv2_rsp_ret; } /* calculate first part of the client response (CR1) */ rc = CalcNTLMv2_response(ses, ntlmv2_hash); if (rc) { cERROR(1, "Could not calculate CR1 rc: %d", rc); goto setup_ntlmv2_rsp_ret; } /* now calculate the session key for NTLMv2 */ rc = crypto_shash_setkey(ses->server->secmech.hmacmd5, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cERROR(1, "%s: Could not set NTLMV2 Hash as a key", __func__); goto setup_ntlmv2_rsp_ret; } rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash); if (rc) { cERROR(1, "%s: Could not init hmacmd5", __func__); goto setup_ntlmv2_rsp_ret; } rc = 
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, ses->auth_key.response + CIFS_SESS_KEY_SIZE, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cERROR(1, "%s: Could not update with response", __func__); goto setup_ntlmv2_rsp_ret; } rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash, ses->auth_key.response); if (rc) cERROR(1, "%s: Could not generate md5 hash", __func__); setup_ntlmv2_rsp_ret: kfree(tiblob); return rc; }
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, u8 local_io, u8 remote_io) { struct hci_conn *hcon = conn->hcon; struct smp_chan *smp = conn->smp_chan; u8 method; u32 passkey = 0; int ret = 0; /* Initialize key for JUST WORKS */ memset(smp->tk, 0, sizeof(smp->tk)); clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); /* If neither side wants MITM, use JUST WORKS */ /* If either side has unknown io_caps, use JUST WORKS */ /* Otherwise, look up method from the table */ if (!(auth & SMP_AUTH_MITM) || local_io > SMP_IO_KEYBOARD_DISPLAY || remote_io > SMP_IO_KEYBOARD_DISPLAY) method = JUST_WORKS; else method = gen_method[remote_io][local_io]; /* If not bonding, don't ask user to confirm a Zero TK */ if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) method = JUST_WORKS; /* If Just Works, Continue with Zero TK */ if (method == JUST_WORKS) { set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); return 0; } /* Not Just Works/Confirm results in MITM Authentication */ if (method != JUST_CFM) set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); /* If both devices have Keyoard-Display I/O, the master * Confirms and the slave Enters the passkey. */ if (method == OVERLAP) { if (hcon->link_mode & HCI_LM_MASTER) method = CFM_PASSKEY; else method = REQ_PASSKEY; } /* Generate random passkey. Not valid until confirmed. */ if (method == CFM_PASSKEY) { u8 key[16]; memset(key, 0, sizeof(key)); get_random_bytes(&passkey, sizeof(passkey)); passkey %= 1000000; put_unaligned_le32(passkey, key); swap128(key, smp->tk); BT_DBG("PassKey: %d", passkey); } hci_dev_lock(hcon->hdev); if (method == REQ_PASSKEY) ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type); else ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, cpu_to_le32(passkey), 0); hci_dev_unlock(hcon->hdev); return ret; }
static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { get_random_bytes(rdata, dlen); return 0; }
/*
 * smp_distribute_keys() - send our share of the negotiated SMP keys.
 *
 * Walks the agreed key-distribution bitmask (the intersection of what was
 * requested in the pairing request and granted in the response) and sends
 * LTK/EDIV/Rand, IRK + identity address, and CSRK as applicable, clearing
 * each bit as it is handled.  When all our keys are out (initiator side,
 * or @force), pairing bookkeeping is torn down.
 */
int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
	struct smp_cmd_pairing *req, *rsp;
	struct smp_chan *smp = conn->smp_chan;
	__u8 *keydist;

	BT_DBG("conn %p force %d", conn, force);

	if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
		return 0;

	rsp = (void *) &smp->prsp[1];

	/* The responder sends its keys first */
	if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
		return 0;

	req = (void *) &smp->preq[1];

	/* Pick the side of the distribution we own and mask it down to
	 * what the peer actually asked for. */
	if (conn->hcon->out) {
		keydist = &rsp->init_key_dist;
		*keydist &= req->init_key_dist;
	} else {
		keydist = &rsp->resp_key_dist;
		*keydist &= req->resp_key_dist;
	}

	BT_DBG("keydist 0x%x", *keydist);

	if (*keydist & SMP_DIST_ENC_KEY) {
		struct smp_cmd_encrypt_info enc;
		struct smp_cmd_master_ident ident;
		struct hci_conn *hcon = conn->hcon;
		u8 authenticated;
		__le16 ediv;

		/* Freshly generated LTK/EDIV/Rand for the peer to use when
		 * re-encrypting as master later. */
		get_random_bytes(enc.ltk, sizeof(enc.ltk));
		get_random_bytes(&ediv, sizeof(ediv));
		get_random_bytes(ident.rand, sizeof(ident.rand));

		smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);

		/* Store the same key locally so we can serve it back. */
		authenticated = hcon->sec_level == BT_SECURITY_HIGH;
		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
			    HCI_SMP_LTK_SLAVE, 1, authenticated,
			    enc.ltk, smp->enc_key_size, ediv, ident.rand);

		ident.ediv = ediv;
		smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);

		*keydist &= ~SMP_DIST_ENC_KEY;
	}

	if (*keydist & SMP_DIST_ID_KEY) {
		struct smp_cmd_ident_addr_info addrinfo;
		struct smp_cmd_ident_info idinfo;

		/* Send a dummy key */
		get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
		smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);

		/* Just public address */
		memset(&addrinfo, 0, sizeof(addrinfo));
		bacpy(&addrinfo.bdaddr, &conn->hcon->src);

		smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
			     &addrinfo);

		*keydist &= ~SMP_DIST_ID_KEY;
	}

	if (*keydist & SMP_DIST_SIGN) {
		struct smp_cmd_sign_info sign;

		/* Send a dummy key */
		get_random_bytes(sign.csrk, sizeof(sign.csrk));
		smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);

		*keydist &= ~SMP_DIST_SIGN;
	}

	/* Distribution complete on our side: clear the pending flag and
	 * destroy the pairing channel state. */
	if (conn->hcon->out || force) {
		clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	return 0;
}
/*
 * esp6_init_state() - initialize an IPv6 ESP transform state.
 *
 * Validates the algorithm configuration, allocates the esp_data private
 * block, sets up the (legacy crypto API) auth and cipher transforms, and
 * computes the ESP header overhead.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EINVAL on any other error.
 *
 * NOTE(review): the early validation failures jump to the error label with
 * esp == NULL, so x->data is set to NULL before esp6_destroy(x) runs —
 * this presumably relies on esp6_destroy() tolerating a NULL x->data;
 * confirm against its implementation.
 */
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	/* ESP-in-UDP encapsulation is not supported here */
	if (x->encap)
		goto error;

	esp = kmalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	memset(esp, 0, sizeof(*esp));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
		if (esp->auth.tfm == NULL)
			goto error;
		esp->auth.icv = esp_hmac_digest;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* Sanity check: the transform's digest size must match the
		 * table entry for this algorithm. */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
			printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
			       x->aalg->alg_name,
			       crypto_tfm_alg_digestsize(esp->auth.tfm),
			       aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		/* Scratch buffer for the full-width ICV computation */
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	/* NULL encryption runs in ECB mode, everything else in CBC */
	if (x->props.ealgo == SADB_EALG_NULL)
		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
	else
		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
	if (esp->conf.tfm == NULL)
		goto error;
	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		/* Random initial IV; subsequently chained per packet */
		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
	}
	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
	/* Tunnel mode adds an outer IPv6 header */
	if (x->props.mode)
		x->props.header_len += sizeof(struct ipv6hdr);
	x->data = esp;
	return 0;

error:
	/* esp6_destroy() frees everything hanging off x->data */
	x->data = esp;
	esp6_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
/** * tb_domain_challenge_switch_key() - Challenge and approve switch * @tb: Domain the switch belongs to * @sw: Switch to approve * * For switches that support secure connect, this function generates * random challenge and sends it to the switch. The switch responds to * this and if the response matches our random challenge, the switch is * approved and connected. * * Return: %0 on success and negative errno in case of failure. */ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw) { u8 challenge[TB_SWITCH_KEY_SIZE]; u8 response[TB_SWITCH_KEY_SIZE]; u8 hmac[TB_SWITCH_KEY_SIZE]; struct tb_switch *parent_sw; struct crypto_shash *tfm; struct shash_desc *shash; int ret; if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key) return -EPERM; /* The parent switch must be authorized before this one */ parent_sw = tb_to_switch(sw->dev.parent); if (!parent_sw || !parent_sw->authorized) return -EINVAL; get_random_bytes(challenge, sizeof(challenge)); ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response); if (ret) return ret; tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE); if (ret) goto err_free_tfm; shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; goto err_free_tfm; } shash->tfm = tfm; shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; memset(hmac, 0, sizeof(hmac)); ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac); if (ret) goto err_free_shash; /* The returned HMAC must match the one we calculated */ if (memcmp(response, hmac, sizeof(hmac))) { ret = -EKEYREJECTED; goto err_free_shash; } crypto_free_shash(tfm); kfree(shash); return tb->cm_ops->approve_switch(tb, sw); err_free_shash: kfree(shash); err_free_tfm: crypto_free_shash(tfm); return ret; }
/*
 * _rtl_init_mac80211() - populate the mac80211 hw/wiphy structures for
 * this rtlwifi device: supported bands (with HT caps), hardware capability
 * flags, interface modes, queue counts and the permanent MAC address.
 */
static void _rtl_init_mac80211(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct ieee80211_supported_band *sband;

	/* Dual-band single-MAC hardware registers both bands; otherwise
	 * only the currently configured band is registered. */
	if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
	    rtlhal->bandset == BAND_ON_BOTH) {
		/* 1: 2.4 G bands */
		/* <1> use mac->bands as mem for hw->wiphy->bands */
		sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);

		/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
		 * to default value(1T1R) */
		memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
		       sizeof(struct ieee80211_supported_band));

		/* <3> init ht cap base on ant_num */
		_rtl_init_hw_ht_capab(hw, &sband->ht_cap);

		/* <4> set mac->sband to wiphy->sband */
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;

		/* 2: 5 G bands */
		/* <1> use mac->bands as mem for hw->wiphy->bands */
		sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);

		/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
		 * to default value(1T1R) */
		memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
		       sizeof(struct ieee80211_supported_band));

		/* <3> init ht cap base on ant_num */
		_rtl_init_hw_ht_capab(hw, &sband->ht_cap);

		/* <4> set mac->sband to wiphy->sband */
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	} else {
		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
			/* <1> use mac->bands as mem for hw->wiphy->bands */
			sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);

			/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
			 * to default value(1T1R) */
			memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
			       &rtl_band_2ghz,
			       sizeof(struct ieee80211_supported_band));

			/* <3> init ht cap base on ant_num */
			_rtl_init_hw_ht_capab(hw, &sband->ht_cap);

			/* <4> set mac->sband to wiphy->sband */
			hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		} else if (rtlhal->current_bandtype == BAND_ON_5G) {
			/* <1> use mac->bands as mem for hw->wiphy->bands */
			sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);

			/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
			 * to default value(1T1R) */
			memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
			       &rtl_band_5ghz,
			       sizeof(struct ieee80211_supported_band));

			/* <3> init ht cap base on ant_num */
			_rtl_init_hw_ht_capab(hw, &sband->ht_cap);

			/* <4> set mac->sband to wiphy->sband */
			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
		} else {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
				 ("Err BAND %d\n", rtlhal->current_bandtype));
		}
	}
	/* <5> set hw caps */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
	    IEEE80211_HW_RX_INCLUDES_FCS |
	    IEEE80211_HW_BEACON_FILTER |
	    IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;

	/* swlps or hwlps has been set in diff chip in init_sw_vars */
	if (rtlpriv->psc.swctrl_lps)
		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
			IEEE80211_HW_PS_NULLFUNC_STACK |
			/* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
			0;

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_AP) |
	    BIT(NL80211_IFTYPE_STATION) |
	    BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->rts_threshold = 2347;

	hw->queues = AC_MAX;
	hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;

	/* TODO: Correct this value for our hw */
	/* TODO: define these hard code value */
	hw->channel_change_time = 100;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 4;
	/* hw->max_rates = 1; */
	hw->sta_data_size = sizeof(struct rtl_sta_info);

	/* <6> mac address: use the efuse address when valid, otherwise a
	 * fixed Realtek prefix with a random last octet.
	 * NOTE: the local array below shadows the rtlmac pointer declared
	 * above, which is no longer used at this point. */
	if (is_valid_ether_addr(rtlefuse->dev_addr)) {
		SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
	} else {
		u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
		get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
		SET_IEEE80211_PERM_ADDR(hw, rtlmac);
	}
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 * @nilfs: the_nilfs struct
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 *
 * Allocates and wires up the per-mount nilfs_sb_info, loads the filesystem
 * state, attaches the requested checkpoint (or snapshot) and instantiates
 * the root dentry.  Returns 0 on success or a negative errno, unwinding
 * whatever was set up via the failed_* labels.
 */
static int nilfs_fill_super(struct super_block *sb, void *data, int silent,
			    struct the_nilfs *nilfs)
{
	struct nilfs_sb_info *sbi;
	struct inode *root;
	__u64 cno;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	get_nilfs(nilfs);
	sbi->s_nilfs = nilfs;
	sbi->s_super = sb;
	atomic_set(&sbi->s_count, 1);

	err = init_nilfs(nilfs, sbi, (char *)data);
	if (err)
		goto failed_sbi;

	spin_lock_init(&sbi->s_inode_lock);
	INIT_LIST_HEAD(&sbi->s_dirty_files);
	INIT_LIST_HEAD(&sbi->s_list);

	/*
	 * Following initialization is overlapped because
	 * nilfs_sb_info structure has been cleared at the beginning.
	 * But we reserve them to keep our interest and make ready
	 * for the future change.
	 */
	get_random_bytes(&sbi->s_next_generation,
			 sizeof(sbi->s_next_generation));
	spin_lock_init(&sbi->s_next_gen_lock);

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;
	sb->s_bdi = nilfs->ns_bdi;

	err = load_nilfs(nilfs, sbi);
	if (err)
		goto failed_sbi;

	cno = nilfs_last_cno(nilfs);

	/* A read-only snapshot mount must point at a checkpoint that is
	 * really marked as a snapshot. */
	if (sb->s_flags & MS_RDONLY) {
		if (nilfs_test_opt(sbi, SNAPSHOT)) {
			down_read(&nilfs->ns_segctor_sem);
			err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
						       sbi->s_snapshot_cno);
			up_read(&nilfs->ns_segctor_sem);
			if (err < 0) {
				if (err == -ENOENT)
					err = -EINVAL;
				goto failed_sbi;
			}
			if (!err) {
				printk(KERN_ERR
				       "NILFS: The specified checkpoint is "
				       "not a snapshot "
				       "(checkpoint number=%llu).\n",
				       (unsigned long long)sbi->s_snapshot_cno);
				err = -EINVAL;
				goto failed_sbi;
			}
			cno = sbi->s_snapshot_cno;
		}
	}

	err = nilfs_attach_checkpoint(sbi, cno);
	if (err) {
		printk(KERN_ERR "NILFS: error loading a checkpoint"
		       " (checkpoint number=%llu).\n", (unsigned long long)cno);
		goto failed_sbi;
	}

	/* Writable mounts need the segment constructor (log writer). */
	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_segment_constructor(sbi);
		if (err)
			goto failed_checkpoint;
	}

	root = nilfs_iget(sb, NILFS_ROOT_INO);
	if (IS_ERR(root)) {
		printk(KERN_ERR "NILFS: get root inode failed\n");
		err = PTR_ERR(root);
		goto failed_segctor;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "NILFS: corrupt root inode.\n");
		err = -EINVAL;
		goto failed_segctor;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "NILFS: get root dentry failed\n");
		err = -ENOMEM;
		goto failed_segctor;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sbi);
		up_write(&nilfs->ns_sem);
	}

	/* Only a non-snapshot mount becomes the "current" mount. */
	down_write(&nilfs->ns_super_sem);
	if (!nilfs_test_opt(sbi, SNAPSHOT))
		nilfs->ns_current = sbi;
	up_write(&nilfs->ns_super_sem);

	return 0;

 failed_segctor:
	nilfs_detach_segment_constructor(sbi);

 failed_checkpoint:
	nilfs_detach_checkpoint(sbi);

 failed_sbi:
	put_nilfs(nilfs);
	sb->s_fs_info = NULL;
	nilfs_put_sbinfo(sbi);
	return err;
}
/*
 * splat_taskq_test9() - exercise taskq_dispatch_delay().
 *
 * Dispatches nr_tasks work items, each with a random delay of up to five
 * seconds, then waits for the queue to drain and verifies every task ran
 * (each completed task is expected to increment @count).  Returns 0 on
 * success, -EINVAL on create/dispatch failure, -ERANGE if tasks were lost.
 */
static int splat_taskq_test9(struct file *file, void *arg)
{
	taskq_t *tq;
	atomic_t count;
	int i, rc = 0;
	int minalloc = 1;
	int maxalloc = 10;
	int nr_tasks = 100;

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
	    "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST9_NAME, "delay", minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST9_NAME, 3, maxclsyspri,
	    minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
		    "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME);
		return -EINVAL;
	}

	atomic_set(&count, 0);

	for (i = 1; i <= nr_tasks; i++) {
		splat_taskq_arg_t *tq_arg;
		taskqid_t id;
		uint32_t rnd;

		/* A random timeout in jiffies of at most 5 seconds */
		get_random_bytes((void *)&rnd, 4);
		rnd = rnd % (5 * HZ);

		/* Per-task argument; the task function is responsible for
		 * freeing it once it runs. */
		tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
		tq_arg->file = file;
		tq_arg->name = SPLAT_TASKQ_TEST9_NAME;
		tq_arg->expire = ddi_get_lbolt() + rnd;
		tq_arg->count = &count;

		splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
		    "Taskq '%s' delay dispatch %u jiffies\n",
		    SPLAT_TASKQ_TEST9_NAME, rnd);

		id = taskq_dispatch_delay(tq, splat_taskq_test9_func,
		    tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
			   "Taskq '%s' delay dispatch failed\n",
			   SPLAT_TASKQ_TEST9_NAME);
			kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
			taskq_wait(tq);
			rc = -EINVAL;
			goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' waiting for "
	    "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME, nr_tasks);

	taskq_wait(tq);
	if (atomic_read(&count) != nr_tasks)
		rc = -ERANGE;

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' %d/%d delay "
	    "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME,
	    atomic_read(&count), nr_tasks);
	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' destroying\n",
	    SPLAT_TASKQ_TEST9_NAME);
out:
	taskq_destroy(tq);

	return rc;
}
/* Boot-time initializer: seed the hashidentrnd secret from the kernel
 * entropy pool. */
void __init initialize_hashidentrnd(void)
{
	get_random_bytes(&hashidentrnd, sizeof(hashidentrnd));
}
/*
 * ipsec_klips_init() - bring up the KLIPS IPsec stack.
 *
 * Initializes state caches, procfs entries, the SADB, the radij tree,
 * PF_KEY, protocol handlers (or the alternate xfrm hook), tunnel/mast
 * devices, optional NAT-T encap, sysctl, and finally seeds the PRNG.
 * On failure the reverse-order error-label ladder unwinds everything
 * initialized so far.
 *
 * NOTE(review): several steps accumulate status with `error |= ...`
 * rather than plain assignment before the goto checks — presumably
 * intentional to preserve an earlier nonzero status; confirm.
 */
/* void */
int
ipsec_klips_init(void)
{
	int error = 0;
	unsigned char seed[256];
#ifdef CONFIG_KLIPS_ENC_3DES
	extern int des_check_key;

	/* turn off checking of keys */
	des_check_key=0;
#endif /* CONFIG_KLIPS_ENC_3DES */

	KLIPS_PRINT(1, "klips_info:ipsec_init: "
		    "KLIPS startup, Openswan KLIPS IPsec stack version: %s\n",
		    ipsec_version_code());

	error = ipsec_xmit_state_cache_init ();
	if (error)
		goto error_xmit_state_cache;

	error = ipsec_rcv_state_cache_init ();
	if (error)
		goto error_rcv_state_cache;

	error |= ipsec_proc_init();
	if (error)
		goto error_proc_init;

#ifdef SPINLOCK
	ipsec_sadb.sadb_lock = SPIN_LOCK_UNLOCKED;
#else /* SPINLOCK */
	ipsec_sadb.sadb_lock = 0;
#endif /* SPINLOCK */

#ifndef SPINLOCK
	tdb_lock.lock = 0;
	eroute_lock.lock = 0;
#endif /* !SPINLOCK */

	error |= ipsec_sadb_init();
	if (error)
		goto error_sadb_init;

	error |= ipsec_radijinit();
	if (error)
		goto error_radijinit;

	error |= pfkey_init();
	if (error)
		goto error_pfkey_init;

	error |= register_netdevice_notifier(&ipsec_dev_notifier);
	if (error)
		goto error_netdev_notifier;

#ifdef CONFIG_XFRM_ALTERNATE_STACK
	error = xfrm_register_alternate_rcv (ipsec_rcv);
	if (error)
		goto error_xfrm_register;
#else // CONFIG_XFRM_ALTERNATE_STACK

#ifdef CONFIG_KLIPS_ESP
	error |= openswan_inet_add_protocol(&esp_protocol, IPPROTO_ESP,"ESP");
	if (error)
		goto error_openswan_inet_add_protocol_esp;
#endif /* CONFIG_KLIPS_ESP */

#ifdef CONFIG_KLIPS_AH
	error |= openswan_inet_add_protocol(&ah_protocol, IPPROTO_AH,"AH");
	if (error)
		goto error_openswan_inet_add_protocol_ah;
#endif /* CONFIG_KLIPS_AH */

/* we never actually link IPCOMP to the stack */
#ifdef IPCOMP_USED_ALONE
#ifdef CONFIG_KLIPS_IPCOMP
	error |= openswan_inet_add_protocol(&comp_protocol, IPPROTO_COMP,"IPCOMP");
	if (error)
		goto error_openswan_inet_add_protocol_comp;
#endif /* CONFIG_KLIPS_IPCOMP */
#endif

#endif // CONFIG_XFRM_ALTERNATE_STACK

	error |= ipsec_tunnel_init_devices();
	if (error)
		goto error_tunnel_init_devices;

	/* NOTE(review): the result of ipsec_mast_init_devices() is OR-ed in
	 * but not followed by a goto check like the steps above. */
	error |= ipsec_mast_init_devices();

#if defined(NET_26) && defined(CONFIG_IPSEC_NAT_TRAVERSAL)
	/* register our ESP-UDP handler */
	if(udp4_register_esp_rcvencap(klips26_rcv_encap
				      , &klips_old_encap)!=0) {
	   printk(KERN_ERR "KLIPS: can not register klips_rcv_encap function\n");
	}
#endif

#ifdef CONFIG_SYSCTL
	error |= ipsec_sysctl_register();
	if (error)
		goto error_sysctl_register;
#endif

#ifdef CONFIG_KLIPS_ALG
	ipsec_alg_init();
#endif

#ifdef CONFIG_KLIPS_OCF
	ipsec_ocf_init();
#endif

	/* Seed the private PRNG from the kernel entropy pool */
	get_random_bytes((void *)seed, sizeof(seed));
	prng_init(&ipsec_prng, seed, sizeof(seed));

	return error;

	// undo ipsec_sysctl_register
error_sysctl_register:
	ipsec_tunnel_cleanup_devices();
error_tunnel_init_devices:
#ifdef CONFIG_XFRM_ALTERNATE_STACK
	xfrm_deregister_alternate_rcv(ipsec_rcv);
error_xfrm_register:
#else // CONFIG_XFRM_ALTERNATE_STACK
#ifdef IPCOMP_USED_ALONE
#ifdef CONFIG_KLIPS_IPCOMP
error_openswan_inet_add_protocol_comp:
	openswan_inet_del_protocol(&comp_protocol, IPPROTO_COMP);
#endif /* CONFIG_KLIPS_IPCOMP */
#endif
#ifdef CONFIG_KLIPS_AH
error_openswan_inet_add_protocol_ah:
	openswan_inet_del_protocol(&ah_protocol, IPPROTO_AH);
#endif
error_openswan_inet_add_protocol_esp:
	openswan_inet_del_protocol(&esp_protocol, IPPROTO_ESP);
#endif
	unregister_netdevice_notifier(&ipsec_dev_notifier);
error_netdev_notifier:
	pfkey_cleanup();
error_pfkey_init:
	ipsec_radijcleanup();
error_radijinit:
	ipsec_sadb_cleanup(0);
	ipsec_sadb_free();
error_sadb_init:
error_proc_init:
	// ipsec_proc_init() does not cleanup after itself, so we have to do it here
	// TODO: ipsec_proc_init() should roll back what it chaned on failure
	ipsec_proc_cleanup();
	ipsec_rcv_state_cache_cleanup ();
error_rcv_state_cache:
	ipsec_xmit_state_cache_cleanup ();
error_xmit_state_cache:
	return error;
}
/**
 * pph_create_account() - register a new account in a PPH context.
 *
 * @ctx:              unlocked context holding the share generator and AES key
 * @username:         account name (need not be NUL-terminated)
 * @username_length:  length of @username, < MAX_USERNAME_LENGTH
 * @password:         password bytes, length < MAX_PASSWORD_LENGTH
 * @password_length:  length of @password
 * @shares:           number of threshold shares; 0 requests a
 *                    thresholdless (AES-protected) account
 *
 * Builds one polyhashed entry per requested share (or a single
 * thresholdless entry), each salted with fresh random bytes, and prepends
 * the account to ctx->account_data.  Returns PPH_ERROR_OK on success or a
 * PPH_* error code; on failure any partially built entry list is freed.
 */
PPH_ERROR pph_create_account(pph_context *ctx, const uint8 *username,
                        const unsigned int username_length, const uint8 *password,
                        const unsigned int password_length, uint8 shares){

  pph_account_node *node, *next;
  unsigned int i;
  pph_entry *entry_node, *last_entry;
  uint8 share_data[SHARE_LENGTH];
  uint8 salt_buffer[MAX_SALT_LENGTH];

  /* 1) SANITIZE INFORMATION */

  /* check password length */
  if(password_length > MAX_PASSWORD_LENGTH-1){
    return PPH_PASSWORD_IS_TOO_LONG;
  }

  /* check username length */
  if(username_length > MAX_USERNAME_LENGTH-1){
    return PPH_USERNAME_IS_TOO_LONG;
  }

  /* check share numbers, we don't check for 0 since that means thresholdless
   * accounts */
  if(shares>MAX_NUMBER_OF_SHARES){
    return PPH_WRONG_SHARE_COUNT;
  }

  /* check correct context pointer */
  if(ctx == NULL){
    return PPH_BAD_PTR;
  }

  /* check if we are able to get shares from the context vault */
  if(ctx->is_unlocked != true || ctx->AES_key == NULL){
    return PPH_CONTEXT_IS_LOCKED;
  }

  /* This while loop will traverse our accounts and check if the username is
   * already taken. */
  next = ctx->account_data;
  while(next!=NULL){
    node=next;
    next=next->next;
    /* only compare them if their lengths match */
    if(username_length==node->account.username_length &&
        !memcmp(node->account.username,username,username_length)){
      return PPH_ACCOUNT_EXISTS;
    }
  }

  /* 2) check for the type of account requested.
   * this will generate a share list for threshold accounts, we won't
   * fall inside this loop for thresholdless accounts since shares is 0. */
  last_entry = NULL;
  for(i=0;i<shares;i++){

    /* 3) Allocate entries for each account */
    /* get a new share value */
    gfshare_ctx_enc_getshare(ctx->share_context, ctx->next_entry, share_data);

    /* get a fresh random salt for this entry's password hash */
    get_random_bytes(MAX_SALT_LENGTH, salt_buffer);

    /* Try to get a new entry. */
    entry_node=create_polyhashed_entry(password, password_length, salt_buffer,
        MAX_SALT_LENGTH, share_data, SHARE_LENGTH, ctx->partial_bytes);
    if(entry_node == NULL){
      /* free the partial list built so far to avoid a leak */
      _destroy_entry_list(last_entry);
      return PPH_NO_MEM;
    }

    /* update the share number for this entry, and update the next available
     * share in a round robin fashion (share 0 is reserved for the
     * thresholdless key, so wrap back to 1) */
    entry_node->share_number = ctx->next_entry;
    ctx->next_entry++;
    if(ctx->next_entry==0 || ctx->next_entry>=MAX_NUMBER_OF_SHARES){
      ctx->next_entry=1;
    }

    /* prepend the node to the list */
    entry_node->next = last_entry;
    last_entry=entry_node;
  }

  /* This if will check for thresholdless accounts, and will build a single
   * entry for them. */
  if(shares == 0){

    /* 3) allocate an entry for each account */
    /* get a fresh random salt for the password */
    get_random_bytes(MAX_SALT_LENGTH, salt_buffer);

    /* generate the entry */
    entry_node = create_thresholdless_entry(password, password_length,
        salt_buffer, MAX_SALT_LENGTH, ctx->AES_key, DIGEST_LENGTH,
        ctx->partial_bytes);
    if(entry_node == NULL){
      return PPH_NO_MEM;
    }

    /* we now have one share entry under this list, so we increment this
     * parameter. */
    shares++;
  }

  /* 4) Allocate the information for the account */
  /* allocate the account information, check for memory issues and return. */
  node=malloc(sizeof(*node));
  if(node==NULL){
    /* we should destroy the list we created now to avoid memory leaks */
    _destroy_entry_list(entry_node);
    return PPH_NO_MEM;
  }

  /* fill the user entry with the rest of the account information. */
  memcpy(node->account.username,username,username_length);
  node->account.number_of_entries = shares;
  node->account.username_length = username_length;
  node->account.entries = entry_node;

  /* 5) add the resulting account to the current context. */
  /* prepend it to the context list, with the rest of the users */
  node->next = ctx->account_data;
  ctx->account_data = node;

  /* 6) return. */
  /* everything is set! */
  return PPH_ERROR_OK;
}
/*
 * hash_ipportip_create() - create an ip,port,ip hash-type ipset.
 *
 * Parses the netlink attributes (hash size, max elements, timeout),
 * allocates the ip_set_hash bookkeeping plus the bucket table, seeds the
 * per-set hash initval, and selects the IPv4/IPv6 and timeout/non-timeout
 * set variant.  Returns 0 or a negative IPSET_ERR_*/-ENOMEM code.
 */
static int hash_ipportip_create(struct ip_set *set, struct nlattr *tb[],
				u32 flags)
{
	struct ip_set_hash *h;
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
	u8 hbits;

	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;

	/* All numeric attributes must arrive in network byte order */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->maxelem = maxelem;
	/* Random per-set seed so hash distribution can't be predicted */
	get_random_bytes(&h->initval, sizeof(h->initval));
	h->timeout = IPSET_NO_TIMEOUT;

	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	}
	h->table->htable_bits = hbits;

	set->data = h;

	/* With a timeout, use the t-variant ops and start the GC timer */
	if (tb[IPSET_ATTR_TIMEOUT]) {
		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);

		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;

		if (set->family == NFPROTO_IPV4)
			hash_ipportip4_gc_init(set);
		else
			hash_ipportip6_gc_init(set);
	} else {
		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_variant : &hash_ipportip6_variant;
	}

	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(h->table->htable_bits),
		 h->table->htable_bits, h->maxelem, set->data, h->table);

	return 0;
}
/**
  Sign a hash with DSA
  @param in       The hash to sign
  @param inlen    The length of the hash to sign
  @param r        The "r" integer of the signature (caller must initialize with mp_init() first)
  @param s        The "s" integer of the signature (caller must initialize with mp_init() first)
  @param key      A private DSA key
  @return CRYPT_OK if successful

  Implements the textbook DSA signing loop: pick a random ephemeral k
  with 1 < k and gcd(k, q) == 1, compute r = (g^k mod p) mod q and
  s = (in + x*r) / k mod q, retrying from scratch whenever r or s turns
  out to be zero (as the standard requires).
*/
int dsa_sign_hash_raw(const unsigned char *in,  unsigned long inlen,
                                   mp_int_t r,   mp_int_t s,
                               dsa_key *key)
{
   mp_int         k, kinv, tmp;
   unsigned char *buf;
   int            err;

   LTC_ARGCHK(in  != NULL);
   LTC_ARGCHK(r   != NULL);
   LTC_ARGCHK(s   != NULL);
   LTC_ARGCHK(key != NULL);

   if (key->type != PK_PRIVATE) {
      return CRYPT_PK_NOT_PRIVATE;
   }

   /* check group order size  */
   if (key->qord >= LTC_MDSA_MAX_GROUP) {
      return CRYPT_INVALID_ARG;
   }

   buf = XMALLOC(LTC_MDSA_MAX_GROUP);
   if (buf == NULL) {
      return CRYPT_MEM;
   }

   /* Init our temps */
   if ((err = mp_init_multi(&k, &kinv, &tmp, NULL)) != CRYPT_OK) { goto ERRBUF; }

retry:

   do {
      /* gen random k */
      get_random_bytes(buf, key->qord);

      /* read k */
      if ((err = mp_read_unsigned_bin(&k, buf, key->qord)) != CRYPT_OK)       { goto error; }

      /* k > 1 ? */
      if (mp_cmp_d(&k, 1) != LTC_MP_GT)                                       { goto retry; }

      /* test gcd: k must be invertible mod q */
      if ((err = mp_gcd(&k, &key->q, &tmp)) != CRYPT_OK)                      { goto error; }
   } while (mp_cmp_d(&tmp, 1) != LTC_MP_EQ);

   /* now find 1/k mod q */
   if ((err = mp_invmod(&k, &key->q, &kinv)) != CRYPT_OK)                     { goto error; }

   /* now find r = g^k mod p mod q */
   if ((err = mp_exptmod(&key->g, &k, &key->p, r)) != CRYPT_OK)               { goto error; }
   if ((err = mp_mod(r, &key->q, r)) != CRYPT_OK)                             { goto error; }

   if (mp_iszero(r) == LTC_MP_YES)                                            { goto retry; }

   /* now find s = (in + xr)/k mod q */
   if ((err = mp_read_unsigned_bin(&tmp, (unsigned char *)in, inlen)) != CRYPT_OK) { goto error; }
   if ((err = mp_mul(&key->x, r, s)) != CRYPT_OK)                             { goto error; }
   if ((err = mp_add(s, &tmp, s)) != CRYPT_OK)                                { goto error; }
   if ((err = mp_mulmod(s, &kinv, &key->q, s)) != CRYPT_OK)                   { goto error; }

   if (mp_iszero(s) == LTC_MP_YES)                                            { goto retry; }

   err = CRYPT_OK;
error:
   mp_clear_multi(&k, &kinv, &tmp, NULL);
ERRBUF:
#ifdef LTC_CLEAN_STACK
   /* the buffer held raw ephemeral-key material; scrub before freeing */
   zeromem(buf, LTC_MDSA_MAX_GROUP);
#endif
   XFREE(buf);
   return err;
}
int dhd_read_macaddr(struct dhd_info *dhd, struct ether_addr *mac) { struct file *fp = NULL; char macbuffer[18] = {0}; mm_segment_t oldfs = {0}; char randommac[3] = {0}; char buf[18] = {0}; char *filepath_efs = MACINFO_EFS; int ret = 0; fp = filp_open(filepath_efs, O_RDONLY, 0); if (IS_ERR(fp)) { start_readmac: /* File Doesn't Exist. Create and write mac addr. */ fp = filp_open(filepath_efs, O_RDWR | O_CREAT, 0666); if (IS_ERR(fp)) { DHD_ERROR(("[WIFI] %s: File open error\n", filepath_efs)); return -1; } oldfs = get_fs(); set_fs(get_ds()); /* Generating the Random Bytes for 3 last octects of the MAC address */ get_random_bytes(randommac, 3); sprintf(macbuffer, "%02X:%02X:%02X:%02X:%02X:%02X\n", 0x00, 0x12, 0x34, randommac[0], randommac[1], randommac[2]); DHD_ERROR(("[WIFI]The Random Generated MAC ID: %s\n", macbuffer)); if (fp->f_mode & FMODE_WRITE) { ret = fp->f_op->write(fp, (const char *)macbuffer, sizeof(macbuffer), &fp->f_pos); if (ret < 0) DHD_ERROR(("[WIFI]MAC address [%s] Failed to write into File: %s\n", macbuffer, filepath_efs)); else DHD_ERROR(("[WIFI]MAC address [%s] written into File: %s\n", macbuffer, filepath_efs)); } set_fs(oldfs); /* Reading the MAC Address from .mac.info file ( the existed file or just created file) */ ret = kernel_read(fp, 0, buf, 18); } else { /* Reading the MAC Address from .mac.info file( the existed file or just created file) */ ret = kernel_read(fp, 0, buf, 18); /* to prevent abnormal string display * when mac address is displayed on the screen. 
*/ buf[17] = '\0'; if (strncmp(buf, "00:00:00:00:00:00", 17) < 1) { DHD_ERROR(("goto start_readmac \r\n")); filp_close(fp, NULL); goto start_readmac; } } if (ret) sscanf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", (unsigned int *)&(mac->octet[0]), (unsigned int *)&(mac->octet[1]), (unsigned int *)&(mac->octet[2]), (unsigned int *)&(mac->octet[3]), (unsigned int *)&(mac->octet[4]), (unsigned int *)&(mac->octet[5])); else DHD_ERROR(("dhd_bus_start: Reading from the '%s' returns 0 bytes\n", filepath_efs)); if (fp) filp_close(fp, NULL); /* Writing Newly generated MAC ID to the Dongle */ if (_dhd_set_mac_address(dhd, 0, mac) == 0) DHD_INFO(("dhd_bus_start: MACID is overwritten\n")); else DHD_ERROR(("dhd_bus_start: _dhd_set_mac_address() failed\n")); return 0; }
/* Initcall: seed the SYN-cookie secret with kernel-pool randomness. */
static __init int init_syncookies(void)
{
	get_random_bytes(syncookie_secret, sizeof(syncookie_secret));

	return 0;
}