/*
 * Dump the NAT entry for @nid and, when the on-disk node footer agrees
 * with the NAT entry, the node block itself.
 *
 * Fixes vs. original:
 *  - bail out before reading the device when the NAT entry is invalid
 *    (blk_addr == 0); the old code read block 0 and dumped garbage.
 *  - check calloc() and dev_read_block() results.
 */
int dump_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct node_info ni;
	struct f2fs_node *node_blk;
	int ret;

	ret = get_node_info(sbi, nid, &ni);
	ASSERT(ret >= 0);

	DBG(1, "Node ID [0x%x]\n", nid);
	DBG(1, "nat_entry.block_addr [0x%x]\n", ni.blk_addr);
	DBG(1, "nat_entry.version [0x%x]\n", ni.version);
	DBG(1, "nat_entry.ino [0x%x]\n", ni.ino);

	/* Unallocated NAT entry: nothing valid on disk to read. */
	if (ni.blk_addr == 0x0) {
		MSG(0, "Invalid nat entry\n\n");
		return 0;
	}

	node_blk = calloc(BLOCK_SZ, 1);
	ASSERT(node_blk);

	ret = dev_read_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	DBG(1, "node_blk.footer.ino [0x%x]\n", le32_to_cpu(node_blk->footer.ino));
	DBG(1, "node_blk.footer.nid [0x%x]\n", le32_to_cpu(node_blk->footer.nid));

	/* Only trust the block when its footer matches the NAT entry. */
	if (le32_to_cpu(node_blk->footer.ino) == ni.ino &&
	    le32_to_cpu(node_blk->footer.nid) == ni.nid)
		print_node_info(node_blk);
	else
		MSG(0, "Invalid node block\n\n");

	free(node_blk);
	return 0;
}
/*
 * Report the inline-data region of @inode as a single FIEMAP extent.
 *
 * Returns 0 on success (including the "start beyond inline data" case,
 * which reports nothing), -EAGAIN if the inode no longer holds inline
 * data (caller should retry via the normal fiemap path), or a negative
 * errno from page lookup / extent fill.
 */
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	/* Inline data lives inside the inode block: mark it as such,
	 * unaligned, and as the file's last extent. */
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/* Inline status may have changed since the caller checked. */
	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	/* Clamp the reported length to both the inline capacity and
	 * the requested [start, start+len) window. */
	ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	/* Physical byte address = node block address plus the offset of
	 * the inline-data area within the inode block. */
	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	/* Always drop the node page reference taken above. */
	f2fs_put_page(ipage, 1);
	return err;
}
/*
 * Validate the xattr node block @x_nid belonging to inode @ino:
 * consume its nid from the NAT bitmap, account it in the fsck
 * counters, and mark its block in the main-area bitmap, flagging
 * duplicates along the way. Always returns 0.
 */
int fsck_chk_xattr_blk(struct f2fs_sb_info *sbi, u32 ino, u32 x_nid, u32 *blk_cnt)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct node_info ni;
	u32 blkoff;

	/* No xattr node allocated for this inode. */
	if (!x_nid)
		return 0;

	/* Each nid may only be referenced once. */
	if (f2fs_test_bit(x_nid, fsck->nat_area_bitmap) == 0x0)
		ASSERT_MSG(0, "xattr_nid duplicated [0x%x]\n", x_nid);
	else
		f2fs_clear_bit(x_nid, fsck->nat_area_bitmap);

	(*blk_cnt)++;
	fsck->chk.valid_blk_cnt++;
	fsck->chk.valid_node_cnt++;

	ASSERT(get_node_info(sbi, x_nid, &ni) >= 0);

	/* The same main-area block must not back two different nodes. */
	blkoff = BLKOFF_FROM_MAIN(sbi, ni.blk_addr);
	if (f2fs_test_bit(blkoff, fsck->main_area_bitmap) != 0)
		ASSERT_MSG(0, "Duplicated node block for x_attr. "
				"x_nid[0x%x] block addr[0x%x]\n",
				x_nid, ni.blk_addr);
	f2fs_set_bit(blkoff, fsck->main_area_bitmap);

	DBG(2, "ino[0x%x] x_nid[0x%x]\n", ino, x_nid);
	return 0;
}
static void page_symlink(struct f2fs_sb_info *sbi, struct f2fs_node *inode, const char *symname, int symlen) { nid_t ino = le32_to_cpu(inode->footer.ino); struct f2fs_summary sum; struct node_info ni; char *data_blk; block_t blkaddr = NULL_ADDR; int ret; get_node_info(sbi, ino, &ni); /* store into inline_data */ if ((unsigned long)(symlen + 1) <= MAX_INLINE_DATA(inode)) { inode->i.i_inline |= F2FS_INLINE_DATA; inode->i.i_inline |= F2FS_DATA_EXIST; memcpy(inline_data_addr(inode), symname, symlen); return; } data_blk = calloc(BLOCK_SZ, 1); ASSERT(data_blk); memcpy(data_blk, symname, symlen); set_summary(&sum, ino, 0, ni.version); reserve_new_block(sbi, &blkaddr, &sum, CURSEG_WARM_DATA); ret = dev_write_block(data_blk, blkaddr); ASSERT(ret >= 0); inode->i.i_addr[get_extra_isize(inode)] = cpu_to_le32(blkaddr); free(data_blk); }
/*
 * Starting from an arbitrary block address, print its summary/NAT
 * information and walk up to the owning inode block, printing it.
 * Returns the inode number that owns @blk_addr.
 *
 * Fixes vs. original:
 *  - the non-inode branch looked up the owning inode's node_info but
 *    never updated blk_addr before "goto read_node_blk", so it re-read
 *    the same block forever (infinite loop). Now we follow ni.blk_addr.
 *  - calloc(), dev_read_block() and the second get_node_info() results
 *    are checked.
 */
int dump_inode_from_blkaddr(struct f2fs_sb_info *sbi, u32 blk_addr)
{
	nid_t ino, nid;
	int type, ret;
	struct f2fs_summary sum_entry;
	struct node_info ni;
	struct f2fs_node *node_blk;

	type = get_sum_entry(sbi, blk_addr, &sum_entry);
	nid = le32_to_cpu(sum_entry.nid);

	ret = get_node_info(sbi, nid, &ni);
	ASSERT(ret >= 0);

	DBG(1, "Note: blkaddr = main_blkaddr + segno * 512 + offset\n");
	DBG(1, "Block_addr [0x%x]\n", blk_addr);
	DBG(1, " - Segno [0x%x]\n", GET_SEGNO(sbi, blk_addr));
	DBG(1, " - Offset [0x%x]\n", OFFSET_IN_SEG(sbi, blk_addr));
	DBG(1, "SUM.nid [0x%x]\n", nid);
	DBG(1, "SUM.type [%s]\n", seg_type_name[type]);
	DBG(1, "SUM.version [%d]\n", sum_entry.version);
	DBG(1, "SUM.ofs_in_node [%d]\n", sum_entry.ofs_in_node);
	DBG(1, "NAT.blkaddr [0x%x]\n", ni.blk_addr);
	DBG(1, "NAT.ino [0x%x]\n", ni.ino);

	node_blk = calloc(BLOCK_SZ, 1);
	ASSERT(node_blk);

read_node_blk:
	ret = dev_read_block(node_blk, blk_addr);
	ASSERT(ret >= 0);

	ino = le32_to_cpu(node_blk->footer.ino);
	nid = le32_to_cpu(node_blk->footer.nid);

	if (ino == nid) {
		/* This block IS the inode block. */
		print_node_info(node_blk);
	} else {
		/* Direct/indirect node: follow the footer's ino to the
		 * owning inode block and dump that instead. */
		ret = get_node_info(sbi, ino, &ni);
		ASSERT(ret >= 0);
		blk_addr = ni.blk_addr;	/* BUG FIX: was never updated */
		goto read_node_blk;
	}

	free(node_blk);
	return ino;
}
/*
 * Log every node currently known to the cloud bus: count first, then
 * one line per node (uuid, address, port).
 *
 * Fix: nl.size() is a size_t, but the format string uses %d — a
 * format/argument mismatch (undefined behavior on LP64). Cast to int
 * to match the specifier.
 */
void CloudBus::print_nodes()
{
	std::vector<GadgetronNodeInfo> nl;
	get_node_info(nl);
	GDEBUG("Number of available nodes: %d\n", static_cast<int>(nl.size()));

	for (std::vector<GadgetronNodeInfo>::iterator it = nl.begin(); it != nl.end(); it++) {
		GDEBUG("  %s, %s, %d\n", it->uuid.c_str(), it->address.c_str(), it->port);
	}
}
// Display info of the next node touched void do_info(){ draw_information_box("SELECT A MAP POINT TO OBTAIN ITS INFORMATION."); char* info = get_node_info(full_map_graph); if (info == NULL) { about_screen(); INFO_BUTT.prs_p(INFO_BUTT); return; } free(info); info_screen(info); INFO_BUTT.prs_p(INFO_BUTT); }
/*
 * Recursively dump the data reachable through node @nid of kind
 * @ntype (direct / indirect / double-indirect), advancing the file
 * offset counter *@ofs as blocks are visited. A zero nid stands for a
 * hole and just skips the offsets it would have covered.
 *
 * Fixes vs. original: a default case aborts on an unknown ntype
 * (previously `idx` would have been used uninitialized), and the
 * calloc()/dev_read_block() results are checked.
 */
static void dump_node_blk(struct f2fs_sb_info *sbi, int ntype, u32 nid, u64 *ofs)
{
	struct node_info ni;
	struct f2fs_node *node_blk;
	u32 skip = 0;
	u32 i, idx = 0;
	int ret;

	switch (ntype) {
	case TYPE_DIRECT_NODE:
		skip = idx = ADDRS_PER_BLOCK;
		break;
	case TYPE_INDIRECT_NODE:
		idx = NIDS_PER_BLOCK;
		skip = idx * ADDRS_PER_BLOCK;
		break;
	case TYPE_DOUBLE_INDIRECT_NODE:
		skip = 0;
		idx = NIDS_PER_BLOCK;
		break;
	default:
		ASSERT(0);	/* unknown node type */
	}

	/* A hole: account for the file range it spans and return. */
	if (nid == 0) {
		*ofs += skip;
		return;
	}

	get_node_info(sbi, nid, &ni);

	node_blk = calloc(BLOCK_SZ, 1);
	ASSERT(node_blk);
	ret = dev_read_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	for (i = 0; i < idx; i++, (*ofs)++) {
		switch (ntype) {
		case TYPE_DIRECT_NODE:
			dump_data_blk(*ofs * F2FS_BLKSIZE,
					le32_to_cpu(node_blk->dn.addr[i]));
			break;
		case TYPE_INDIRECT_NODE:
			dump_node_blk(sbi, TYPE_DIRECT_NODE,
					le32_to_cpu(node_blk->in.nid[i]), ofs);
			break;
		case TYPE_DOUBLE_INDIRECT_NODE:
			dump_node_blk(sbi, TYPE_INDIRECT_NODE,
					le32_to_cpu(node_blk->in.nid[i]), ofs);
			break;
		}
	}
	free(node_blk);
}
/*
 * Tear down an inode whose creation failed partway.
 *
 * Drops the link count, syncs the inode page, and either records the
 * inode as an orphan (when its node block was already allocated on
 * disk) or marks its nid free. Finishes by unlocking the op and
 * dropping the last reference, which destroys the inode.
 *
 * Caller should call f2fs_lock_op(); this function performs the
 * matching f2fs_unlock_op().
 */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid inode being left dirty, resulting
	 * in a panic when flushing dirty inodes in gdirty_list.
	 */
	update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and following sudden power-off.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		/* Node block exists on disk: must be tracked as an orphan
		 * until it is actually reclaimed. */
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		/* Never written out: just release the reserved nid. */
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
/*
 * Resolve the absolute @path to an inode number, walking directory
 * entries component by component from the root inode.
 *
 * On success returns 0 and stores the final inode number in *@ino;
 * returns -ENOENT when the path is not absolute or a component is
 * missing. NOTE: @path is tokenized in place (as before), but the
 * reentrant strtok_r() now replaces strtok(), so this function no
 * longer clobbers global tokenizer state shared with callers.
 */
int f2fs_find_path(struct f2fs_sb_info *sbi, char *path, nid_t *ino)
{
	struct f2fs_node *parent;
	struct node_info ni;
	struct dentry de;
	int err = 0;
	int ret;
	char *p, *saveptr = NULL;

	if (path[0] != '/')
		return -ENOENT;

	*ino = F2FS_ROOT_INO(sbi);
	parent = calloc(BLOCK_SZ, 1);
	ASSERT(parent);

	p = strtok_r(path, "/", &saveptr);
	while (p) {
		de.name = (const u8 *)p;
		de.len = strlen(p);

		/* *ino currently names the directory to search in. */
		get_node_info(sbi, *ino, &ni);
		if (ni.blk_addr == NULL_ADDR) {
			err = -ENOENT;
			goto err;
		}
		ret = dev_read_block(parent, ni.blk_addr);
		ASSERT(ret >= 0);

		ret = f2fs_find_entry(sbi, parent, &de);
		if (!ret) {
			err = -ENOENT;
			goto err;
		}

		/* Descend into the entry we just found. */
		*ino = de.ino;
		p = strtok_r(NULL, "/", &saveptr);
	}
err:
	free(parent);
	return err;
}
/*
 * Allocate a new on-disk data block for the position described by @dn
 * and wire it into the node page, updating i_size if the file grew.
 *
 * Returns 0 on success, -EPERM when allocation is forbidden for this
 * inode, or -ENOSPC when the valid-block quota cannot be raised.
 */
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	/* NEW_ADDR means the block was already reserved (counted); skip
	 * the quota bump and go straight to physical allocation. */
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	/* First slot of an inode-resident dnode: route to the direct-IO
	 * segment instead of warm data. */
	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	f2fs_drop_largest_extent(dn->inode, fofs);

	return 0;
}
/* * In this function, we get a new node blk, and write back * node_blk would be sloadd in RAM, linked by dn->node_blk */ block_t new_node_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, unsigned int ofs) { struct f2fs_node *f2fs_inode; struct f2fs_node *node_blk; struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_summary sum; struct node_info ni; block_t blkaddr; int type; f2fs_inode = dn->inode_blk; node_blk = calloc(BLOCK_SZ, 1); ASSERT(node_blk); node_blk->footer.nid = cpu_to_le32(dn->nid); node_blk->footer.ino = f2fs_inode->footer.ino; node_blk->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT); node_blk->footer.cp_ver = ckpt->checkpoint_ver; type = CURSEG_COLD_NODE; if (IS_DNODE(node_blk)) { if (S_ISDIR(f2fs_inode->i.i_mode)) type = CURSEG_HOT_NODE; else type = CURSEG_WARM_NODE; } get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, 0, ni.version); reserve_new_block(sbi, &blkaddr, &sum, type); /* update nat info */ update_nat_blkaddr(sbi, le32_to_cpu(f2fs_inode->footer.ino), dn->nid, blkaddr); dn->node_blk = node_blk; inc_inode_blocks(dn); return blkaddr; }
/*
 * Walk the children of an ALPS <system> element; for each batch of
 * <nodes>, read its attributes and apply them to every node id in the
 * element's range string, merging into (or inserting into) alps_nodes.
 * Always returns PBSE_NONE.
 *
 * Fixes vs. original:
 *  - memory returned by xmlNodeGetContent() is released with xmlFree()
 *    rather than free(); libxml2 may be configured with a custom
 *    allocator (xmlMemSetup), making free() incorrect.
 *  - removed the unused local `cpus_per_cu`.
 */
int process_system(

	xmlNode *node)

{
	xmlNode        *child;
	alps_node_info  ani;

	for (child = node->children; child != NULL; child = child->next)
	{
		if (!strcmp((const char *)child->name, nodes_element))
		{
			// Get information for this batch of nodes
			get_node_info(child, ani);

			// Get the range of nodes this information applies to
			char *content = (char *)xmlNodeGetContent(child);
			if (content != NULL)
			{
				std::vector<int> node_ids;
				translate_range_string_to_vector(content, node_ids);

				for (unsigned int i = 0; i < node_ids.size(); i++)
				{
					// Merge with an existing record, or insert a new one.
					if (alps_nodes.find(node_ids[i]) != alps_nodes.end())
					{
						alps_nodes[node_ids[i]].os = ani.os;
						alps_nodes[node_ids[i]].hbm = ani.hbm;
					}
					else
						alps_nodes[node_ids[i]] = ani;
				}

				xmlFree(content);
			}
		}
	}

	return(PBSE_NONE);
}  // END process_system()
static void make_empty_dir(struct f2fs_sb_info *sbi, struct f2fs_node *inode) { struct f2fs_dentry_block *dent_blk; nid_t ino = le32_to_cpu(inode->footer.ino); nid_t pino = le32_to_cpu(inode->i.i_pino); struct f2fs_summary sum; struct node_info ni; block_t blkaddr = NULL_ADDR; int ret; get_node_info(sbi, ino, &ni); dent_blk = calloc(BLOCK_SZ, 1); ASSERT(dent_blk); dent_blk->dentry[0].hash_code = 0; dent_blk->dentry[0].ino = cpu_to_le32(ino); dent_blk->dentry[0].name_len = cpu_to_le16(1); dent_blk->dentry[0].file_type = F2FS_FT_DIR; memcpy(dent_blk->filename[0], ".", 1); dent_blk->dentry[1].hash_code = 0; dent_blk->dentry[1].ino = cpu_to_le32(pino); dent_blk->dentry[1].name_len = cpu_to_le16(2); dent_blk->dentry[1].file_type = F2FS_FT_DIR; memcpy(dent_blk->filename[1], "..", 2); test_and_set_bit_le(0, dent_blk->dentry_bitmap); test_and_set_bit_le(1, dent_blk->dentry_bitmap); set_summary(&sum, ino, 0, ni.version); reserve_new_block(sbi, &blkaddr, &sum, CURSEG_HOT_DATA); ret = dev_write_block(dent_blk, blkaddr); ASSERT(ret >= 0); inode->i.i_addr[get_extra_isize(inode)] = cpu_to_le32(blkaddr); free(dent_blk); }
/*
 * Shift the data of a regular file at/after @offset forward by @len
 * bytes, opening a hole of @len at @offset (fallocate INSERT_RANGE).
 *
 * Both @offset and @len must be block-aligned. Blocks are moved one at
 * a time from the end of the file backwards so that source and
 * destination never overlap. Returns 0 on success or a negative errno.
 */
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	/* Inserting at or past EOF is meaningless. */
	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	/* Inline data cannot be shifted block-wise; convert it first. */
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* Walk backwards from the last page; `idx != -1` guards the
	 * wrap-around of the unsigned pgoff_t when pg_start is 0. */
	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		struct dnode_of_data dn;
		struct page *ipage;
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		/* Detach the source block (if any) at idx... */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			goto next;
		} else if (dn.data_blkaddr == NULL_ADDR) {
			f2fs_put_dnode(&dn);
			goto next;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		/* ...then reserve the destination slot at idx + delta. */
		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, idx + delta);
		if (ret)
			goto out;

		old_addr = dn.data_blkaddr;
		f2fs_bug_on(sbi, old_addr != NEW_ADDR);

		/* Re-point the reserved slot at the old physical block. */
		if (new_addr != NEW_ADDR) {
			struct node_info ni;

			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
		}
		f2fs_put_dnode(&dn);
next:
		f2fs_unlock_op(sbi);
	}

	i_size_write(inode, new_size);
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de) { struct f2fs_node *parent, *child; struct node_info ni; struct f2fs_summary sum; block_t blkaddr = NULL_ADDR; int ret; /* Find if there is a */ get_node_info(sbi, de->pino, &ni); if (ni.blk_addr == NULL_ADDR) { MSG(0, "No parent directory pino=%x\n", de->pino); return -1; } parent = calloc(BLOCK_SZ, 1); ASSERT(parent); ret = dev_read_block(parent, ni.blk_addr); ASSERT(ret >= 0); /* Must convert inline dentry before the following opertions */ ret = convert_inline_dentry(sbi, parent, ni.blk_addr); if (ret) { MSG(0, "Convert inline dentry for pino=%x failed.\n", de->pino); return -1; } ret = f2fs_find_entry(sbi, parent, de); if (ret) { MSG(0, "Skip the existing \"%s\" pino=%x ERR=%d\n", de->name, de->pino, ret); if (de->file_type == F2FS_FT_REG_FILE) de->ino = 0; goto free_parent_dir; } child = calloc(BLOCK_SZ, 1); ASSERT(child); f2fs_alloc_nid(sbi, &de->ino, 1); init_inode_block(sbi, child, de); ret = f2fs_add_link(sbi, parent, child->i.i_name, le32_to_cpu(child->i.i_namelen), le32_to_cpu(child->footer.ino), map_de_type(le16_to_cpu(child->i.i_mode)), ni.blk_addr, 1); if (ret) { MSG(0, "Skip the existing \"%s\" pino=%x ERR=%d\n", de->name, de->pino, ret); goto free_child_dir; } /* write child */ set_summary(&sum, de->ino, 0, ni.version); reserve_new_block(sbi, &blkaddr, &sum, CURSEG_HOT_NODE); /* update nat info */ update_nat_blkaddr(sbi, de->ino, de->ino, blkaddr); ret = dev_write_block(child, blkaddr); ASSERT(ret >= 0); update_free_segments(sbi); MSG(1, "Info: Create %s -> %s\n" " -- ino=%x, type=%x, mode=%x, uid=%x, " "gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n", de->full_path, de->path, de->ino, de->file_type, de->mode, de->uid, de->gid, de->capabilities, de->size, de->pino); free_child_dir: free(child); free_parent_dir: free(parent); return 0; }
/*
 * ibportstate entry point: query an InfiniBand port's PortInfo, or
 * apply one of the "set" operations (enable / disable / reset / speed).
 * In query mode on a switch external port in Active state, also probes
 * the peer port via a directed route and cross-validates link width
 * and speed against the local settings.
 */
int main(int argc, char **argv)
{
	int mgmt_classes[3] = {IB_SMI_CLASS, IB_SMI_DIRECT_CLASS, IB_SA_CLASS};
	ib_portid_t portid = {0};
	ib_portid_t *sm_id = 0, sm_portid = {0};
	extern int ibdebug;
	int err;
	int timeout = 0, udebug = 0;
	char *ca = 0;
	int ca_port = 0;
	/* port_op: 0=query, 1=enable, 2=disable, 3=reset, 4=set speed */
	int port_op = 0;	/* default to query */
	int speed = 15;
	int is_switch = 1;
	int state, physstate, lwe, lws, lwa, lse, lss, lsa;
	int peerlocalportnum, peerlwe, peerlws, peerlwa, peerlse, peerlss, peerlsa;
	int width, peerwidth, peerspeed;
	uint8_t data[IB_SMP_DATA_SIZE];
	ib_portid_t peerportid = {0};
	int portnum = 0;
	ib_portid_t selfportid = {0};
	int selfport = 0;

	static char const str_opts[] = "C:P:t:s:devDGVhu";
	static const struct option long_opts[] = {
		{ "C", 1, 0, 'C'},
		{ "P", 1, 0, 'P'},
		{ "debug", 0, 0, 'd'},
		{ "err_show", 0, 0, 'e'},
		{ "verbose", 0, 0, 'v'},
		{ "Direct", 0, 0, 'D'},
		{ "Guid", 0, 0, 'G'},
		{ "timeout", 1, 0, 't'},
		{ "s", 1, 0, 's'},
		{ "Version", 0, 0, 'V'},
		{ "help", 0, 0, 'h'},
		{ "usage", 0, 0, 'u'},
		{ }
	};

	argv0 = argv[0];

	/* Standard option parsing; 'V' prints the build version and exits. */
	while (1) {
		int ch = getopt_long(argc, argv, str_opts, long_opts, NULL);
		if ( ch == -1 )
			break;
		switch(ch) {
		case 'd':
			ibdebug++;
			madrpc_show_errors(1);
			umad_debug(udebug);
			udebug++;
			break;
		case 'e':
			madrpc_show_errors(1);
			break;
		case 'D':
			dest_type = IB_DEST_DRPATH;
			break;
		case 'G':
			dest_type = IB_DEST_GUID;
			break;
		case 'C':
			ca = optarg;
			break;
		case 'P':
			ca_port = strtoul(optarg, 0, 0);
			break;
		case 's':
			if (ib_resolve_portid_str(&sm_portid, optarg, IB_DEST_LID, 0) < 0)
				IBERROR("can't resolve SM destination port %s", optarg);
			sm_id = &sm_portid;
			break;
		case 't':
			timeout = strtoul(optarg, 0, 0);
			madrpc_set_timeout(timeout);
			break;
		case 'v':
			verbose++;
			break;
		case 'V':
			fprintf(stderr, "%s %s\n", argv0, get_build_version() );
			exit(-1);
		default:
			usage();
			break;
		}
	}
	argc -= optind;
	argv += optind;

	/* Need at least <dest> <portnum>. */
	if (argc < 2)
		usage();

	madrpc_init(ca, ca_port, mgmt_classes, 3);

	if (ib_resolve_portid_str(&portid, argv[0], dest_type, sm_id) < 0)
		IBERROR("can't resolve destination port %s", argv[0]);

	/* First, make sure it is a switch port if it is a "set" */
	if (argc >= 3) {
		if (!strcmp(argv[2], "enable"))
			port_op = 1;
		else if (!strcmp(argv[2], "disable"))
			port_op = 2;
		else if (!strcmp(argv[2], "reset"))
			port_op = 3;
		else if (!strcmp(argv[2], "speed")) {
			if (argc < 4)
				IBERROR("speed requires an additional parameter");
			port_op = 4;
			/* Parse speed value */
			speed = strtoul(argv[3], 0, 0);
			if (speed > 15)
				IBERROR("invalid speed value %d", speed);
		}
	}

	err = get_node_info(&portid, data);
	if (err < 0)
		IBERROR("smp query nodeinfo failed");
	if (err) {	/* not switch */
		if (port_op == 0)	/* query op */
			is_switch = 0;
		else if (port_op != 4)	/* other than speed op */
			IBERROR("smp query nodeinfo: Node type not switch");
	}

	if (argc-1 > 0)
		portnum = strtol(argv[1], 0, 0);

	if (port_op)
		printf("Initial PortInfo:\n");
	else
		printf("PortInfo:\n");
	err = get_port_info(&portid, data, portnum, port_op);
	if (err < 0)
		IBERROR("smp query portinfo failed");

	/* Only if one of the "set" options is chosen */
	if (port_op) {
		if (port_op == 1)	/* Enable port */
			mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 2);	/* Polling */
		else if ((port_op == 2) || (port_op == 3)) {	/* Disable port */
			mad_set_field(data, 0, IB_PORT_STATE_F, 1);	/* Down */
			mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 3);	/* Disabled */
		} else if (port_op == 4) {	/* Set speed */
			mad_set_field(data, 0, IB_PORT_LINK_SPEED_ENABLED_F, speed);
			mad_set_field(data, 0, IB_PORT_STATE_F, 0);
			mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 0);
		}

		err = set_port_info(&portid, data, portnum, port_op);
		if (err < 0)
			IBERROR("smp set portinfo failed");

		if (port_op == 3) {	/* Reset port - so also enable */
			mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 2);	/* Polling */
			err = set_port_info(&portid, data, portnum, port_op);
			if (err < 0)
				IBERROR("smp set portinfo failed");
		}
	} else {	/* query op */
		/* only compare peer port if switch port */
		if (is_switch) {
			/* First, exclude SP0 */
			if (portnum) {
				/* Now, make sure PortState is Active */
				/* Or is PortPhysicalState LinkUp sufficient ? */
				mad_decode_field(data, IB_PORT_STATE_F, &state);
				mad_decode_field(data, IB_PORT_PHYS_STATE_F, &physstate);
				if (state == 4) {	/* Active */
					/* Local link parameters. */
					mad_decode_field(data, IB_PORT_LINK_WIDTH_ENABLED_F, &lwe );
					mad_decode_field(data, IB_PORT_LINK_WIDTH_SUPPORTED_F, &lws);
					mad_decode_field(data, IB_PORT_LINK_WIDTH_ACTIVE_F, &lwa);
					mad_decode_field(data, IB_PORT_LINK_SPEED_SUPPORTED_F, &lss);
					mad_decode_field(data, IB_PORT_LINK_SPEED_ACTIVE_F, &lsa);
					mad_decode_field(data, IB_PORT_LINK_SPEED_ENABLED_F, &lse);

					/* Setup portid for peer port */
					memcpy(&peerportid, &portid, sizeof(peerportid));
					peerportid.drpath.cnt = 1;
					peerportid.drpath.p[1] = portnum;

					/* Set DrSLID to local lid */
					if (ib_resolve_self(&selfportid, &selfport, 0) < 0)
						IBERROR("could not resolve self");
					peerportid.drpath.drslid = selfportid.lid;
					peerportid.drpath.drdlid = 0xffff;

					/* Get peer port NodeInfo to obtain peer port number */
					err = get_node_info(&peerportid, data);
					if (err < 0)
						IBERROR("smp query nodeinfo failed");

					mad_decode_field(data, IB_NODE_LOCAL_PORT_F, &peerlocalportnum);

					printf("Peer PortInfo:\n");
					/* Get peer port characteristics */
					err = get_port_info(&peerportid, data, peerlocalportnum, port_op);
					if (err < 0)
						IBERROR("smp query peer portinfofailed");

					mad_decode_field(data, IB_PORT_LINK_WIDTH_ENABLED_F, &peerlwe );
					mad_decode_field(data, IB_PORT_LINK_WIDTH_SUPPORTED_F, &peerlws);
					mad_decode_field(data, IB_PORT_LINK_WIDTH_ACTIVE_F, &peerlwa);
					mad_decode_field(data, IB_PORT_LINK_SPEED_SUPPORTED_F, &peerlss);
					mad_decode_field(data, IB_PORT_LINK_SPEED_ACTIVE_F, &peerlsa);
					mad_decode_field(data, IB_PORT_LINK_SPEED_ENABLED_F, &peerlse);

					/* Now validate peer port characteristics */
					/* Examine Link Width */
					width = get_link_width(lwe, lws);
					peerwidth = get_link_width(peerlwe, peerlws);
					validate_width(width, peerwidth, lwa);

					/* Examine Link Speed */
					speed = get_link_speed(lse, lss);
					peerspeed = get_link_speed(peerlse, peerlss);
					validate_speed(speed, peerspeed, lsa);
				}
			}
		}
	}

	exit(0);
}
/*
 * Classify @blk_addr within the filesystem layout and, when it lies in
 * the main (user data) area, dump its summary entry, NAT information,
 * and the node/inode chain that owns it.
 *
 * Returns 0 on success, -EINVAL when the address falls outside the
 * main area or references an obsolete (unallocated) node.
 */
int dump_info_from_blkaddr(struct f2fs_sb_info *sbi, u32 blk_addr)
{
	nid_t nid;
	int type;
	struct f2fs_summary sum_entry;
	struct node_info ni, ino_ni;
	int ret = 0;

	MSG(0, "\n== Dump data from block address ==\n\n");

	/* Identify which on-disk region the address falls into; only
	 * addresses inside the main area can be dumped. */
	if (blk_addr < SM_I(sbi)->seg0_blkaddr) {
		MSG(0, "\nFS Reserved Area for SEG #0: ");
		ret = -EINVAL;
	} else if (blk_addr < SIT_I(sbi)->sit_base_addr) {
		MSG(0, "\nFS Metadata Area: ");
		ret = -EINVAL;
	} else if (blk_addr < NM_I(sbi)->nat_blkaddr) {
		MSG(0, "\nFS SIT Area: ");
		ret = -EINVAL;
	} else if (blk_addr < SM_I(sbi)->ssa_blkaddr) {
		MSG(0, "\nFS NAT Area: ");
		ret = -EINVAL;
	} else if (blk_addr < SM_I(sbi)->main_blkaddr) {
		MSG(0, "\nFS SSA Area: ");
		ret = -EINVAL;
	} else if (blk_addr > __end_block_addr(sbi)) {
		MSG(0, "\nOut of address space: ");
		ret = -EINVAL;
	}

	if (ret) {
		MSG(0, "User data is from 0x%x to 0x%x\n\n",
			SM_I(sbi)->main_blkaddr, __end_block_addr(sbi));
		return ret;
	}

	/* The SSA entry tells us which node owns this block. */
	type = get_sum_entry(sbi, blk_addr, &sum_entry);
	nid = le32_to_cpu(sum_entry.nid);

	get_node_info(sbi, nid, &ni);

	DBG(1, "Note: blkaddr = main_blkaddr + segno * 512 + offset\n");
	DBG(1, "Block_addr [0x%x]\n", blk_addr);
	DBG(1, " - Segno [0x%x]\n", GET_SEGNO(sbi, blk_addr));
	DBG(1, " - Offset [0x%x]\n", OFFSET_IN_SEG(sbi, blk_addr));
	DBG(1, "SUM.nid [0x%x]\n", nid);
	DBG(1, "SUM.type [%s]\n", seg_type_name[type]);
	DBG(1, "SUM.version [%d]\n", sum_entry.version);
	DBG(1, "SUM.ofs_in_node [0x%x]\n", sum_entry.ofs_in_node);
	DBG(1, "NAT.blkaddr [0x%x]\n", ni.blk_addr);
	DBG(1, "NAT.ino [0x%x]\n", ni.ino);

	get_node_info(sbi, ni.ino, &ino_ni);

	/* inode block address */
	if (ni.blk_addr == NULL_ADDR || ino_ni.blk_addr == NULL_ADDR) {
		MSG(0, "FS Userdata Area: Obsolete block from 0x%x\n",
			blk_addr);
		return -EINVAL;
	}

	/* print inode */
	if (config.dbg_lv > 0)
		dump_node_from_blkaddr(ino_ni.blk_addr);

	if (type == SEG_TYPE_CUR_DATA || type == SEG_TYPE_DATA) {
		/* Data block: show its direct node and owning inode, then
		 * the data itself at the recorded offset. */
		MSG(0, "FS Userdata Area: Data block from 0x%x\n", blk_addr);
		MSG(0, " - Direct node block : id = 0x%x from 0x%x\n",
					nid, ni.blk_addr);
		MSG(0, " - Inode block : id = 0x%x from 0x%x\n",
					ni.ino, ino_ni.blk_addr);
		dump_node_from_blkaddr(ino_ni.blk_addr);
		dump_data_offset(ni.blk_addr,
			le16_to_cpu(sum_entry.ofs_in_node));
	} else {
		MSG(0, "FS Userdata Area: Node block from 0x%x\n", blk_addr);
		if (ni.ino == ni.nid) {
			/* The block itself is an inode block. */
			MSG(0, " - Inode block : id = 0x%x from 0x%x\n",
					ni.ino, ino_ni.blk_addr);
			dump_node_from_blkaddr(ino_ni.blk_addr);
		} else {
			/* Non-inode node block: show both it and its inode. */
			MSG(0, " - Node block : id = 0x%x from 0x%x\n",
					nid, ni.blk_addr);
			MSG(0, " - Inode block : id = 0x%x from 0x%x\n",
					ni.ino, ino_ni.blk_addr);
			dump_node_from_blkaddr(ino_ni.blk_addr);
			dump_node_offset(ni.blk_addr);
		}
	}

	return 0;
}
/*
 * Move the data block at page index @src to index @dst within @inode.
 *
 * Non-checkpointed blocks are moved by re-pointing block addresses
 * (cheap metadata swap); checkpointed blocks are moved by copying page
 * contents so the checkpointed copy stays intact. When @src is a hole,
 * @dst is punched only if @full is set. Returns 0 or a negative errno;
 * on failure after detaching, the source mapping is restored.
 */
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	/* Look up the source block and, when safe, detach it from src. */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	/* Source is a hole: optionally punch the destination too. */
	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		/* Metadata move: reserve dst and point it at the old
		 * physical block. */
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
							ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		/* Checkpointed data: copy page contents instead. */
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	/* Roll back: re-attach the detached block address to src. */
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}
/*
 * Validate one node block (@nid of kind @ntype) reached while walking
 * @inode's tree: consume its nid from the NAT bitmap, cross-check its
 * NAT entry, SIT/SSA state and footer, account it in the fsck
 * counters, and recurse into the appropriate per-type checker.
 * Returns 0 (hard inconsistencies abort via ASSERT).
 */
int fsck_chk_node_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
		u32 nid, enum FILE_TYPE ftype, enum NODE_TYPE ntype,
		u32 *blk_cnt)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct node_info ni;
	struct f2fs_node *node_blk = NULL;
	int ret = 0;

	IS_VALID_NID(sbi, nid);

	/* Orphans may legitimately be revisited; otherwise a nid must
	 * appear only once. */
	if (ftype != F2FS_FT_ORPHAN ||
			f2fs_test_bit(nid, fsck->nat_area_bitmap) != 0x0)
		f2fs_clear_bit(nid, fsck->nat_area_bitmap);
	else
		ASSERT_MSG(0, "nid duplicated [0x%x]\n", nid);

	ret = get_node_info(sbi, nid, &ni);
	ASSERT(ret >= 0);

	/* Is it reserved block?
	 * if block address was 0xffff,ffff,ffff,ffff
	 * it means that block was already allocated, but not stored in disk
	 */
	if (ni.blk_addr == NEW_ADDR) {
		fsck->chk.valid_blk_cnt++;
		fsck->chk.valid_node_cnt++;
		if (ntype == TYPE_INODE)
			fsck->chk.valid_inode_cnt++;
		return 0;
	}

	IS_VALID_BLK_ADDR(sbi, ni.blk_addr);

	is_valid_ssa_node_blk(sbi, nid, ni.blk_addr);

	/* A live node block must be marked valid in the SIT. */
	if (f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, ni.blk_addr),
				fsck->sit_area_bitmap) == 0x0) {
		DBG(0, "SIT bitmap is 0x0. blk_addr[0x%x] %i\n",
				ni.blk_addr, ni.blk_addr);
		ASSERT(0);
	} else {
		DBG(0, "SIT bitmap is NOT 0x0. blk_addr[0x%x] %i\n",
				ni.blk_addr, ni.blk_addr);
		//ASSERT(0);
	}

	/* First visit of this block: count it. */
	if (f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, ni.blk_addr),
				fsck->main_area_bitmap) == 0x0) {
		DBG(0, "SIT and main bitmap is 0x0. blk_addr[0x%x] %i\n",
				ni.blk_addr, ni.blk_addr);
		fsck->chk.valid_blk_cnt++;
		fsck->chk.valid_node_cnt++;
	} else {
		DBG(0, "SIT and main bitmap is NOT 0x0. blk_addr[0x%x] %i\n",
				ni.blk_addr, ni.blk_addr);
	}

	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
	ASSERT(node_blk != NULL);

	ret = dev_read_block(node_blk, ni.blk_addr);
	ASSERT(ret >= 0);

	/* The on-disk footer must agree with the nid we looked up. */
	ASSERT_MSG(nid == le32_to_cpu(node_blk->footer.nid),
			"nid[0x%x] blk_addr[0x%x] footer.nid[0x%x]\n",
			nid, ni.blk_addr, le32_to_cpu(node_blk->footer.nid));

	if (ntype == TYPE_INODE) {
		ret = fsck_chk_inode_blk(sbi, nid, ftype, node_blk,
							blk_cnt, &ni);
	} else {
		/* it's not inode */
		ASSERT(node_blk->footer.nid != node_blk->footer.ino);

		/* A non-inode node block may be claimed only once. */
		if (f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, ni.blk_addr),
					fsck->main_area_bitmap) != 0) {
			DBG(0, "Duplicated node block. ino[0x%x][0x%x]\n",
					nid, ni.blk_addr);
			ASSERT(0);
		}
		f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, ni.blk_addr),
				fsck->main_area_bitmap);

		switch (ntype) {
		case TYPE_DIRECT_NODE:
			ret = fsck_chk_dnode_blk(sbi, inode, nid, ftype,
						node_blk, blk_cnt, &ni);
			break;
		case TYPE_INDIRECT_NODE:
			ret = fsck_chk_idnode_blk(sbi, inode, nid, ftype,
						node_blk, blk_cnt);
			break;
		case TYPE_DOUBLE_INDIRECT_NODE:
			ret = fsck_chk_didnode_blk(sbi, inode, nid, ftype,
						node_blk, blk_cnt);
			break;
		default:
			ASSERT(0);
		}
	}
	ASSERT(ret >= 0);

	free(node_blk);
	return 0;
}
/*
 * Core of collapse-range: slide every data block from page index @end
 * onward down to index @start (shifting the file contents left).
 *
 * Each iteration detaches the block at `end` and re-attaches it at
 * `start`, either by punching/truncating (holes) or by replacing the
 * block address. f2fs_lock_op()/f2fs_unlock_op() brackets every
 * iteration. Returns 0 or a negative errno.
 */
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		/* Grab (and detach) the source block at `end`. */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			/* Source is a hole: just clear the destination. */
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				f2fs_unlock_op(sbi);
				continue;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				continue;
			} else {
				truncate_data_blocks_range(&dn, 1);
			}

			f2fs_put_dnode(&dn);
		} else {
			/* Source has a block: reserve the destination slot
			 * and transplant the address. */
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				/* Destination had real data but the source
				 * is only reserved: free the old block and
				 * take over the reservation. */
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				get_node_info(sbi, dn.nid, &ni);
				f2fs_replace_block(sbi, &dn, old_addr,
						new_addr, ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
/*
 * Construct the humidity sensor HAL: resolve the device model, locate
 * the sysfs/input nodes for the sensor, load vendor/name/scale from
 * the sensor configuration, and open the data node.
 *
 * Throws ENXIO (an int, per this HAL's convention) when any required
 * lookup fails; the object is unusable in that case.
 */
humidity_sensor_hal::humidity_sensor_hal()
: m_humidity(0)
, m_node_handle(-1)
, m_polling_interval(POLL_1HZ_MS)
, m_fired_time(0)
{
	const string sensorhub_interval_node_name = HUM_SENSORHUB_POLL_NODE_NAME;
	node_info_query query;
	node_info info;

	if (!find_model_id(SENSOR_TYPE_HUMIDITY, m_model_id)) {
		ERR("Failed to find model id");
		throw ENXIO;
	}

	/* Describe the device node we need; whether the sensorhub owns
	 * the interval node decides which path layout is queried. */
	query.sensorhub_controlled = m_sensorhub_controlled = is_sensorhub_controlled(sensorhub_interval_node_name);
	query.sensor_type = SENSOR_TYPE_HUMIDITY;
	query.key = HUM_INPUT_NAME;
	query.iio_enable_node_name = HUM_IIO_ENABLE_NODE_NAME;
	query.sensorhub_interval_node_name = sensorhub_interval_node_name;

	if (!get_node_info(query, info)) {
		ERR("Failed to get node info");
		throw ENXIO;
	}

	show_node_info(info);

	m_data_node = info.data_node_path;
	m_enable_node = info.enable_node_path;
	m_interval_node = info.interval_node_path;

	/* Pull static metadata from the sensor configuration. */
	csensor_config &config = csensor_config::get_instance();

	if (!config.get(SENSOR_TYPE_HUMIDITY, m_model_id, ELEMENT_VENDOR, m_vendor)) {
		ERR("[VENDOR] is empty");
		throw ENXIO;
	}

	if (!config.get(SENSOR_TYPE_HUMIDITY, m_model_id, ELEMENT_NAME, m_chip_name)) {
		ERR("[NAME] is empty\n");
		throw ENXIO;
	}

	double raw_data_unit;

	if (!config.get(SENSOR_TYPE_HUMIDITY, m_model_id, ELEMENT_RAW_DATA_UNIT, raw_data_unit)) {
		ERR("[RAW_DATA_UNIT] is empty\n");
		throw ENXIO;
	}

	m_raw_data_unit = (float)(raw_data_unit);

	if ((m_node_handle = open(m_data_node.c_str(),O_RDWR)) < 0) {
		ERR("Failed to open handle(%d)", m_node_handle);
		throw ENXIO;
	}

	/* Prefer monotonic event timestamps; failure is non-fatal. */
	int clockId = CLOCK_MONOTONIC;
	if (ioctl(m_node_handle, EVIOCSCLOCKID, &clockId) != 0)
		ERR("Fail to set monotonic timestamp for %s", m_data_node.c_str());

	INFO("m_vendor = %s", m_vendor.c_str());
	INFO("m_chip_name = %s", m_chip_name.c_str());
	INFO("m_raw_data_unit = %f", m_raw_data_unit);
	INFO("humidity_sensor_hal is created!");
}
int main(int argc, char **argv) { int mgmt_classes[3] = { IB_SMI_CLASS, IB_SMI_DIRECT_CLASS, IB_SA_CLASS }; ib_portid_t portid = { 0 }; int port_op = -1; int is_switch, is_peer_switch, espeed_cap, peer_espeed_cap; int state, physstate, lwe, lws, lwa, lse, lss, lsa, lsee, lses, lsea, fdr10s, fdr10e, fdr10a; int peerlocalportnum, peerlwe, peerlws, peerlwa, peerlse, peerlss, peerlsa, peerlsee, peerlses, peerlsea, peerfdr10s, peerfdr10e, peerfdr10a; int peerwidth, peerspeed, peerespeed; uint8_t data[IB_SMP_DATA_SIZE] = { 0 }; uint8_t data2[IB_SMP_DATA_SIZE] = { 0 }; ib_portid_t peerportid = { 0 }; int portnum = 0; ib_portid_t selfportid = { 0 }; int selfport = 0; int changed = 0; int i; uint32_t vendorid, rem_vendorid; uint16_t devid, rem_devid; uint64_t val; char *endp; char usage_args[] = "<dest dr_path|lid|guid> <portnum> [<op>]\n" "\nSupported ops: enable, disable, on, off, reset, speed, espeed, fdr10,\n" "\twidth, query, down, arm, active, vls, mtu, lid, smlid, lmc,\n" "\tmkey, mkeylease, mkeyprot\n"; const char *usage_examples[] = { "3 1 disable\t\t\t# by lid", "-G 0x2C9000100D051 1 enable\t# by guid", "-D 0 1\t\t\t# (query) by direct route", "3 1 reset\t\t\t# by lid", "3 1 speed 1\t\t\t# by lid", "3 1 width 1\t\t\t# by lid", "-D 0 1 lid 0x1234 arm\t\t# by direct route", NULL }; ibdiag_process_opts(argc, argv, NULL, NULL, NULL, NULL, usage_args, usage_examples); argc -= optind; argv += optind; if (argc < 2) ibdiag_show_usage(); srcport = mad_rpc_open_port(ibd_ca, ibd_ca_port, mgmt_classes, 3); if (!srcport) IBEXIT("Failed to open '%s' port '%d'", ibd_ca, ibd_ca_port); smp_mkey_set(srcport, ibd_mkey); if (resolve_portid_str(ibd_ca, ibd_ca_port, &portid, argv[0], ibd_dest_type, ibd_sm_id, srcport) < 0) IBEXIT("can't resolve destination port %s", argv[0]); if (argc > 1) portnum = strtol(argv[1], 0, 0); for (i = 2; i < argc; i++) { int j; for (j = 0; j < NPORT_ARGS; j++) { if (strcmp(argv[i], port_args[j].name)) continue; port_args[j].set = 1; if (!port_args[j].val) 
{ if (port_op >= 0) IBEXIT("%s only one of: ", "query, enable, disable, " "reset, down, arm, active, " "can be specified", port_args[j].name); port_op = j; break; } if (++i >= argc) IBEXIT("%s requires an additional parameter", port_args[j].name); val = strtoull(argv[i], 0, 0); switch (j) { case SPEED: if (val > 15) IBEXIT("invalid speed value %ld", val); break; case ESPEED: if (val > 31) IBEXIT("invalid extended speed value %ld", val); break; case FDR10SPEED: if (val > 1) IBEXIT("invalid fdr10 speed value %ld", val); break; case WIDTH: if ((val > 31 && val != 255)) IBEXIT("invalid width value %ld", val); break; case VLS: if (val == 0 || val > 5) IBEXIT("invalid vls value %ld", val); break; case MTU: if (val == 0 || val > 5) IBEXIT("invalid mtu value %ld", val); break; case LID: if (val == 0 || val >= 0xC000) IBEXIT("invalid lid value 0x%lx", val); break; case SMLID: if (val == 0 || val >= 0xC000) IBEXIT("invalid smlid value 0x%lx", val); break; case LMC: if (val > 7) IBEXIT("invalid lmc value %ld", val); break; case MKEY: errno = 0; val = strtoull(argv[i], &endp, 0); if (errno || *endp != '\0') { errno = 0; val = strtoull(getpass("New M_Key: "), &endp, 0); if (errno || *endp != '\0') { IBEXIT("Bad new M_Key\n"); } } /* All 64-bit values are legal */ break; case MKEYLEASE: if (val > 0xFFFF) IBEXIT("invalid mkey lease time %ld", val); break; case MKEYPROT: if (val > 3) IBEXIT("invalid mkey protection bit setting %ld", val); } *port_args[j].val = val; changed = 1; break; } if (j == NPORT_ARGS) IBEXIT("invalid operation: %s", argv[i]); } if (port_op < 0) port_op = QUERY; is_switch = get_node_info(&portid, data); vendorid = (uint32_t) mad_get_field(data, 0, IB_NODE_VENDORID_F); devid = (uint16_t) mad_get_field(data, 0, IB_NODE_DEVID_F); if ((port_args[MKEY].set || port_args[MKEYLEASE].set || port_args[MKEYPROT].set) && is_switch && portnum != 0) IBEXIT("Can't set M_Key fields on switch port != 0"); if (port_op != QUERY || changed) printf("Initial %s PortInfo:\n", 
is_switch ? "Switch" : "CA/RT"); else printf("%s PortInfo:\n", is_switch ? "Switch" : "CA/RT"); espeed_cap = get_port_info(&portid, data, portnum, is_switch); show_port_info(&portid, data, portnum, espeed_cap, is_switch); if (is_mlnx_ext_port_info_supported(vendorid, devid)) { get_mlnx_ext_port_info(&portid, data2, portnum); show_mlnx_ext_port_info(&portid, data2, portnum); } if (port_op != QUERY || changed) { /* * If we aren't setting the LID and the LID is the default, * the SMA command will fail due to an invalid LID. * Set it to something unlikely but valid. */ physstate = mad_get_field(data, 0, IB_PORT_PHYS_STATE_F); val = mad_get_field(data, 0, IB_PORT_LID_F); if (!port_args[LID].set && (!val || val == 0xFFFF)) mad_set_field(data, 0, IB_PORT_LID_F, 0x1234); val = mad_get_field(data, 0, IB_PORT_SMLID_F); if (!port_args[SMLID].set && (!val || val == 0xFFFF)) mad_set_field(data, 0, IB_PORT_SMLID_F, 0x1234); mad_set_field(data, 0, IB_PORT_STATE_F, 0); /* NOP */ mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 0); /* NOP */ switch (port_op) { case ON: /* Enable only if state is Disable */ if(physstate != 3) { printf("Port is already in enable state\n"); goto close_port; } case ENABLE: case RESET: /* Polling */ mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 2); break; case OFF: case DISABLE: printf("Disable may be irreversible\n"); mad_set_field(data, 0, IB_PORT_PHYS_STATE_F, 3); break; case DOWN: mad_set_field(data, 0, IB_PORT_STATE_F, 1); break; case ARM: mad_set_field(data, 0, IB_PORT_STATE_F, 3); break; case ACTIVE: mad_set_field(data, 0, IB_PORT_STATE_F, 4); break; } /* always set enabled speeds/width - defaults to NOP */ mad_set_field(data, 0, IB_PORT_LINK_SPEED_ENABLED_F, speed); mad_set_field(data, 0, IB_PORT_LINK_SPEED_EXT_ENABLED_F, espeed); mad_set_field(data, 0, IB_PORT_LINK_WIDTH_ENABLED_F, width); if (port_args[VLS].set) mad_set_field(data, 0, IB_PORT_OPER_VLS_F, vls); if (port_args[MTU].set) mad_set_field(data, 0, IB_PORT_NEIGHBOR_MTU_F, mtu); if 
(port_args[LID].set) mad_set_field(data, 0, IB_PORT_LID_F, lid); if (port_args[SMLID].set) mad_set_field(data, 0, IB_PORT_SMLID_F, smlid); if (port_args[LMC].set) mad_set_field(data, 0, IB_PORT_LMC_F, lmc); if (port_args[FDR10SPEED].set) { mad_set_field(data2, 0, IB_MLNX_EXT_PORT_STATE_CHG_ENABLE_F, FDR10); mad_set_field(data2, 0, IB_MLNX_EXT_PORT_LINK_SPEED_ENABLED_F, fdr10); set_mlnx_ext_port_info(&portid, data2, portnum); } if (port_args[MKEY].set) mad_set_field64(data, 0, IB_PORT_MKEY_F, mkey); if (port_args[MKEYLEASE].set) mad_set_field(data, 0, IB_PORT_MKEY_LEASE_F, mkeylease); if (port_args[MKEYPROT].set) mad_set_field(data, 0, IB_PORT_MKEY_PROT_BITS_F, mkeyprot); set_port_info(&portid, data, portnum, espeed_cap, is_switch); } else if (is_switch && portnum) { /* Now, make sure PortState is Active */ /* Or is PortPhysicalState LinkUp sufficient ? */ mad_decode_field(data, IB_PORT_STATE_F, &state); mad_decode_field(data, IB_PORT_PHYS_STATE_F, &physstate); if (state == 4) { /* Active */ mad_decode_field(data, IB_PORT_LINK_WIDTH_ENABLED_F, &lwe); mad_decode_field(data, IB_PORT_LINK_WIDTH_SUPPORTED_F, &lws); mad_decode_field(data, IB_PORT_LINK_WIDTH_ACTIVE_F, &lwa); mad_decode_field(data, IB_PORT_LINK_SPEED_SUPPORTED_F, &lss); mad_decode_field(data, IB_PORT_LINK_SPEED_ACTIVE_F, &lsa); mad_decode_field(data, IB_PORT_LINK_SPEED_ENABLED_F, &lse); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_SUPPORTED_F, &fdr10s); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_ENABLED_F, &fdr10e); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_ACTIVE_F, &fdr10a); if (espeed_cap) { mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_SUPPORTED_F, &lses); mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_ACTIVE_F, &lsea); mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_ENABLED_F, &lsee); } /* Setup portid for peer port */ memcpy(&peerportid, &portid, sizeof(peerportid)); if (portid.lid == 0) { peerportid.drpath.cnt++; if (peerportid.drpath.cnt == IB_SUBNET_PATH_HOPS_MAX) { 
IBEXIT("Too many hops"); } } else { peerportid.drpath.cnt = 1; /* Set DrSLID to local lid */ if (resolve_self(ibd_ca, ibd_ca_port, &selfportid, &selfport, 0) < 0) IBEXIT("could not resolve self"); peerportid.drpath.drslid = (uint16_t) selfportid.lid; peerportid.drpath.drdlid = 0xffff; } peerportid.drpath.p[peerportid.drpath.cnt] = (uint8_t) portnum; /* Get peer port NodeInfo to obtain peer port number */ is_peer_switch = get_node_info(&peerportid, data); rem_vendorid = (uint32_t) mad_get_field(data, 0, IB_NODE_VENDORID_F); rem_devid = (uint16_t) mad_get_field(data, 0, IB_NODE_DEVID_F); mad_decode_field(data, IB_NODE_LOCAL_PORT_F, &peerlocalportnum); printf("Peer PortInfo:\n"); /* Get peer port characteristics */ peer_espeed_cap = get_port_info(&peerportid, data, peerlocalportnum, is_peer_switch); if (is_mlnx_ext_port_info_supported(rem_vendorid, rem_devid)) get_mlnx_ext_port_info(&peerportid, data2, peerlocalportnum); show_port_info(&peerportid, data, peerlocalportnum, peer_espeed_cap, is_peer_switch); if (is_mlnx_ext_port_info_supported(rem_vendorid, rem_devid)) show_mlnx_ext_port_info(&peerportid, data2, peerlocalportnum); mad_decode_field(data, IB_PORT_LINK_WIDTH_ENABLED_F, &peerlwe); mad_decode_field(data, IB_PORT_LINK_WIDTH_SUPPORTED_F, &peerlws); mad_decode_field(data, IB_PORT_LINK_WIDTH_ACTIVE_F, &peerlwa); mad_decode_field(data, IB_PORT_LINK_SPEED_SUPPORTED_F, &peerlss); mad_decode_field(data, IB_PORT_LINK_SPEED_ACTIVE_F, &peerlsa); mad_decode_field(data, IB_PORT_LINK_SPEED_ENABLED_F, &peerlse); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_SUPPORTED_F, &peerfdr10s); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_ENABLED_F, &peerfdr10e); mad_decode_field(data2, IB_MLNX_EXT_PORT_LINK_SPEED_ACTIVE_F, &peerfdr10a); if (peer_espeed_cap) { mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_SUPPORTED_F, &peerlses); mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_ACTIVE_F, &peerlsea); mad_decode_field(data, IB_PORT_LINK_SPEED_EXT_ENABLED_F, &peerlsee); } /* 
Now validate peer port characteristics */ /* Examine Link Width */ width = get_link_width(lwe, lws); peerwidth = get_link_width(peerlwe, peerlws); validate_width(width, peerwidth, lwa); /* Examine Link Speeds */ speed = get_link_speed(lse, lss); peerspeed = get_link_speed(peerlse, peerlss); validate_speed(speed, peerspeed, lsa); if (espeed_cap && peer_espeed_cap) { espeed = get_link_speed_ext(lsee, lses); peerespeed = get_link_speed_ext(peerlsee, peerlses); validate_extended_speed(espeed, peerespeed, lsea); } else { if (fdr10e & FDR10 && peerfdr10e & FDR10) { if (!(fdr10a & FDR10)) IBWARN("Peer ports operating at active speed %d rather than FDR10", lsa); } } } } close_port: mad_rpc_close_port(srcport); exit(0); }
/*
 * Marshal an RMS "create node" command into the dispatcher's shared buffer
 * and send it to the DSP-side resource manager.
 *
 * The buffer is filled as an array of rms_word: an rms_command header,
 * then the message args (plus inline payload), and for TASK/DAISSOCKET
 * nodes the extended task args followed by the input/output stream
 * definitions.  All offsets below are in rms_word units.
 *
 * Returns 0 on success, -EPERM when the args do not fit in the RMS buffer
 * or the node type is unsupported, or the error from send_message().
 */
int disp_node_create(struct disp_object *disp_obj, struct node_object *hnode,
		     u32 rms_fxn, u32 ul_create_fxn,
		     const struct node_createargs *pargs, nodeenv *node_env)
{
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
	struct rms_command *rms_cmd;
	struct rms_msg_args *pmsg_args;
	struct rms_more_task_args *more_task_args;
	enum node_type node_type;
	u32 dw_length;
	rms_word *pdw_buf = NULL;
	u32 ul_bytes;
	u32 i;
	u32 total;
	u32 chars_in_rms_word;
	s32 task_args_offset;
	s32 sio_in_def_offset;
	s32 sio_out_def_offset;
	s32 sio_defs_offset;
	s32 args_offset = -1;
	s32 offset;
	struct node_strmdef strm_def;
	u32 max;
	int status = 0;
	struct dsp_nodeinfo node_info;
	u8 dev_type;

	status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);

	if (status)
		goto func_end;

	if (dev_type != DSP_UNIT) {
		/* Only real DSP units accept RMS create commands. */
		dev_dbg(bridge, "%s: unknown device type = 0x%x\n",
			__func__, dev_type);
		goto func_end;
	}
	node_type = node_get_type(hnode);
	node_msg_args = pargs->asa.node_msg_args;
	max = disp_obj->bufsize_rms;	/* buffer capacity, in RMS words */
	/* How many DSP characters fit in one rms_word. */
	chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
	/* Message payload length rounded up to whole RMS words. */
	dw_length = (node_msg_args.arg_length + chars_in_rms_word - 1) /
		    chars_in_rms_word;
	/* Space needed so far: command header + msg args + payload. */
	total = sizeof(struct rms_command) / sizeof(rms_word) +
		sizeof(struct rms_msg_args) / sizeof(rms_word) - 1 + dw_length;
	if (total >= max) {
		status = -EPERM;
		dev_dbg(bridge, "%s: Message args too large for buffer! size "
			"= %d, max = %d\n", __func__, total, max);
	}
	/* Marshal the command header and message args. */
	if (!status) {
		total = 0;	/* (re)start filling from the buffer base */
		pdw_buf = (rms_word *) disp_obj->buf;
		rms_cmd = (struct rms_command *)pdw_buf;
		rms_cmd->fxn = (rms_word) (rms_fxn);
		rms_cmd->arg1 = (rms_word) (ul_create_fxn);
		if (node_get_load_type(hnode) == NLDR_DYNAMICLOAD) {
			/* Flag dynamically loaded nodes for the DSP side. */
			rms_cmd->arg2 = 1;
		} else {
			/* Statically linked node. */
			rms_cmd->arg2 = 0;
		}
		rms_cmd->data = node_get_type(hnode);
		/* args_offset = header size (3 words); stream-def offsets
		 * below are expressed relative to this point. */
		args_offset = 3;
		total += sizeof(struct rms_command) / sizeof(rms_word);
		/* Message args immediately follow the command header. */
		pmsg_args = (struct rms_msg_args *)(pdw_buf + total);
		pmsg_args->max_msgs = node_msg_args.max_msgs;
		pmsg_args->segid = node_msg_args.seg_id;
		pmsg_args->notify_type = node_msg_args.notify_type;
		pmsg_args->arg_length = node_msg_args.arg_length;
		total += sizeof(struct rms_msg_args) / sizeof(rms_word) - 1;
		/* Copy the inline message payload right after the args. */
		memcpy(pdw_buf + total, node_msg_args.pdata,
		       node_msg_args.arg_length);
		total += dw_length;
	}
	if (status)
		goto func_end;

	/* Append task-specific args for TASK / DAIS socket nodes. */
	if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
		task_arg_obj = pargs->asa.task_arg_obj;
		task_args_offset = total;
		/* Worst-case extra space: task args + num_outputs word +
		 * one offset word per input and output stream. */
		total += sizeof(struct rms_more_task_args) / sizeof(rms_word) +
			1 + task_arg_obj.num_inputs + task_arg_obj.num_outputs;
		/* Proceed only if everything still fits in the buffer. */
		if (total < max) {
			total = task_args_offset;
			more_task_args = (struct rms_more_task_args *)(pdw_buf +
								       total);
			/* Priority comes from the node's own info. */
			get_node_info(hnode, &node_info);
			more_task_args->priority = node_info.execution_priority;
			more_task_args->stack_size = task_arg_obj.stack_size;
			more_task_args->sysstack_size =
				task_arg_obj.sys_stack_size;
			more_task_args->stack_seg = task_arg_obj.stack_seg;
			more_task_args->heap_addr = task_arg_obj.dsp_heap_addr;
			more_task_args->heap_size = task_arg_obj.heap_size;
			more_task_args->misc = task_arg_obj.dais_arg;
			more_task_args->num_input_streams =
				task_arg_obj.num_inputs;
			total += sizeof(struct rms_more_task_args) /
				sizeof(rms_word);
			dev_dbg(bridge, "%s: dsp_heap_addr %x, heap_size %x\n",
				__func__, task_arg_obj.dsp_heap_addr,
				task_arg_obj.heap_size);
			/* Reserve one offset word per input stream def. */
			sio_in_def_offset = total;
			total += task_arg_obj.num_inputs;
			pdw_buf[total++] = task_arg_obj.num_outputs;
			sio_out_def_offset = total;
			total += task_arg_obj.num_outputs;
			/* Stream definitions are packed after the offsets. */
			sio_defs_offset = total;
			offset = sio_defs_offset;
			for (i = 0; i < task_arg_obj.num_inputs; i++) {
				if (status)
					break;
				/* Record where this def lands, relative to
				 * args_offset, scaled to DSP words. */
				pdw_buf[sio_in_def_offset + i] =
					(offset - args_offset) *
					(sizeof(rms_word) / DSPWORDSIZE);
				strm_def = task_arg_obj.strm_in_def[i];
				status = fill_stream_def(pdw_buf, &total,
							 offset, strm_def, max,
							 chars_in_rms_word);
				offset = total;
			}
			for (i = 0; (i < task_arg_obj.num_outputs) &&
			     (!status); i++) {
				pdw_buf[sio_out_def_offset + i] =
					(offset - args_offset) *
					(sizeof(rms_word) / DSPWORDSIZE);
				strm_def = task_arg_obj.strm_out_def[i];
				status = fill_stream_def(pdw_buf, &total,
							 offset, strm_def, max,
							 chars_in_rms_word);
				offset = total;
			}
		} else {
			/* Task args overflow the RMS buffer. */
			status = -EPERM;
		}
	}
	if (!status) {
		ul_bytes = total * sizeof(rms_word);
		status = send_message(disp_obj, node_get_timeout(hnode),
				      ul_bytes, node_env);
	}
func_end:
	return status;
}
/*
 * Walk (or, in ALLOC_NODE mode, build) the node-block path from the inode
 * down to the dnode covering page `index`, filling in dn's nid,
 * ofs_in_node, data_blkaddr and node_blkaddr for that page.
 *
 * dn->inode_blk must already hold the inode block; intermediate node
 * blocks read along the way are freed here, while the final dn->node_blk
 * is left allocated for the caller (it aliases dn->inode_blk when the
 * target is inside the inode itself, i.e. level == 0).
 */
void get_dnode_of_data(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				pgoff_t index, int mode)
{
	int offset[4];		/* slot index chosen at each tree level */
	unsigned int noffset[4];	/* node offsets along the path */
	struct f2fs_node *parent = NULL;
	nid_t nids[4];
	block_t nblk[4];	/* block address of the node at each level */
	struct node_info ni;
	int level, i;
	int ret;

	/* Depth of the path to `index` (0 = data lives in the inode). */
	level = get_node_path(index, offset, noffset);

	nids[0] = dn->nid;
	parent = dn->inode_blk;
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], 1);
	else
		dn->node_blk = dn->inode_blk;

	get_node_info(sbi, nids[0], &ni);
	nblk[0] = ni.blk_addr;

	/* Descend one level per iteration, reading or allocating nodes. */
	for (i = 1; i <= level; i++) {
		if (!nids[i] && mode == ALLOC_NODE) {
			/* Missing node on the path: allocate a new one. */
			f2fs_alloc_nid(sbi, &nids[i], 0);

			dn->nid = nids[i];

			/* new_node_block() gets a new f2fs_node block and
			 * updates dn->node_blk; dn->node_blk must be NULL
			 * on entry. */
			nblk[i] = new_node_block(sbi, dn, noffset[i]);
			ASSERT(nblk[i]);

			/* Link the new child into its parent node. */
			set_nid(parent, offset[i - 1], nids[i], i == 1);
		} else {
			/* Existing node: read it from disk.
			 * NOTE(review): comment in original hints sparse
			 * files may lack a read API here — confirm. */
			struct node_info ni;
			get_node_info(sbi, nids[i], &ni);
			dn->node_blk = calloc(BLOCK_SZ, 1);
			ASSERT(dn->node_blk);

			ret = dev_read_block(dn->node_blk, ni.blk_addr);
			ASSERT(ret >= 0);

			nblk[i] = ni.blk_addr;
		}

		if (mode == ALLOC_NODE) {
			/* Parent node may have changed (new child nid set
			 * above): write it back. */
			ret = dev_write_block(parent, nblk[i - 1]);
			ASSERT(ret >= 0);
		}
		/* Free intermediate node blocks we allocated; i == 1 means
		 * parent is the caller-owned inode block, so keep it. */
		if (i != 1)
			free(parent);

		if (i < level) {
			parent = dn->node_blk;
			nids[i + 1] = get_nid(parent, offset[i], 0);
		}
	}
	/* Publish the final dnode location for the requested page. */
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->data_blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
	dn->node_blkaddr = nblk[level];
}