/*
 * Verify sdp_get_uuid(): each SDP-encoded UUID (16-, 32- or 128-bit) must
 * be returned expanded to a full 128-bit uuid_t, advancing the parse
 * cursor on success and leaving it untouched for non-UUID elements.
 */
ATF_TC_BODY(check_sdp_get_uuid, tc)
{
	uint8_t data[] = {
		0x19, 0x12, 0x34,	/* uuid16 0x1234 */
		0x1a, 0x11, 0x22, 0x33,	/* uuid32 0x11223344 (last byte below) */
		0x44,
		0x00,			/* nil */
		0x1c,			/* uuid128 00112233-4444-5555-6666-778899aabbcc */
		0x00, 0x11, 0x22, 0x33,
		0x44, 0x44, 0x55, 0x55,
		0x66, 0x66, 0x77, 0x88,
		0x99, 0xaa, 0xbb, 0xcc,
	};
	sdp_data_t test = { data, data + sizeof(data) };
	/* uuid16/uuid32 are expected expanded onto the Bluetooth base UUID */
	uuid_t u16 = { 0x00001234, 0x0000, 0x1000, 0x80, 0x00,
	    { 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb } };
	uuid_t u32 = { 0x11223344, 0x0000, 0x1000, 0x80, 0x00,
	    { 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb } };
	uuid_t u128 = { 0x00112233, 0x4444, 0x5555, 0x66, 0x66,
	    { 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc } };
	sdp_data_t nil;
	uuid_t value;

	/*
	 * sdp_get_uuid expects any UUID type, returns the full uuid,
	 * advancing test if successful
	 */
	ATF_REQUIRE(sdp_get_uuid(&test, &value));
	ATF_CHECK(uuid_equal(&value, &u16, NULL));

	ATF_REQUIRE(sdp_get_uuid(&test, &value));
	ATF_CHECK(uuid_equal(&value, &u32, NULL));

	ATF_REQUIRE_EQ(sdp_get_uuid(&test, &value), false);	/* not uuid */
	ATF_REQUIRE(sdp_get_data(&test, &nil));			/* (skip) */
	ATF_CHECK_EQ(sdp_data_type(&nil), SDP_DATA_NIL);

	ATF_REQUIRE(sdp_get_uuid(&test, &value));
	ATF_CHECK(uuid_equal(&value, &u128, NULL));

	/* all input consumed */
	ATF_CHECK_EQ(test.next, test.end);
}
void test_uuid_v5(void) { uuid_t uuid, uuid_next; uuid_v5(&uuid, &uuid_namespace_dns, (uint8_t*)riotos_org, RIOTOS_ORG_LEN); uuid_v5(&uuid_next, &uuid, (uint8_t*)test_str, TEST_STR_LEN); TEST_ASSERT(uuid_equal(&uuid, (uuid_t*)v5_check1)); TEST_ASSERT(uuid_equal(&uuid_next, (uuid_t*)v5_check2)); TEST_ASSERT_EQUAL_INT(uuid_version(&uuid), UUID_V5); TEST_ASSERT_EQUAL_INT(uuid_version(&uuid_next), UUID_V5); }
DECLARE_TEST( uuid, string ) { uuid_t uuid, uuidref; char* str; uuidref = uuid_generate_random(); EXPECT_FALSE( uuid_is_null( uuidref ) ); str = string_from_uuid( uuidref ); EXPECT_NE( str, 0 ); uuid = string_to_uuid( str ); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuidref ) ); string_deallocate( str ); uuid = string_to_uuid( "" ); EXPECT_EQ_MSGFORMAT( uuid_is_null( uuid ), true, "empty string did not convert to null uuid: %s", string_from_uuid_static( uuid ) ); uuid = string_to_uuid( "0" ); EXPECT_EQ_MSGFORMAT( uuid_is_null( uuid ), true, "\"0\" string did not convert to null uuid: %s", string_from_uuid_static( uuid ) ); uuid = string_to_uuid( string_from_uuid_static( uuid_null() ) ); EXPECT_EQ_MSGFORMAT( uuid_is_null( uuid ), true, "null uuid reconvert through string did not convert to null uuid: %s", string_from_uuid_static( uuid ) ); return 0; }
/*
 * Verify the CRC and filesystem UUID of every dquot record in a quota
 * buffer.  Returns true when CRCs are not in use, or when every record
 * passes both checks; false on the first failing record.
 */
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*dqb = (struct xfs_dqblk *)bp->b_addr;
	int			dquot_count;
	int			idx;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		dquot_count = mp->m_quotainfo->qi_dqperchunk;
	else
		dquot_count = xfs_calc_dquots_per_chunk(mp,
					XFS_BB_TO_FSB(mp, bp->b_length));

	for (idx = 0; idx < dquot_count; idx++, dqb++) {
		if (!xfs_verify_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}

	return true;
}
static void read_handler(BLECharacteristic characteristic, const uint8_t *value, size_t value_length, uint16_t value_offset, BLEGATTError error) { char uuid_buffer[UUID_STRING_BUFFER_LENGTH]; Uuid characteristic_uuid = ble_characteristic_get_uuid(characteristic); uuid_to_string(&characteristic_uuid, uuid_buffer); APP_LOG(APP_LOG_LEVEL_INFO, "Read Characteristic %s, %u bytes, error: %u", uuid_buffer, value_length, error); for (size_t i = 0; i < value_length; ++i) { APP_LOG(APP_LOG_LEVEL_INFO, "0x%02x", value[i]); } const Uuid node_service_4_uuid = UuidMake(0x18, 0xcd, 0xa7, 0x84, 0x4b, 0xd3, 0x43, 0x70, 0x85, 0xbb, 0xbf, 0xed, 0x91, 0xec, 0x86, 0xaf); // node sensor data incoming if (uuid_equal(&characteristic_uuid, &node_service_4_uuid)) { APP_LOG(APP_LOG_LEVEL_INFO, "Incoming sensor data..."); if(value_length + node_ctx.read_node_buffer_pos > node_ctx.read_node_buffer_max) { process_read_buffer(); node_ctx.read_node_buffer_pos = 0; } char *buff = (char *)&node_ctx.read_node_buffer; memcpy(buff+node_ctx.read_node_buffer_pos, value, value_length); node_ctx.read_node_buffer_pos += value_length; } }
static xfs_failaddr_t xfs_symlink_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_dsymlink_hdr *dsl = bp->b_addr; if (!xfs_sb_version_hascrc(&mp->m_sb)) return __this_address; if (!xfs_verify_magic(bp, dsl->sl_magic)) return __this_address; if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid)) return __this_address; if (bp->b_bn != be64_to_cpu(dsl->sl_blkno)) return __this_address; if (be32_to_cpu(dsl->sl_offset) + be32_to_cpu(dsl->sl_bytes) >= XFS_SYMLINK_MAXLEN) return __this_address; if (dsl->sl_owner == 0) return __this_address; if (!xfs_log_check_lsn(mp, be64_to_cpu(dsl->sl_lsn))) return __this_address; return NULL; }
/*
 * Insert a filesystem uuid into the global table, reusing a nil slot when
 * one is available and growing the table otherwise.  Returns 0 when the
 * uuid is already present (duplicate), 1 on successful insertion.
 */
int
uuid_table_insert(uuid_t *uuid)
{
	int	slot;
	int	free_slot = -1;

	mutex_lock(&uuid_monitor, PVFS);
	for (slot = 0; slot < uuid_table_size; slot++) {
		if (uuid_is_nil(&uuid_table[slot])) {
			free_slot = slot;	/* remember for reuse */
			continue;
		}
		if (uuid_equal(uuid, &uuid_table[slot])) {
			/* already registered */
			mutex_unlock(&uuid_monitor);
			return 0;
		}
	}
	if (free_slot < 0) {
		/* no nil slot found: grow the table by one entry */
		uuid_table = kmem_realloc(uuid_table,
			(uuid_table_size + 1) * sizeof(*uuid_table),
			uuid_table_size * sizeof(*uuid_table),
			KM_SLEEP);
		free_slot = uuid_table_size++;
	}
	uuid_table[free_slot] = *uuid;
	mutex_unlock(&uuid_monitor);
	return 1;
}
/*
 * Structural checks for a remote attribute block header: CRC filesystems
 * only, correct magic, matching filesystem UUID, the expected disk block
 * number, a non-zero owner, and byte counts that fit both the block and
 * the overall xattr size limit.
 */
static bool
xfs_attr3_rmt_verify(
	struct xfs_mount	*mp,
	void			*ptr,
	int			fsbsize,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr	*hdr = ptr;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (hdr->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
		return false;
	if (!uuid_equal(&hdr->rm_uuid, &mp->m_sb.sb_uuid))
		return false;
	if (be64_to_cpu(hdr->rm_blkno) != bno)
		return false;
	if (be32_to_cpu(hdr->rm_bytes) > fsbsize - sizeof(*hdr))
		return false;
	if (be32_to_cpu(hdr->rm_offset) + be32_to_cpu(hdr->rm_bytes) >
	    XATTR_SIZE_MAX)
		return false;
	if (hdr->rm_owner == 0)
		return false;

	return true;
}
/*
 * Get or set the superblock uuid of allocation group agno.
 *
 * With uuid == NULL the AG's current sb_uuid is copied into a static
 * buffer and returned.  Otherwise the new uuid is written, taking care of
 * the CRC-filesystem meta-uuid bookkeeping described inline below.
 * Returns the uuid pointer on success, NULL when the superblock could
 * not be read.
 *
 * NOTE(review): `mp` is not declared in this function; presumably a
 * file-scope xfs_mount pointer (common in xfs_db) — confirm.
 */
static uuid_t *
do_uuid(xfs_agnumber_t agno, uuid_t *uuid)
{
	xfs_sb_t	tsb;
	static uuid_t	uu;	/* static: returned to the caller */

	if (!get_sb(agno, &tsb))
		return NULL;

	if (!uuid) {	/* get uuid */
		memcpy(&uu, &tsb.sb_uuid, sizeof(uuid_t));
		pop_cur();
		return &uu;
	}
	/* set uuid */

	/* Pre-CRC filesystems have no meta uuid; just write the new one. */
	if (!xfs_sb_version_hascrc(&tsb))
		goto write;
	/*
	 * If we have CRCs, and this UUID differs from that stamped in the
	 * metadata, set the incompat flag and copy the old one to the
	 * metadata-specific location.
	 *
	 * If we are setting the user-visible UUID back to match the metadata
	 * UUID, clear the metadata-specific location and the incompat flag.
	 */
	if (!xfs_sb_version_hasmetauuid(&tsb) &&
	    !uuid_equal(uuid, &mp->m_sb.sb_meta_uuid)) {
		mp->m_sb.sb_features_incompat |= XFS_SB_FEAT_INCOMPAT_META_UUID;
		tsb.sb_features_incompat |= XFS_SB_FEAT_INCOMPAT_META_UUID;
		memcpy(&tsb.sb_meta_uuid, &tsb.sb_uuid, sizeof(uuid_t));
	} else if (xfs_sb_version_hasmetauuid(&tsb) &&
		   uuid_equal(uuid, &mp->m_sb.sb_meta_uuid)) {
		memset(&tsb.sb_meta_uuid, 0, sizeof(uuid_t));
		/* Write those zeros now; it's ignored once we clear the flag */
		libxfs_sb_to_disk(iocur_top->data, &tsb);
		mp->m_sb.sb_features_incompat &= ~XFS_SB_FEAT_INCOMPAT_META_UUID;
		tsb.sb_features_incompat &= ~XFS_SB_FEAT_INCOMPAT_META_UUID;
	}

write:
	memcpy(&tsb.sb_uuid, uuid, sizeof(uuid_t));
	libxfs_sb_to_disk(iocur_top->data, &tsb);
	write_cur();
	return uuid;
}
static size_t show_section(struct mca_section_header *sh) { static uuid_t uuid_cpu = MCA_UUID_CPU; static uuid_t uuid_memory = MCA_UUID_MEMORY; static uuid_t uuid_sel = MCA_UUID_SEL; static uuid_t uuid_pci_bus = MCA_UUID_PCI_BUS; static uuid_t uuid_smbios = MCA_UUID_SMBIOS; static uuid_t uuid_pci_dev = MCA_UUID_PCI_DEV; static uuid_t uuid_generic = MCA_UUID_GENERIC; printf(" <section>\n"); show_value(4, "uuid", "%s", uuid(&sh->sh_uuid)); show_value(4, "revision", "%d.%d", BCD(sh->sh_major), BCD(sh->sh_minor)); if (uuid_equal(&sh->sh_uuid, &uuid_cpu, NULL)) show_cpu((void*)(sh + 1)); else if (uuid_equal(&sh->sh_uuid, &uuid_memory, NULL)) show_memory((void*)(sh + 1)); else if (uuid_equal(&sh->sh_uuid, &uuid_sel, NULL)) show_sel(); else if (uuid_equal(&sh->sh_uuid, &uuid_pci_bus, NULL)) show_pci_bus((void*)(sh + 1)); else if (uuid_equal(&sh->sh_uuid, &uuid_smbios, NULL)) show_smbios(); else if (uuid_equal(&sh->sh_uuid, &uuid_pci_dev, NULL)) show_pci_dev((void*)(sh + 1)); else if (uuid_equal(&sh->sh_uuid, &uuid_generic, NULL)) show_generic(); printf(" </section>\n"); return (sh->sh_length); }
DECLARE_TEST( uuid, threaded ) { object_t thread[32]; int ith, i, jth, j; int num_threads = math_clamp( system_hardware_threads() + 1, 3, 32 ); for( ith = 0; ith < num_threads; ++ith ) { thread[ith] = thread_create( uuid_thread_time, "uuid_thread", THREAD_PRIORITY_NORMAL, 0 ); thread_start( thread[ith], (void*)(uintptr_t)ith ); } test_wait_for_threads_startup( thread, num_threads ); for( ith = 0; ith < num_threads; ++ith ) { thread_terminate( thread[ith] ); thread_destroy( thread[ith] ); } test_wait_for_threads_exit( thread, num_threads ); for( ith = 0; ith < num_threads; ++ith ) { for( i = 0; i < 8192; ++i ) { for( jth = ith + 1; jth < num_threads; ++jth ) { for( j = 0; j < 8192; ++j ) { EXPECT_FALSE( uuid_equal( uuid_thread_store[ith][i], uuid_thread_store[jth][j] ) ); } } for( j = i + 1; j < 8192; ++j ) { EXPECT_FALSE( uuid_equal( uuid_thread_store[ith][i], uuid_thread_store[ith][j] ) ); } } } return 0; }
/*
 * Remove a uuid from the global table by overwriting its slot with the
 * nil uuid (freeing the slot for reuse).  The uuid is expected to be
 * present; this is asserted after the scan.
 */
void
uuid_table_remove(uuid_t *uuid)
{
	int	slot;

	mutex_lock(&uuid_monitor, PVFS);
	for (slot = 0; slot < uuid_table_size; slot++) {
		if (uuid_is_nil(&uuid_table[slot]) ||
		    !uuid_equal(uuid, &uuid_table[slot]))
			continue;
		/* found it: release the slot */
		uuid_create_nil(&uuid_table[slot]);
		break;
	}
	ASSERT(slot < uuid_table_size);
	mutex_unlock(&uuid_monitor);
}
static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt)) return true; for (i = 0; i < ofs->numlowerfs; i++) { /* * We use uuid to associate an overlay lower file handle with a * lower layer, so we can accept lower fs with null uuid as long * as all lower layers with null uuid are on the same fs. */ if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) return false; } return true; }
/*
 * Look up a loaded-module map entry by uuid in the Lua registry's
 * loaded-modules array.  Returns the matching entry, or nullptr when no
 * module with that uuid has been registered.  The Lua stack is left
 * unchanged on return.
 */
static lua_modulemap_entry_t*
lua_module_registry_lookup(lua_State* state, const uuid_t uuid) {
	lua_pushlstring(state, STRING_CONST(BUILD_REGISTRY_LOADED_MODULES));
	lua_gettable(state, LUA_REGISTRYINDEX);
	lua_pushlstring(state, STRING_CONST(LOADED_MODULES_ENTRY_ARRAY));
	lua_gettable(state, -2);
	lua_modulemap_entry_t** entries = lua_touserdata(state, -1);
	lua_pop(state, 2);  /* restore stack balance */
	if (!entries)
		return nullptr;
	for (size_t ient = 0, ecount = array_size(entries); ient != ecount; ++ient) {
		lua_modulemap_entry_t* entry = entries[ient];
		if (uuid_equal(entry->uuid, uuid))
			return entry;
	}
	return nullptr;
}
static bool xfs_dir3_block_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; if (xfs_sb_version_hascrc(&mp->m_sb)) { if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) return false; if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_uuid)) return false; if (be64_to_cpu(hdr3->blkno) != bp->b_bn) return false; } else { if (hdr3->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) return false; } if (__xfs_dir3_data_check(NULL, bp)) return false; return true; }
static bool xfs_dinode_verify( struct xfs_mount *mp, struct xfs_inode *ip, struct xfs_dinode *dip) { if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) return false; /* only version 3 or greater inodes are extensively verified here */ if (dip->di_version < 3) return true; if (!xfs_sb_version_hascrc(&mp->m_sb)) return false; if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize, XFS_DINODE_CRC_OFF)) return false; if (be64_to_cpu(dip->di_ino) != ip->i_ino) return false; if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid)) return false; return true; }
/*
 * Map a GPT partition-type uuid to a human-readable name via a lookup
 * table.  Falls back to the textual uuid form (also forced by the
 * show_uuid flag); the fallback string lives in a static buffer that is
 * overwritten on each call.
 */
static const char *
friendly(uuid_t *t)
{
	static struct {
		uuid_t type;
		const char *name;
	} map[] = {
		{ GPT_ENT_TYPE_EFI, "EFI System" },
		{ GPT_ENT_TYPE_BIOS, "BIOS Boot" },
		{ GPT_ENT_TYPE_NETBSD_SWAP, "NetBSD swap" },
		{ GPT_ENT_TYPE_NETBSD_FFS, "NetBSD UFS/UFS2" },
		{ GPT_ENT_TYPE_NETBSD_LFS, "NetBSD LFS" },
		{ GPT_ENT_TYPE_NETBSD_RAIDFRAME, "NetBSD RAIDFrame component" },
		{ GPT_ENT_TYPE_NETBSD_CCD, "NetBSD ccd component" },
		{ GPT_ENT_TYPE_NETBSD_CGD, "NetBSD Cryptographic Disk" },
		{ GPT_ENT_TYPE_FREEBSD_SWAP, "FreeBSD swap" },
		{ GPT_ENT_TYPE_FREEBSD_UFS, "FreeBSD UFS/UFS2" },
		{ GPT_ENT_TYPE_FREEBSD_VINUM, "FreeBSD vinum" },
		{ GPT_ENT_TYPE_FREEBSD, "FreeBSD legacy" },
		{ GPT_ENT_TYPE_MS_BASIC_DATA, "Linux/Windows" },
		{ GPT_ENT_TYPE_LINUX_SWAP, "Linux swap" },
		{ GPT_ENT_TYPE_MS_RESERVED, "Windows reserved" },
		{ GPT_ENT_TYPE_APPLE_HFS, "Apple HFS" },
	};
	static char buf[80];
	char *s;
	size_t i;

	if (show_uuid)
		goto unfriendly;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (uuid_equal(t, &map[i].type, NULL))
			return (map[i].name);
	}

unfriendly:
	uuid_to_string(t, &s, NULL);
	strlcpy(buf, s, sizeof buf);
	free(s);
	return (buf);
}
/*
 * Does this block match the btree information passed in?
 *
 * Reads the block at agbno, checks magic/uuid/verifier against the btree
 * type described by fab, and if it matches updates fab's candidate root
 * and height.  Sets *done_with_block when the block conclusively belongs
 * to this btree type so the caller can skip the other types.  Returns 0
 * or a negative errno.
 */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid. We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here. If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == XFS_BTREE_QUERY_RANGE_ABORT)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type. We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops. If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds. The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops. This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty. If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate. If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here. If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}
		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type. We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate. Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * Set mode, nlink, and rdev appropriately within the inode.
 * The uid and gid for the inode are set according to the contents of
 * the given cred structure.
 *
 * This was once shared with the kernel, but has diverged to the point
 * where it's no longer worth the hassle of maintaining common code.
 */
int
libxfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	nlink_t		nlink,
	xfs_dev_t	rdev,
	struct cred	*cr,
	struct fsxattr	*fsx,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			ialloc_context, &ino);
	if (error != 0)
		return error;
	/* Allocation deferred (needs another transaction) or none free. */
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	error = xfs_trans_iget(tp->t_mountp, tp, ino, 0, 0, &ip);
	if (error != 0)
		return error;
	ASSERT(ip != NULL);

	/* Stamp identity: mode, link count, ownership, project, timestamps. */
	VFS_I(ip)->i_mode = mode;
	set_nlink(VFS_I(ip), nlink);
	ip->i_d.di_uid = cr->cr_uid;
	ip->i_d.di_gid = cr->cr_gid;
	xfs_set_projid(&ip->i_d, pip ? 0 : fsx->fsx_projid);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG | XFS_ICHGTIME_MOD);

	/*
	 * We only support filesystems that understand v2 format inodes. So if
	 * this is currently an old format inode, then change the inode version
	 * number now. This way we only do the conversion here rather than here
	 * and in the flush/logging code.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * old link count, projid_lo/hi field, pad field
		 * already zeroed
		 */
	}

	/* BSD-style setgid semantics: inherit gid (and S_ISGID for dirs). */
	if (pip && (VFS_I(pip)->i_mode & S_ISGID)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR)
			VFS_I(ip)->i_mode |= S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	ip->i_d.di_extsize = pip ? 0 : fsx->fsx_extsize;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = pip ? 0 : fsx->fsx_xflags;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		/*
		 * NOTE(review): `mp` is not declared in this function;
		 * presumably a file-scope xfs_mount pointer — confirm.
		 */
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
		VFS_I(ip)->i_version = 1;
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_crtime.t_sec = (__int32_t)VFS_I(ip)->i_mtime.tv_sec;
		ip->i_d.di_crtime.t_nsec = (__int32_t)VFS_I(ip)->i_mtime.tv_nsec;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		/* doesn't make sense to set an rdev for these */
		rdev = 0;
		/* FALLTHROUGH */
	case S_IFCHR:
	case S_IFBLK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		/* Propagate inheritable flags from the parent directory. */
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
					di_flags |= XFS_DIFLAG_REALTIME;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/* Attribute fork settings for new inode. */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * set up the inode ops structure that the libxfs code relies on
	 */
	if (XFS_ISDIR(ip))
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
	else
		ip->d_ops = ip->i_mount->m_nondir_inode_ops;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);
	*ipp = ip;
	return 0;
}
/*
 * Verify an on-disk inode: v3 integrity metadata (CRC, inode number,
 * filesystem meta-uuid), size/mode sanity, fork format consistency with
 * the file type, and the extent/cow size hints.  Returns the address of
 * the failing check, or NULL when the inode is structurally valid.
 */
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	/* realtime flag requires a realtime device */
	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (XFS_DFORK_Q(dip)) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	     !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	return NULL;
}
/*
 * Remove GPT partition entries matching the (file-scope) entry/block/
 * size/type filters.  A removal clears the partition type uuid to nil in
 * both the primary and secondary tables, recomputing the table and
 * header CRCs and writing all four metadata maps back out.
 *
 * NOTE(review): `entry`, `block`, `size` and `type` are not declared
 * here; presumably command-line option globals — confirm.
 */
static void
rem(gd_t gd)
{
	uuid_t uuid;
	map_t m;
	struct gpt_hdr *hdr;
	struct gpt_ent *ent;
	unsigned int i;

	if ((hdr = gpt_gethdr(gd)) == NULL)
		return;

	/* Remove all matching entries in the map. */
	for (m = map_first(gd); m != NULL; m = m->map_next) {
		if (m->map_type != MAP_TYPE_GPT_PART || m->map_index < 1)
			continue;
		if (entry > 0 && entry != m->map_index)
			continue;
		if (block > 0 && block != m->map_start)
			continue;
		if (size > 0 && size != m->map_size)
			continue;

		i = m->map_index - 1;

		hdr = gd->gpt->map_data;
		ent = (void*)((char*)gd->tbl->map_data + i *
		    le32toh(hdr->hdr_entsz));
		/* Apply the type filter against the decoded entry type. */
		uuid_dec_le(&ent->ent_type, &uuid);
		if (!uuid_is_nil(&type, NULL) &&
		    !uuid_equal(&type, &uuid, NULL))
			continue;

		/* Remove the primary entry by clearing the partition type. */
		uuid_create_nil(&uuid, NULL);
		uuid_enc_le(&ent->ent_type, &uuid);

		hdr->hdr_crc_table = htole32(crc32(gd->tbl->map_data,
		    le32toh(hdr->hdr_entries) * le32toh(hdr->hdr_entsz)));
		/* Header CRC is computed with its own CRC field zeroed. */
		hdr->hdr_crc_self = 0;
		hdr->hdr_crc_self = htole32(crc32(hdr,
		    le32toh(hdr->hdr_size)));

		gpt_write(gd, gd->gpt);
		gpt_write(gd, gd->tbl);

		hdr = gd->tpg->map_data;
		ent = (void*)((char*)gd->lbt->map_data + i *
		    le32toh(hdr->hdr_entsz));

		/* Remove the secondary entry. */
		uuid_enc_le(&ent->ent_type, &uuid);

		hdr->hdr_crc_table = htole32(crc32(gd->lbt->map_data,
		    le32toh(hdr->hdr_entries) * le32toh(hdr->hdr_entsz)));
		hdr->hdr_crc_self = 0;
		hdr->hdr_crc_self = htole32(crc32(hdr,
		    le32toh(hdr->hdr_size)));

		gpt_write(gd, gd->lbt);
		gpt_write(gd, gd->tpg);

		gpt_status(gd, m->map_index, "removed");
	}
}
/*
 * BLE service-discovery callback.  Scans the discovered services for the
 * Node+ "Scratch Service", then iterates its characteristics to capture
 * handles for the four characteristics the app uses.  Calls ready() once
 * all four have been found.
 *
 * Fix: ble_service_get_characteristics() was called with a capacity of 8
 * while the destination array only holds 6 entries, so a service with 7
 * or 8 characteristics overflowed the stack buffer before the post-hoc
 * clamp ran.  The call now passes the true array capacity.
 */
static void service_change_handler(BTDevice device, const BLEService services[],
                                   uint8_t num_services, BTErrno status) {
  // out with the old...
  node_ctx.node_service_1_characteristic = BLE_CHARACTERISTIC_INVALID;
  node_ctx.node_command_characteristic = BLE_CHARACTERISTIC_INVALID;
  node_ctx.node_service_4_characteristic = BLE_CHARACTERISTIC_INVALID;
  node_ctx.node_service_6_characteristic = BLE_CHARACTERISTIC_INVALID;

  for (uint8_t i = 0; i < num_services; ++i) {
    Uuid service_uuid = ble_service_get_uuid(services[i]);
    const Uuid node_service_uuid =
        UuidMake(0xda, 0x2b, 0x84, 0xf1, 0x62, 0x79, 0x48, 0xde,
                 0xbd, 0xc0, 0xaf, 0xbe, 0xa0, 0x22, 0x60, 0x79);
    if (!uuid_equal(&service_uuid, &node_service_uuid)) {
      // Not the Bean's "Scratch Service"
      continue;
    }

    char uuid_buffer[UUID_STRING_BUFFER_LENGTH];
    uuid_to_string(&service_uuid, uuid_buffer);
    const BTDeviceAddress address = bt_device_get_address(device);
    APP_LOG(APP_LOG_LEVEL_INFO,
            "Discovered Node+ service %s (0x%08x) on " BT_DEVICE_ADDRESS_FMT,
            uuid_buffer, services[i], BT_DEVICE_ADDRESS_XPLODE(address));

    // Iterate over the characteristics within the "Scratch Service":
    BLECharacteristic characteristics[6];
    // Pass the real array capacity (6); the previous value of 8 could
    // overflow the stack buffer.
    uint8_t num_characteristics =
        ble_service_get_characteristics(services[i], characteristics, 6);
    if (num_characteristics > 6) {
      num_characteristics = 6;
    }
    for (uint8_t c = 0; c < num_characteristics; ++c) {
      Uuid characteristic_uuid = ble_characteristic_get_uuid(characteristics[c]);

      // The characteristic UUIDs we're looking for:
      const Uuid node_service_1_uuid =
          UuidMake(0xa8, 0x79, 0x88, 0xb9, 0x69, 0x4c, 0x47, 0x9c,
                   0x90, 0x0e, 0x95, 0xdf, 0xa6, 0xc0, 0x0a, 0x24);
      const Uuid node_command_uuid =
          UuidMake(0xbf, 0x03, 0x26, 0x0c, 0x72, 0x05, 0x4c, 0x25,
                   0xaf, 0x43, 0x93, 0xb1, 0xc2, 0x99, 0xd1, 0x59);
      const Uuid node_service_4_uuid =
          UuidMake(0x18, 0xcd, 0xa7, 0x84, 0x4b, 0xd3, 0x43, 0x70,
                   0x85, 0xbb, 0xbf, 0xed, 0x91, 0xec, 0x86, 0xaf);
      const Uuid node_service_6_uuid =
          UuidMake(0xfd, 0xd6, 0xb4, 0xd3, 0x04, 0x6d, 0x43, 0x30,
                   0xbd, 0xec, 0x1f, 0xd0, 0xc9, 0x0c, 0xb4, 0x3b);

      uint8_t node_num = 0;  // Just for logging purposes
      if (uuid_equal(&characteristic_uuid, &node_service_1_uuid)) {
        // Found node service 1
        node_ctx.node_service_1_characteristic = characteristics[c];
        node_num = 1;
      } else if (uuid_equal(&characteristic_uuid, &node_command_uuid)) {
        // Found node command
        node_ctx.node_command_characteristic = characteristics[c];
        node_num = 2;
      } else if (uuid_equal(&characteristic_uuid, &node_service_4_uuid)) {
        // Found node service 4 (sensor data)
        node_ctx.node_service_4_characteristic = characteristics[c];
        node_num = 3;
      } else if (uuid_equal(&characteristic_uuid, &node_service_6_uuid)) {
        // Found node service 6
        node_ctx.node_service_6_characteristic = characteristics[c];
        node_num = 4;
      } else {
        continue;
      }

      uuid_to_string(&characteristic_uuid, uuid_buffer);
      APP_LOG(APP_LOG_LEVEL_INFO, "-- Found %u: %s (0x%08x)", node_num,
              uuid_buffer, characteristics[c]);

      // Check if all characteristics are found
      if (node_ctx.node_service_1_characteristic != BLE_CHARACTERISTIC_INVALID &&
          node_ctx.node_command_characteristic != BLE_CHARACTERISTIC_INVALID &&
          node_ctx.node_service_4_characteristic != BLE_CHARACTERISTIC_INVALID &&
          node_ctx.node_service_6_characteristic != BLE_CHARACTERISTIC_INVALID) {
        ready();
      }
    }
  }
}
/*
 * Remove matching GPT partition entries from the tables on `fd`.
 *
 * A map entry matches when it is a GPT partition and passes every
 * file-scope filter that is set: `entry` (partition index), `block`
 * (starting LBA), `size` (length in blocks) and `type` (partition-type
 * UUID; a nil `type` matches any type).  An entry is "removed" by
 * clearing its partition type to the nil UUID in both the primary and
 * the secondary table; both headers' CRCs are recomputed and all four
 * on-disk structures are rewritten after each removal.
 *
 * Requires both GPT headers and both tables to be present; otherwise the
 * user is told to run create/recover first.
 */
static void rem(int fd)
{
	uuid_t uuid;
	map_t *gpt, *tpg;	/* primary / secondary GPT header maps */
	map_t *tbl, *lbt;	/* primary / secondary GPT table maps */
	map_t *m;
	struct gpt_hdr *hdr;
	struct gpt_ent *ent;
	unsigned int i;

	gpt = map_find(MAP_TYPE_PRI_GPT_HDR);
	if (gpt == NULL) {
		warnx("%s: error: no primary GPT header; run create or recover",
		    device_name);
		return;
	}

	tpg = map_find(MAP_TYPE_SEC_GPT_HDR);
	if (tpg == NULL) {
		warnx("%s: error: no secondary GPT header; run recover",
		    device_name);
		return;
	}

	tbl = map_find(MAP_TYPE_PRI_GPT_TBL);
	lbt = map_find(MAP_TYPE_SEC_GPT_TBL);
	if (tbl == NULL || lbt == NULL) {
		warnx("%s: error: run recover -- trust me", device_name);
		return;
	}

	/* Remove all matching entries in the map. */
	for (m = map_first(); m != NULL; m = m->map_next) {
		if (m->map_type != MAP_TYPE_GPT_PART ||
		    m->map_index == NOENTRY)
			continue;
		/* Apply the index, start-block and size filters, if set. */
		if (entry != NOENTRY && entry != m->map_index)
			continue;
		if (block > 0 && block != m->map_start)
			continue;
		if (size > 0 && size != m->map_size)
			continue;

		i = m->map_index;

		/* Locate this entry in the primary table. */
		hdr = gpt->map_data;
		ent = (void*)((char*)tbl->map_data +
		    i * le32toh(hdr->hdr_entsz));
		/* The type is stored little-endian on disk; decode it. */
		le_uuid_dec(&ent->ent_type, &uuid);
		/* Apply the partition-type filter unless it is nil. */
		if (!uuid_is_nil(&type, NULL) &&
		    !uuid_equal(&type, &uuid, NULL))
			continue;

		/* Remove the primary entry by clearing the partition type. */
		uuid_create_nil(&ent->ent_type, NULL);
		/*
		 * Recompute the table CRC, then the header's own CRC.
		 * The self-CRC must be computed with its field zeroed.
		 */
		hdr->hdr_crc_table = htole32(crc32(tbl->map_data,
		    le32toh(hdr->hdr_entries) * le32toh(hdr->hdr_entsz)));
		hdr->hdr_crc_self = 0;
		hdr->hdr_crc_self = htole32(crc32(hdr,
		    le32toh(hdr->hdr_size)));
		gpt_write(fd, gpt);
		gpt_write(fd, tbl);

		hdr = tpg->map_data;
		ent = (void*)((char*)lbt->map_data +
		    i * le32toh(hdr->hdr_entsz));

		/* Remove the secondary entry the same way. */
		uuid_create_nil(&ent->ent_type, NULL);
		hdr->hdr_crc_table = htole32(crc32(lbt->map_data,
		    le32toh(hdr->hdr_entries) * le32toh(hdr->hdr_entsz)));
		hdr->hdr_crc_self = 0;
		hdr->hdr_crc_self = htole32(crc32(hdr,
		    le32toh(hdr->hdr_size)));
		gpt_write(fd, lbt);
		gpt_write(fd, tpg);

		printf("%ss%u removed\n", device_name, m->map_index);
	}
}
DECLARE_TEST( uuid, generate ) { int iloop; uuid_t uuid, uuid_ref; char name_str[] = "com.rampantpixels.foundation.uuid.000000"; uuid = uuid_null(); uuid_ref = uuid_null(); EXPECT_TRUE( uuid_is_null( uuid ) ); EXPECT_TRUE( uuid_is_null( uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid, uuid_ref ) ); //Random based uuid = uuid_generate_random(); uuid_ref = uuid_null(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_TRUE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_generate_random(); uuid_ref = uuid_generate_random(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_ref; EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid, uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); for( iloop = 0; iloop < 64000; ++iloop ) { uuid_ref = uuid; uuid = uuid_generate_random(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); } //Time based uuid = uuid_generate_time(); uuid_ref = uuid_null(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_TRUE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_generate_time(); uuid_ref = uuid_generate_time(); EXPECT_FALSE( 
uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_ref; EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid, uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); for( iloop = 0; iloop < 64000; ++iloop ) { uuid_ref = uuid; uuid = uuid_generate_time(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); } //Name based uuid = uuid_generate_name( UUID_DNS, "com.rampantpixels.foundation.uuid" ); uuid_ref = uuid_null(); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_TRUE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_generate_name( UUID_DNS, "com.rampantpixels.foundation.uuid.1" ); uuid_ref = uuid_generate_name( UUID_DNS, "com.rampantpixels.foundation.uuid.2" ); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); uuid = uuid_generate_name( UUID_DNS, "com.rampantpixels.foundation.uuid.2" ); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid, uuid_ref ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, 
uuid_ref ) ); for( iloop = 0; iloop < 10000; ++iloop ) { string_format_buffer( name_str, 40, "com.rampantpixels.foundation.uuid.%05u", iloop ); uuid_ref = uuid; uuid = uuid_generate_name( UUID_DNS, name_str ); EXPECT_FALSE( uuid_is_null( uuid ) ); EXPECT_FALSE( uuid_is_null( uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid, uuid_ref ) ); EXPECT_FALSE( uuid_equal( uuid_ref, uuid ) ); EXPECT_TRUE( uuid_equal( uuid, uuid ) ); EXPECT_TRUE( uuid_equal( uuid_ref, uuid_ref ) ); } return 0; }