/**
 * Command to register a real device in a group.
 * It must be called after group_begin() and before
 * group_create() or group_start().
 *
 * @param[in] cmd  The parsed command
 *
 * The parameters carried by the command are:
 * - UUID of the group to which the real device is to be added
 * - UUID of the real device in the VRT
 * - UUID of the real device in the NBD
 * - Whether the disk is local or not
 * - Whether the disk is UP or not
 * - Whether the device properties must be loaded from disk
 *
 * @return EXA_SUCCESS on success, negative error code on failure.
 */
static int vrt_cmd_group_add_rdev(const struct VrtGroupAddRdev *cmd)
{
    vrt_rdev_info_t *rdev_info;

    os_thread_mutex_lock(&pending_group_lock);

    if (!pending_group)
    {
        os_thread_mutex_unlock(&pending_group_lock);
        return -EPERM;
    }

    if (!uuid_is_equal(&cmd->group_uuid, &pending_group->uuid))
    {
        exalog_error("Failed to edit group " UUID_FMT ", group " UUID_FMT
                     " is already being edited.",
                     UUID_VAL(&cmd->group_uuid), UUID_VAL(&pending_group->uuid));
        os_thread_mutex_unlock(&pending_group_lock);
        return -EAGAIN;
    }

    if (pending_group->nb_rdevs == NBMAX_DISKS_PER_GROUP)
    {
        os_thread_mutex_unlock(&pending_group_lock);
        return -EAGAIN;
    }

    rdev_info = &pending_group->rdevs[pending_group->nb_rdevs];

    uuid_copy(&rdev_info->uuid, &cmd->uuid);
    uuid_copy(&rdev_info->nbd_uuid, &cmd->nbd_uuid);
    rdev_info->node_id = cmd->node_id;
    rdev_info->spof_id = cmd->spof_id;
    rdev_info->local = cmd->local;
    rdev_info->up = cmd->up;

    pending_group->nb_rdevs++;

    os_thread_mutex_unlock(&pending_group_lock);

    return EXA_SUCCESS;
}
int32_t ida_fd_to_loc(ida_local_t *local, loc_t *loc, fd_t *fd)
{
    int32_t error;

    error = inode_path(fd->inode, NULL, (char **)&loc->path);
    if (unlikely(error < 0))
    {
        return -error;
    }

    loc->name = strrchr(loc->path, '/');
    if (loc->name != NULL)
    {
        loc->name++;
    }

    error = ida_inode_assign(local, &loc->inode, fd->inode);
    if (unlikely(error != 0))
    {
        goto failed;
    }

    if (!uuid_is_null(loc->inode->gfid))
    {
        uuid_copy(loc->gfid, loc->inode->gfid);
    }

    loc->parent = inode_parent(loc->inode, 0, NULL);
    if (loc->parent != NULL)
    {
        if (!uuid_is_null(loc->parent->gfid))
        {
            uuid_copy(loc->pargfid, loc->parent->gfid);
        }
    }

    return 0;

failed:
    GF_FREE((char *)loc->path);

    return ENOMEM;
}
static int newfs_create(const char *path, mode_t mode, struct fuse_file_info *fi)
{
    char message[1024];
    snprintf(message, sizeof(message), "newfs_create: path = %s\n", path);
    write_log_fuse(message);

    /* split the path up into its parts */
    char tokens[1024][NAME_MAX];
    int count;
    tokenize_path(tokens, path, &count);

    /* traverse the file system looking for the directory above our target file */
    fcb parent;
    uuid_t parent_key;
    int rec = get_fcb_from_tokens(&parent, &parent_key, tokens, count - 1);
    if (rec != 0) {
        if (rec == -ENOENT)
            write_log_fuse("newfs_create: failure ENOENT\n");
        if (rec == -ENOTDIR)
            write_log_fuse("newfs_create: failure ENOTDIR\n");
        return rec;
    }
    if (!parent.dir) {
        write_log_fuse("newfs_create: failure ENOTDIR\n");
        return -ENOTDIR;
    }

    /* get the directory data and look for the target */
    dir_entry *dirs = malloc(sizeof(dir_entry) * parent.num_files);
    fetch_dirdata(dirs, parent.datakey, parent.num_files);
    int found = 0, i = 0;
    for (i = 0; i != parent.num_files; i++) {
        if (strcmp(dirs[i].name, tokens[count - 1]) == 0) {
            found++;
        }
    }
    if (found) {
        write_log_fuse("newfs_create: failure EEXIST\n");
        free(dirs);
        return -EEXIST;
    }

    /* if all is well, create a new file and store the updated fcb */
    uuid_t child_key;
    uuid_generate(child_key);
    init_file_fcb(child_key);

    parent.num_files++;
    dirs = realloc(dirs, sizeof(dir_entry) * parent.num_files);
    strcpy(dirs[parent.num_files - 1].name, tokens[count - 1]);
    uuid_copy(dirs[parent.num_files - 1].key, child_key);
    store_dirdata(dirs, parent.datakey, parent.num_files);
    free(dirs);

    parent.size = sizeof(dir_entry) * parent.num_files;
    store_fcb(parent, parent_key);

    write_log_fuse("newfs_create: success\n");
    return 0;
}
static void bearerbox_to_sql(void *arg)
{
    Boxc *conn = (Boxc *)arg;
    Msg *msg, *mack;

    while (sqlbox_status == SQL_RUNNING && conn->alive) {
        msg = read_from_box(conn->bearerbox_connection, conn);
        if (msg == NULL) {    /* garbage/connection lost */
            /* tell sqlbox to die */
            conn->alive = 0;
            sqlbox_status = SQL_SHUTDOWN;
            debug("sqlbox", 0, "bearerbox_to_sql: connection to bearerbox died.");
            break;
        }
        if (msg_type(msg) == heartbeat) {
            /* todo */
            debug("sqlbox", 0, "bearerbox_to_sql: catch an heartbeat - we are alive");
            msg_destroy(msg);
            continue;
        }
        /* if bearerbox asked us to shut down, obey */
        if (msg_type(msg) == admin && msg->admin.command == cmd_shutdown) {
            /* tell sqlbox to die */
            conn->alive = 0;
            sqlbox_status = SQL_SHUTDOWN;
            debug("sqlbox", 0, "bearerbox_to_sql: Bearerbox told us to shutdown.");
            break;
        }
        if (msg_type(msg) == sms) {
            if (msg->sms.sms_type != report_mo) {
                if (save_mo) {
                    gw_sql_save_msg(msg, octstr_imm("MO"));
                }
            } else {
                if (save_dlr) {
                    gw_sql_save_msg(msg, octstr_imm("DLR"));
                }
            }
            /* create ack message */
            mack = msg_create(ack);
            mack->ack.nack = ack_success;
            mack->ack.time = msg->sms.time;
            uuid_copy(mack->ack.id, msg->sms.id);
            send_msg(conn->bearerbox_connection, conn, mack);
            msg_destroy(mack);
        }
        msg_destroy(msg);
    }
}
static int cont_query_bcast(crt_context_t ctx, struct cont *cont,
                            const uuid_t pool_hdl, const uuid_t cont_hdl,
                            struct cont_query_out *query_out)
{
    struct cont_tgt_query_in  *in;
    struct cont_tgt_query_out *out;
    crt_rpc_t                 *rpc;
    int                        rc;

    D_DEBUG(DF_DSMS,
            DF_CONT": bcasting pool_hdl="DF_UUID" cont_hdl="DF_UUID"\n",
            DP_CONT(cont->c_svc->cs_pool_uuid, cont->c_uuid),
            DP_UUID(pool_hdl), DP_UUID(cont_hdl));

    rc = ds_cont_bcast_create(ctx, cont->c_svc, CONT_TGT_QUERY, &rpc);
    if (rc != 0)
        D_GOTO(out, rc);

    in = crt_req_get(rpc);
    uuid_copy(in->tqi_pool_uuid, pool_hdl);
    uuid_copy(in->tqi_cont_uuid, cont->c_uuid);
    out = crt_reply_get(rpc);
    out->tqo_min_purged_epoch = DAOS_EPOCH_MAX;

    rc = dss_rpc_send(rpc);
    if (rc != 0)
        D_GOTO(out_rpc, rc);

    out = crt_reply_get(rpc);
    rc = out->tqo_rc;
    if (rc != 0) {
        D_DEBUG(DF_DSMS, DF_CONT": failed to query %d targets\n",
                DP_CONT(cont->c_svc->cs_pool_uuid, cont->c_uuid), rc);
        D_GOTO(out_rpc, rc = -DER_IO);
    }

out_rpc:
    crt_req_decref(rpc);
out:
    return rc;
}
static int cont_svc_init(struct cont_svc *svc, const uuid_t pool_uuid,
                         uint64_t id, struct ds_rsvc *rsvc)
{
    int rc;

    uuid_copy(svc->cs_pool_uuid, pool_uuid);
    svc->cs_id = id;
    svc->cs_rsvc = rsvc;

    rc = ABT_rwlock_create(&svc->cs_lock);
    if (rc != ABT_SUCCESS) {
        D_ERROR("failed to create cs_lock: %d\n", rc);
        D_GOTO(err, rc = dss_abterr2der(rc));
    }

    /* cs_root */
    rc = rdb_path_init(&svc->cs_root);
    if (rc != 0)
        D_GOTO(err_lock, rc);
    rc = rdb_path_push(&svc->cs_root, &rdb_path_root_key);
    if (rc != 0)
        D_GOTO(err_root, rc);

    /* cs_conts */
    rc = rdb_path_clone(&svc->cs_root, &svc->cs_conts);
    if (rc != 0)
        D_GOTO(err_root, rc);
    rc = rdb_path_push(&svc->cs_conts, &ds_cont_prop_conts);
    if (rc != 0)
        D_GOTO(err_conts, rc);

    /* cs_hdls */
    rc = rdb_path_clone(&svc->cs_root, &svc->cs_hdls);
    if (rc != 0)
        D_GOTO(err_conts, rc);
    rc = rdb_path_push(&svc->cs_hdls, &ds_cont_prop_cont_handles);
    if (rc != 0)
        D_GOTO(err_hdls, rc);

    return 0;

err_hdls:
    rdb_path_fini(&svc->cs_hdls);
err_conts:
    rdb_path_fini(&svc->cs_conts);
err_root:
    rdb_path_fini(&svc->cs_root);
err_lock:
    ABT_rwlock_free(&svc->cs_lock);
err:
    return rc;
}
void cl_uuid_copy(cl_uuid_t *dst, cl_uuid_t *src)
{
    if (dst == NULL || src == NULL) {
        cl_log(LOG_ERR, "cl_uuid_copy: "
               "wrong argument %s is NULL",
               dst == NULL ? "dst" : "src");
        assert(0);
    }

    uuid_copy(dst->uuid, src->uuid);
}
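/*
 * Illustrative usage sketch, not from the original source: like libuuid's
 * uuid_copy(), cl_uuid_copy() takes the destination first and the source
 * second. The identifiers below (cl_uuid_copy_example, src_id, dst_id) are
 * made up for the example.
 */
void cl_uuid_copy_example(void)
{
    cl_uuid_t src_id, dst_id;

    uuid_generate(src_id.uuid);         /* fill the wrapped libuuid buffer */
    cl_uuid_copy(&dst_id, &src_id);     /* destination first, source second */
    assert(uuid_compare(dst_id.uuid, src_id.uuid) == 0);
}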
static void fill_inv_item_from_opensim(inventory_item *citem, os_inv_item &item)
{
    uuid_copy(citem->item_id, item.item_id);
    uuid_copy(citem->owner_id, item.owner_id);
    citem->inv_type = item.inv_type;
    uuid_copy(citem->folder_id, item.folder_id);
    uuid_copy(citem->creator_as_uuid, item.creator_as_uuid);
    citem->perms.next = item.next_perms;
    citem->perms.current = item.current_perms;
    citem->perms.base = item.base_perms;
    citem->perms.everyone = item.everyone_perms;
    citem->perms.group = item.group_perms;
    citem->asset_type = item.asset_type;
    uuid_copy(citem->asset_id, item.asset_id);
    uuid_copy(citem->group_id, item.group_id);
    citem->group_owned = item.group_owned;
    citem->sale_price = item.sale_price;
    citem->sale_type = item.sale_type;
    citem->flags = item.flags;
    citem->creation_date = item.creation_date;
    xmlFree(item.name);
    xmlFree(item.description);
    xmlFree(item.creator_id);
}
struct block *new_leaf(struct mtree *mt)
{
    struct block *b = zero_blk(mt->dev, mt->next_block++);
    struct leaf *leaf = b->buf;

    uuid_copy(leaf->uuid, mt->uuid);
    leaf->blknum = b->blknum;
    leaf->seqnum = mt->seqnum++;
    leaf->isLeaf = true;
    leaf->num_recs = 0;
    leaf->end = BLOCK_SIZE - sizeof(*leaf);
    return b;
}
struct block *new_branch(struct mtree *mt, u64 blknum)
{
    struct block *b = zero_blk(mt->dev, mt->next_block++);
    struct branch *branch = b->buf;

    uuid_copy(branch->uuid, mt->uuid);
    branch->blknum = b->blknum;
    branch->seqnum = mt->seqnum++;
    branch->isLeaf = FALSE;
    branch->num_twigs = 0;
    branch->twig[0].blknum = blknum;
    return b;
}
static int store_spool_save_ack(Msg *msg, ack_status_t status)
{
    int ret;
    Msg *nack = msg_create(ack);

    nack->ack.nack = status;
    uuid_copy(nack->ack.id, msg->sms.id);
    nack->ack.time = msg->sms.time;
    ret = store_spool_save(nack);
    msg_destroy(nack);

    return ret;
}
struct pg_cache_page_index *create_page_index(uuid_t *id)
{
    struct pg_cache_page_index *page_index;

    page_index = mallocz(sizeof(*page_index));
    page_index->JudyL_array = (Pvoid_t) NULL;
    uuid_copy(page_index->id, *id);
    assert(0 == uv_rwlock_init(&page_index->lock));
    page_index->oldest_time = INVALID_TIME;
    page_index->latest_time = INVALID_TIME;

    return page_index;
}
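/*
 * Minimal usage sketch (illustrative, not from the original source): the
 * caller owns the uuid_t and passes it by address; create_page_index() copies
 * the 16 bytes into the new index, so the caller's buffer may go out of scope
 * afterwards. The function name page_index_usage_example() is made up.
 */
void page_index_usage_example(void)
{
    uuid_t metric_id;
    struct pg_cache_page_index *index;

    uuid_generate(metric_id);
    index = create_page_index(&metric_id);
    /* index->id now holds its own copy of metric_id */
    (void) index;
}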
static int cont_destroy_bcast(crt_context_t ctx, struct cont_svc *svc,
                              const uuid_t cont_uuid)
{
    struct cont_tgt_destroy_in  *in;
    struct cont_tgt_destroy_out *out;
    crt_rpc_t                   *rpc;
    int                          rc;

    D_DEBUG(DF_DSMS, DF_CONT": bcasting\n",
            DP_CONT(svc->cs_pool_uuid, cont_uuid));

    rc = ds_cont_bcast_create(ctx, svc, CONT_TGT_DESTROY, &rpc);
    if (rc != 0)
        D_GOTO(out, rc);

    in = crt_req_get(rpc);
    uuid_copy(in->tdi_pool_uuid, svc->cs_pool_uuid);
    uuid_copy(in->tdi_uuid, cont_uuid);

    rc = dss_rpc_send(rpc);
    if (rc != 0)
        D_GOTO(out_rpc, rc);

    out = crt_reply_get(rpc);
    rc = out->tdo_rc;
    if (rc != 0) {
        D_ERROR(DF_CONT": failed to destroy %d targets\n",
                DP_CONT(svc->cs_pool_uuid, cont_uuid), rc);
        rc = -DER_IO;
    }

out_rpc:
    crt_req_decref(rpc);
out:
    D_DEBUG(DF_DSMS, DF_CONT": bcasted: %d\n",
            DP_CONT(svc->cs_pool_uuid, cont_uuid), rc);
    return rc;
}
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
                            struct xfs_dinode *to)
{
    to->di_magic = cpu_to_be16(from->di_magic);
    to->di_mode = cpu_to_be16(from->di_mode);
    to->di_version = from->di_version;
    to->di_format = from->di_format;
    to->di_onlink = 0;
    to->di_uid = cpu_to_be32(from->di_uid);
    to->di_gid = cpu_to_be32(from->di_gid);
    to->di_nlink = cpu_to_be32(from->di_nlink);
    to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
    to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
    memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

    to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
    to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
    to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
    to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
    to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
    to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

    to->di_size = cpu_to_be64(from->di_size);
    to->di_nblocks = cpu_to_be64(from->di_nblocks);
    to->di_extsize = cpu_to_be32(from->di_extsize);
    to->di_nextents = cpu_to_be32(from->di_nextents);
    to->di_anextents = cpu_to_be16(from->di_anextents);
    to->di_forkoff = from->di_forkoff;
    to->di_aformat = from->di_aformat;
    to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
    to->di_dmstate = cpu_to_be16(from->di_dmstate);
    to->di_flags = cpu_to_be16(from->di_flags);
    to->di_gen = cpu_to_be32(from->di_gen);

    if (from->di_version == 3) {
        to->di_changecount = cpu_to_be64(from->di_changecount);
        to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
        to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
        to->di_flags2 = cpu_to_be64(from->di_flags2);
        to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
        to->di_ino = cpu_to_be64(from->di_ino);
        to->di_lsn = cpu_to_be64(from->di_lsn);
        memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
        uuid_copy(&to->di_uuid, &from->di_uuid);
        to->di_flushiter = 0;
    } else {
        to->di_flushiter = cpu_to_be16(from->di_flushiter);
    }
}
int main()
{
    uuid_t id, id2;
    /* uuid_unparse() writes 36 characters plus a terminating NUL,
     * so the output buffers must be at least 37 bytes */
    char out[37];

    uuid_generate(id);
    uuid_unparse(id, out);
    debug(DEBUG_TEST, "The object id is:%s", out);

    uuid_copy(id2, id);

    char out2[37];
    uuid_unparse(id2, out2);
    debug(DEBUG_TEST, "The object id is:%s", out2);

    print(id);
    return 0;
}
void sendMessage(uuid_t to_node, struct Message message)
{
    struct element *e = malloc(sizeof(struct element));

    e->next = NULL;
    uuid_copy(e->nodeID, to_node);
    e->message = message;

    if (root == NULL) {
        last = root = e;
    } else {
        last->next = e;
        last = e;
    }
}
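/*
 * Illustrative sketch only (not part of the original source): one way to
 * drain the singly linked queue built by sendMessage(), matching entries
 * against a node id with uuid_compare(). The function name popMessagesFor()
 * and the handle_message() callback are hypothetical; root and last are the
 * same globals used above.
 */
void popMessagesFor(uuid_t node, void (*handle_message)(struct Message *))
{
    struct element *prev = NULL;
    struct element *e = root;

    while (e != NULL) {
        struct element *next = e->next;

        if (uuid_compare(e->nodeID, node) == 0) {
            handle_message(&e->message);
            /* unlink e, keeping root and last consistent */
            if (prev == NULL)
                root = next;
            else
                prev->next = next;
            if (e == last)
                last = prev;
            free(e);
        } else {
            prev = e;
        }
        e = next;
    }
}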
static int cont_close(struct rdb_tx *tx, struct ds_pool_hdl *pool_hdl,
                      struct cont *cont, crt_rpc_t *rpc)
{
    struct cont_close_in       *in = crt_req_get(rpc);
    daos_iov_t                  key;
    daos_iov_t                  value;
    struct container_hdl        chdl;
    struct cont_tgt_close_rec   rec;
    int                         rc;

    D_DEBUG(DF_DSMS, DF_CONT": processing rpc %p: hdl="DF_UUID"\n",
            DP_CONT(pool_hdl->sph_pool->sp_uuid, in->cci_op.ci_uuid), rpc,
            DP_UUID(in->cci_op.ci_hdl));

    /* See if this container handle is already closed. */
    daos_iov_set(&key, in->cci_op.ci_hdl, sizeof(uuid_t));
    daos_iov_set(&value, &chdl, sizeof(chdl));
    rc = rdb_tx_lookup(tx, &cont->c_svc->cs_hdls, &key, &value);
    if (rc != 0) {
        if (rc == -DER_NONEXIST) {
            D_DEBUG(DF_DSMS, DF_CONT": already closed: "DF_UUID"\n",
                    DP_CONT(cont->c_svc->cs_pool->sp_uuid, cont->c_uuid),
                    DP_UUID(in->cci_op.ci_hdl));
            rc = 0;
        }
        D_GOTO(out, rc);
    }

    uuid_copy(rec.tcr_hdl, in->cci_op.ci_hdl);
    rec.tcr_hce = chdl.ch_hce;

    D_DEBUG(DF_DSMS, DF_CONT": closing: hdl="DF_UUID" hce="DF_U64"\n",
            DP_CONT(cont->c_svc->cs_pool_uuid, in->cci_op.ci_uuid),
            DP_UUID(rec.tcr_hdl), rec.tcr_hce);

    rc = cont_close_bcast(rpc->cr_ctx, cont->c_svc, &rec, 1 /* nrecs */);
    if (rc != 0)
        D_GOTO(out, rc);

    rc = cont_close_one_hdl(tx, cont->c_svc, rpc->cr_ctx, rec.tcr_hdl);

out:
    D_DEBUG(DF_DSMS, DF_CONT": replying rpc %p: %d\n",
            DP_CONT(pool_hdl->sph_pool->sp_uuid, in->cci_op.ci_uuid), rpc, rc);
    return rc;
}
void smtpdumper_calldetection(void *dataptr)
{
    BLOCK_META_DATA *mdata = NULL;
    smtpcapture *smtpcapturedata = (smtpcapture *)dataptr;

    //printf("SMTPDUMP smtpdumper_calldetection enter\n");
    if (!dataptr) {
        //printf("SMTPDUMP dataptr is NULL!\n");
        return;
    }

    if (smtpcapturedata->clientdata) {
        // printf("SMTPDUMP Calling sendData() with the following data (%d bytes):\n\n",
        //        ((smtpcapture *)(dataptr))->storedsize);
#ifdef DISPLAY_DEMO_OUTPUT
        prettyprint(smtpcapturedata->clientdata, smtpcapturedata->storedsize);
        printf("\n\n");
#endif
        mdata = calloc(1, sizeof(*mdata));
        if (mdata == NULL)
            return;

        // Fill in the required fields
        mdata->timestamp = (unsigned int)time(NULL);
        mdata->data = smtpcapturedata->clientdata;
        mdata->size = smtpcapturedata->storedsize;
        // mdata->src_ip = 0x01010101;
        // mdata->dst_ip = 0x02020202;
        mdata->ip_proto = 6;
        mdata->src_port = 25;
        mdata->dst_port = 8000;
        uuid_copy(mdata->datatype, MAIL_CAPTURE);

        rzb_collection.sendData(mdata);
    } else {
        //printf("SMTPDUMP dataptr->clientdata is NULL!\n");
    }

    //printf("SMTPDUMP Freeing session data\n");
    // Data is freed by sendData; we just need to clear out the rest of the structure.
    // Setting clientdata to NULL avoids a double free.
    smtpcapturedata->clientdata = NULL;
    smtpdumper_freedata(smtpcapturedata);
}
/* Lock and read metadata at specified position */
int vmfs_metadata_lock(vmfs_fs_t *fs, off_t pos, u_char *buf, size_t buf_len,
                       vmfs_metadata_hdr_t *mdh)
{
    /* Acquire heartbeat */
    if (vmfs_heartbeat_acquire(fs) == -1)
        return(-1);

    /* Reserve volume */
    if (vmfs_device_reserve(fs->dev, pos) == -1) {
        fprintf(stderr, "VMFS: unable to reserve volume.\n");
        goto err_reserve;
    }

    /* Read the complete metadata for the caller */
    if (vmfs_device_read(fs->dev, pos, buf, buf_len) != buf_len) {
        fprintf(stderr, "VMFS: unable to read metadata.\n");
        goto err_io;
    }

    vmfs_metadata_hdr_read(mdh, buf);

    if (mdh->hb_lock != 0)
        goto err_io;

    /* Update metadata information */
    mdh->obj_seq++;
    mdh->hb_lock = 1;
    mdh->hb_pos = fs->hb.pos;
    mdh->hb_seq = fs->hb_seq;
    uuid_copy(mdh->hb_uuid, fs->hb.uuid);
    vmfs_metadata_hdr_write(mdh, buf);

    /* Rewrite the metadata header only */
    if (vmfs_device_write(fs->dev, pos, buf, VMFS_METADATA_HDR_SIZE) !=
        VMFS_METADATA_HDR_SIZE) {
        fprintf(stderr, "VMFS: unable to write metadata header.\n");
        goto err_io;
    }

    vmfs_device_release(fs->dev, pos);
    return(0);

err_io:
    vmfs_device_release(fs->dev, pos);
err_reserve:
    vmfs_heartbeat_release(fs);
    return(-1);
}
static void who_am_i(kbicho_s *b)
{
    bicho_msg_s m;
    int rc;

    m.m_method = BICHO_WHO_AM_I;
    rc = call_tau(b->kb_key, &m);
    if (rc) {
        weprintf("who_am_i failed %d", rc);
        return;
    }
    strlcpy(b->kb_name, m.bi_name, TAU_NAME);
    uuid_copy(b->kb_id, m.bi_id);
    PRs(b->kb_name);
}
static int repep_add(struct epbase *ep, struct tgtd *tg, char *ubuf)
{
    struct rtentry *rt = rt_cur(ubuf);

    if (uuid_compare(rt->uuid, get_rep_tgtd(tg)->uuid))
        uuid_copy(get_rep_tgtd(tg)->uuid, rt->uuid);
    DEBUG_OFF("ep %d recv req %10.10s from socket %d", ep->eid, ubuf, tg->fd);
    mutex_lock(&ep->lock);
    skbuf_head_in(&ep->rcv, ubuf);
    BUG_ON(ep->rcv.waiters < 0);
    if (ep->rcv.waiters)
        condition_broadcast(&ep->cond);
    mutex_unlock(&ep->lock);
    return 0;
}
static UUIDJobNode *AllocateUUIDJobNode(uuid_t id, ServiceJob *job_p)
{
    UUIDJobNode *node_p = (UUIDJobNode *) AllocMemory(sizeof(UUIDJobNode));

    if (node_p)
    {
        node_p -> ujn_node.ln_prev_p = NULL;
        node_p -> ujn_node.ln_next_p = NULL;
        uuid_copy(node_p -> ujn_id, id);
        node_p -> ujn_job_p = job_p;
    }

    return node_p;
}
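/*
 * Illustrative sketch only: given a chain of UUIDJobNodes linked through
 * ujn_node.ln_next_p, find the ServiceJob registered under a given id. The
 * cast of ln_next_p back to UUIDJobNode * and the function name
 * FindServiceJobById() are assumptions made for this example, not part of
 * the original API.
 */
static ServiceJob *FindServiceJobById(UUIDJobNode *head_p, const uuid_t id)
{
    UUIDJobNode *node_p = head_p;

    while (node_p)
    {
        if (uuid_compare(node_p -> ujn_id, id) == 0)
        {
            return node_p -> ujn_job_p;
        }

        node_p = (UUIDJobNode *) (node_p -> ujn_node.ln_next_p);
    }

    return NULL;
}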
int dump_bptree_sequential(bptree_session *bps, uuid_t failed_node)
{
    int ksize, vsize, rv;
    unsigned char k[BPTREE_MAX_VALUE_SIZE], v[BPTREE_MAX_VALUE_SIZE];
    char uuid_out[40];
    char s1[512];
    char path[128];

    sprintf(path, "/tmp/%d.out", bps->bpt_id);
    FILE *fp = fopen(path, "w");

    //printf("Dumping bpt_id %d:\n", bps->bpt_id);
    //fflush(stdout);
    if (!uuid_is_null(failed_node))
    {
        bps->cursor_node = read_node(bps, failed_node, &rv);
        if (rv == BPTREE_OP_TAPIOCA_NOT_READY)
        {
            fclose(fp);
            return rv;
        }
        bps->cursor_pos = 0;
        rv = bptree_index_next(bps, k, &ksize, v, &vsize);
        if (rv != BPTREE_OP_KEY_FOUND)
        {
            fclose(fp);
            return rv;
        }
    }
    else
    {
        bptree_index_first(bps, k, &ksize, v, &vsize);
    }

    for (rv = 0;;)
    {
        bptree_key_value_to_string(bps, k, v, ksize, vsize, s1);
        uuid_unparse(bpnode_get_id(bps->cursor_node), uuid_out);
        fprintf(fp, "Node->Cell %s -> %d \t Key: %s \n",
                uuid_out, bps->cursor_pos, s1);
        rv = bptree_index_next(bps, k, &ksize, v, &vsize);
        if (rv != BPTREE_OP_KEY_FOUND)
            break;
    }

    if (rv == BPTREE_OP_EOF)
    {
        fprintf(fp, "\n\n");
        rv = BPTREE_OP_SUCCESS;
    }
    else if (rv == BPTREE_OP_TAPIOCA_NOT_READY)
    {
        uuid_copy(failed_node, bpnode_get_id(bps->cursor_node));
        //uuid_copy(failed_node, bps->cursor_node->self_key);
    }

    fflush(fp);
    fclose(fp);
    return rv;
}
void inspectFile(char *fileName, uuid_t uuid)
{
    void *threadData = NULL;

    if (threadInit != NULL) {
        if (!threadInit(&threadData)) {
            rzb_log(LOG_ERR, "Couldn't run nugget inspection threadInit.");
            exit(-1);
        } else {
            rzb_log(LOG_DEBUG, "Thread init for nugget complete.");
        }
    }

    sleep(sleepTime);

    struct EventId *eventId;
    eventId = calloc(1, sizeof(struct EventId));
    struct Block *block = Block_Create();
    struct List *list = NTLVList_Create();
    struct stat st;
    stat(fileName, &st);

    //BlockPool_Init();
    block->pId->iLength = st.st_size;

    if (!Transfer_Prepare_File(block, fileName, false)) {
        rzb_log(LOG_ERR, "Trouble preparing file transfer - '%s'", fileName);
        Block_Destroy(block);
        free(eventId);
        List_Destroy(list);
        return;
    }

    Hash_Update(block->pId->pHash, block->data.pointer, block->pId->iLength);
    Hash_Finalize(block->pId->pHash);
    uuid_copy(block->pId->uuidDataType, uuid);

    struct ContextList *current = NULL;
    while (contextList != NULL) {
        current = contextList;
        uint8_t ret = function(block, eventId, list, threadData);
        if (ret >= 0) {
            rzb_log(LOG_NOTICE, "Returned with: %u", ret);
        }
        if (current == contextList)
            break;
    }

    List_Destroy(list);
    /* Don't need to free/destroy as it's done with the judgment.
     * Was needed previously because of cloning - cloning removed */
    //Block_Destroy(block);
    //free(eventId);

    if (threadCleanup != NULL) {
        threadCleanup(threadData);
    }
}
void initGPTHeader(gpt_header_t *gpt, uint64_t diskSize, gpt_partition_t *partitions)
{
    // https://en.wikipedia.org/wiki/GUID_Partition_Table
    // little endian format

    // GPT signature ("EFI PART")
    gpt->signature = 0x5452415020494645;
    // version 1.0
    gpt->revision = 0x00010000;
    // provisional header size (recomputed below before the CRC is taken)
    gpt->header_size = UINT32_C(512);
    // zeroed reserved field
    gpt->reserved = UINT32_C(0);
    // primary header LBA
    gpt->current_lba = UINT64_C(1);
    // backup header LBA
    gpt->backup_lba = UINT64_C(diskSize - 1);
    // number of partitions
    gpt->number_partitions = UINT32_C(2);
    // first/last usable LBAs
    gpt->first_usable_lba = partitions[0].first_lba;
    gpt->last_usable_lba = partitions[1].last_lba;
    // first partition entry address
    gpt->starting_lba = UINT64_C(2);
    // init CRCs to 0
    gpt->crc_header = UINT32_C(0);
    gpt->crc_partition = UINT32_C(0);

    // create a unique id for the disk
    uuid_t uuid;
    uuid_generate(uuid);
    uuid_copy(gpt->disk_guid, uuid);

    // size of each partition entry (128 bytes)
    gpt->size_partition_entries = UINT32_C(128);
    // zero the trailing bytes
    memset(gpt->reserved2, '\0', 420);

    // calculate the CRC32 of the partition entries
    gpt->crc_partition = crc32(0, (unsigned char *)partitions,
                               gpt->number_partitions * gpt->size_partition_entries);
    // calculate the CRC32 of this header (crc_header must still be 0 here)
    gpt->header_size = sizeof(*gpt);
    gpt->crc_header = crc32(0, (unsigned char *)gpt, gpt->header_size);
}
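/*
 * Illustrative sketch only: per the GPT layout, crc_header is computed over
 * header_size bytes of the header with the crc_header field itself zeroed,
 * which is exactly what initGPTHeader() does above. The helper name
 * gptHeaderCRCOk() is hypothetical; it only reuses the gpt_header_t type and
 * the crc32() routine already used above, and assumes header_size does not
 * exceed sizeof(gpt_header_t).
 */
int gptHeaderCRCOk(const gpt_header_t *gpt)
{
    gpt_header_t tmp = *gpt;
    uint32_t expected = tmp.crc_header;

    // hash with the CRC field cleared, over exactly header_size bytes
    tmp.crc_header = UINT32_C(0);
    return crc32(0, (unsigned char *)&tmp, tmp.header_size) == expected;
}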
int nilfs_cleaner_reload(struct nilfs_cleaner *cleaner, const char *conffile)
{
    struct nilfs_cleaner_request_with_path req;
    struct nilfs_cleaner_response res;
    size_t pathlen, reqsz;
    int bytes, ret = -1;

    if (cleaner->sendq < 0 || cleaner->recvq < 0) {
        errno = EBADF;
        goto out;
    }
    if (nilfs_cleaner_clear_queueu(cleaner) < 0)
        goto out;

    if (conffile) {
        if (myrealpath(conffile, req.pathname,
                       NILFS_CLEANER_MSG_MAX_PATH) == NULL)
            goto out;

        pathlen = strlen(req.pathname);
        req.hdr.argsize = pathlen + 1;
        reqsz = sizeof(req.hdr) + pathlen + 1;
    } else {
        req.hdr.argsize = 0;
        reqsz = sizeof(req.hdr);
    }
    req.hdr.cmd = NILFS_CLEANER_CMD_RELOAD;
    uuid_copy(req.hdr.client_uuid, cleaner->client_uuid);

    ret = mq_send(cleaner->sendq, (char *)&req, reqsz,
                  NILFS_CLEANER_PRIO_NORMAL);
    if (ret < 0)
        goto out;

    bytes = mq_receive(cleaner->recvq, (char *)&res, sizeof(res), NULL);
    if (bytes < sizeof(res)) {
        if (bytes >= 0)
            errno = EIO;
        ret = -1;
        goto out;
    }
    if (res.result == NILFS_CLEANER_RSP_NACK) {
        ret = -1;
        errno = res.err;
    }
out:
    return ret;
}
static void read_uuid(BANG_peer *self, uuid_t uuid)
{
    uuid_t *uuid_ptr = (uuid_t *) read_message(self, sizeof(uuid_t));

    if (uuid_ptr) {
        uuid_copy(uuid, *uuid_ptr);
#ifdef BDEBUG_1
        char unparsed[37];
        uuid_unparse(uuid, unparsed);
        fprintf(stderr, "READ-THREAD:\tRead uuid %s.\n", unparsed);
#endif
        free(uuid_ptr);
    } else {
        uuid_clear(uuid);
    }
}
assembly_volume_t *assembly_volume_alloc(const exa_uuid_t *uuid)
{
    assembly_volume_t *av;

    av = os_malloc(sizeof(assembly_volume_t));
    if (av == NULL)
        return NULL;

    uuid_copy(&av->uuid, uuid);
    av->slots = NULL;
    av->total_slots_count = 0;
    av->next = NULL;

    return av;
}
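/*
 * Illustrative counterpart, not taken from the original source: releasing a
 * volume allocated by assembly_volume_alloc(). It assumes an os_free()
 * matching os_malloc(), and that any slot array has already been released by
 * the caller; the function name assembly_volume_free_example() is made up.
 */
void assembly_volume_free_example(assembly_volume_t *av)
{
    if (av == NULL)
        return;

    /* caller is assumed to have freed av->slots already */
    os_free(av);
}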
void xfs_dinode_from_disk(xfs_icdinode_t *to, xfs_dinode_t *from)
{
    to->di_magic = be16_to_cpu(from->di_magic);
    to->di_mode = be16_to_cpu(from->di_mode);
    to->di_version = from->di_version;
    to->di_format = from->di_format;
    to->di_onlink = be16_to_cpu(from->di_onlink);
    to->di_uid = be32_to_cpu(from->di_uid);
    to->di_gid = be32_to_cpu(from->di_gid);
    to->di_nlink = be32_to_cpu(from->di_nlink);
    to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
    to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
    memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
    to->di_flushiter = be16_to_cpu(from->di_flushiter);

    to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
    to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
    to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
    to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
    to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
    to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);

    to->di_size = be64_to_cpu(from->di_size);
    to->di_nblocks = be64_to_cpu(from->di_nblocks);
    to->di_extsize = be32_to_cpu(from->di_extsize);
    to->di_nextents = be32_to_cpu(from->di_nextents);
    to->di_anextents = be16_to_cpu(from->di_anextents);
    to->di_forkoff = from->di_forkoff;
    to->di_aformat = from->di_aformat;
    to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
    to->di_dmstate = be16_to_cpu(from->di_dmstate);
    to->di_flags = be16_to_cpu(from->di_flags);
    to->di_gen = be32_to_cpu(from->di_gen);

    if (to->di_version == 3) {
        to->di_changecount = be64_to_cpu(from->di_changecount);
        to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
        to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
        to->di_flags2 = be64_to_cpu(from->di_flags2);
        to->di_ino = be64_to_cpu(from->di_ino);
        to->di_lsn = be64_to_cpu(from->di_lsn);
        memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
        uuid_copy(&to->di_uuid, &from->di_uuid);
    }
}
int clientd_stat_get(ExamsgHandle h, const struct nbd_stats_request *request,
                     struct nbd_stats_reply *reply)
{
    nbd_request_t req;

    req.event = NBDCMD_STATS;
    /* FIXME the only field that seems useful is 'reset'... */
    req.stats_reset = request->reset;
    strlcpy(req.node_name, request->node_name, sizeof(req.node_name));
    strlcpy(req.device_path, request->disk_path, sizeof(req.device_path));
    uuid_copy(&req.device_uuid, &request->device_uuid);

    return admwrk_daemon_query_nointr(h, EXAMSG_NBD_CLIENT_ID, EXAMSG_DAEMON_RQST,
                                      &req, sizeof(req), reply, sizeof(*reply));
}