/*
 * Reserve the physical pages covering [addr, addr + length).
 *
 * Performs two passes over the range while holding pmem_lock:
 *   pass 1 (check): fail with no side effects if any page in the range
 *                   is already marked in a region bitmap;
 *   pass 2 (mark):  set the bitmap bit for every covered page.
 *
 * addr must be PMEM_PAGESIZE-aligned (fatal() otherwise).  Pages that
 * fall outside every known pmem region are skipped silently in both
 * passes -- NOTE(review): confirm that is intended rather than an error.
 *
 * Returns true on success, false if any page was already reserved
 * (in which case no bitmap bit has been modified).
 */
bool pmem_reserve(phys_addr_t addr, size_t length)
{
	phys_addr_t top, cur;
	bool check_pass = true;

	if (ALIGN_RST(addr, PMEM_PAGESIZE) != 0)
		fatal("misaligned physical address!\n");

	/*
	 * Format fixes (assuming debug() is printf-like): "%zu" matches
	 * size_t (the old "%d" was undefined behavior), and "%p" strictly
	 * requires a void * argument.  The uintptr_t hop may truncate a
	 * >pointer-width phys_addr_t (e.g. PAE) -- debug output only.
	 */
	debug("try to reserve physical memory at %p (length: %zu bytes)\n",
	    (void *)(uintptr_t)addr, length);

	spl_lock(&pmem_lock);

next_pass:
	top = addr + length;
	cur = addr;
	while (top > cur) {
		pmem_region_t *reg = pmem_region_head;

		while (reg) {
			/* Walk every page of the range this region covers. */
			while (pmem_reg_contains(reg, cur)) {
				size_t idx = PMEM_TO_REGBIT(reg, cur);

				if (check_pass) {
					if (bmap_get(reg->bmap, idx)) {
						/* Nothing marked yet; safe to bail. */
						spl_unlock(&pmem_lock);
						debug("failed to reserve region, %p already reserved\n",
						    (void *)(uintptr_t)cur);
						return false;
					}
				} else {
					bmap_set(reg->bmap, idx, 1);
				}
				cur += PMEM_PAGESIZE;
				if (top <= cur)
					goto pass_ok;
			}
			reg = reg->next;
		}
		/* No region covers this page; skip it. */
		cur += PMEM_PAGESIZE;
	}

pass_ok:
	if (check_pass) {
		/* Whole range is free: repeat the walk, marking this time. */
		check_pass = false;
		goto next_pass;
	}
	spl_unlock(&pmem_lock);
	return true;
}
/*
 * Recursive branch-and-bound search for the shortest path extending
 * *path_r.  For each node already on the path, tries every outgoing
 * edge not yet marked in `bitmap`, recursing (up to _depth edges) and
 * pruning candidates whose weight already reaches the global best
 * (_found_min).  Previously solved sub-problems are looked up in /
 * added to a bitmap-keyed hash of optimal sub-paths.
 *
 * Ownership: on success (return 1) *path_r is replaced with a freshly
 * cloned best path and the incoming path is freed; on failure
 * (return 0) *path_r is left untouched.
 *
 * Mutates file-scope search state: _found_min, _stop_cnt, _update_path
 * and (under DEBUG) the _stat_* counters -- not thread-safe.
 *
 * NOTE(review): `j` is a signed int compared against the size_t
 * `num_p` (signed/unsigned comparison), and `idc` comes from alloca()
 * sized by the path length -- deep paths risk stack overflow.  Both
 * look pre-existing; confirm before changing.
 */
static int find_shortest_path (Bitmap *bitmap, GraphPath **path_r) /* current optimal path */
{
    GraphPath *path = *path_r;
    GraphPath *found_path = NULL;            /* best complete path found at this level */
    int cur_path_size = path->num_edges;     /* length to restore after each trial edge */
    int j,i;
    size_t num_p;
    int status = 1;                          /* pass flag: 1 = first pass, 0 = second */
    /* Two-pass mode: while the path is still short, first try edges to
     * unvisited nodes, then (second pass) edges to visited ones. */
    int two_pass_mode = (cur_path_size < num_nodes);
    /* Stack-allocated array of the path's nodes, in reverse order. */
    GraphNode **idc = alloca ( (path->num_edges + 1)*sizeof (GraphNode*) );
    path_transform_to_reverse_nodes_aray (path, &idc, &num_p);
_loop_enter:
    for (j = 0 ; j < num_p ;j++) {
        GraphNode *curnode = idc[j];
        for (i = 0; i < curnode->num_edges; i++) {
            GraphEdge *curedge = curnode->edges[i];
            GraphNode *node_to = curedge->node_ptr;
            /* Bound: extending past the current global best is pointless. */
            if (path->total_weight + curedge->weight >= _found_min){
#ifdef DEBUG
                _stat_cut_by_weight++;
#endif
                continue; //don't go this way
            }
            /* Pass filters (two-pass mode only): first pass skips nodes
             * already in the bitmap, second pass skips nodes not in it. */
            if (two_pass_mode && status && bmap_get_in (bitmap, node_to->arr_idx)){
                continue;
            }
            if (two_pass_mode && !status && !bmap_get_in (bitmap, node_to->arr_idx)){
                continue;
            }
            if (bmap_get (bitmap, curnode->arr_idx, node_to->arr_idx)){
#ifdef DEBUG
                _stat_cut_by_presence++;
#endif
                continue; //already passed by this edge
            }
            /* Global iteration budget; when exhausted, shrink the depth
             * threshold (_update_path) and abandon this level. */
            _stop_cnt++;
            if (_stop_cnt > MAXIMUM_SEARCH_ITERATION){
                //up until third level
                if (cur_path_size < _update_path){
                    _update_path -= 2;
                    // printf ("max path update: %d\n", _update_path);
                    _stop_cnt = 0;
                }
                goto _loop_exit;
            }
#ifdef DEBUG
            _stat_evaluated++;
#endif
            /* Trial: clone the edge bitmap, extend the path by curedge. */
            void *bitmap_n = bmap_clone (bitmap);
            // printf ("before(%d,%d): ",i,j);
            // path_print_short (path);
            path_add (path, curedge);
            bmap_set (bitmap_n, curnode->arr_idx, node_to->arr_idx);
            // printf ("after(%d,%d) : ",i,j);
            // path_print_short (path);
            /* Complete solution = every edge covered AND path valid. */
            int path_found = bmap_check_full (bitmap_n) && path_check_valid (path);
            GraphPath *hpath = NULL;
            if (path_found == 0){
                //try to check solution in hash table
                hpath = bmap_hash_path_search_optimal (bitmap_n);
                if (hpath == NULL){
                    /* Unknown sub-problem: recurse while within depth. */
                    if (path->num_edges <= _depth )
                        path_found = find_shortest_path (bitmap_n, &path);
                }else {
#ifdef DEBUG
                    _stat_found_hash++;
#endif
                    /* BMAP_HASH_PATH_EMPTY is a negative cache entry:
                     * this sub-problem is known to have no solution. */
                    if (hpath != BMAP_HASH_PATH_EMPTY ){
                        hpath = path_clone (hpath);
                        //rearrange path
                        /* Splice the cached optimal tail onto our prefix;
                         * the old path object is consumed here. */
                        ensure_order_paths (path, hpath, cur_path_size);
                        path_free (path);
                        path = hpath;
                        path_found = 1;
                    }else{
                        path_found = 0;
                    }
                }
            }
            if (path_found == 1 && (found_path == NULL || path->total_weight < found_path->total_weight)) {
                /* New best at this level (path_free(NULL) is a no-op). */
                path_free (found_path);
                found_path = path_clone (path);
                /* Memoize non-trivial solutions (not fetched from hash). */
                if (hpath == NULL && found_path->num_edges - cur_path_size > 1)
                    bmap_hash_path_add_optimal (bitmap_n, found_path);
            }else{
                /* Memoize the dead end so it is never re-explored. */
                if (hpath == NULL && cur_path_size < num_nodes + 2)
                    bmap_hash_path_add_optimal (bitmap_n, NULL);
            }
            /* Undo the trial extension and free the cloned bitmap. */
            path_shrink_to_size (path, cur_path_size);
            bmap_free (bitmap_n);
        }
    }
    /* Two-pass mode: restart the scan for the visited-node pass. */
    if(two_pass_mode && status && cur_path_size < num_nodes){
        status = 0;
        goto _loop_enter;
    }
_loop_exit:
    if (found_path != NULL){
        _update_path = found_path->num_edges;
        // printf ("FOUND: ");
        // path_print_short (found_path);
        path_free (path);
        /* Tighten the global bound and reset the iteration budget. */
        if (found_path->total_weight < _found_min){
            _stop_cnt = 0;
#ifdef DEBUG
            printf (" update max: %lu -> %lu (weight:%lu, presence:%lu, eval:%lu, hash:%lu)\n",
                _found_min, found_path->total_weight, _stat_cut_by_weight,
                _stat_cut_by_presence, _stat_evaluated, _stat_found_hash);
#endif
            _found_min = found_path->total_weight;
        }
        *path_r = found_path;
        return 1;
    }
    return 0;
}
/*
 * Remove replication of the given bmap range from the specified I/O
 * systems: each targeted replica in state REPL_QUEUED, REPL_SCHED or
 * VALID is transitioned to GARBAGE_QUEUED.  For directories, only the
 * inode replica table entry is removed (no bmaps are walked).
 *
 * @fgp:    FID+generation of the file to operate on.
 * @bmapno: first bmap index to process.
 * @nbmaps: in: number of bmaps requested ((sl_bmapno_t)-1 = wildcard to
 *          EOF); out: number of bmaps actually processed.
 * @iosv:   replicas (I/O systems) to remove.
 * @nios:   number of entries in iosv (1..SL_MAX_REPLICAS).
 *
 * Returns 0 or a negative error code; -PFLERR_WOULDBLOCK means the
 * per-call bmap quota was hit and the caller should continue from
 * bmapno + *nbmaps.
 */
int
mds_repl_delrq(const struct sl_fidgen *fgp, sl_bmapno_t bmapno,
    sl_bmapno_t *nbmaps, sl_replica_t *iosv, int nios)
{
	int tract[NBREPLST], rc, iosidx[SL_MAX_REPLICAS], flags;
	sl_bmapno_t nbmaps_processed = 0;
	struct slm_repl_valid replv;
	struct fidc_membh *f = NULL;
	struct bmap *b;

	if (nios < 1 || nios > SL_MAX_REPLICAS || *nbmaps == 0)
		return (-EINVAL);

	rc = slm_fcmh_get(fgp, &f);
	if (rc)
		return (-rc);

	/* Released by fcmh_op_done() at out. */
	FCMH_LOCK(f);
	/* Directories: drop the replica table entry itself. */
	if (fcmh_isdir(f))
		flags = IOSV_LOOKUPF_DEL;
	else
		flags = IOSV_LOOKUPF_LOOKUP;

	/* Find replica IOS indexes. */
	rc = -_mds_repl_iosv_lookup(current_vfsid, fcmh_2_inoh(f),
	    iosv, iosidx, nios, flags);
	/* For directories the DEL lookup above was the whole job. */
	if (fcmh_isdir(f) || rc)
		PFL_GOTOERR(out, rc);

	replv.nios = nios;
	replv.idx = iosidx;

	brepls_init(tract, -1);
	tract[BREPLST_REPL_QUEUED] = BREPLST_GARBAGE_QUEUED;
	tract[BREPLST_REPL_SCHED] = BREPLST_GARBAGE_QUEUED;
	tract[BREPLST_VALID] = BREPLST_GARBAGE_QUEUED;

	/* Wildcards shouldn't result in errors on zero-length files. */
	/*
	 * rc is pre-seeded here: if the loop below never runs (bmapno is
	 * past the last valid bmap) an explicit range is an error while a
	 * wildcard request returns the current rc (0).  Each iteration
	 * overwrites rc, so a non-empty walk reports its own result.
	 */
	if (*nbmaps != (sl_bmapno_t)-1)
		rc = -SLERR_BMAP_INVALID;

	/*
	 * The following loop will bail out on the very first error.
	 * However, its previous action, if any, has already taken
	 * effect.
	 */
	for (; *nbmaps && bmapno < fcmh_nvalidbmaps(f);
	    bmapno++, --*nbmaps, nbmaps_processed++) {
		/* Cap work per call; caller is expected to resume. */
		if (nbmaps_processed >= SLM_REPLRQ_NBMAPS_MAX)
			PFL_GOTOERR(out, rc = -PFLERR_WOULDBLOCK);

		rc = -bmap_get(f, bmapno, SL_WRITE, &b);
		if (rc)
			PFL_GOTOERR(out, rc);

		/*
		 * Before blindly doing the transition, we have to check
		 * to ensure this operation would retain at least one
		 * valid replica.
		 */
		replv.n = 0;
		mds_repl_bmap_walkcb(b, NULL, NULL, 0,
		    slm_repl_countvalid_cb, &replv);

		flags = 0;
		if (replv.n == 0)
			rc = -SLERR_LASTREPL;
		else {
			rc = _mds_repl_bmap_walk(b, tract, NULL, 0,
			    iosidx, nios, slm_repl_delrq_cb, &flags);
			psc_assert(!rc);
			/* Persist only if the walk changed on-disk state. */
			if (flags & FLAG_DIRTY)
				rc = mds_bmap_write_logrepls(b);
		}
		bmap_op_done_type(b, BMAP_OPCNT_LOOKUP);
		if (rc)
			PFL_GOTOERR(out, rc);
	}

 out:
	if (f)
		fcmh_op_done(f);
	/* Report progress even on error so the caller can resume. */
	*nbmaps = nbmaps_processed;
	return (rc);
}
/*
 * Handle a request to do replication from a client. May also
 * reinitialize some parameters of the replication, such as priority, if
 * the request already exists in the system.
 *
 * For each bmap in [bmapno, bmapno + *nbmaps) the targeted replicas in
 * state INVALID, GARBAGE_SCHED or GARBAGE_QUEUED are transitioned to
 * REPL_QUEUED; bmaps with no VALID replica at all are skipped.  For
 * directories, only the inode replica table is extended.
 *
 * @fgp:      FID+generation of the file to replicate.
 * @bmapno:   first bmap index to process.
 * @nbmaps:   in: number of bmaps requested ((sl_bmapno_t)-1 = wildcard
 *            to EOF); out: number of bmaps actually processed.
 * @iosv:     replicas (I/O systems) to replicate to.
 * @nios:     number of entries in iosv (1..SL_MAX_REPLICAS).
 * @sys_prio: system priority, or -1 to leave unset.
 * @usr_prio: user priority, or -1 to leave unset.
 *
 * Returns 0 or a negative error code; -PFLERR_WOULDBLOCK means the
 * per-call bmap quota was hit and the caller should continue from
 * bmapno + *nbmaps.
 */
int
mds_repl_addrq(const struct sl_fidgen *fgp, sl_bmapno_t bmapno,
    sl_bmapno_t *nbmaps, sl_replica_t *iosv, int nios, int sys_prio,
    int usr_prio)
{
	int tract[NBREPLST], ret_hasvalid[NBREPLST];
	int iosidx[SL_MAX_REPLICAS], rc, flags;
	sl_bmapno_t nbmaps_processed = 0;
	struct fidc_membh *f = NULL;
	struct bmap *b;

	/* Perform sanity checks on request. */
	if (nios < 1 || nios > SL_MAX_REPLICAS || *nbmaps == 0)
		return (-EINVAL);

	rc = slm_fcmh_get(fgp, &f);
	if (rc)
		return (-rc);

	if (!fcmh_isdir(f) && !fcmh_isreg(f))
		PFL_GOTOERR(out, rc = -PFLERR_NOTSUP);

	/* Lookup replica(s)' indexes in our replica table. */
	rc = -mds_repl_iosv_lookup_add(current_vfsid, fcmh_2_inoh(f),
	    iosv, iosidx, nios);
	if (rc)
		PFL_GOTOERR(out, rc);

	/*
	 * If we are modifying a directory, we are done as just the
	 * replica table needs to be updated.
	 */
	if (fcmh_isdir(f))
		PFL_GOTOERR(out, 0);

	/*
	 * Setup structure to ensure at least one VALID replica exists.
	 */
	brepls_init(ret_hasvalid, 0);
	ret_hasvalid[BREPLST_VALID] = 1;

	/*
	 * Setup transitions to enqueue a replication.
	 */
	brepls_init(tract, -1);
	tract[BREPLST_INVALID] = BREPLST_REPL_QUEUED;
	tract[BREPLST_GARBAGE_SCHED] = BREPLST_REPL_QUEUED;
	tract[BREPLST_GARBAGE_QUEUED] = BREPLST_REPL_QUEUED;

	/* Wildcards shouldn't result in errors on zero-length files. */
	/*
	 * rc is pre-seeded: if the loop below never runs, an explicit
	 * range is an error while a wildcard request returns the current
	 * rc (0).  A non-empty walk overwrites rc with its own result.
	 */
	if (*nbmaps != (sl_bmapno_t)-1)
		rc = -SLERR_BMAP_INVALID;

	for (; *nbmaps && bmapno < fcmh_nvalidbmaps(f);
	    bmapno++, --*nbmaps, nbmaps_processed++) {
		/* Cap work per call; caller is expected to resume. */
		if (nbmaps_processed >= SLM_REPLRQ_NBMAPS_MAX) {
			rc = -PFLERR_WOULDBLOCK;
			break;
		}

		rc = -bmap_get(f, bmapno, SL_WRITE, &b);
		if (rc)
			PFL_GOTOERR(out, rc);

		/*
		 * If no VALID replicas exist, the bmap must be
		 * uninitialized/all zeroes; skip it.
		 */
		if (mds_repl_bmap_walk_all(b, NULL, ret_hasvalid,
		    REPL_WALKF_SCIRCUIT) == 0) {
			bmap_op_done(b);
			continue;
		}

		/*
		 * We do not follow the standard "retifset" API here
		 * because we need to preserve DIRTY if it gets set
		 * instead of some other state getting returned.
		 */
		flags = 0;
		_mds_repl_bmap_walk(b, tract, NULL, 0, iosidx, nios,
		    slm_repl_addrq_cb, &flags);

		/* both default to -1 in parse_replrq() */
		bmap_2_bmi(b)->bmi_sys_prio = sys_prio;
		bmap_2_bmi(b)->bmi_usr_prio = usr_prio;
		/*
		 * NOTE(review): the return value of
		 * mds_bmap_write_logrepls() is ignored here, while
		 * mds_repl_delrq() checks it -- confirm intentional.
		 */
		if (flags & FLAG_DIRTY)
			mds_bmap_write_logrepls(b);
		else if (sys_prio != -1 || usr_prio != -1)
			slm_repl_upd_write(b, 0);
		bmap_op_done_type(b, BMAP_OPCNT_LOOKUP);
		if (flags & FLAG_REPLICA_STATE_INVALID) {
			/* See pfl_register_errno() */
			rc = -SLERR_REPLICA_STATE_INVALID;
			break;
		}
	}

 out:
	if (f)
		fcmh_op_done(f);
	/* Report progress even on error so the caller can resume. */
	*nbmaps = nbmaps_processed;
	return (rc);
}