void free_cstring(char * str) { enif_free(str); }
static void gmperl_free(void *ptr, size_t size) { (void)size; enif_free(ptr); }
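/* A free hook with this (pointer, size) shape usually plugs into a C library's
 * custom-allocator API. Assuming the "gmperl" prefix refers to GMP (an
 * assumption, not stated in the original), the wiring would look roughly like
 * the sketch below; gmperl_alloc, gmperl_realloc and gmperl_install_allocators
 * are hypothetical names introduced here. */
#include <gmp.h>

static void* gmperl_alloc(size_t size) { return enif_alloc(size); }

static void* gmperl_realloc(void* ptr, size_t old_size, size_t new_size)
{
    (void)old_size;                      /* enif_realloc() does not need the old size */
    return enif_realloc(ptr, new_size);
}

static void gmperl_install_allocators(void)
{
    /* GMP passes the old block size to its free hook, which is why
     * gmperl_free() above takes a size argument it never uses. */
    mp_set_memory_functions(gmperl_alloc, gmperl_realloc, gmperl_free);
}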
static ERL_NIF_TERM _send(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    CAN_handle* handle;
    unsigned int i = 0, length, total_size = 0;
    ERL_NIF_TERM result;

    if (!enif_get_resource(env, argv[0], CAN_handle_type, (void**) &handle))
        return enif_make_int(env, -2000);
    if (!enif_get_list_length(env, argv[1], &length))
        return enif_make_int(env, -2001);

    canmsg_t* buffer = enif_alloc(length * sizeof(canmsg_t));
    if (buffer == NULL)
        return enif_make_int(env, -2002); /* allocation failure; previously unchecked */
    memset(buffer, 0, length * sizeof(canmsg_t));

    ERL_NIF_TERM list = argv[1];
    ERL_NIF_TERM head, tail;

    /* Convert each {Id, Binary} tuple in the list into a canmsg_t entry. */
    while (enif_get_list_cell(env, list, &head, &tail)) {
        canmsg_t* can_msg = &buffer[i++];
        int arity;
        canmsg_id_t target;
        ErlNifBinary msg;
        const ERL_NIF_TERM* items;

        list = tail;
        if (!enif_get_tuple(env, head, &arity, &items)) {
            result = enif_make_int(env, -1000);
            goto end;
        }
        if (arity != 2) {
            result = enif_make_int(env, -1001);
            goto end;
        }
        if (!enif_get_ulong(env, items[0], &target)) {
            result = enif_make_int(env, -1002);
            goto end;
        }
        if (!enif_inspect_binary(env, items[1], &msg)) {
            result = enif_make_int(env, -1003);
            goto end;
        }
        if (msg.size > CAN_MSG_LENGTH) {
            result = enif_make_int(env, -1005);
            goto end;
        }

        can_msg->id = target;
        memcpy(&can_msg->data[0], msg.data, msg.size);
        can_msg->length = msg.size;
        total_size += msg.size;
    }

    {
        int status = write(handle->device, buffer, length * sizeof(canmsg_t));
        if (status != length * sizeof(canmsg_t))
            status = errno;
        result = enif_make_tuple2(env,
                                  enif_make_int(env, status),
                                  enif_make_int(env, total_size));
    }

end:
    enif_free(buffer);
    return result;
}
static void destroy_regexp(ErlNifEnv *env, void *obj) { struct regexp *r = obj; xmlRegFreeRegexp(r->xre); enif_free(r->string); }
static void matrix_dtor(ErlNifEnv* env, void* obj) { Matrix* mx = (Matrix*) obj; enif_free(mx->data); mx->data = NULL; }
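/* For context: a sketch of the allocation side that a destructor like
 * matrix_dtor() implies. The Matrix layout, the matrix_type resource type and
 * the argument convention are assumptions made for this illustration only. */
typedef struct { unsigned nrows; unsigned ncols; double* data; } Matrix;

static ErlNifResourceType* matrix_type;   /* assumed to be opened with matrix_dtor as destructor */

static ERL_NIF_TERM matrix_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned nrows, ncols;
    if (argc != 2 ||
        !enif_get_uint(env, argv[0], &nrows) ||
        !enif_get_uint(env, argv[1], &ncols))
        return enif_make_badarg(env);

    double* data = enif_alloc(nrows * ncols * sizeof(double));
    if (data == NULL)
        return enif_make_badarg(env);
    memset(data, 0, nrows * ncols * sizeof(double));

    Matrix* mx = enif_alloc_resource(matrix_type, sizeof(Matrix));
    mx->nrows = nrows;
    mx->ncols = ncols;
    mx->data  = data;                     /* freed later by matrix_dtor() */

    ERL_NIF_TERM term = enif_make_resource(env, mx);
    enif_release_resource(mx);            /* the returned term now keeps the resource alive */
    return term;
}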
static void free_func(void *ctx, void *ptr) { (void)ctx; /* enif_free() no longer takes an ErlNifEnv argument */ enif_free(ptr); }
static void unload(ErlNifEnv* env, void* priv) { state_ptr state = (state_ptr) priv; enif_free(state); }
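/* For context: a load() callback that pairs with an unload() like the one
 * above normally allocates the private state with enif_alloc() and hands it
 * back through *priv_data. This is only a sketch; the state_t type and its
 * fields are assumptions, not part of the original code. */
typedef struct { long call_count; } state_t;

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    state_t* state = enif_alloc(sizeof(state_t));
    if (state == NULL)
        return 1;              /* a non-zero return aborts loading the NIF library */
    state->call_count = 0;
    *priv_data = state;        /* later retrieved via enif_priv_data(env), freed in unload() */
    return 0;
}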
void destroy_parser(Parser* parser) { if(parser->columns != NULL) { enif_free(parser->columns); } }
static void crypto_free(void* ptr) { enif_free(ptr); }
void nif_thread_message_free(nif_thread_message* msg) { enif_free(msg->from_pid); enif_free(msg->args); enif_free(msg); }
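/* For context: a plausible constructor for the message structure freed above.
 * The nif_thread_message layout and the argument types are assumptions; only
 * the from_pid/args field names are taken from the free function itself. */
typedef struct {
    ErlNifPid*    from_pid;
    ERL_NIF_TERM* args;
} nif_thread_message;

nif_thread_message* nif_thread_message_alloc(const ErlNifPid* from, unsigned argc)
{
    nif_thread_message* msg = enif_alloc(sizeof(nif_thread_message));
    if (msg == NULL)
        return NULL;
    msg->from_pid = enif_alloc(sizeof(ErlNifPid));
    msg->args     = enif_alloc(argc * sizeof(ERL_NIF_TERM));
    if (msg->from_pid == NULL || msg->args == NULL) {
        if (msg->from_pid) enif_free(msg->from_pid);
        if (msg->args)     enif_free(msg->args);
        enif_free(msg);
        return NULL;
    }
    *msg->from_pid = *from;
    return msg;
}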
/*
 * extern void GEOS_DLL GEOSSTRtree_query(GEOSSTRtree *tree, const GEOSGeometry *g,
 *                                        GEOSQueryCallback callback, void *userdata);
 *
 * GeosSTRtree = lgeo_geos_index:strtree_create(),
 * Element1 = {'LineString', [[4.0,4.0], [4.5, 4.5], [10.0,10.0]]},
 * Element2 = 17.0,
 * Element3 = ["hola"],
 * Geom = lgeo_geos_index:to_geom(Element1),
 * lgeo_geos_index:strtree_insert(GeosSTRtree, Geom, Element1),
 * lgeo_geos_index:strtree_insert(GeosSTRtree, Geom, Element2),
 * lgeo_geos_index:strtree_insert(GeosSTRtree, Geom, Element3),
 * lgeo_geos_index:strtree_query(GeosSTRtree, Geom).
 * [Element1, Element2, Element3]
 */
static ERL_NIF_TERM strtree_query(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    GeosSTRtree_t **tree;
    GEOSGeometry **geom;
    ERL_NIF_TERM eterm;

    if (argc != 2) {
        return enif_make_badarg(env);
    }
    if (!enif_get_resource(env, argv[0], GEOSSTRTREE_RESOURCE, (void**)&tree)) {
        return enif_make_badarg(env);
    }
    if (!enif_get_resource(env, argv[1], GEOSGEOM_RESOURCE, (void**)&geom)) {
        return enif_make_badarg(env);
    }

    int size = 128;
    ERL_NIF_TERM *arr = (ERL_NIF_TERM *) enif_alloc(sizeof(ERL_NIF_TERM) * size);
    GeosSTRtree_acc_t acc = {.count = 0, .size = size, .elements = arr};

    GEOSSTRtree_query((**tree).tree, *geom, geosstrtree_cb, &acc);

    eterm = enif_make_list_from_array(env, acc.elements, acc.count);
    enif_free(arr);
    return eterm;
}

/*
 * extern void GEOS_DLL GEOSSTRtree_iterate(GEOSSTRtree *tree,
 *                                          GEOSQueryCallback callback, void *userdata);
 */
static ERL_NIF_TERM strtree_iterate(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    GeosSTRtree_t **tree;
    ERL_NIF_TERM eterm;

    if (argc != 1) {
        return enif_make_badarg(env);
    }
    if (!enif_get_resource(env, argv[0], GEOSSTRTREE_RESOURCE, (void**)&tree)) {
        return enif_make_badarg(env);
    }

    int size = 128;
    ERL_NIF_TERM *arr = (ERL_NIF_TERM *) enif_alloc(sizeof(ERL_NIF_TERM) * size);
    GeosSTRtree_acc_t acc = {.count = 0, .size = size, .elements = arr};

    GEOSSTRtree_iterate((**tree).tree, geosstrtree_cb, &acc);

    eterm = enif_make_list_from_array(env, acc.elements, acc.count);
    enif_free(arr);
    return eterm;
}

/*
 * Removed until we know how to compare two terms from different environments,
 * if that is possible at all: insert saves a copy of the term and remove needs
 * that exact copy to work.
 *
 * extern char GEOS_DLL GEOSSTRtree_remove(GEOSSTRtree *tree, const GEOSGeometry *g, void *item);
 *
 * GeosSTRtree = lgeo_geos_index:strtree_create(),
 * Ls1 = {'LineString', [[3.0,3.0],[6.0,6.0]]},
 * Geom1 = lgeo_geos_index:to_geom(Ls1),
 * lgeo_geos_index:strtree_insert(GeosSTRtree, Geom1, Ls1),
 * lgeo_geos_index:strtree_remove(GeosSTRtree, Geom1, Ls1),
 * Geoms = lgeo_geos_index:strtree_query(GeosSTRtree, Geom1).
 * []
 *
 * static ERL_NIF_TERM strtree_remove(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 * {
 *     GeosSTRtree_t **tree;
 *     GEOSGeometry **geom;
 *
 *     if (argc != 3) {
 *         return enif_make_badarg(env);
 *     }
 *     if (!enif_get_resource(env, argv[0], GEOSSTRTREE_RESOURCE, (void**)&tree)) {
 *         return enif_make_badarg(env);
 *     }
 *     if (!enif_get_resource(env, argv[1], GEOSGEOM_RESOURCE, (void**)&geom)) {
 *         return enif_make_badarg(env);
 *     }
 *
 *     char remove = GEOSSTRtree_remove((**tree).tree, GEOSEnvelope(*geom), (void*)argv[2]);
 *     // printf("Rtree remove: %d.\n", remove);
 *
 *     if (remove == 0) {
 *         return enif_make_tuple2(env, enif_make_atom(env, "ok"),
 *                                      enif_make_atom(env, "false"));
 *     } else if (remove == 1) {
 *         return enif_make_tuple2(env, enif_make_atom(env, "ok"),
 *                                      enif_make_atom(env, "true"));
 *     } else {
 *         return enif_make_tuple2(env, enif_make_atom(env, "error"),
 *                                      enif_make_atom(env, "undefined"));
 *     }
 * }
 */

/************************************************************************
 *
 *  Erlang-GEOS Translation
 *
 ***********************************************************************/

static ERL_NIF_TERM to_geom(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM eterm;
    GEOSGeometry **geom =
        enif_alloc_resource(GEOSGEOM_RESOURCE, sizeof(GEOSGeometry*));
    *geom = eterm_to_geom(env, argv);
    eterm = enif_make_resource(env, geom);
    enif_release_resource(geom);
    return eterm;
}
/* the async job thread that handles opening/closing of doors */
void *job_thread(void *arg)
{
    struct zdoor_handle *zhandle;
    int cont = 1;
    int res;

    /* first init the handle */
    zhandle = zdoor_handle_init();

    enif_mutex_lock(gbl.jlock);
    while (cont) {
        struct job *j;

        while (!gbl.jlist)
            enif_cond_wait(gbl.jcond, gbl.jlock);

        j = gbl.jlist;
        while (j) {
            gbl.jlist = j->next;
            enif_mutex_unlock(gbl.jlock);

            if (j->action == ACT_OPEN) {
                enif_rwlock_rwlock(gbl.dlock);
                j->door->next = NULL;
                if (gbl.dlist != NULL)
                    j->door->next = gbl.dlist;
                gbl.dlist = j->door;
                enif_rwlock_rwunlock(gbl.dlock);

                res = zdoor_open(zhandle, j->door->zonename, j->door->service,
                                 j->door, zdoor_cb);

                ErlNifEnv *env = enif_alloc_env();
                ERL_NIF_TERM ret = enif_make_atom(env, "ok");
                switch (res) {
                case ZDOOR_ERROR:
                    ret = enif_make_atom(env, "error");
                    break;
                case ZDOOR_NOT_GLOBAL_ZONE:
                    ret = enif_make_atom(env, "not_global");
                    break;
                case ZDOOR_ZONE_NOT_RUNNING:
                    ret = enif_make_atom(env, "not_running");
                    break;
                case ZDOOR_ZONE_FORBIDDEN:
                    ret = enif_make_atom(env, "eperm");
                    break;
                case ZDOOR_ARGS_ERROR:
                    ret = enif_make_atom(env, "badarg");
                    break;
                case ZDOOR_OUT_OF_MEMORY:
                    ret = enif_make_atom(env, "enomem");
                    break;
                }

                enif_send(NULL, &j->owner, env,
                          enif_make_tuple3(env,
                                           enif_make_atom(env, "zdoor_job"),
                                           enif_make_atom(env, "open"),
                                           ret));
                enif_free_env(env);

            } else if (j->action == ACT_CLOSE) {
                enif_rwlock_rwlock(gbl.dlock);
                enif_rwlock_rwlock(j->door->rlock);

                if (j->door->rlist) {
                    enif_rwlock_rwunlock(j->door->rlock);
                    enif_rwlock_rwunlock(gbl.dlock);

                    ErlNifEnv *env = enif_alloc_env();
                    enif_send(NULL, &j->owner, env,
                              enif_make_tuple3(env,
                                               enif_make_atom(env, "zdoor_job"),
                                               enif_make_atom(env, "close"),
                                               enif_make_atom(env, "busy")));
                    enif_free_env(env);
                } else {
                    struct door *d = gbl.dlist;
                    if (d == j->door) {
                        gbl.dlist = j->door->next;
                    } else {
                        for (; d; d = d->next) {
                            if (d->next == j->door)
                                break;
                        }
                        if (d)
                            d->next = j->door->next;
                    }
                    enif_rwlock_rwunlock(gbl.dlock);

                    zdoor_close(zhandle, j->door->zonename, j->door->service);
                    door_free(j->door);

                    ErlNifEnv *env = enif_alloc_env();
                    enif_send(NULL, &j->owner, env,
                              enif_make_tuple3(env,
                                               enif_make_atom(env, "zdoor_job"),
                                               enif_make_atom(env, "close"),
                                               enif_make_atom(env, "ok")));
                    enif_free_env(env);
                }

            } else if (j->action == ACT_QUIT) {
                cont = 0;
            }

            enif_free(j);

            enif_mutex_lock(gbl.jlock);
            j = gbl.jlist;
        }
    }
    enif_mutex_unlock(gbl.jlock);

    zdoor_handle_destroy(zhandle);

    return NULL;
}
ERL_NIF_TERM engine_ctrl_cmd_strings_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Engine, Commands, Optional) */
#ifdef HAS_ENGINE_SUPPORT
    ERL_NIF_TERM ret;
    unsigned int cmds_len = 0;
    char **cmds = NULL;
    struct engine_ctx *ctx;
    unsigned int i;
    int optional = 0;
    int cmds_loaded = 0;

    // Get Engine
    ASSERT(argc == 3);

    if (!enif_get_resource(env, argv[0], engine_ctx_rtype, (void**)&ctx))
        goto bad_arg;
    PRINTF_ERR1("Engine Id: %s\r\n", ENGINE_get_id(ctx->engine));

    // Get Command List
    if (!enif_get_list_length(env, argv[1], &cmds_len))
        goto bad_arg;
    if (cmds_len > (UINT_MAX / 2) - 1)
        goto err;
    cmds_len *= 2; // Key-Value list from erlang

    if ((size_t)cmds_len + 1 > SIZE_MAX / sizeof(char*))
        goto err;
    if ((cmds = enif_alloc((cmds_len + 1) * sizeof(char*))) == NULL)
        goto err;
    if (get_engine_load_cmd_list(env, argv[1], cmds, 0))
        goto err;
    cmds_loaded = 1;

    if (!enif_get_int(env, argv[2], &optional))
        goto err;

    for (i = 0; i < cmds_len; i += 2) {
        PRINTF_ERR2("Cmd: %s:%s\r\n",
                    cmds[i]   ? cmds[i]   : "(NULL)",
                    cmds[i+1] ? cmds[i+1] : "(NULL)");
        if (!ENGINE_ctrl_cmd_string(ctx->engine, cmds[i], cmds[i+1], optional)) {
            PRINTF_ERR2("Command failed: %s:%s\r\n",
                        cmds[i]   ? cmds[i]   : "(NULL)",
                        cmds[i+1] ? cmds[i+1] : "(NULL)");
            goto cmd_failed;
        }
    }
    ret = atom_ok;
    goto done;

bad_arg:
err:
    ret = enif_make_badarg(env);
    goto done;

cmd_failed:
    ret = ERROR_Atom(env, "ctrl_cmd_failed");

done:
    if (cmds_loaded) {
        for (i = 0; cmds != NULL && cmds[i] != NULL; i++)
            enif_free(cmds[i]);
    }
    if (cmds != NULL)
        enif_free(cmds);

    return ret;
#else
    return atom_notsup;
#endif
}
static void on_unload(ErlNifEnv *env, void *priv_data) { // Free the private data allocated earlier enif_free(priv_data); }
static void release_token(ErlNifEnv* env, token_t* t) { if ((t->type == TOKEN_WORD) || (t->type == TOKEN_STRING)) enif_release_binary(&t->bin); enif_free(t); }
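/* For context: a sketch of how a token that release_token() can clean up might
 * be built. The token_t layout and the TOKEN_* enum are assumptions based only
 * on the fields the release function touches. */
typedef enum { TOKEN_WORD, TOKEN_STRING, TOKEN_NUMBER } token_type_t;
typedef struct { token_type_t type; ErlNifBinary bin; } token_t;

static token_t* make_string_token(const char* text, size_t len)
{
    token_t* t = enif_alloc(sizeof(token_t));
    if (t == NULL)
        return NULL;
    t->type = TOKEN_STRING;
    if (!enif_alloc_binary(len, &t->bin)) {   /* released later by release_token() */
        enif_free(t);
        return NULL;
    }
    memcpy(t->bin.data, text, len);
    return t;
}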
ERL_NIF_TERM make_encoder_resource_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned rs_len, fs_len, bin_sz;
    enif_get_uint(env, argv[0], &rs_len);
    enif_get_uint(env, argv[1], &fs_len);
    enif_get_uint(env, argv[4], &bin_sz);

    PrivData* priv = (PrivData*)enif_priv_data(env);
    unsigned resource_sz = enc_resource_size(rs_len, fs_len);
    EncEntry *enc_entry = (EncEntry*)enif_alloc_resource(priv->encoder_RSTYPE, resource_sz);
    //memset(enc_entry, 0, resource_sz);

    enc_entry->records_cnt = rs_len;
    enc_entry->fields_cnt = fs_len;

    if (!enif_alloc_binary(bin_sz + 1, &enc_entry->bin))
        goto error;
    //memset(enc_entry->bin.data, 0, bin_sz + 1);

    ErlNifBinary ebin;
    enif_inspect_binary(env, argv[5], &ebin);
    memcpy(enc_entry->bin.data, ebin.data, ebin.size);

    ERL_NIF_TERM list, head, tail;

    // argv[2]: list of {Tag, FdsOffset, Arity} record descriptors
    list = argv[2];
    int i = 0;
    while (enif_get_list_cell(env, list, &head, &tail)) {
        const ERL_NIF_TERM *tuple;
        int arity;
        unsigned ip;
        enif_get_tuple(env, head, &arity, &tuple);
        EncRecord *records = enc_records_base(enc_entry);
        records[i].tag = tuple[0];
        enif_get_uint(env, tuple[1], &ip);
        records[i].fds_offset = ip;
        enif_get_uint(env, tuple[2], &ip);
        records[i].arity = ip;
        i++;
        list = tail;
    }

    // argv[3]: list of {Offset, Size} field descriptors
    list = argv[3];
    i = 0;
    while (enif_get_list_cell(env, list, &head, &tail)) {
        const ERL_NIF_TERM *tuple;
        int arity;
        unsigned ip;
        enif_get_tuple(env, head, &arity, &tuple);
        EncField *fields = enc_fields_base(enc_entry);
        enif_get_uint(env, tuple[0], &ip);
        fields[i].offset = ip;
        enif_get_uint(env, tuple[1], &ip);
        fields[i].size = ip;
        i++;
        list = tail;
    }

    // argv[6]: list of atoms to ignore
    list = argv[6];
    if (!enif_get_list_length(env, list, &(enc_entry->ignored_len)))
        goto error;
    enc_entry->ignored = (ERL_NIF_TERM*)enif_alloc(enc_entry->ignored_len * sizeof(ERL_NIF_TERM));
    if (enc_entry->ignored == NULL) // allocation was previously unchecked
        goto error;
    i = 0;
    while (enif_get_list_cell(env, list, &head, &tail)) {
        // ignored terms should be atoms
        if (enif_is_atom(env, head)) {
            enc_entry->ignored[i] = head;
        } else {
            enif_free(enc_entry->ignored);
            goto error;
        }
        i++;
        list = tail;
    }

    ERL_NIF_TERM ret = enif_make_resource(env, (void *)enc_entry);
    enif_release_resource(enc_entry);
    return ret;

error:
    enif_release_resource(enc_entry);
    return enif_make_badarg(env);
}
static ERL_NIF_TERM re2_match(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
  ErlNifBinary sdata;
  if (!enif_inspect_iolist_as_binary(env, argv[0], &sdata))
    return enif_make_badarg(env);

  const re2::StringPiece s((const char*)sdata.data, sdata.size);
  autohandle<re2::RE2> re;
  union re2_handle_union handle;
  ErlNifBinary pdata;
  matchoptions opts(env);

  if (argc == 3 && !parse_match_options(env, argv[2], opts))
    return enif_make_badarg(env);

  if (enif_get_resource(env, argv[1], re2_resource_type, &handle.vp)
      && handle.p->re != NULL) {
    re.set(handle.p->re, true);
    if (opts.caseless) // caseless allowed either in compile or match
      return enif_make_badarg(env);
  } else if (enif_inspect_iolist_as_binary(env, argv[1], &pdata)) {
    const re2::StringPiece p((const char*)pdata.data, pdata.size);
    re2::RE2::Options re2opts;
    re2opts.set_log_errors(false);
    if (opts.caseless)
      re2opts.set_case_sensitive(false);
    re2::RE2* re2 = (re2::RE2*)enif_alloc(sizeof(re2::RE2));
    if (re2 == NULL)
      return error(env, a_err_enif_alloc);
    re.set(new (re2) re2::RE2(p, re2opts)); // placement new
  } else {
    return enif_make_badarg(env);
  }

  if (!re->ok())
    return enif_make_badarg(env);

  int n = re->NumberOfCapturingGroups() + 1;
  std::vector<re2::StringPiece> group;
  group.resize(n); // resize (not reserve) so &group[0] refers to n valid elements

  if (!re->Match(s, opts.offset, s.size(), re2::RE2::UNANCHORED, &group[0], n))
    return a_nomatch;

  int start = 0;
  int arrsz = n;

  if (opts.vs == matchoptions::VS_NONE) {
    // return match atom only
    return a_match;
  } else if (opts.vs == matchoptions::VS_FIRST) {
    // return first match only
    ERL_NIF_TERM first = mres(env, s, group[0], opts.ct);
    if (enif_is_identical(first, a_err_alloc_binary)) {
      return error(env, a_err_alloc_binary);
    } else {
      return enif_make_tuple2(env, a_match, enif_make_list1(env, first));
    }
  } else if (opts.vs == matchoptions::VS_ALL_BUT_FIRST) {
    // skip first match
    start = 1;
    arrsz--;
  }

  if (opts.vs == matchoptions::VS_VLIST) {
    // return matched subpatterns as specified in ValueList
    return re2_match_ret_vlist(env, re, s, opts, group, n);
  } else {
    // return all or all_but_first matches
    ERL_NIF_TERM* arr = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * n);
    if (arr == NULL) // allocation was previously unchecked
      return error(env, a_err_enif_alloc);
    for (int i = start, arridx = 0; i < n; i++, arridx++) {
      ERL_NIF_TERM res = mres(env, s, group[i], opts.ct);
      if (enif_is_identical(res, a_err_alloc_binary)) {
        enif_free(arr);
        return error(env, a_err_alloc_binary);
      } else {
        arr[arridx] = res;
      }
    }
    ERL_NIF_TERM list = enif_make_list_from_array(env, arr, arrsz);
    enif_free(arr);
    return enif_make_tuple2(env, a_match, list);
  }
}
static void unload(ErlNifEnv* env, void* priv) { enif_free(priv); return; }
static ERL_NIF_TERM re2_match_ret_vlist(ErlNifEnv* env,
                                        const autohandle<re2::RE2>& re,
                                        const re2::StringPiece& s,
                                        const matchoptions& opts,
                                        std::vector<re2::StringPiece>& group,
                                        int n)
{
  std::vector<ERL_NIF_TERM> vec;
  const std::map<std::string, int>& nmap = re->NamedCapturingGroups();
  ERL_NIF_TERM VL, VH, VT;
  // empty StringPiece for unfound ValueIds
  const re2::StringPiece empty;

  for (VL = opts.vlist; enif_get_list_cell(env, VL, &VH, &VT); VL = VT) {
    int nid = 0;

    if (enif_get_int(env, VH, &nid) && nid > 0) {
      // ValueID int()
      if (nid < n) {
        const re2::StringPiece match = group[nid];
        ERL_NIF_TERM res;
        if (!match.empty())
          res = mres(env, s, group[nid], opts.ct);
        else
          res = mres(env, s, empty, opts.ct);
        if (enif_is_identical(res, a_err_alloc_binary))
          return error(env, a_err_alloc_binary);
        else
          vec.push_back(res);
      } else {
        vec.push_back(mres(env, s, empty, opts.ct));
      }

    } else if (enif_is_atom(env, VH)) {
      // ValueID atom()
      unsigned atom_len;
      char *a_id = alloc_atom(env, VH, &atom_len);
      if (a_id == NULL)
        return error(env, a_err_enif_alloc);

      if (enif_get_atom(env, VH, a_id, atom_len, ERL_NIF_LATIN1) > 0) {
        std::map<std::string, int>::const_iterator it = nmap.find(a_id);
        ERL_NIF_TERM res;
        if (it != nmap.end())
          res = mres(env, s, group[it->second], opts.ct);
        else
          res = mres(env, s, empty, opts.ct);
        if (enif_is_identical(res, a_err_alloc_binary)) {
          enif_free(a_id); // avoid leaking the atom buffer on this error path
          return error(env, a_err_alloc_binary);
        }
        vec.push_back(res);
      } else {
        enif_free(a_id);
        return error(env, a_err_get_atom);
      }
      enif_free(a_id);

    } else {
      // ValueID string()
      unsigned str_len;
      char *str_id = alloc_str(env, VH, &str_len);
      if (str_id == NULL)
        return error(env, a_err_enif_alloc);

      if (enif_get_string(env, VH, str_id, str_len, ERL_NIF_LATIN1) > 0) {
        std::map<std::string, int>::const_iterator it = nmap.find(str_id);
        ERL_NIF_TERM res;
        if (it != nmap.end())
          res = mres(env, s, group[it->second], opts.ct);
        else
          res = mres(env, s, empty, opts.ct);
        if (enif_is_identical(res, a_err_alloc_binary)) {
          enif_free(str_id); // avoid leaking the string buffer on this error path
          return error(env, a_err_alloc_binary);
        }
        vec.push_back(res);
      } else {
        enif_free(str_id);
        return error(env, a_err_get_string);
      }
      enif_free(str_id);
    }
  }

  // vec may be empty if ValueList was empty; avoid taking &vec[0] in that case
  ERL_NIF_TERM list = vec.empty()
      ? enif_make_list(env, 0)
      : enif_make_list_from_array(env, &vec[0], vec.size());
  return enif_make_tuple2(env, a_match, list);
}
posix_errno_t efile_rename(const efile_path_t *old_path, const efile_path_t *new_path) {
    BOOL old_is_directory, new_is_directory;
    DWORD move_flags, last_error;

    ASSERT_PATH_FORMAT(old_path);
    ASSERT_PATH_FORMAT(new_path);

    move_flags = MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH;

    if(MoveFileExW((WCHAR*)old_path->data, (WCHAR*)new_path->data, move_flags)) {
        return 0;
    }

    last_error = GetLastError();

    old_is_directory = has_file_attributes(old_path, FILE_ATTRIBUTE_DIRECTORY);
    new_is_directory = has_file_attributes(new_path, FILE_ATTRIBUTE_DIRECTORY);

    switch(last_error) {
    case ERROR_SHARING_VIOLATION:
    case ERROR_ACCESS_DENIED:
        if(old_is_directory) {
            BOOL moved_into_itself;

            moved_into_itself = (old_path->size <= new_path->size) &&
                !_wcsnicmp((WCHAR*)old_path->data, (WCHAR*)new_path->data,
                    PATH_LENGTH(old_path));

            if(moved_into_itself) {
                return EINVAL;
            } else if(is_path_root(old_path)) {
                return EINVAL;
            }

            /* Renaming a directory across volumes needs to be rewritten as
             * EXDEV so that the caller can respond by simulating it with
             * copy/delete operations.
             *
             * Files are handled through MOVEFILE_COPY_ALLOWED. */
            if(!has_same_mount_point(old_path, new_path)) {
                return EXDEV;
            }
        }
        break;
    case ERROR_PATH_NOT_FOUND:
    case ERROR_FILE_NOT_FOUND:
        return ENOENT;
    case ERROR_ALREADY_EXISTS:
    case ERROR_FILE_EXISTS:
        if(old_is_directory && !new_is_directory) {
            return ENOTDIR;
        } else if(!old_is_directory && new_is_directory) {
            return EISDIR;
        } else if(old_is_directory && new_is_directory) {
            /* This will fail if the destination isn't empty. */
            if(RemoveDirectoryW((WCHAR*)new_path->data)) {
                return efile_rename(old_path, new_path);
            }

            return EEXIST;
        } else if(!old_is_directory && !new_is_directory) {
            /* This is pretty iffy; the public documentation says that the
             * operation may EACCES on some systems when either file is open,
             * which gives us room to use MOVEFILE_REPLACE_EXISTING and be done
             * with it, but the old implementation simulated Unix semantics and
             * there's a lot of code that relies on that.
             *
             * The simulation renames the destination to a scratch name to get
             * around the fact that it's impossible to open (and by extension
             * rename) a file that's been deleted while open. It has a few
             * drawbacks though;
             *
             * 1) It's not atomic as there's a small window where there's no
             *    file at all on the destination path.
             * 2) It will confuse applications that subscribe to folder
             *    changes.
             * 3) It will fail if we lack general permission to write in the
             *    same folder. */
            WCHAR *swap_path = enif_alloc(new_path->size + sizeof(WCHAR) * 64);

            if(swap_path == NULL) {
                return ENOMEM;
            } else {
                static LONGLONG unique_counter = 0;
                WCHAR *swap_path_end;

                /* We swap in the same folder as the destination to be
                 * reasonably sure that it's on the same volume. Note that
                 * we're avoiding GetTempFileNameW as it will fail on long
                 * paths. */
                sys_memcpy(swap_path, (WCHAR*)new_path->data, new_path->size);
                swap_path_end = swap_path + PATH_LENGTH(new_path);

                while(!IS_SLASH(*swap_path_end)) {
                    ASSERT(swap_path_end > swap_path);
                    swap_path_end--;
                }

                StringCchPrintfW(&swap_path_end[1], 64, L"erl-%lx-%llx.tmp",
                    GetCurrentProcessId(), unique_counter);
                InterlockedIncrement64(&unique_counter);
            }

            if(MoveFileExW((WCHAR*)new_path->data, swap_path,
                    MOVEFILE_REPLACE_EXISTING)) {
                if(MoveFileExW((WCHAR*)old_path->data, (WCHAR*)new_path->data,
                        move_flags)) {
                    last_error = ERROR_SUCCESS;
                    DeleteFileW(swap_path);
                } else {
                    last_error = GetLastError();
                    MoveFileW(swap_path, (WCHAR*)new_path->data);
                }
            } else {
                last_error = GetLastError();
                DeleteFileW(swap_path);
            }

            enif_free(swap_path);

            return windows_to_posix_errno(last_error);
        }

        return EEXIST;
    }

    return windows_to_posix_errno(last_error);
}
void queue_destroy(queue_t* queue) { enif_mutex_destroy(queue->mutex); enif_cond_destroy(queue->cond); enif_free(queue); }
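/* For context: the constructor that queue_destroy() implies. Only the mutex
 * and cond fields are known from the destructor; the rest of queue_t is an
 * assumption for this sketch. */
typedef struct {
    ErlNifMutex* mutex;
    ErlNifCond*  cond;
} queue_t;

queue_t* queue_create(void)
{
    queue_t* queue = enif_alloc(sizeof(queue_t));
    if (queue == NULL)
        return NULL;
    queue->mutex = enif_mutex_create("queue_mutex");
    queue->cond  = enif_cond_create("queue_cond");
    if (queue->mutex == NULL || queue->cond == NULL) {
        if (queue->cond)  enif_cond_destroy(queue->cond);
        if (queue->mutex) enif_mutex_destroy(queue->mutex);
        enif_free(queue);
        return NULL;
    }
    return queue;
}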
static void *cache_bg_thread(void *arg)
{
    struct cache *c = (struct cache *)arg;
    int i, dud;

    while (1) {
        enif_mutex_lock(c->ctrl_lock);

        /* if we've been told to die, quit this loop and start cleaning up */
        if (c->flags & FL_DYING) {
            enif_mutex_unlock(c->ctrl_lock);
            break;
        }

        /* sleep until there is work to do */
        enif_cond_wait(c->check_cond, c->ctrl_lock);

        __sync_add_and_fetch(&(c->wakeups), 1);
        dud = 1;

        /* we have to let go of ctrl_lock so we can take cache_lock then
           ctrl_lock again to get them back in the right order */
        enif_mutex_unlock(c->ctrl_lock);
        enif_rwlock_rwlock(c->cache_lock);
        enif_mutex_lock(c->ctrl_lock);

        /* first process the promotion queue before we do any evicting */
        for (i = 0; i < N_INCR_BKT; ++i) {
            enif_mutex_lock(c->incr_lock[i]);
            while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
                struct cache_incr_node *n;
                n = TAILQ_FIRST(&(c->incr_head[i]));
                TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
                __sync_sub_and_fetch(&(c->incr_count), 1);

                dud = 0;

                /* let go of the ctrl_lock here, we don't need it when we
                   aren't looking at the incr_queue, and this way other
                   threads can use it while we shuffle queue nodes around */
                enif_mutex_unlock(c->incr_lock[i]);
                enif_mutex_unlock(c->ctrl_lock);

                if (n->node->q == &(c->q1)) {
                    TAILQ_REMOVE(&(c->q1.head), n->node, entry);
                    c->q1.size -= n->node->size;
                    TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
                    n->node->q = &(c->q2);
                    c->q2.size += n->node->size;
                } else if (n->node->q == &(c->q2)) {
                    TAILQ_REMOVE(&(c->q2.head), n->node, entry);
                    TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
                }

                enif_free(n);

                /* take the ctrl_lock back again for the next loop around */
                enif_mutex_lock(c->ctrl_lock);
                enif_mutex_lock(c->incr_lock[i]);
            }
            enif_mutex_unlock(c->incr_lock[i]);
        }

        /* let go of the ctrl_lock here for two reasons:
           1. avoid lock inversion, because if we have evictions to do we
              will need to take lookup_lock, and we must take lookup_lock
              before taking ctrl_lock
           2. if we don't need to do evictions, we're done with the
              structures that are behind ctrl_lock so we should give it up
              for others */
        enif_mutex_unlock(c->ctrl_lock);

        /* do timed evictions -- if anything has expired, nuke it */
        {
            struct cache_node *n;
            if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
                struct timespec now;
                clock_now(&now);
                while (n && n->expiry.tv_sec < now.tv_sec) {
                    enif_mutex_lock(c->ctrl_lock);
                    dud = 0;
                    destroy_cache_node(n);
                    enif_mutex_unlock(c->ctrl_lock);
                    n = RB_MIN(expiry_tree, &(c->expiry_head));
                }
            }
        }

        /* now check if we need to do ordinary size limit evictions */
        if (c->q1.size + c->q2.size > c->max_size) {
            enif_rwlock_rwlock(c->lookup_lock);
            enif_mutex_lock(c->ctrl_lock);

            while ((c->q1.size + c->q2.size > c->max_size) &&
                   (c->q1.size > c->min_q1_size)) {
                struct cache_node *n;
                n = TAILQ_LAST(&(c->q1.head), cache_q);
                destroy_cache_node(n);
            }

            while (c->q1.size + c->q2.size > c->max_size) {
                struct cache_node *n;
                n = TAILQ_LAST(&(c->q2.head), cache_q);
                destroy_cache_node(n);
            }

            dud = 0;

            enif_mutex_unlock(c->ctrl_lock);
            enif_rwlock_rwunlock(c->lookup_lock);
        }

        if (dud)
            __sync_add_and_fetch(&(c->dud_wakeups), 1);

        /* now let go of the cache_lock that we took right back at the start
           of this iteration */
        enif_rwlock_rwunlock(c->cache_lock);
    }

    /* first remove us from the atom_tree, so we get no new operations coming in */
    enif_rwlock_rwlock(gbl->atom_lock);
    RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
    enif_rwlock_rwunlock(gbl->atom_lock);
    enif_free(c->atom_node);

    /* now take all of our locks, to make sure any pending operations are done */
    enif_rwlock_rwlock(c->cache_lock);
    enif_rwlock_rwlock(c->lookup_lock);
    enif_mutex_lock(c->ctrl_lock);

    c->atom_node = NULL;

    /* free the actual cache queues */
    {
        struct cache_node *n, *nextn;
        nextn = TAILQ_FIRST(&(c->q1.head));
        while ((n = nextn)) {
            nextn = TAILQ_NEXT(n, entry);
            destroy_cache_node(n);
        }
        nextn = TAILQ_FIRST(&(c->q2.head));
        while ((n = nextn)) {
            nextn = TAILQ_NEXT(n, entry);
            destroy_cache_node(n);
        }
    }

    for (i = 0; i < N_INCR_BKT; ++i)
        enif_mutex_lock(c->incr_lock[i]);

    /* free the incr_queue */
    for (i = 0; i < N_INCR_BKT; ++i) {
        struct cache_incr_node *in, *nextin;
        nextin = TAILQ_FIRST(&(c->incr_head[i]));
        while ((in = nextin)) {
            nextin = TAILQ_NEXT(in, entry);
            TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
            in->node = 0;
            enif_free(in);
        }
        enif_mutex_unlock(c->incr_lock[i]);
        enif_mutex_destroy(c->incr_lock[i]);
    }

    /* unlock and destroy! */
    enif_cond_destroy(c->check_cond);

    enif_mutex_unlock(c->ctrl_lock);
    enif_mutex_destroy(c->ctrl_lock);

    enif_rwlock_rwunlock(c->lookup_lock);
    enif_rwlock_destroy(c->lookup_lock);

    enif_rwlock_rwunlock(c->cache_lock);
    enif_rwlock_destroy(c->cache_lock);

    enif_free(c);

    return 0;
}
static void frame_cleanup(ErlNifEnv* env, void* arg) { enif_free(arg); }