/* Destructor for a panel widget: release every handle the widget owns
 * (rotate/zoom/close buttons, label, drag bar), then free its private
 * data block and its divtree nodes. */
void panel_remove(struct widget *self)
{
  handle_free(self->owner, DATA->hrotate);
  handle_free(self->owner, DATA->hzoom);
  handle_free(self->owner, DATA->hclose);
  handle_free(self->owner, DATA->hlabel);
  handle_free(self->owner, DATA->hbar);

  g_free(DATA);
  r_divnode_free(self->in);
}
/* Tear down a local-proxy socket: free both handle-I/O wrappers, close
 * the underlying OS pipe handles, and release the socket structure. */
static void sk_localproxy_close(Socket s)
{
    Local_Proxy_Socket lps = (Local_Proxy_Socket) s;

    handle_free(lps->to_cmd_h);
    handle_free(lps->from_cmd_h);
    CloseHandle(lps->to_cmd_H);
    CloseHandle(lps->from_cmd_H);
    sfree(lps);
}
/* Close a handle-backed socket: drop the send/receive handle wrappers,
 * close the OS handles (only once if both directions share one), clear
 * any buffered input, and free the structure. */
static void sk_handle_close(Socket s)
{
    Handle_Socket hs = (Handle_Socket) s;

    handle_free(hs->send_h);
    handle_free(hs->recv_h);
    CloseHandle(hs->send_H);
    if (hs->recv_H != hs->send_H)
        CloseHandle(hs->recv_H);
    bufchain_clear(&hs->inputdata);
    sfree(hs);
}
/* Tear down a local-proxy socket (MPEXT build): first notify the core
 * via do_select, then free the handle wrappers, close the OS pipe
 * handles, and release the socket structure. */
static void sk_localproxy_close (Socket s)
{
    Local_Proxy_Socket lps = (Local_Proxy_Socket) s;

#ifdef MPEXT
    // WinSCP core uses do_select as signalization of connection up/down
    do_select(lps->plug, INVALID_SOCKET, 0);
#endif

    handle_free(lps->to_cmd_h);
    handle_free(lps->from_cmd_h);
    CloseHandle(lps->to_cmd_H);
    CloseHandle(lps->from_cmd_H);
    sfree(lps);
}
/**
 * Remove a handle from the map
 * when it was removed from the filesystem
 * or when it is stale.
 */
int HandleMap_DelFH(nfs23_map_handle_t *p_in_nfs23_digest)
{
	struct gsh_buffdesc key, old_key, old_val;
	digest_pool_entry_t probe;
	int rc;

	/* first, delete it from hash table */
	probe.nfs23_digest = *p_in_nfs23_digest;
	key.addr = (caddr_t) &probe;
	key.len = sizeof(digest_pool_entry_t);

	rc = HashTable_Del(handle_map_hash, &key, &old_key, &old_val);
	if (rc != HASHTABLE_SUCCESS)
		return HANDLEMAP_STALE;

	/* release the key/value entries that were stored in the table */
	digest_free((digest_pool_entry_t *) old_key.addr);
	handle_free((handle_pool_entry_t *) old_val.addr);

	/* then, submit the request to the database */
	return handlemap_db_delete(p_in_nfs23_digest);
}
/* Shut down a serial connection: release the output and input handle
 * wrappers, then close the port itself, cancelling any break condition
 * still in progress. Safe to call more than once: every field is reset
 * to its "closed" value after release. */
static void serial_terminate(Serial *serial)
{
    if (serial->out) {
        handle_free(serial->out);
        serial->out = NULL;
    }
    if (serial->in) {
        handle_free(serial->in);
        serial->in = NULL;
    }
    if (serial->port != INVALID_HANDLE_VALUE) {
        if (serial->break_in_progress)
            ClearCommBreak(serial->port);
        CloseHandle(serial->port);
        serial->port = INVALID_HANDLE_VALUE;
    }
}
CryptoData_t *crypto_create (const SEC_CRYPTO *plugin, size_t data_size, void *owner, int endpoint) { CryptoData_t *dp; Crypto_t h; unsigned n; void *nhandles; h = handle_alloc (handles); if (!h) { n = cur_handles + min_handles; if (n > max_handles) n = max_handles; n -= cur_handles; if (!n) { warn_printf ("Crypto: max. # of contexts reached (1)!"); return (NULL); } nhandles = handle_extend (handles, n); if (!nhandles) { warn_printf ("Crypto: max. # of contexts reached (2)!"); return (NULL); } handles = nhandles; h = handle_alloc (handles); if (!h) { fatal_printf ("Crypto: can't create a handle!"); return (NULL); } crypto = xrealloc (crypto, (cur_handles + 1 + n) * sizeof (CryptoData_t *)); if (!crypto) { fatal_printf ("Crypto: can't extend crypto table!"); return (NULL); } cur_handles += n; sec_crypt_alloc += n * sizeof (CryptoData_t *); } dp = xmalloc (sizeof (CryptoData_t) + data_size); if (!dp) { warn_printf ("Crypto: Out of memory for crypto data!"); handle_free (handles, h); return (NULL); } sec_crypt_alloc += sizeof (CryptoData_t) + data_size; dp->handle = h; dp->plugin = plugin; if (endpoint) dp->parent.endpoint = owner; else dp->parent.participant = owner; dp->endpoint = endpoint; dp->data = dp + 1; (*crypto) [h] = dp; dp->handle = h; return (dp); }
/* Build the textedit widget's gropnode list: size the widget, ensure a
 * font is selected, (re)create the backing bitmap and its client-owned
 * handle, emit the bitmap gropnode and the cursor rectangle, then hand
 * off to the text backend for layout.
 *
 * NOTE(review): every `errorcheck;` below is commented out, so g_error
 * failures from these calls are silently ignored — confirm intentional. */
void textedit_build ( struct gropctxt *c,
                      unsigned short state,
                      struct widget *self ) {
  s16 w, h, tw;
  g_error e;

  /* Widget width leaves room for the vertical scrollbar. */
  w = self->in->div->r.w - theme_lookup(PGTH_O_SCROLL_V,PGTH_P_WIDTH);
  h = self->in->div->r.h;
  self->in->div->preferred.h = h;
  self->in->div->preferred.w = w;

  if (!DATA->fd){
    /* FIXME: Theme lookup foreground, background colors, border */
    e = textedit_set_font (self, theme_lookup (state, PGTH_P_FONT));
    // errorcheck;
  }
  assert (DATA->fd);

  /*
   * The general rule is that once you create a handle you should never
   * delete the object it refers to, only delete the handle
   */
  handle_free(self->owner,DATA->bit_h);

  /* Recreate the backing bitmap at the current widget size. */
  e = VID(bitmap_new) (&(DATA->bit), w, h, vid->bpp);
  // errorcheck;

  /* the handle should be owned by the application not by pgserver itself */
  e = mkhandle(&DATA->bit_h, PG_TYPE_BITMAP, self->owner, DATA->bit);
  // errorcheck;

  /* Size and add the bitmap itself */
  e = addgropsz(c, PG_GROP_BITMAP, 0, 0, w, h);
  // errorcheck;
  c->current->param[0] = DATA->bit_h;

  /* Create cursor */
  e = addgropsz(c,PG_GROP_RECT, 0, 0, 0, 0);
  // errorcheck;
  c->current->flags |= PG_GROPF_COLORED;
  DATA->cursor_grop = c->current;
  DATA->cursor_state = 1;  /* cursor starts in the "on" phase of its blink */
  DATA->cursor_grop->param[0] = VID(color_pgtohwr)(CURSORCOLOR_ON);

  /* Set cursor height to that of typical char */
  textedit_str_size(self, NULL, 0, &tw, &DATA->cursor_grop->r.h);
  DATA->cursor_grop->r.x = DATA->border_h;
  DATA->cursor_grop->r.y = DATA->border_v;

  e = text_backend_build( DATA, w, h);
  // errorcheck;
  // return success;
}
/**
 * Insert an (object_id, handle_hash) -> filehandle entry into the
 * handle-mapping hash table without overwriting an existing entry.
 *
 * Returns HANDLEMAP_SUCCESS, HANDLEMAP_EXISTS if the key is already
 * present, HANDLEMAP_INVALID_PARAM if datalen is too large, or
 * HANDLEMAP_SYSTEM_ERROR / HANDLEMAP_HASHTABLE_ERROR on failure.
 */
int handle_mapping_hash_add(hash_table_t *p_hash,
			    uint64_t object_id,
			    unsigned int handle_hash,
			    const void *data, uint32_t datalen)
{
	struct gsh_buffdesc key, val;
	digest_pool_entry_t *dig;
	handle_pool_entry_t *hdl;
	int rc;

	if (datalen >= sizeof(hdl->fh_data))
		return HANDLEMAP_INVALID_PARAM;

	dig = digest_alloc();
	if (dig == NULL)
		return HANDLEMAP_SYSTEM_ERROR;

	hdl = handle_alloc();
	if (hdl == NULL) {
		digest_free(dig);
		return HANDLEMAP_SYSTEM_ERROR;
	}

	/* Fill in the key (digest) and value (raw filehandle copy). */
	dig->nfs23_digest.object_id = object_id;
	dig->nfs23_digest.handle_hash = handle_hash;
	memset(hdl->fh_data, 0, sizeof(hdl->fh_data));
	memcpy(hdl->fh_data, data, datalen);
	hdl->fh_len = datalen;

	key.addr = (caddr_t) dig;
	key.len = sizeof(digest_pool_entry_t);
	val.addr = (caddr_t) hdl;
	val.len = sizeof(handle_pool_entry_t);

	rc = hashtable_test_and_set(handle_map_hash, &key, &val,
				    HASHTABLE_SET_HOW_SET_NO_OVERWRITE);
	if (rc == HASHTABLE_SUCCESS)
		return HANDLEMAP_SUCCESS;

	/* Insertion failed: the table did not take ownership, so release
	 * both pool entries before reporting the error. */
	digest_free(dig);
	handle_free(hdl);

	if (rc == HASHTABLE_ERROR_KEY_ALREADY_EXISTS)
		return HANDLEMAP_EXISTS;

	LogCrit(COMPONENT_FSAL,
		"ERROR %d inserting entry to handle mapping hash table", rc);
	return HANDLEMAP_HASHTABLE_ERROR;
}
/* Close a handle-backed socket. If defer_close is set (presumably while
 * a callback on this socket is still in flight — confirm against the
 * callers), only record the request and let the later path finish the
 * job; otherwise free the handle wrappers, close the OS handles (once,
 * if both directions share one), clear both buffer chains and free. */
static void sk_handle_close(Socket s)
{
    Handle_Socket hs = (Handle_Socket) s;

    if (hs->defer_close) {
        hs->deferred_close = TRUE;
        return;
    }

    handle_free(hs->send_h);
    handle_free(hs->recv_h);
    CloseHandle(hs->send_H);
    if (hs->recv_H != hs->send_H)
        CloseHandle(hs->recv_H);
    bufchain_clear(&hs->inputdata);
    bufchain_clear(&hs->stderrdata);
    sfree(hs);
}
/* Close a handle-backed socket (MPEXT build): notify the core first via
 * do_select, then release the handle wrappers, close the OS handles
 * (only once if shared), clear both buffer chains, and free. */
static void sk_handle_close(Socket s)
{
    Handle_Socket hs = (Handle_Socket) s;

#ifdef MPEXT
    // WinSCP core uses do_select as signalization of connection up/down
    do_select(hs->plug, INVALID_SOCKET, 0);
#endif

    handle_free(hs->send_h);
    handle_free(hs->recv_h);
    CloseHandle(hs->send_H);
    if (hs->recv_H != hs->send_H)
        CloseHandle(hs->recv_H);
    bufchain_clear(&hs->inputdata);
    bufchain_clear(&hs->stderrdata);
    sfree(hs);
}
/* Free the buffer associated with handle_num.
 *
 * Block lengths are stored in units of union buflib_data: positive val
 * means an allocated block, negative val a free block of |val| units.
 * Freeing merges the block with adjacent free space on both sides,
 * updates alloc_end / first_free_block, and releases the handle. */
void buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block = ctx->first_free_block,
                      *next_block = block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one. */
    while (next_block < freed_block)
    {
        block = next_block;
        next_block += abs(block->val);
    }
    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is empty, we can combine them.
     * (block->val is negative and freed_block->val positive, so the
     * subtraction grows the free run.) */
    if (next_block == freed_block && next_block != block && block->val < 0)
        block->val -= freed_block->val;
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it. */
    else
    {
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact. */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    /* Release the handle table entry; clearing alloc afterwards marks the
     * entry dead for anything still scanning the table. */
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search. */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;
    /* if the handle is the one acquired with buflib_alloc_maximum()
     * unlock buflib_alloc() as part of the shrink */
    if (ctx->handle_lock == handle_num)
        ctx->handle_lock = 0;
}
/* Destructor for the textedit widget. Releases the handle to the
 * backing bitmap (never the bitmap object itself — the handle system
 * owns that), tears down the text backend, then frees the private data
 * and the widget's divtree nodes. */
void textedit_remove(struct widget *self)
{
  handle_free(self->owner, DATA->bit_h);
  text_backend_destroy(DATA);
  g_free(DATA);
  r_divnode_free(self->in);
}
/* Release a crypto context: validate the handle, free its data record,
 * clear the table slot, and return the handle to the allocator. */
void crypto_release (Crypto_t h)
{
	CryptoData_t	*dp;

	/* Reject out-of-range or already-released handles. */
	if (h < 1 || h > cur_handles || (dp = (*crypto) [h]) == NULL) {
		warn_printf ("Crypto: Invalid handle (%u)!", h);
		return;
	}
	xfree (dp);
	(*crypto) [h] = NULL;
	handle_free (handles, h);
}
/* Shut down the GL driver: release the input filter handle (owner -1),
 * free the display renderer, unload the continuous-update inlib, and
 * destroy the OSD font if one was created. */
void gl_close(void)
{
  if (gl_global.h_infilter)
    handle_free(-1, gl_global.h_infilter);

  if (gl_global.display_rend) {
    g_free(gl_global.display_rend);
    gl_global.display_rend = NULL;
  }

  unload_inlib(gl_global.continuous_inlib);

  if (gl_global.osd_font)
    font_descriptor_destroy(gl_global.osd_font);
}
/* VFS close hook: stop the background reader if it is running, destroy
 * the neon request and session if they exist, and free the handle.
 * Always reports success. */
int neon_vfs_fclose_impl (VFSFile * file)
{
    struct neon_handle * nh = vfs_get_handle (file);

    if (nh->reader_status.reading)
        kill_reader (nh);

    if (nh->request)
        ne_request_destroy (nh->request);
    if (nh->session)
        ne_session_destroy (nh->session);

    handle_free (nh);
    return 0;
}
/* VFS open hook: allocate a neon handle for 'path' and try to open it.
 * On failure the handle is freed and NULL returned; 'mode' is accepted
 * for interface compatibility but not consulted here. */
void * neon_vfs_fopen_impl (const char * path, const char * mode)
{
    struct neon_handle * nh = handle_init ();

    _DEBUG ("<%p> Trying to open '%s' with neon", (void *) nh, path);
    nh->url = g_strdup (path);

    if (open_handle (nh, 0) != 0)
    {
        _ERROR ("<%p> Could not open URL", (void *) nh);
        handle_free (nh);
        return NULL;
    }

    return nh;
}
/* libuv listen callback: on a successful incoming connection, allocate
 * and zero a client stream, register it for TCP I/O, accept it from the
 * listening stream, and start reading. The client is released if the
 * accept itself fails. */
static void tcp_accept(uv_stream_t *master, int status)
{
	uv_stream_t *client;

	if (status != 0)
		return;

	client = handle_alloc(master->loop, sizeof(*client));
	if (client == NULL)
		return;

	memset(client, 0, sizeof(*client));
	io_create(master->loop, (uv_handle_t *)client, SOCK_STREAM);

	if (uv_accept(master, client) != 0) {
		handle_free((uv_handle_t *)client);
		return;
	}

	io_start_read((uv_handle_t *)client);
}
void pcm_out_sdl_close(int handle) { SDL_CHAN *ch; (void) handle; if (chan_list) { // find the one with a handle ch = list_element_head(chan_list); while ((ch) && (ch->handle != handle)) ch=node_next(ch); // kill it if (ch) { handle_free(ch->handle); list_remove(chan_list, ch); } } }
/* Free the buffer associated with handle_num.
 *
 * Block lengths are in units of union buflib_data: positive val means
 * allocated, negative val a free block of |val| units. The freed block
 * is merged with free neighbours on both sides, alloc_end is pulled
 * back if the tail becomes free, and the handle is released.
 * Returns 0 unconditionally. */
int buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one. */
    block = find_block_before(ctx, freed_block, true);
    if (block)
    {
        /* Previous block is free: absorb the freed block into it
         * (block->val is negative, freed_block->val positive). */
        block->val -= freed_block->val;
    }
    else
    {
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it. */
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact. */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    /* Release the handle entry; clearing alloc afterwards marks it dead. */
    handle_free(ctx, handle);
    handle->alloc = NULL;
    return 0; /* unconditionally */
}
/* This is called whenever the widget is attached, after the attaching
 * process is complete. We use this as a hook for managing the tab and tab_bar.
 *
 * Flow: detach our tab, drop our tab bar if it became empty, then — if we
 * are being attached (parent != NULL) — either adopt a sibling tabpage's
 * bar or create a fresh toolbar, reattach the tab in the right position,
 * and activate ourselves when we created the bar. */
g_error tabpage_post_attach(struct widget *self, struct widget *parent, int rship)
{
  struct widget *tab, *tab_bar, *parent_tab;
  g_error e;
  handle existing_bar = 0;

  /* Dereference handles */
  e = rdhandle((void**)&tab, PG_TYPE_WIDGET, self->owner, DATA->htab);
  errorcheck;
  e = rdhandle((void**)&tab_bar, PG_TYPE_WIDGET, self->owner, DATA->htab_bar);
  errorcheck;

  /* Detach our tab. It will be reattached later if necessary */
  e = widget_derive(&tab, &DATA->htab, tab->type, NULL, 0, 0, self->owner);
  errorcheck;

  /* If we already have a tab bar but it's empty, delete it */
  if (DATA->htab_bar && !widget_traverse(tab_bar, PG_TRAVERSE_CHILDREN, 0)) {
    handle_free(self->owner, DATA->htab_bar);
    DATA->htab_bar = 0;
  }

  /* Are we being attached rather than detached? */
  if (parent) {

    /* If we're attaching before or after another tab page, share its tab bar */
    if (parent->type==PG_WIDGET_TABPAGE &&
        (rship==PG_DERIVE_BEFORE || rship==PG_DERIVE_AFTER)) {
      /* NOTE(review): this shadowed 'self' deliberately redirects the DATA
       * macro to the PARENT tabpage's private data for the next line —
       * presumably DATA expands in terms of 'self'; confirm. */
      struct widget *self = parent;
      existing_bar = DATA->htab_bar;
    }
    /* NOTE(review): this unconditionally overwrites our htab_bar; if we
     * still held a non-empty bar and are not sharing, that handle appears
     * to be dropped without handle_free — verify no leak is possible. */
    DATA->htab_bar = existing_bar;

    /* Otherwise, create a new tab bar */
    if (!DATA->htab_bar) {
      tab_bar = NULL;
      e = widget_derive(&tab_bar, &DATA->htab_bar, PG_WIDGET_TOOLBAR,
                        self, self->h, PG_DERIVE_BEFORE, self->owner);
      errorcheck;
      e = widget_set(tab_bar, PG_WP_THOBJ, PGTH_O_TAB_BAR);
      errorcheck;
      tab_bar->auto_orientation = PG_AUTO_SIDE;
    }

    /* If we're attaching on an existing bar, attach the tab in the same
     * relative order as the tab pages themselves.
     */
    parent_tab = NULL;
    rdhandle((void**)&parent_tab, PG_TYPE_WIDGET, self->owner,
             widget_get(parent, PG_WP_TAB));
    if (existing_bar && parent_tab) {
      e = widget_derive(&tab, &DATA->htab, tab->type, parent_tab,
                        parent_tab->h, rship, self->owner);
      errorcheck;
    }
    /* Otherwise just put it in our tab bar directly */
    else {
      e = widget_derive(&tab, &DATA->htab, tab->type, tab_bar,
                        tab_bar->h, PG_DERIVE_INSIDE, self->owner);
      errorcheck;
    }

    /* If we were here first, make ourselves active */
    if (!existing_bar) {
      e = widget_set(self, PG_WP_ON, 1);
      errorcheck;
    }
  }
  return success;
}
/* Look up the handle associated with the pointer, and delete it safely */ g_error pointer_free(int owner, void *ptr) { return handle_free(owner, hlookup(ptr,NULL)); }
/* Allocate 'size' bytes from the buflib pool under the given name, with
 * optional move/shrink callbacks in 'ops'.
 *
 * Returns the handle index (a positive integer), -1 if no handle-table
 * entry could be obtained, or -2 if no block of sufficient size exists
 * even after compaction.
 *
 * Layout of an allocated block (units of union buflib_data):
 *   [0] total length, [1] back-pointer to handle, [2] ops pointer,
 *   [3..] NUL-padded name, then a slot holding the header length,
 *   then the user data (handle->alloc points here). */
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                    struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    /* Round up to whole buflib_data units and add 4 objects for alloc len,
     * pointer to handle table entry and name length, and the ops pointer */
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeded, it may be
         * possible to get a handle by trying again. */
        union buflib_data* last_block = find_block_before(ctx,
                                            ctx->alloc_end, false);
        struct buflib_callbacks* ops = last_block[2].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {   /* the last one isn't shrinkable
             * make room in front of a shrinkable and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else if (ops && ops->shrink_callback)
        {   /* the last is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block()
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }
buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = find_first_free(ctx);;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction. */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        } else {
            /* Roll back the handle we took before giving up. */
            handle->val=1;
            handle_free(ctx, handle);
            return -2;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);

    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
/** Wrapper for __builtin_vec_delete() (C++ operator delete[]): the
 *  intercepted deallocation is forwarded to DRD's common free handler. */
static void drd___builtin_vec_delete(ThreadId tid, void* p)
{
   handle_free(tid, p);
}
/** Wrapper for free(): the intercepted deallocation is forwarded to
 *  DRD's common free handler. */
static void drd_free(ThreadId tid, void* p)
{
   handle_free(tid, p);
}
/* Allocate 'size' bytes from the buflib pool under the given name, with
 * optional move/shrink callbacks in 'ops' (default callbacks are used
 * when ops is NULL).
 *
 * Returns the handle index (a positive integer), or 0 on failure.
 *
 * NOTE(review): the YIELD() busy-wait on ctx->handle_lock is not an
 * atomic acquire — presumably safe under this scheduler's cooperative
 * threading model; confirm before relying on it elsewhere. */
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                    struct buflib_callbacks *ops)
{
    /* busy wait if there's a thread owning the lock */
    while (ctx->handle_lock != 0) YIELD();

    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    /* Round up to whole buflib_data units and add 4 objects for alloc len,
     * pointer to handle table entry and name length, and the ops pointer */
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeded, it may be
         * possible to get a handle by trying again. */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {
            /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK |
                                10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data)
                    == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }
buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction. */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        if (buflib_compact_and_shrink(ctx,
                (size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK))
        {
            goto buffer_alloc;
        } else {
            /* Roll back the handle we took before giving up. */
            handle->val=1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops ?: &default_callbacks;
    strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block. */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
/* Shut down the Zaurus driver: release the input-interface handle
 * (owner -1) and close the touchscreen, buzzer and LED descriptors. */
void zaurus_close(void)
{
  handle_free(-1, zaurus_if);

  close(zaurus_ts_fd);
  close(zaurus_buz_fd);
  close(zaurus_led_fd);
}
/* Destructor for the tabpage widget: release the tab handle, free the
 * private data, and detach from the parent widget. */
void tabpage_remove(struct widget *self)
{
  handle_free(self->owner, DATA->htab);
  g_free(DATA);
  WIDGET_REMOVE_PARENT;
}