dm_descriptor_t *
dm_get_associated_descriptors(dm_descriptor_t desc, dm_desc_type_t type,
    int *errp)
{
	descriptor_t **descs = NULL;
	descriptor_t *dp;

	dp = (descriptor_t *)(uintptr_t)desc;

	/*
	 * Take the write lock: the per-type *_get_assoc_descriptors()
	 * calls below may add newly created descriptors to the cache.
	 */
	cache_wlock();

	if (!cache_is_valid_desc(dp)) {
		cache_unlock();
		*errp = EBADF;
		return (NULL);
	}

	/* verify that the descriptor is still valid */
	if (dp->p.generic == NULL) {
		cache_unlock();
		*errp = ENODEV;
		return (NULL);
	}

	switch (dp->type) {
	case DM_DRIVE:
		descs = drive_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_BUS:
		descs = bus_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_CONTROLLER:
		descs = controller_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_MEDIA:
		descs = media_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_SLICE:
		descs = slice_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_PARTITION:
		descs = partition_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_PATH:
		descs = path_get_assoc_descriptors(dp, type, errp);
		break;
	case DM_ALIAS:
		descs = alias_get_assoc_descriptors(dp, type, errp);
		break;
	default:
		*errp = EINVAL;
		break;
	}

	cache_unlock();

	return (ptr_array_to_desc_array(descs, errp));
}
nvlist_t *
dm_get_attributes(dm_descriptor_t desc, int *errp)
{
	descriptor_t *dp;
	nvlist_t *attrs = NULL;

	dp = (descriptor_t *)(uintptr_t)desc;

	/* Attribute lookups do not modify the cache; a read lock suffices. */
	cache_rlock();

	if (!cache_is_valid_desc(dp)) {
		cache_unlock();
		*errp = EBADF;
		return (NULL);
	}

	/* verify that the descriptor is still valid */
	if (dp->p.generic == NULL) {
		cache_unlock();
		*errp = ENODEV;
		return (NULL);
	}

	switch (dp->type) {
	case DM_DRIVE:
		attrs = drive_get_attributes(dp, errp);
		break;
	case DM_BUS:
		attrs = bus_get_attributes(dp, errp);
		break;
	case DM_CONTROLLER:
		attrs = controller_get_attributes(dp, errp);
		break;
	case DM_MEDIA:
		attrs = media_get_attributes(dp, errp);
		break;
	case DM_SLICE:
		attrs = slice_get_attributes(dp, errp);
		break;
	case DM_PARTITION:
		attrs = partition_get_attributes(dp, errp);
		break;
	case DM_PATH:
		attrs = path_get_attributes(dp, errp);
		break;
	case DM_ALIAS:
		attrs = alias_get_attributes(dp, errp);
		break;
	default:
		*errp = EINVAL;
		break;
	}

	cache_unlock();

	return (attrs);
}
void dhcp_cache_flush_old(void)
{
	cache_wrlock();

	cache_now = time(NULL);
	if (cache_last_flush + CACHE_FLUSH_PERIOD > cache_now) {
		/* Not time for a flush yet. */
		cache_unlock();
		return;
	}

	log_wr(DLOG, "Flushing cache: last flush ts - %lu, flush period - %lu, now is %lu.",
		cache_last_flush, CACHE_FLUSH_PERIOD, cache_now);

	size_t num_del = 0;
	dhcp_fqueue_t *deleting_queue = search_obsolete_nodes(cache->root->left, NULL);
	char str_ether[STR_ETHER_ALEN + 1];
	char str_ipaddr[2][IP4_MAXSTR_ALEN + 1];
	dhcp_fqueue_t *q_ptr;
	dhcp_cache_node_t *del_node;
	uint32_t gw_ipaddr;

	/* Remove each obsolete node from the tree, logging what was dropped. */
	while (deleting_queue) {
		del_node = deleting_queue->node->info;
		etheraddr_bin_to_str(del_node->cli_ethaddr, str_ether);
		iptos(del_node->cached_response.dhcp_data.you_iaddr.s_addr, str_ipaddr[0]);
		gw_ipaddr = del_node->gw_ipaddr;
		RBDelete(cache, deleting_queue->node);
		log_wr(DLOG, "Cache node for %s/%s%s%s%s deleted.", str_ether, str_ipaddr[0],
			gw_ipaddr ? " (relay: " : "",
			gw_ipaddr ? iptos(gw_ipaddr, str_ipaddr[1]) : "",
			gw_ipaddr ? ")" : "");
		++num_del;
		q_ptr = deleting_queue->next;
		free(deleting_queue);
		deleting_queue = q_ptr;
	}

	/* %zu: num_del is a size_t */
	log_wr(DLOG, "Cache flushed. Total %zu nodes deleted.", num_del);
	cache_last_flush = cache_now;
	cache_unlock();
}
/**
 * @param selidx position of the file in filelist
 * @param where  type of the file location
 */
static int cache_add_by_selidx(dword selidx, int where)
{
	t_fs_filetype type;
	cache_image_t img;
	const char *archname;
	const char *filename;
	dword filesize;

	archname = config.shortpath;

	if (where == scene_in_dir) {
		filename = filelist[selidx].shortname->ptr;
	} else {
		filename = filelist[selidx].compname->ptr;
	}

	filesize = filelist[selidx].data3;
	type = fs_file_get_type(filename);

	if (!fs_is_image(type)) {
		return -1;
	}

	if (cache_get(archname, filename) != NULL) {
		dbg_printf(d, "SERVER: %s: Image %s duplicate load, FIXME", __func__, filename);
		return -1;
	}

	cache_lock();

	memset(&img, 0, sizeof(img));
	img.archname = archname;
	img.filename = filename;
	img.where = where;
	img.status = CACHE_INIT;
	img.selidx = selidx;
	img.filesize = filesize;

	if (ccacher.caches_size < ccacher.caches_cap) {
		ccacher.caches[ccacher.caches_size] = img;
		ccacher.caches_size++;
		cacher_cleared = false;
	} else {
		dbg_printf(d, "SERVER: cannot add cache any more: size %u cap %u",
				   ccacher.caches_size, ccacher.caches_cap);
		cache_unlock();
		return -1;
	}

	cache_unlock();

	return 0;
}
static int start_cache(dword selidx)
{
	int re;
	dword pos;
	dword size;

	if (ccacher.first_run) {
		ccacher.first_run = false;
	} else {
		SceUInt timeout = 10000;

		// wait until user notify cache delete
		re = xrKernelWaitEventFlag(cache_del_event, CACHE_EVENT_DELETED,
								   PSP_EVENT_WAITAND, NULL, &timeout);

		if (re == SCE_KERNEL_ERROR_WAIT_TIMEOUT) {
			return 0;
		}

		xrKernelSetEventFlag(cache_del_event, CACHE_EVENT_UNDELETED);
	}

	cache_lock();
	pos = selidx;
	size = ccacher.caches_size;

	// Skip past the images that are already cached
	while (size-- > 0) {
		pos = cache_get_next_image(pos, ccacher.isforward);
	}

	// Number of free slots we can still fill
	re = min(ccacher.caches_cap, count_img()) - ccacher.caches_size;
	dbg_printf(d, "SERVER: start pos %u selidx %u caches_size %u re %u",
			   (unsigned) pos, (unsigned) selidx,
			   (unsigned) ccacher.caches_size, (unsigned) re);

	if (re == 0) {
		cache_unlock();
		return 0;
	}

	// dbg_printf(d, "SERVER: Wait for new selidx: memory usage %uKB", (unsigned) ccacher.memory_usage / 1024);
	// dbg_printf(d, "SERVER: %d images to cache, selidx %u, caches_size %u", re, (unsigned)selidx, (unsigned)ccacher.caches_size);

	while (re-- > 0) {
		dbg_printf(d, "SERVER: add cache image %u", (unsigned) pos);
		cache_add_by_selidx(pos, where);
		pos = cache_get_next_image(pos, ccacher.isforward);
	}

	cache_unlock();

	return re;
}
cache_t *lru_cache_load(void *arg, inip_file_t *fd, char *grp, data_attr_t *da, int timeout)
{
	cache_t *c;
	cache_lru_t *cp;
	int dt;

	if (grp == NULL) grp = "cache-lru";

	//** Create the default structure
	c = lru_cache_create(arg, da, timeout);

	//** Now load the config options, keeping the current values as defaults
	cp = (cache_lru_t *)c->fn.priv;
	cache_lock(c);
	cp->max_bytes = inip_get_integer(fd, grp, "max_bytes", cp->max_bytes);
	cp->dirty_fraction = inip_get_double(fd, grp, "dirty_fraction", cp->dirty_fraction);
	cp->dirty_bytes_trigger = cp->dirty_fraction * cp->max_bytes;
	c->default_page_size = inip_get_integer(fd, grp, "default_page_size", c->default_page_size);
	dt = inip_get_integer(fd, grp, "dirty_max_wait", apr_time_sec(cp->dirty_max_wait));
	cp->dirty_max_wait = apr_time_make(dt, 0);
	c->max_fetch_fraction = inip_get_double(fd, grp, "max_fetch_fraction", c->max_fetch_fraction);
	c->max_fetch_size = c->max_fetch_fraction * cp->max_bytes;
	c->write_temp_overflow_fraction = inip_get_double(fd, grp, "write_temp_overflow_fraction", c->write_temp_overflow_fraction);
	c->write_temp_overflow_size = c->write_temp_overflow_fraction * cp->max_bytes;
	c->n_ppages = inip_get_integer(fd, grp, "ppages", c->n_ppages);
	log_printf(0, "COP size=" XOT "\n", c->write_temp_overflow_size);
	cache_unlock(c);

	return(c);
}
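/*
 * For reference, a minimal sketch of the INI section lru_cache_load() reads.
 * The key names come straight from the inip_get_* calls above; the values
 * are illustrative assumptions, not the shipped defaults (those live in
 * lru_cache_create()).
 *
 * [cache-lru]
 * max_bytes = 1073741824          ; hard cap on cached page bytes (1 GiB)
 * dirty_fraction = 0.25           ; dirty_bytes_trigger = 0.25 * max_bytes
 * default_page_size = 65536
 * dirty_max_wait = 10             ; seconds; converted via apr_time_make()
 * max_fetch_fraction = 0.1        ; max_fetch_size = 0.1 * max_bytes
 * write_temp_overflow_fraction = 0.01
 * ppages = 32
 */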
void dbg_dump_cache(void)
{
	cache_image_t *p;
	dword c;

	cache_lock();

	/* Count the entries that have finished loading, successfully or not. */
	for (c = 0, p = ccacher.caches; p != ccacher.caches + ccacher.caches_size; ++p) {
		if (p->status == CACHE_OK || p->status == CACHE_FAILED)
			c++;
	}

	dbg_printf(d, "CLIENT: Dumping cache[%u] %u/%ukb, %u finished",
			   ccacher.caches_size, (unsigned) ccacher.memory_usage / 1024,
			   (unsigned) get_free_mem() / 1024, (unsigned) c);

	for (p = ccacher.caches; p != ccacher.caches + ccacher.caches_size; ++p) {
		dbg_printf(d, "%d: %u st %u res %d mem %lukb",
				   (int) (p - ccacher.caches), (unsigned) p->selidx, p->status,
				   p->result, p->width * p->height * sizeof(pixel) / 1024L);
	}

	cache_unlock();
}
cache_page_t *lru_create_empty_page(cache_t *c, segment_t *seg, int doblock)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s = (cache_segment_t *)seg->priv;
	ex_off_t max_bytes, bytes_to_free;
	cache_page_t *p = NULL;
	int qend;

	cache_lock(c);

	qend = 0;
	do {
		max_bytes = _lru_max_bytes(c);
		bytes_to_free = s->page_size + cp->bytes_used - max_bytes;
		log_printf(15, "lru_create_empty_page: max_bytes=" XOT " used=" XOT " bytes_to_free=" XOT " doblock=%d\n", max_bytes, cp->bytes_used, bytes_to_free, doblock);
		if (bytes_to_free > 0) {
			bytes_to_free = _lru_free_mem(c, seg, bytes_to_free);
			if ((doblock == 1) && (bytes_to_free > 0)) _lru_wait_for_page(c, seg, qend);
			qend = 1;
		}
	} while ((doblock == 1) && (bytes_to_free > 0));

	if (bytes_to_free <= 0) p = _lru_new_page(c, seg);

	cache_unlock(c);

	return(p);
}
struct vnode *
getsynthvnode(const char *devname)
{
	struct vnode *vp;
	struct nchandle nch;
	struct nlookupdata nd;
	struct ucred *cred = proc0.p_ucred;
	int error;

	KKASSERT(synth_inited != 0);
	KKASSERT(synth_mp != NULL);
	KKASSERT(synth_mp->mnt_ncmountpt.mount != NULL);

	/* Sync devfs/disks twice to make sure all devices are around */
	if (synth_synced < 2) {
		sync_devs();
		++synth_synced;
	}

	error = nlookup_init_root(&nd, devname, UIO_SYSSPACE, NLC_FOLLOW,
				  cred, &synth_mp->mnt_ncmountpt,
				  &synth_mp->mnt_ncmountpt);
	if (error) {
		panic("synth: nlookup_init_root failed with %d", error);
		/* NOTREACHED */
	}

	error = nlookup(&nd);
	if (error == 0) {
		if (nd.nl_nch.ncp->nc_vp == NULL) {
			kprintf("synth: nc_vp == NULL\n");
			nlookup_done(&nd);	/* don't leak the locked nch */
			return (NULL);
		}
		nch = nd.nl_nch;
		cache_zero(&nd.nl_nch);
	}
	nlookup_done(&nd);

	if (error) {
		if (error != ENOENT) {
			/* Don't bother warning about ENOENT */
			kprintf("synth: nlookup of %s failed with %d\n",
				devname, error);
		}
		return (NULL);
	}

	vp = nch.ncp->nc_vp;

	/* A VX locked & refd vnode must be returned. */
	error = vget(vp, LK_EXCLUSIVE);
	cache_unlock(&nch);

	if (error) {
		kprintf("synth: could not vget vnode\n");
		return (NULL);
	}

	return (vp);
}
dhcp_full_packet_t *dhcp_cache_find(const dhcp_parsed_message_t *request,
	dhcp_full_packet_t *response, size_t *dhcp_data_len)
{
	/* Lock cache for read */
	cache_rdlock();

	/* Build the search key from the interface, relay and client addresses */
	dhcp_cache_node_t s_data;
	s_data.if_ipaddr = request->dhcp_dev->ipaddr;
	s_data.gw_ipaddr = request->raw_dhcp_msg->gw_iaddr.s_addr;
	s_data.cli_ethaddr = (typeof(s_data.cli_ethaddr))request->raw_dhcp_msg->cli_hwaddr;
	s_data.header_ethaddr = (typeof(s_data.header_ethaddr))request->from_ether;

	rb_red_blk_node *f_node;
	dhcp_cache_node_t *cached_node = NULL;

	if ((f_node = RBExactQuery(cache, &s_data))) {
		/* Hit: copy the cached response out while still holding the lock */
		cached_node = f_node->info;
		*dhcp_data_len = cached_node->dhcp_data_len;
		memcpy(response, &cached_node->cached_response, *dhcp_data_len);
	} else {
		response = NULL;
		*dhcp_data_len = 0;
	}

	cache_unlock();
	return response;
}
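/*
 * A minimal caller sketch, assuming the surrounding daemon's types: on a
 * cache hit the response buffer is filled and its length returned through
 * dhcp_data_len; on a miss the function returns NULL and the request has
 * to be processed the slow way. send_dhcp_reply_sketch() is a hypothetical
 * stand-in for the transport layer, which is not shown here.
 */
static void handle_request_sketch(const dhcp_parsed_message_t *request)
{
	dhcp_full_packet_t reply;
	size_t len;

	if (dhcp_cache_find(request, &reply, &len) != NULL) {
		/* Fast path: replay the cached packet. */
		send_dhcp_reply_sketch(&reply, len);	/* hypothetical sender */
	} else {
		/* Slow path: build a fresh response and let the cache learn it. */
	}
}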
void remove_cache_item(const char *entry_path, const char *subdir_path,
		const char *subdir_name, const char *entry_name)
{
	/* Unlink the expired file, and the artist directory if it is empty */
	cache_lock();
	unlink(entry_path);
	rmdir(subdir_path);

	/*
	 * Remove any scaled copies of this file, plus parent directories
	 * that are now empty
	 */
	char cache_root_path[PATH_MAX];
	make_cache_root_path(cache_root_path, PATH_MAX);

	struct dirent **scaled_dirs = NULL;
	const int scaled_dirs_count = scandir(cache_root_path, &scaled_dirs,
			filter_scaled_dirs, NULL);

	/* Use a signed index: scandir() returns -1 on error. */
	for (int i = 0; i < scaled_dirs_count; i++) {
		char scaled_entry_path[PATH_MAX];

		if (snprintf(scaled_entry_path, PATH_MAX, "%s%s/%s/%s",
				cache_root_path, scaled_dirs[i]->d_name,
				subdir_name, entry_name) < PATH_MAX) {
			unlink(scaled_entry_path);
			/* dirname() may modify its argument; the path is not reused */
			char *scaled_entry_dir = dirname(scaled_entry_path);
			rmdir(scaled_entry_dir);
			rmdir(dirname(scaled_entry_dir));
		}
		free(scaled_dirs[i]);
	}
	free(scaled_dirs);
	cache_unlock();
}
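/*
 * filter_scaled_dirs() is referenced above but not shown. A plausible
 * sketch follows, assuming scaled copies live in directories with purely
 * numeric names ("64/", "128/", ...) under the cache root; the real
 * predicate may key on a prefix or a configured list instead.
 */
static int filter_scaled_dirs(const struct dirent *entry)
{
	const char *p = entry->d_name;

	if (*p == '\0' || *p == '.')	/* skip ".", ".." and hidden entries */
		return 0;
	while (*p) {			/* accept purely numeric names only */
		if (*p < '0' || *p > '9')
			return 0;
		p++;
	}
	return 1;
}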
int lru_cache_destroy(cache_t *c)
{
	apr_status_t value;
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;

	//** Shutdown the dirty thread
	cache_lock(c);
	c->shutdown_request = 1;
	apr_thread_cond_signal(cp->dirty_trigger);
	cache_unlock(c);

	apr_thread_join(&value, cp->dirty_thread);  //** Wait for it to complete

	cache_base_destroy(c);

	free_stack(cp->stack, 0);
	free_stack(cp->waiting_stack, 0);
	free_stack(cp->pending_free_tasks, 0);

	destroy_pigeon_coop(cp->free_pending_tables);
	destroy_pigeon_coop(cp->free_page_tables);

	free(cp);
	free(c);

	return(0);
}
/**
 * Delete a cache entry and free its resources.
 */
static int cache_delete(size_t pos)
{
	cache_image_t *p;

	cache_lock();
	p = &ccacher.caches[pos];

	if (p->data != NULL) {
		dbg_printf(d, "%s: data 0x%08x", __func__, (unsigned) p->data);
		free(p->data);
		p->data = NULL;
	}

	if (p->exif_array) {
		buffer_array_free(p->exif_array);
		p->exif_array = NULL;
	}

	if (p->status == CACHE_OK) {
		ccacher.memory_usage -= p->width * p->height * sizeof(pixel);
	}

	/* Close the gap left by the removed entry. */
	memmove(&ccacher.caches[pos], &ccacher.caches[pos + 1],
			(ccacher.caches_size - pos - 1) * sizeof(ccacher.caches[0]));
	ccacher.caches_size--;
	cache_unlock();

	return 0;
}
static void cache_clear(void)
{
	int i;

	cache_lock();

	for (i = 0; i < ccacher.caches_size; ++i) {
		if (ccacher.caches[i].data != NULL) {
			dbg_printf(d, "%s: %d data 0x%08x", __func__, i,
					   (unsigned) ccacher.caches[i].data);
			free(ccacher.caches[i].data);
			ccacher.caches[i].data = NULL;
		}

		if (ccacher.caches[i].exif_array) {
			buffer_array_free(ccacher.caches[i].exif_array);
			ccacher.caches[i].exif_array = NULL;
		}

		if (ccacher.caches[i].status == CACHE_OK) {
			ccacher.memory_usage -= ccacher.caches[i].width *
				ccacher.caches[i].height * sizeof(pixel);
		}
	}

	ccacher.caches_size = 0;
	cacher_cleared = true;
	cache_unlock();
}
int cache_delete_first(void)
{
	cache_lock();

	if (ccacher.caches_size != 0 && ccacher.caches != NULL) {
		int ret;

		ret = cache_delete(0);
		/* Wake the server thread blocked in start_cache(). */
		xrKernelSetEventFlag(cache_del_event, CACHE_EVENT_DELETED);
		cache_unlock();

		return ret;
	}

	cache_unlock();

	return -1;
}
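/*
 * How the two threads above hand off, as an illustrative sketch (not code
 * from the original source): the client consumes the oldest image and
 * deletes it, which raises CACHE_EVENT_DELETED; the server thread blocked
 * in start_cache() then wakes and refills the read-ahead window.
 */
static void client_consume_sketch(void)
{
	for (;;) {
		/* ... display ccacher.caches[0] once its status is CACHE_OK ... */

		/* Free the slot and signal the server via cache_del_event. */
		if (cache_delete_first() != 0)
			break;	/* cache empty: nothing left to consume */
	}
}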
static HRESULT WINAPI IAssemblyCacheImpl_QueryAssemblyInfo(IAssemblyCache *iface,
                                                           DWORD dwFlags,
                                                           LPCWSTR pszAssemblyName,
                                                           ASSEMBLY_INFO *pAsmInfo)
{
    IAssemblyCacheImpl *cache = impl_from_IAssemblyCache(iface);
    IAssemblyName *asmname, *next = NULL;
    IAssemblyEnum *asmenum = NULL;
    HRESULT hr;

    TRACE("(%p, %d, %s, %p)\n", iface, dwFlags, debugstr_w(pszAssemblyName), pAsmInfo);

    if (pAsmInfo)
    {
        if (pAsmInfo->cbAssemblyInfo == 0)
            pAsmInfo->cbAssemblyInfo = sizeof(ASSEMBLY_INFO);
        else if (pAsmInfo->cbAssemblyInfo != sizeof(ASSEMBLY_INFO))
            return E_INVALIDARG;
    }

    hr = CreateAssemblyNameObject(&asmname, pszAssemblyName,
                                  CANOF_PARSE_DISPLAY_NAME, NULL);
    if (FAILED(hr))
        return hr;

    cache_lock( cache );

    hr = CreateAssemblyEnum(&asmenum, NULL, asmname, ASM_CACHE_GAC, NULL);
    if (FAILED(hr))
        goto done;

    for (;;)
    {
        hr = IAssemblyEnum_GetNextAssembly(asmenum, NULL, &next, 0);
        if (hr != S_OK)
        {
            hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
            goto done;
        }
        hr = IAssemblyName_IsEqual(asmname, next, ASM_CMPF_IL_ALL);
        if (hr == S_OK) break;
    }

    if (!pAsmInfo)
        goto done;

    hr = IAssemblyName_GetPath(next, pAsmInfo->pszCurrentAssemblyPathBuf, &pAsmInfo->cchBuf);

    pAsmInfo->dwAssemblyFlags = ASSEMBLYINFO_FLAG_INSTALLED;

done:
    IAssemblyName_Release(asmname);
    if (next) IAssemblyName_Release(next);
    if (asmenum) IAssemblyEnum_Release(asmenum);
    cache_unlock( cache );
    return hr;
}
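/*
 * Illustrative caller, assuming the standard fusion entry point
 * CreateAssemblyCache(): probe the GAC for an assembly's install path.
 * The display name and buffer size are example values, not anything the
 * implementation above mandates.
 */
static HRESULT query_gac_sketch(void)
{
    IAssemblyCache *cache;
    ASSEMBLY_INFO info;
    WCHAR path[MAX_PATH];
    HRESULT hr;

    hr = CreateAssemblyCache(&cache, 0);
    if (FAILED(hr)) return hr;

    memset(&info, 0, sizeof(info));
    info.cbAssemblyInfo = sizeof(info);
    info.pszCurrentAssemblyPathBuf = path;
    info.cchBuf = MAX_PATH;

    hr = IAssemblyCache_QueryAssemblyInfo(cache, 0, L"System.Xml", &info);

    IAssemblyCache_Release(cache);
    return hr;  /* on S_OK, 'path' holds the GAC location */
}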
dm_desc_type_t
dm_get_type(dm_descriptor_t desc)
{
	descriptor_t *dp;
	dm_desc_type_t type;

	dp = (descriptor_t *)(uintptr_t)desc;

	cache_rlock();
	if (!cache_is_valid_desc(dp)) {
		cache_unlock();
		return (-1);
	}

	/* Read the type before dropping the lock. */
	type = dp->type;
	cache_unlock();

	return (type);
}
void lru_pages_destroy(cache_t *c, cache_page_t **page, int n_pages, int remove_from_segment)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s;
	page_lru_t *lp;
	cache_page_t *p;
	// cache_cond_t *cache_cond;
	int i;
	int cr, cw, cf, count;

	cache_lock(c);

	log_printf(15, " START cp->bytes_used=" XOT "\n", cp->bytes_used);

	for (i=0; i<n_pages; i++) {
		p = page[i];
		s = (cache_segment_t *)p->seg->priv;

		cr = atomic_get(p->access_pending[CACHE_READ]);
		cw = atomic_get(p->access_pending[CACHE_WRITE]);
		cf = atomic_get(p->access_pending[CACHE_FLUSH]);
		count = cr + cw + cf;

		// cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&(p->cond_pch));
		// if (cache_cond == NULL) {  //** No one listening so free normally
		if (count == 0) {  //** No one is listening
			log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d limbo=%d\n",
				   i, p->offset, segment_id(p->seg), remove_from_segment, cp->limbo_pages);
			cp->bytes_used -= s->page_size;

			lp = (page_lru_t *)p->priv;
			if (lp->ele != NULL) {
				move_to_ptr(cp->stack, lp->ele);
				delete_current(cp->stack, 0, 0);
			}

			if (remove_from_segment == 1) {
				s = (cache_segment_t *)p->seg->priv;
				list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
			}

			if (p->data[0].ptr) free(p->data[0].ptr);
			if (p->data[1].ptr) free(p->data[1].ptr);
			free(lp);
		} else {  //** Someone is listening so trigger them and also clear the bits so it will be released
			atomic_set(p->bit_fields, C_TORELEASE);
			log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d cr=%d cw=%d cf=%d limbo=%d\n",
				   i, p->offset, segment_id(p->seg), remove_from_segment, cr, cw, cf, cp->limbo_pages);
		}
	}

	log_printf(15, " AFTER LOOP cp->bytes_used=" XOT "\n", cp->bytes_used);
	log_printf(15, " END cp->bytes_used=" XOT "\n", cp->bytes_used);

	cache_unlock(c);
}
ex_off_t _lru_force_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free, int check_waiters)
{
	cache_segment_t *s = (cache_segment_t *)page_seg->priv;
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	ex_off_t freed_bytes, bytes_left;
	int top, finished;
	pigeon_coop_hole_t pch;
	cache_cond_t *cache_cond;

	//** I'm holding this coming in but don't need it cause I can touch all segs
	segment_unlock(page_seg);

	top = 0;
	bytes_left = bytes_to_free;
	freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
	finished = 0;

	while ((freed_bytes < bytes_to_free) && (finished == 0)) {  //** Keep trying to mark space as free until I get enough
		if (top == 0) {
			top = 1;
			pch = reserve_pigeon_coop_hole(s->c->cond_coop);
			cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&pch);
			cache_cond->count = 0;

			move_to_bottom(cp->pending_free_tasks);
			insert_below(cp->pending_free_tasks, cache_cond);  //** Add myself to the bottom
		} else {
			push(cp->pending_free_tasks, cache_cond);  //** I go on the top
		}

		log_printf(15, "not enough space so waiting cache_cond=%p freed_bytes=" XOT " bytes_to_free=" XOT "\n", cache_cond, freed_bytes, bytes_to_free);
		//** Now wait until it's my turn
		apr_thread_cond_wait(cache_cond->cond, c->lock);

		bytes_left -= freed_bytes;
		freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
		finished = 1;
	}

	//** Now check if we can handle some waiters
	if (check_waiters == 1) _lru_process_waiters(c);

	//** Reacquire the locks in the proper order
	cache_unlock(c);
	segment_lock(page_seg);  //** Reacquire the lock cause I had it coming in
	cache_lock(c);
	if (top == 1) release_pigeon_coop_hole(s->c->cond_coop, &pch);

	freed_bytes = bytes_to_free - bytes_left;
	//NEW freed_bytes = bytes_left - freed_bytes;

	return(freed_bytes);
}
void cache_set_forward(bool forward)
{
	cache_lock();

	/* Changing direction invalidates the current read-ahead window. */
	if (ccacher.isforward != forward) {
		cache_clear();
		ccacher.first_run = true;
	}

	ccacher.isforward = forward;
	cache_unlock();
}
void cmus_play_file(const char *filename)
{
	struct track_info *ti;

	cache_lock();
	ti = cache_get_ti(filename, 0);
	cache_unlock();

	if (!ti) {
		error_msg("Couldn't get file information for %s\n", filename);
		return;
	}

	player_play_file(ti);
}
void lru_adjust_dirty(cache_t *c, ex_off_t tweak)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;

	cache_lock(c);

	c->stats.dirty_bytes += tweak;

	/* Kick off a flush when the dirty total crosses the trigger. */
	if (c->stats.dirty_bytes > cp->dirty_bytes_trigger) {
		if (cp->flush_in_progress == 0) {
			cp->flush_in_progress = 1;
			apr_thread_cond_signal(cp->dirty_trigger);
		}
	}

	cache_unlock(c);
}
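/*
 * A sketch of the dirty-flush thread these signals wake. The real thread
 * body is not shown in this listing; the outline below is inferred from
 * lru_cache_destroy() (which sets c->shutdown_request, signals
 * cp->dirty_trigger, and joins cp->dirty_thread) and is an assumption,
 * not the shipped implementation.
 */
void *lru_dirty_thread_sketch(apr_thread_t *th, void *data)
{
	cache_t *c = (cache_t *)data;
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;

	cache_lock(c);
	while (c->shutdown_request == 0) {
		//** Sleep until lru_adjust_dirty() or destroy() signals us
		apr_thread_cond_wait(cp->dirty_trigger, c->lock);
		if (c->shutdown_request != 0) break;

		//** ... flush dirty pages until back below the trigger ...
		cp->flush_in_progress = 0;  //** Allow the next wakeup
	}
	cache_unlock(c);
	return(NULL);
}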
void
dm_free_descriptor(dm_descriptor_t desc)
{
	descriptor_t *dp;

	if (desc == NULL) {
		return;
	}
	dp = (descriptor_t *)(uintptr_t)desc;

	cache_wlock();
	cache_free_descriptor(dp);
	cache_unlock();
}
void _lru_wait_for_page(cache_t *c, segment_t *seg, int ontop)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s = (cache_segment_t *)seg->priv;
	lru_page_wait_t pw;
	pigeon_coop_hole_t pch;
	cache_cond_t *cc;
	ex_off_t bytes_free, bytes_needed, n;
	int check_waiters_first;

	check_waiters_first = (ontop == 0) ? 1 : 0;
	pch = reserve_pigeon_coop_hole(c->cond_coop);
	cc = (cache_cond_t *)pigeon_coop_hole_data(&pch);
	pw.cond = cc->cond;
	pw.bytes_needed = s->page_size;

	bytes_free = _lru_max_bytes(c) - cp->bytes_used;
	while (s->page_size > bytes_free) {
		//** Attempt to free pages
		bytes_needed = s->page_size - bytes_free;
		n = _lru_force_free_mem(c, seg, bytes_needed, check_waiters_first);

		if (n > 0) {  //** Didn't make it so wait
			if (ontop == 0) {
				move_to_bottom(cp->waiting_stack);
				insert_below(cp->waiting_stack, &pw);
			} else {
				push(cp->waiting_stack, &pw);
			}

			segment_unlock(seg);  //** Unlock the segment to prevent deadlocks

			apr_thread_cond_wait(pw.cond, c->lock);  //** Wait for the space to become available

			//** Have to reacquire both locks in the correct order
			cache_unlock(c);
			segment_lock(seg);
			cache_lock(c);

			ontop = 1;  //** 2nd time we are always placed on the top of the stack
			check_waiters_first = 0;  //** And don't check on waiters
		}

		bytes_free = _lru_max_bytes(c) - cp->bytes_used;
	}

	release_pigeon_coop_hole(c->cond_coop, &pch);

	return;
}
int output_alsa_play_stream(struct output *h, struct output_stream *s)
{
	pthread_mutex_lock(&h->mutex);

	/* Play */
	s->is_playing = 1;

	/* Unlock cache after a flush */
	cache_unlock(s->cache);

	pthread_mutex_unlock(&h->mutex);

	return 0;
}
void output_alsa_flush_stream(struct output *h, struct output_stream *s)
{
	pthread_mutex_lock(&h->mutex);

	/* Flush the cache */
	cache_flush(s->cache);
	resample_flush(s->res);

	/* Must unlock input callback in cache after a flush */
	if (s->is_playing)
		cache_unlock(s->cache);

	s->played = 0;

	pthread_mutex_unlock(&h->mutex);
}
int lru_pages_release(cache_t *c, cache_page_t **page, int n_pages)
{
	cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
	cache_segment_t *s;
	page_lru_t *lp;
	cache_page_t *p;
	int bits, i;

	cache_lock(c);

	for (i=0; i<n_pages; i++) {
		p = page[i];
		bits = atomic_get(p->bit_fields);
		log_printf(15, "seg=" XIDT " p->offset=" XOT " bits=%d bytes_used=" XOT "\n", segment_id(p->seg), p->offset, bits, cp->bytes_used);

		if ((bits & C_TORELEASE) > 0) {
			log_printf(15, "DESTROYING seg=" XIDT " p->offset=" XOT " bits=%d bytes_used=" XOT " cache_pages=%d\n", segment_id(p->seg), p->offset, bits, cp->bytes_used, stack_size(cp->stack));
			s = (cache_segment_t *)p->seg->priv;
			lp = (page_lru_t *)p->priv;

			cp->bytes_used -= s->page_size;
			if (lp->ele != NULL) {
				move_to_ptr(cp->stack, lp->ele);
				delete_current(cp->stack, 0, 0);
			} else {
				cp->limbo_pages--;
				log_printf(15, "seg=" XIDT " limbo page p->offset=" XOT " limbo=%d\n", segment_id(p->seg), p->offset, cp->limbo_pages);
			}

			if (p->offset > -1) {
				list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
			}
			if (p->data[0].ptr) free(p->data[0].ptr);
			if (p->data[1].ptr) free(p->data[1].ptr);
			free(lp);
		}
	}

	//** Now check if we can handle some waiters
	_lru_process_waiters(c);

	cache_unlock(c);

	return(0);
}
dm_descriptor_t
dm_get_descriptor_by_name(dm_desc_type_t desc_type, char *name, int *errp)
{
	dm_descriptor_t desc = NULL;

	cache_wlock();

	switch (desc_type) {
	case DM_DRIVE:
		desc = (uintptr_t)drive_get_descriptor_by_name(name, errp);
		break;
	case DM_BUS:
		desc = (uintptr_t)bus_get_descriptor_by_name(name, errp);
		break;
	case DM_CONTROLLER:
		desc = (uintptr_t)controller_get_descriptor_by_name(name, errp);
		break;
	case DM_MEDIA:
		desc = (uintptr_t)media_get_descriptor_by_name(name, errp);
		break;
	case DM_SLICE:
		desc = (uintptr_t)slice_get_descriptor_by_name(name, errp);
		break;
	case DM_PARTITION:
		desc = (uintptr_t)partition_get_descriptor_by_name(name, errp);
		break;
	case DM_PATH:
		desc = (uintptr_t)path_get_descriptor_by_name(name, errp);
		break;
	case DM_ALIAS:
		desc = (uintptr_t)alias_get_descriptor_by_name(name, errp);
		break;
	default:
		*errp = EINVAL;
		break;
	}

	cache_unlock();

	return (desc);
}
/*
 * Cleanup a nlookupdata structure after we are through with it.  This may
 * be called on any nlookupdata structure initialized with nlookup_init().
 * Calling nlookup_done() is mandatory in all cases except where nlookup_init()
 * returns an error, even if as a consumer you believe you have taken all
 * dynamic elements out of the nlookupdata structure.
 */
void
nlookup_done(struct nlookupdata *nd)
{
	if (nd->nl_nch.ncp) {
		if (nd->nl_flags & NLC_NCPISLOCKED) {
			nd->nl_flags &= ~NLC_NCPISLOCKED;
			cache_unlock(&nd->nl_nch);
		}
		if (nd->nl_flags & NLC_NCDIR) {
			cache_drop_ncdir(&nd->nl_nch);
			nd->nl_flags &= ~NLC_NCDIR;
		} else {
			cache_drop(&nd->nl_nch);	/* NULL's out the nch */
		}
	}
	if (nd->nl_rootnch.ncp)
		cache_drop_and_cache(&nd->nl_rootnch);
	if (nd->nl_jailnch.ncp)
		cache_drop_and_cache(&nd->nl_jailnch);
	if ((nd->nl_flags & NLC_HASBUF) && nd->nl_path) {
		objcache_put(namei_oc, nd->nl_path);
		nd->nl_path = NULL;
	}
	if (nd->nl_cred) {
		if ((nd->nl_flags & NLC_BORROWCRED) == 0)
			crfree(nd->nl_cred);
		nd->nl_cred = NULL;
		nd->nl_flags &= ~NLC_BORROWCRED;
	}
	if (nd->nl_open_vp) {
		if (nd->nl_flags & NLC_LOCKVP) {
			vn_unlock(nd->nl_open_vp);
			nd->nl_flags &= ~NLC_LOCKVP;
		}
		vn_close(nd->nl_open_vp, nd->nl_vp_fmode, NULL);
		nd->nl_open_vp = NULL;
	}
	if (nd->nl_dvp) {
		vrele(nd->nl_dvp);
		nd->nl_dvp = NULL;
	}
	nd->nl_flags = 0;	/* clear remaining flags (just clear everything) */
}
void
dm_free_descriptors(dm_descriptor_t *desc_list)
{
	descriptor_t **dp;
	int error;

	if (desc_list == NULL) {
		return;
	}

	dp = desc_array_to_ptr_array(desc_list, &error);
	if (error != 0) {
		free(desc_list);
		return;
	}

	cache_wlock();
	cache_free_descriptors(dp);
	cache_unlock();
}
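/*
 * End-to-end usage sketch for the libdiskmgt entry points above; "c1t0d0"
 * is an example device name, and the nvpair handling is elided. Every
 * lookup takes the cache lock internally, so a caller only has to check
 * *errp and free whatever it was handed.
 */
static void dump_drive_media_sketch(void)
{
	dm_descriptor_t drive;
	dm_descriptor_t *media;
	nvlist_t *attrs;
	int error = 0;

	drive = dm_get_descriptor_by_name(DM_DRIVE, "c1t0d0", &error);
	if (error != 0)
		return;

	attrs = dm_get_attributes(drive, &error);
	if (error == 0 && attrs != NULL) {
		/* ... inspect nvpairs ... */
		nvlist_free(attrs);
	}

	media = dm_get_associated_descriptors(drive, DM_MEDIA, &error);
	if (error == 0 && media != NULL)
		dm_free_descriptors(media);	/* frees the whole list */

	dm_free_descriptor(drive);		/* frees a single descriptor */
}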