/*
 * genfs_rename_cache_purge: Purge the name cache.  To be called by
 * gro_rename on success.  The only pair of vnodes that may be
 * identical is {fdvp, tdvp}.
 */
void
genfs_rename_cache_purge(struct vnode *fdvp, struct vnode *fvp,
    struct vnode *tdvp, struct vnode *tvp)
{

    KASSERT(fdvp != NULL);
    KASSERT(fvp != NULL);
    KASSERT(tdvp != NULL);
    KASSERT(fdvp != fvp);
    KASSERT(fdvp != tvp);
    KASSERT(tdvp != fvp);
    KASSERT(tdvp != tvp);
    KASSERT(fvp != tvp);
    KASSERT(fdvp->v_type == VDIR);
    KASSERT(tdvp->v_type == VDIR);

    /*
     * XXX What actually needs to be purged?
     */

    cache_purge(fdvp);

    if (fvp->v_type == VDIR)
        cache_purge(fvp);

    if (tdvp != fdvp)
        cache_purge(tdvp);

    if ((tvp != NULL) && (tvp->v_type == VDIR))
        cache_purge(tvp);
}

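/*
 * Hedged usage sketch, not taken from any real filesystem: roughly how a
 * gro_rename implementation in the genfs_rename framework might call
 * genfs_rename_cache_purge once the directory update has succeeded.
 * The foofs_* names are hypothetical, and the callback signature is an
 * assumption modeled on how the gro_rename hooks are declared elsewhere.
 */
static int
foofs_gro_rename(struct mount *mp, kauth_cred_t cred,
    struct vnode *fdvp, struct componentname *fcnp, void *fde, struct vnode *fvp,
    struct vnode *tdvp, struct componentname *tcnp, void *tde, struct vnode *tvp)
{
    int error;

    /* Hypothetical helper that rewrites the on-disk directory entries. */
    error = foofs_rename_entries(fdvp, fcnp, fvp, tdvp, tcnp, tvp);
    if (error != 0)
        return error;

    /* Purge stale name cache entries only after the rename succeeded. */
    genfs_rename_cache_purge(fdvp, fvp, tdvp, tvp);
    return 0;
}
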
static void cache_do_rename(const char *from, const char *to)
{
    pthread_mutex_lock(&cache.lock);
    cache_purge(from);
    cache_purge(to);
    cache_purge_parent(from);
    cache_purge_parent(to);
    pthread_mutex_unlock(&cache.lock);
}

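/*
 * Hedged usage sketch (hypothetical names): a FUSE-style rename handler
 * that calls the helper above only after the backend rename succeeded,
 * so cached entries for both paths and their parent directories are
 * dropped together under the cache lock.
 */
static int fs_rename(const char *from, const char *to)
{
    int err = backend_rename(from, to);    /* hypothetical backend call */

    if (err == 0)
        cache_do_rename(from, to);
    return err;
}
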
static int
_xfs_rename(
    struct vop_rename_args /* {
        struct vnode *a_fdvp;
        struct vnode *a_fvp;
        struct componentname *a_fcnp;
        struct vnode *a_tdvp;
        struct vnode *a_tvp;
        struct componentname *a_tcnp;
    } */ *ap)
{
    struct vnode *fvp = ap->a_fvp;
    struct vnode *tvp = ap->a_tvp;
    struct vnode *fdvp = ap->a_fdvp;
    struct vnode *tdvp = ap->a_tdvp;
    /* struct componentname *tcnp = ap->a_tcnp; */
    /* struct componentname *fcnp = ap->a_fcnp; */
    int error = EPERM;

    if (error)
        goto out;

    /* Check for cross-device rename */
    if ((fvp->v_mount != tdvp->v_mount) ||
        (tvp && (fvp->v_mount != tvp->v_mount))) {
        error = EXDEV;
        goto out;
    }

    if (tvp && tvp->v_usecount > 1) {
        error = EBUSY;
        goto out;
    }

    if (fvp->v_type == VDIR) {
        if (tvp != NULL && tvp->v_type == VDIR)
            cache_purge(tdvp);
        cache_purge(fdvp);
    }
out:
    if (tdvp == tvp)
        vrele(tdvp);
    else
        vput(tdvp);
    if (tvp)
        vput(tvp);
    vrele(fdvp);
    vrele(fvp);
    vgone(fvp);
    if (tvp)
        vgone(tvp);
    return (error);
}

static int
unionfs_rmdir(void *v)
{
    struct vop_rmdir_args *ap = v;
    int error;
    struct unionfs_node *dunp;
    struct unionfs_node *unp;
    struct unionfs_mount *ump;
    struct componentname *cnp;
    struct vnode *udvp;
    struct vnode *uvp;
    struct vnode *lvp;

    UNIONFS_INTERNAL_DEBUG("unionfs_rmdir: enter\n");

    error = 0;
    dunp = VTOUNIONFS(ap->a_dvp);
    unp = VTOUNIONFS(ap->a_vp);
    cnp = ap->a_cnp;
    udvp = dunp->un_uppervp;
    uvp = unp->un_uppervp;
    lvp = unp->un_lowervp;

    if (udvp == NULLVP)
        return (EROFS);

    if (udvp == uvp)
        return (EOPNOTSUPP);

    if (uvp != NULLVP) {
        if (lvp != NULLVP) {
            error = unionfs_check_rmdir(ap->a_vp, cnp->cn_cred);
            if (error != 0)
                return (error);
        }
        ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount);
        if (ump->um_whitemode == UNIONFS_WHITE_ALWAYS || lvp != NULLVP)
            cnp->cn_flags |= DOWHITEOUT;
        error = VOP_RMDIR(udvp, uvp, cnp);
    } else if (lvp != NULLVP)
        error = unionfs_mkwhiteout(udvp, cnp, unp->un_path);

    if (error == 0) {
        cache_purge(ap->a_dvp);
        cache_purge(ap->a_vp);
    }

    UNIONFS_INTERNAL_DEBUG("unionfs_rmdir: leave (%d)\n", error);

    return (error);
}

/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp))
        return EBUSY;
    code = osi_fbsd_checkinuse(avc);
    if (code) {
        VI_UNLOCK(vp);
        return code;
    }

    /* must hold the vnode before calling cache_purge()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    VI_UNLOCK(vp);

    AFS_GUNLOCK();
    cache_purge(vp);
    AFS_GLOCK();

    vdrop(vp);

    return 0;
}

/*
 * Reclaim an fnode/ntnode so that it can be used for other purposes.
 */
int
ntfs_reclaim(void *v)
{
    struct vop_reclaim_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct fnode *fp = VTOF(vp);
    struct ntnode *ip = FTONT(fp);
    struct proc *p = ap->a_p;
    int error;

    dprintf(("ntfs_reclaim: vnode: %p, ntnode: %d\n", vp, ip->i_number));

#ifdef DIAGNOSTIC
    if (ntfs_prtactive && vp->v_usecount != 0)
        vprint("ntfs_reclaim: pushing active", vp);
#endif

    if ((error = ntfs_ntget(ip, p)) != 0)
        return (error);

    /* Purge old data structures associated with the inode. */
    cache_purge(vp);

    ntfs_frele(fp);
    ntfs_ntput(ip, p);

    vp->v_data = NULL;

    return (0);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ufs_reclaim(struct vnode *vp)
{
    struct inode *ip = VTOI(vp);

    if (prtactive && vp->v_usecount > 1)
        vprint("ufs_reclaim: pushing active", vp);

    if (!UFS_WAPBL_BEGIN(vp->v_mount)) {
        UFS_UPDATE(vp, NULL, NULL, UPDATE_CLOSE);
        UFS_WAPBL_END(vp->v_mount);
    }
    UFS_UPDATE(vp, NULL, NULL, UPDATE_CLOSE);

    /*
     * Remove the inode from its hash chain.
     */
    ufs_ihashrem(ip);

    /*
     * Purge old data structures associated with the inode.
     */
    cache_purge(vp);
    if (ip->i_devvp) {
        vrele(ip->i_devvp);
        ip->i_devvp = 0;
    }
#ifdef QUOTA
    ufsquota_free(ip);
#endif
#ifdef UFS_DIRHASH
    if (ip->i_dirhash != NULL)
        ufsdirhash_free(ip);
#endif
    return (0);
}

static void cache_invalidate_dir(const char *path)
{
    pthread_mutex_lock(&cache.lock);
    cache_purge(path);
    cache_purge_parent(path);
    pthread_mutex_unlock(&cache.lock);
}

static int
vnop_remove_9p(struct vnop_remove_args *ap)
{
    vnode_t dvp, vp;
    node_9p *dnp, *np;
    int e;

    TRACE();
    dvp = ap->a_dvp;
    vp = ap->a_vp;
    dnp = NTO9P(dvp);
    np = NTO9P(vp);

    if (dvp == vp) {
        panic("parent == node");
        return EINVAL;
    }

    if (ISSET(ap->a_flags, VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0))
        return EBUSY;

    nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    if ((e=remove_9p(np->nmp, np->fid)))
        goto error;

    cache_purge(vp);
    vnode_recycle(vp);

error:
    nunlock_9p(np);
    nunlock_9p(dnp);
    return e;
}

/**
	Send a custom Siemens OBEX rename request.

	\param cli an obexftp_client_t created by obexftp_open().
	\param sourcename remote filename to be renamed
	\param targetname remote target filename

	\return the result of the Siemens rename request
 */
int obexftp_rename(obexftp_client_t *cli, const char *sourcename, const char *targetname)
{
    obex_object_t *object = NULL;
    int ret;

    return_val_if_fail(cli != NULL, -EINVAL);

    cli->infocb(OBEXFTP_EV_SENDING, sourcename, 0, cli->infocb_data);

    DEBUG(2, "%s() Moving %s -> %s\n", __func__, sourcename, targetname);

    object = obexftp_build_rename (cli->obexhandle, cli->connection_id, sourcename, targetname);
    if (object == NULL)
        return -1;

    cache_purge(&cli->cache, NULL);
    ret = cli_sync_request(cli, object);

    if (ret < 0)
        cli->infocb(OBEXFTP_EV_ERR, sourcename, 0, cli->infocb_data);
    else
        cli->infocb(OBEXFTP_EV_OK, sourcename, 0, cli->infocb_data);

    return ret;
}

int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
    struct vnode *vp = v->a_vp;
    struct tmpfs_mount *tmp;
    struct tmpfs_node *node;

    node = VP_TO_TMPFS_NODE(vp);
    tmp = VFS_TO_TMPFS(vp->v_mount);

    vnode_destroy_vobject(vp);
    cache_purge(vp);
    TMPFS_NODE_LOCK(node);
    TMPFS_ASSERT_ELOCKED(node);
    tmpfs_free_vp(vp);

    /* If the node referenced by this vnode was deleted by the user,
     * we must free its associated data structures (now that the vnode
     * is being reclaimed). */
    if (node->tn_links == 0 &&
        (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
        node->tn_vpstate = TMPFS_VNODE_DOOMED;
        TMPFS_NODE_UNLOCK(node);
        tmpfs_free_node(tmp, node);
    } else
        TMPFS_NODE_UNLOCK(node);

    MPASS(vp->v_data == NULL);
    return 0;
}

static int
_xfs_remove(
    struct vop_remove_args /* {
        struct vnodeop_desc *a_desc;
        struct vnode * a_dvp;
        struct vnode * a_vp;
        struct componentname * a_cnp;
    } */ *ap)
{
    struct vnode *vp = ap->a_vp;
    struct thread *td = curthread;
    struct ucred *credp = td->td_ucred;
    /* struct vnode *dvp = ap->a_dvp;
       struct componentname *cnp = ap->a_cnp; */
    int error;

    if (vp->v_type == VDIR || vp->v_usecount != 1)
        return (EPERM);

    error = xfs_remove(VPTOXFSVP(ap->a_dvp)->v_bh.bh_first,
        VPTOXFSVP(ap->a_vp)->v_bh.bh_first, ap->a_cnp, credp);

    cache_purge(vp);
    return error;
}

/*
    struct vnop_remove_args {
        struct vnode *a_dvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    };
*/
static int
fuse_vnop_remove(struct vop_remove_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;

    int err;

    FS_DEBUG2G("inode=%ju name=%*s\n",
        (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if (vnode_isdir(vp)) {
        return EPERM;
    }
    cache_purge(vp);

    err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);

    if (err == 0)
        fuse_internal_vnode_disappear(vp);
    return err;
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ufs_reclaim(struct vnode *vp, struct proc *p)
{
    struct inode *ip;

#ifdef DIAGNOSTIC
    extern int prtactive;

    if (prtactive && vp->v_usecount != 0)
        vprint("ufs_reclaim: pushing active", vp);
#endif

    /*
     * Remove the inode from its hash chain.
     */
    ip = VTOI(vp);
    ufs_ihashrem(ip);

    /*
     * Purge old data structures associated with the inode.
     */
    cache_purge(vp);
    if (ip->i_devvp) {
        vrele(ip->i_devvp);
    }
#ifdef UFS_DIRHASH
    if (ip->i_dirhash != NULL)
        ufsdirhash_free(ip);
#endif

    ufs_quota_delete(ip);
    return (0);
}

int cache_unregister(struct cache_detail *cd)
{
    cache_purge(cd);
    spin_lock(&cache_list_lock);
    write_lock(&cd->hash_lock);
    if (cd->entries || atomic_read(&cd->inuse)) {
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        return -EBUSY;
    }
    if (current_detail == cd)
        current_detail = NULL;
    list_del_init(&cd->others);
    write_unlock(&cd->hash_lock);
    spin_unlock(&cache_list_lock);
    if (cd->proc_ent) {
        if (cd->flush_ent)
            remove_proc_entry("flush", cd->proc_ent);
        if (cd->channel_ent)
            remove_proc_entry("channel", cd->proc_ent);
        if (cd->content_ent)
            remove_proc_entry("content", cd->proc_ent);

        cd->proc_ent = NULL;
        remove_proc_entry(cd->name, proc_net_rpc);
    }
    if (list_empty(&cache_list)) {
        /* module must be being unloaded so its safe to kill the worker */
        cancel_delayed_work(&cache_cleaner);
        flush_scheduled_work();
    }
    return 0;
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ext2fs_reclaim(void *v)
{
    struct vop_reclaim_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct inode *ip;

#ifdef DIAGNOSTIC
    extern int prtactive;

    if (prtactive && vp->v_usecount != 0)
        vprint("ext2fs_reclaim: pushing active", vp);
#endif

    /*
     * Remove the inode from its hash chain.
     */
    ip = VTOI(vp);
    ufs_ihashrem(ip);

    /*
     * Purge old data structures associated with the inode.
     */
    cache_purge(vp);
    if (ip->i_devvp)
        vrele(ip->i_devvp);

    if (ip->i_e2din != NULL)
        pool_put(&ext2fs_dinode_pool, ip->i_e2din);

    pool_put(&ext2fs_inode_pool, ip);
    vp->v_data = NULL;
    return (0);
}

/**
	Send an OBEX PUT, optionally with (some) SETPATHs for a local file.

	\param cli an obexftp_client_t created by obexftp_open().
	\param filename local file to send
	\param remotename remote name to write

	\return the result of the OBEX PUT (and SETPATH) request(s).

	\note Puts to filename's basename if remotename is NULL or ends with a slash.
 */
int obexftp_put_file(obexftp_client_t *cli, const char *filename, const char *remotename)
{
    obex_object_t *object;
    int ret;

    return_val_if_fail(cli != NULL, -EINVAL);
    return_val_if_fail(filename != NULL, -EINVAL);

    if (cli->out_data) {
        DEBUG(1, "%s: Warning: buffer still active?\n", __func__);
    }

    cli->infocb(OBEXFTP_EV_SENDING, filename, 0, cli->infocb_data);

    // TODO: if remotename ends with a slash: add basename
    if (!remotename) {
        remotename = strrchr(filename, '/');
        if (remotename)
            remotename++;
        else
            remotename = filename;
    }

    if (OBEXFTP_USE_SPLIT_SETPATH(cli->quirks) && remotename && strchr(remotename, '/')) {
        char *basepath, *basename;
        split_file_path(remotename, &basepath, &basename);
        ret = obexftp_setpath(cli, basepath, 0);
        if (ret < 0) {
            cli->infocb(OBEXFTP_EV_ERR, basepath, 0, cli->infocb_data);
            return ret;
        }
        DEBUG(2, "%s() Sending %s -> %s\n", __func__, filename, basename);
        object = build_object_from_file (cli->obexhandle, cli->connection_id, filename, basename);
        free(basepath);
        free(basename);
    } else {
        DEBUG(2, "%s() Sending %s -> %s\n", __func__, filename, remotename);
        object = build_object_from_file (cli->obexhandle, cli->connection_id, filename, remotename);
    }

    cli->fd = open(filename, O_RDONLY | O_BINARY, 0);
    if (cli->fd < 0)
        ret = -1;
    else {
        cli->out_data = NULL; /* dont free, isnt ours */
        cache_purge(&cli->cache, NULL);
        ret = cli_sync_request(cli, object);
    }
    /* close(cli->fd); */

    if (ret < 0)
        cli->infocb(OBEXFTP_EV_ERR, filename, 0, cli->infocb_data);
    else
        cli->infocb(OBEXFTP_EV_OK, filename, 0, cli->infocb_data);

    return ret;
}

void
fuse_internal_vnode_disappear(struct vnode *vp)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
    fvdat->flag |= FN_REVOKED;
    cache_purge(vp);
}

static int
vnop_rename_9p(struct vnop_rename_args *ap)
{
    struct componentname *tcnp;
    vnode_t fdvp, tdvp, fvp;
    node_9p *fdnp, *fnp;
    dir_9p d;
    char *s;
    int e;

    TRACE();
    fdvp = ap->a_fdvp;
    tdvp = ap->a_tdvp;
    fvp = ap->a_fvp;
    tcnp = ap->a_tcnp;
    fdnp = NTO9P(fdvp);
    fnp = NTO9P(fvp);

    if (fdvp!=tdvp || NTO9P(fdvp)!=NTO9P(tdvp))
        return ENOTSUP;

    nlock_9p(fdnp, NODE_LCK_EXCLUSIVE);
    nlock_9p(fnp, NODE_LCK_EXCLUSIVE);

    nulldir(&d);
    e = ENOMEM;
    s = malloc_9p(tcnp->cn_namelen+1);
    if (s == NULL)
        goto error;

    bcopy(tcnp->cn_nameptr, s, tcnp->cn_namelen);
    s[tcnp->cn_namelen] = 0;
    d.name = s;
    e = wstat_9p(fnp->nmp, fnp->fid, &d);
    free_9p(s);

    if (e == 0) {
        cache_purge(fvp);
        cache_purge(fdvp);
    }

error:
    nunlock_9p(fnp);
    nunlock_9p(fdnp);
    return e;
}

void
nnpfs_dnlc_purge (struct vnode *vp)
{
    NNPFSDEB(XDEBDNLC, ("nnpfs_dnlc_purge\n"));

    if (tbl.dvp == vp || tbl.vp == vp)
        tbl_clear ();

    cache_purge(vp);
}

/**
	Send OBEX SETPATH request (multiple requests if split path flag is set).

	\param cli an obexftp_client_t created by obexftp_open().
	\param name path to change into
	\param create flag whether to create missing folders or fail

	\return the result of the OBEX SETPATH request(s).

	\note handles NULL, "", "/" and everything else correctly.
 */
int obexftp_setpath(obexftp_client_t *cli, const char *name, int create)
{
    obex_object_t *object;
    int ret = 0;
    char *copy, *tail, *p;

    return_val_if_fail(cli != NULL, -EINVAL);

    DEBUG(2, "%s() Changing to %s\n", __func__, name);

    if (OBEXFTP_USE_SPLIT_SETPATH(cli->quirks) && name && *name && strchr(name, '/')) {
        tail = copy = strdup(name);

        for (p = strchr(tail, '/'); tail; ) {
            if (p) {
                *p = '\0';
                p++;
            }
            cli->infocb(OBEXFTP_EV_SENDING, tail, 0, cli->infocb_data);
            DEBUG(2, "%s() Setpath \"%s\" (create:%d)\n", __func__, tail, create);

            /* try without the create flag */
            object = obexftp_build_setpath (cli->obexhandle, cli->connection_id, tail, 0);
            ret = cli_sync_request(cli, object);

            if ((ret < 0) && create) {
                /* try again with create flag set maybe? */
                object = obexftp_build_setpath (cli->obexhandle, cli->connection_id, tail, 1);
                ret = cli_sync_request(cli, object);
            }
            if (ret < 0)
                break;

            tail = p;
            if (p)
                p = strchr(p, '/');
            /* prevent a trailing slash from messing all up with a cd top */
            if (tail && *tail == '\0')
                break;
        }
        free (copy);
    } else {
        cli->infocb(OBEXFTP_EV_SENDING, name, 0, cli->infocb_data);
        DEBUG(2, "%s() Setpath \"%s\"\n", __func__, name);

        object = obexftp_build_setpath (cli->obexhandle, cli->connection_id, name, create);
        ret = cli_sync_request(cli, object);
    }
    if (create)
        cache_purge(&cli->cache, NULL); /* no way to know where we started */

    if (ret < 0)
        cli->infocb(OBEXFTP_EV_ERR, name, 0, cli->infocb_data);
    else
        cli->infocb(OBEXFTP_EV_OK, name, 0, cli->infocb_data);

    return ret;
}

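/*
 * Hedged usage sketch: a typical client sequence using only the obexftp
 * calls shown in this collection.  `cli` is assumed to have been created
 * with obexftp_open() and connected elsewhere; the folder and file names
 * are examples, not part of the library.
 */
static int upload_and_rename(obexftp_client_t *cli)
{
    /* change into (and, if needed, create) the target folder */
    if (obexftp_setpath(cli, "Pictures", 1) < 0)
        return -1;

    /* NULL remotename: the file is stored under its basename */
    if (obexftp_put_file(cli, "/tmp/photo.jpg", NULL) < 0)
        return -1;

    /* Siemens-specific rename of the freshly uploaded file */
    return obexftp_rename(cli, "photo.jpg", "holiday.jpg");
}
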
/* Purge VM for a file when its callback is revoked.
 *
 * Locking:  No lock is held, not even the global lock.
 */
void
osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    struct vnode *vp = AFSTOV(avc);

    if (!vp)
        return;
    cache_purge(vp);
    uvm_vnp_uncache(vp);
    uvm_vnp_setsize(vp, avc->f.m.Length);
}

int dns_main_loop()
{
    struct timeval tv;
    fd_set active_rfds;
    int retval;
    dns_request_t m;
    dns_request_t *ptr, *next;
    //int purge_time = config.purge_time / 60;
    int purge_time = CACHE_CHECK_TIME / DNS_TICK_TIME; //(30sec) modified by CMC 8/4/2001

    while( !dns_main_quit ){
        /* set the one second time out */
        tv.tv_sec = DNS_TICK_TIME; //modified by CMC 8/3/2001
        tv.tv_usec = 0;

        /* now copy the main rfds in the active one as it gets modified by select*/
        active_rfds = rfds;

        retval = select( FD_SETSIZE, &active_rfds, NULL, NULL, &tv );

        if (retval){
            /* data is now available */
            dns_read_packet( dns_sock, &m );
            dns_handle_request( &m );
        }else{
            /* select time out */
            ptr = dns_request_list;
            while( ptr ){
                next = ptr->next;
                ptr->time_pending++;
                if( ptr->time_pending > DNS_TIMEOUT/DNS_TICK_TIME ){
                    /* CMC: ptr->time_pending= DNS_TIMEOUT ~ DNS_TIMEOUT+DNS_TICK_TIME */
                    debug("Request timed out\n");
                    /* send error back */
                    dns_construct_error_reply(ptr);
                    dns_write_packet( dns_sock, ptr->src_addr, ptr->src_port, ptr );
                    dns_request_list = dns_list_remove( dns_request_list, ptr );
                }
                ptr = next;
            } /* while(ptr) */

            /* purge cache */
            purge_time--;
            if( purge_time <= 0 ){ //modified by CMC 8/4/2001
                cache_purge( config.purge_time );
                //purge_time = config.purge_time / 60;
                purge_time = CACHE_CHECK_TIME / DNS_TICK_TIME; //(30sec) modified by CMC 8/3/2001
            }
        } /* if (retval) */
    }
    return 0;
}

// destroy cache
void cache_destroy (struct cache_t **cache, void (*destroy)(void **data))
{
    // check parameters
    if (!cache || !*cache)
        return;

    // destroy object cache
    cache_purge(*cache, destroy);
    free((*cache)->objects);
    free(*cache);
    *cache = NULL;
}

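/*
 * Hedged usage sketch: tearing down a cache whose entries own heap memory.
 * The destroy callback matches the signature taken by cache_destroy() above;
 * the assumption that entries are plain malloc'd blobs is mine, not the
 * original author's.
 */
static void free_entry(void **data)
{
    free(*data);        // assumed: each cached object is a malloc'd buffer
    *data = NULL;
}

static void shutdown_cache(struct cache_t **cache)
{
    // purge every object through the callback, then release the cache itself
    cache_destroy(cache, free_entry);
}
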
static void cache_purge_parent(const char *path)
{
    const char *s = strrchr(path, '/');
    if (s) {
        if (s == path)
            g_hash_table_remove(cache.table, "/");
        else {
            char *parent = g_strndup(path, s - path);
            cache_purge(parent);
            g_free(parent);
        }
    }
}

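/*
 * Hedged sketch: the path-keyed cache_purge() called by cache_do_rename(),
 * cache_invalidate_dir() and cache_purge_parent() above is not included in
 * this collection.  Given the GHashTable-backed cache, it is presumably
 * little more than a keyed removal along these lines (an assumption, not
 * the original implementation).
 */
static void cache_purge(const char *path)
{
    /* assumed: entries are keyed by absolute path in cache.table */
    g_hash_table_remove(cache.table, path);
}
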
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
    struct vnode * vp;
    struct null_node * xp;
    struct vnode * lowervp;
    struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

    NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

    vp = ap->a_vp;

    xp = VTONULL(vp);
    lowervp = xp->null_lowervp;

    lck_mtx_lock(&null_mp->nullm_lock);

    vnode_removefsref(vp);

    if (lowervp != NULL) {
        /* root and second don't have a lowervp, so nothing to release and nothing
         * got hashed */
        if (xp->null_flags & NULL_FLAG_HASHED) {
            /* only call this if we actually made it into the hash list. reclaim gets
               called also to clean up a vnode that got created when it didn't need to
               under race conditions */
            null_hashrem(xp);
        }
        vnode_getwithref(lowervp);
        vnode_rele(lowervp);
        vnode_put(lowervp);
    }

    if (vp == null_mp->nullm_rootvp) {
        null_mp->nullm_rootvp = NULL;
    } else if (vp == null_mp->nullm_secondvp) {
        null_mp->nullm_secondvp = NULL;
    } else if (vp == null_mp->nullm_thirdcovervp) {
        null_mp->nullm_thirdcovervp = NULL;
    }

    lck_mtx_unlock(&null_mp->nullm_lock);

    cache_purge(vp);
    vnode_clearfsnode(vp);

    FREE(xp, M_TEMP);

    return 0;
}

/*
 * smbfs_remove directory call
 */
int
smbfs_rmdir(void *v)
{
    struct vop_rmdir_args /* {
        struct vnode *a_dvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    } */ *ap = v;
    struct vnode *vp = ap->a_vp;
    struct vnode *dvp = ap->a_dvp;
    struct componentname *cnp = ap->a_cnp;
/*  struct smbmount *smp = VTOSMBFS(vp); */
    struct smbnode *dnp = VTOSMB(dvp);
    struct smbnode *np = VTOSMB(vp);
    struct smb_cred scred;
    int error;

    if (dvp == vp) {
        vrele(dvp);
        vput(dvp);
        return (EINVAL);
    }

    smb_makescred(&scred, curlwp, cnp->cn_cred);
    error = smbfs_smb_rmdir(np, &scred);
    if (error == 0)
        np->n_flag |= NGONE;

    dnp->n_flag |= NMODIFIED;
    smbfs_attr_cacheremove(dvp);
    VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
    VN_KNOTE(vp, NOTE_DELETE);
    cache_purge(dvp);
    cache_purge(vp);
    vput(vp);
    vput(dvp);
    return (error);
}

/**
	Close an obexftp client and free the resources.

	\param cli the obexftp_client_t to be shut down and freed.
		It's safe to pass NULL here.

	Closes the given obexftp client and frees the resources.
	It's recommended to set the client reference to NULL afterwards.
 */
void obexftp_close(obexftp_client_t *cli)
{
    DEBUG(3, "%s()\n", __func__);

    return_if_fail(cli != NULL);

    OBEX_Cleanup(cli->obexhandle);

    if (cli->buf_data) {
        DEBUG(1, "%s: Warning: purging left-over buffer.\n", __func__);
        free(cli->buf_data);
    }

    cache_purge(&cli->cache, NULL);
    free(cli->stream_chunk);

    free(cli);
}

/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 *
 * OSF/1 Locking:  VN_LOCK has been called.
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vnode *vp = AFSTOV(avc);

    if (!vp)
        return 0;

    AFS_GUNLOCK();
    cache_purge(vp);
    uvm_vnp_uncache(vp);
    AFS_GLOCK();

    return 0;
}

int
fusefs_reclaim(void *v)
{
    struct vop_reclaim_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct fusefs_node *ip = VTOI(vp);
    struct fusefs_filehandle *fufh = NULL;
    struct fusefs_mnt *fmp;
    struct fusebuf *fbuf;
    int type;

    fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

    /* close opened files */
    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(ip->fufh[type]);
        if (fufh->fh_type != FUFH_INVALID) {
            printf("fusefs: vnode being reclaimed is valid\n");
            fusefs_file_close(fmp, ip, fufh->fh_type, type,
                (ip->vtype == VDIR), curproc);
        }
    }

    /*
     * Purge old data structures associated with the inode.
     */
    ip->parent = 0;

    /*
     * if the fuse connection is opened
     * ask libfuse to free the vnodes
     */
    if (fmp->sess_init) {
        fbuf = fb_setup(0, ip->ufs_ino.i_number, FBT_RECLAIM, curproc);
        if (fb_queue(fmp->dev, fbuf))
            printf("fusefs: libfuse vnode reclaim failed\n");

        fb_delete(fbuf);
    }

    /*
     * Remove the inode from its hash chain.
     */
    ufs_ihashrem(&ip->ufs_ino);

    cache_purge(vp);
    free(ip, M_FUSEFS);
    vp->v_data = NULL;

    return (0);
}