/*
 * Destroy NODE: detach it from whatever list it is linked into,
 * then release its storage.  NODE must be non-null (asserted).
 */
void
node_destroy (void *node)
{
	ASSERT (node);
	node_unlink (node);
	mem_free (node);
}
/*
 * Remove N from list L.  Head and tail pointers are repaired when N
 * sits at either end, the neighbour links are severed via
 * node_unlink(), and the element count is decremented.
 * Returns the removed node so the caller can reuse or free it.
 */
static inline node *
list_remove (list *l, node *n)
{
	if (l->head == n)
		l->head = n->next;
	if (l->tail == n)
		l->tail = n->prev;
	node_unlink (n);
	--l->size;
	return n;
}
/* Add NODE to the recently-used-node cache, which adds a reference to
   prevent it from going away.  NODE should be locked.  */
void
ftpfs_cache_node (struct node *node)
{
  struct netnode *nn = node->nn;
  struct ftpfs *fs = nn->fs;

  mutex_lock (&fs->node_cache_lock);

  /* Skip entirely only when caching is disabled (max == 0) AND the cache
     is already empty; if entries remain after the limit was lowered, fall
     through so the eviction loop below can drain them.  */
  if (fs->params.node_cache_max > 0 || fs->node_cache_len > 0)
    {
      /* If NODE is already the most-recently-used entry there is
	 nothing to move.  */
      if (fs->node_cache_mru != node)
	{
	  if (nn->ncache_next || nn->ncache_prev)
	    /* Node is already in the cache.  */
	    /* NOTE(review): this relies on node_unlink(node, fs) also
	       decrementing fs->node_cache_len so the increment below
	       balances out — confirm against the unlink helper.  */
	    node_unlink (node, fs);
	  else
	    /* Add a reference from the cache.  */
	    netfs_nref (node);

	  /* Link NODE at the MRU (front) end of the cache list.  The
	     list is threaded through the per-node netnode's
	     ncache_next/ncache_prev fields.  */
	  nn->ncache_next = fs->node_cache_mru;
	  nn->ncache_prev = 0;
	  if (fs->node_cache_mru)
	    fs->node_cache_mru->nn->ncache_prev = node;
	  if (! fs->node_cache_lru)
	    /* List was empty, so NODE is also the LRU entry.  */
	    fs->node_cache_lru = node;
	  fs->node_cache_mru = node;
	  fs->node_cache_len++;
	}

      /* Forget the least used nodes.  */
      while (fs->node_cache_len > fs->params.node_cache_max)
	{
	  struct node *lru = fs->node_cache_lru;
	  /* Unlink first, then drop the cache's reference; the node may
	     be deallocated by netfs_nrele.  */
	  node_unlink (lru, fs);
	  netfs_nrele (lru);
	}
    }

  mutex_unlock (&fs->node_cache_lock);
}
/*
 * Append N to the tail of list L.  N is first unlinked from any list
 * it may currently be on, then attached after the current tail (or
 * made the sole element if L is empty), and the count is bumped.
 *
 * Note: the original code special-cased a one-element list
 * (head == tail) with `l->head->next = n`, which is identical to the
 * generic `l->tail->next = n` path when head and tail are the same
 * node — the redundant branch has been folded away.
 */
static inline void
list_push (list *l, node *n)
{
	node_unlink (n);
	if (l->head == NULL) {
		/* list was empty: N becomes both head and tail */
		l->head = n;
	} else {
		/* hook N after the current last element */
		l->tail->next = n;
		n->prev = l->tail;
	}
	l->tail = n;
	l->size++;
}
/*
 * Detach and return the first element of L, or NULL when the list is
 * empty.  When the last element is removed the tail pointer is reset
 * to NULL as well.
 */
static inline node *
list_pop (list *l)
{
	node *front = l->head;

	if (front == NULL)
		return NULL;

	l->head = front->next;
	node_unlink (front);
	if (--l->size == 0)
		l->tail = NULL;
	return front;
}
/*
 * XXX: a bug still exists here. we have a thread polling on this
 * XXX: device in the kernel, we need to get rid of this also.
 * XXX: since we're going to move the waiter thread up to the
 * XXX: user level, it'll be easier to kill off as part of the
 * XXX: cleanup of the device private data.
 */
/*
 * Tear down all management state for the removable-SCSI device named
 * by PATH (or identified by RDEV when the path can no longer be
 * stat'ed): stop the listener thread, eject any inserted volume,
 * unlink the device's name-space nodes, and free the per-device
 * private data.  Returns early (leaving state intact) if the device
 * was never managed or if the volume survives the eject attempt.
 */
static void
rmscsi_close(char *path, dev_t rdev)
{
	char			namebuf[MAXNAMELEN];
	struct stat		sb;
	struct devs		*dp;
	struct rmscsi_priv	*rsp;
	int			i;

	debug(1, "rmscsi_close %s\n", path);

	/* build the pathname of the base (whole-device) partition */
	(void) sprintf(namebuf, RMSCSI_NAMEPROTO, path, RMSCSI_BASEPART);
	if (stat(namebuf, &sb) < 0) {
		/*
		 * can't stat the device node: fatal only when the caller
		 * supplied no fallback rdev to identify the device by
		 */
		if (rdev == NODEV) {
			warning(gettext("rmscsi_close: %s; %m\n"), namebuf);
			return;
		}
	} else {
		/* prefer the freshly-stat'ed device number */
		rdev = sb.st_rdev;
	}

	/* look up our bookkeeping for this device; bail if unmanaged */
	if ((dp = dev_getdp(rdev)) == NULL) {
		debug(1, "rmscsi_close: %s not in use\n", path);
		return;
	}

	/* get our private data */
	rsp = (struct rmscsi_priv *)dp->dp_priv;

	/*
	 * take care of the listener thread: signal it under the kill
	 * mutex, then join it so it is fully gone before we free the
	 * private data it uses
	 */
	(void) mutex_lock(&rsp->rs_killmutex);
	(void) thr_kill(rsp->rs_tid, SIGUSR1);
	(void) mutex_unlock(&rsp->rs_killmutex);
	(void) thr_join(rsp->rs_tid, 0, 0);
	debug(1, "rmscsi_close: thread id %d reaped (killed/joined)\n",
	    rsp->rs_tid);

	/*
	 * if there is a volume inserted in this device ...
	 */
	if (dp->dp_vol) {
		/*
		 * clean up the name space and the device maps
		 * to remove references to any volume that might
		 * be in the device right now
		 *
		 * this crap with the flags is to keep the
		 * "poll" from being relaunched by this function
		 *
		 * yes, its a hack and there should be a better way
		 */
		if (dp->dp_dsw->d_flags & D_POLL) {
			/* temporarily mask D_POLL around the eject */
			dp->dp_dsw->d_flags &= ~D_POLL;
			dev_eject(dp->dp_vol, TRUE);
			dp->dp_dsw->d_flags |= D_POLL;
		} else {
			dev_eject(dp->dp_vol, TRUE);
		}
		/*
		 * NOTE(review): if dev_eject() did not clear dp_vol the
		 * eject is presumably still pending, so we keep all device
		 * state and return — confirm dev_eject()'s contract.
		 */
		if (dp->dp_vol != NULL) {
			return;
		}

		/* do the eject work */
		(void) ioctl(rsp->rs_fd[RMSCSI_BASEPART], DKIOCEJECT, 0);
	}

	/*
	 * clean up the names in the name space
	 */
	node_unlink(dp->dp_bvn);
	node_unlink(dp->dp_rvn);

	/*
	 * free the private data we've allocated: per-partition raw-path
	 * strings and open file descriptors
	 */
	for (i = 0; i < V_NUMPAR; i++) {
		if (rsp->rs_rawpath[i]) {
			free(rsp->rs_rawpath[i]);
		}
		if (rsp->rs_fd[i] != -1) {
			(void) close(rsp->rs_fd[i]);
		}
	}
#if defined(_FIRMWARE_NEEDS_FDISK)
	/* also close the per-fdisk-partition raw descriptors */
	for (i = 0; i < (FD_NUMPART+1); i++) {
		if (rsp->rs_raw_pfd[i] >= 0) {
			(void) close(rsp->rs_raw_pfd[i]);
		}
	}
#endif
	free(rsp);

	/*
	 * free the dp, so no one points at us anymore
	 */
	dev_freedp(dp);
}