/* Insert @image into the cache under @id, taking a reference on the pixman
 * image.  In non-aging builds the least-recently-used entry is evicted first
 * when the cache is already at capacity.  The new entry becomes the head of
 * its hash bucket and the most-recently-used entry of the LRU ring. */
static void image_cache_put(SpiceImageCache *spice_cache, uint64_t id, pixman_image_t *image)
{
    ImageCache *cache = SPICE_UPCAST(ImageCache, spice_cache);
    const uint64_t bucket = id % IMAGE_CACHE_HASH_SIZE;
    ImageCacheItem *new_item;

#ifndef IMAGE_CACHE_AGE
    /* At capacity: drop the LRU tail to make room. */
    if (cache->num_items == IMAGE_CACHE_MAX_ITEMS) {
        /* ring_get_tail() returns the lru_link, which must sit at offset 0
         * for the cast back to ImageCacheItem to be valid. */
        SPICE_VERIFY(SPICE_OFFSETOF(ImageCacheItem, lru_link) == 0);
        ImageCacheItem *victim = (ImageCacheItem *)ring_get_tail(&cache->lru);
        spice_assert(victim);
        image_cache_remove(cache, victim);
    }
#endif

    new_item = spice_new(ImageCacheItem, 1);
    new_item->id = id;
#ifdef IMAGE_CACHE_AGE
    new_item->age = cache->age;
#else
    cache->num_items++;
#endif
    new_item->image = pixman_image_ref(image);
    ring_item_init(&new_item->lru_link);

    /* Push onto the front of the hash chain, then mark as MRU. */
    new_item->next = cache->hash_table[bucket];
    cache->hash_table[bucket] = new_item;
    ring_add(&cache->lru, &new_item->lru_link);
}
/* Look up @id in the cache.  On a hit the entry is refreshed: its age is
 * updated (aging builds only) and it is moved to the MRU end of the LRU
 * ring.  Returns TRUE on hit, FALSE on miss. */
static bool image_cache_hit(ImageCache *cache, uint64_t id)
{
    ImageCacheItem *found = image_cache_find(cache, id);

    if (found == NULL) {
        return FALSE;
    }
#ifdef IMAGE_CACHE_AGE
    found->age = cache->age;
#endif
    /* Re-link at the most-recently-used end of the ring. */
    ring_remove(&found->lru_link);
    ring_add(&cache->lru, &found->lru_link);
    return TRUE;
}
/* Allocate a zero-initialized AsyncCommand carrying @message/@cookie and
 * queue it on the dispatcher's async_commands ring under async_lock.
 * Returns the new command (ownership follows the ring). */
static AsyncCommand *async_command_alloc(RedDispatcher *dispatcher,
                                         RedWorkerMessage message,
                                         uint64_t cookie)
{
    AsyncCommand *cmd = spice_new0(AsyncCommand, 1);

    /* cmd is still private here; it is only published by ring_add below,
     * which happens under the lock. */
    cmd->cookie = cookie;
    cmd->message = message;

    pthread_mutex_lock(&dispatcher->async_lock);
    ring_add(&dispatcher->async_commands, &cmd->link);
    pthread_mutex_unlock(&dispatcher->async_lock);

    spice_debug("%p", cmd);
    return cmd;
}
/* Load the state of the object, unghostifying it. Upon success, return 1.
 * If an error occurred, re-ghostify the object and return -1.
 */
static int
unghostify(cPersistentObject *self)
{
    /* Only ghosts (state < 0) with a jar can/need be loaded; everything
       else is already a success. */
    if (self->state < 0 && self->jar)
    {
        PyObject *r;

        /* Is it ever possible to not have a cache? */
        if (self->cache)
        {
            /* Create a node in the ring for this unghostified object. */
            self->cache->non_ghost_count++;
            self->cache->total_estimated_size +=
                _estimated_size_in_bytes(self->estimated_size);
            ring_add(&self->cache->ring_home, &self->ring);
            /* The ring holds a reference to the object. */
            Py_INCREF(self);
        }
        /* set state to CHANGED while setstate() call is in progress
           to prevent a recursive call to _PyPersist_Load(). */
        self->state = cPersistent_CHANGED_STATE;
        /* Call the object's __setstate__() */
        r = PyObject_CallMethod(self->jar, "setstate", "O", (PyObject *)self);
        if (r == NULL)
        {
            /* setstate raised: roll the object back to the ghost state and
               propagate the Python exception to the caller. */
            ghostify(self);
            return -1;
        }
        self->state = cPersistent_UPTODATE_STATE;
        Py_DECREF(r);
        /* Sanity check: setstate must not have removed us from the ring.
           r_next == NULL means the object is no longer linked in.
           NOTE(review): presumably setstate could evict the object via
           cache activity — confirm against the cache implementation. */
        if (self->cache && self->ring.r_next == NULL)
        {
#ifdef Py_DEBUG
            /* Debug builds abort loudly. */
            fatal_1350(self, "unghostify",
                       "is not in the cache despite that we just "
                       "unghostified it");
#else
            /* Release builds raise SystemError instead. */
            PyErr_Format(PyExc_SystemError, "object at %p with type "
                         "%.200s not in the cache despite that we just "
                         "unghostified it", self, Py_TYPE(self)->tp_name);
            return -1;
#endif
        }
    }
    return 1;
}
/** recv new waiting packets */
/* Drain up to TRIES_PER_SELECT datagrams from socket @s.  Each packet is
 * read into @pkt, attributed to (or creating) its client proxy entry, and
 * queued on @ring for delayed delivery.  Returns as soon as the socket has
 * no more queued data.
 * NOTE(review): assumes @s is non-blocking (EAGAIN/WSAEWOULDBLOCK is the
 * normal loop exit) — confirm at the call site. */
static void
service_recv(int s, struct ringbuf* ring, sldns_buffer* pkt,
	fd_set* rorig, int* max, struct proxy** proxies,
	struct sockaddr_storage* srv_addr, socklen_t srv_len,
	struct timeval* now, struct timeval* delay, struct timeval* reuse)
{
	int i;
	struct sockaddr_storage from;	/* sender of the current packet */
	socklen_t from_len;
	ssize_t len;
	struct proxy* p;
	for(i=0; i<TRIES_PER_SELECT; i++) {
		/* from_len is value-result; reset it before every recvfrom */
		from_len = (socklen_t)sizeof(from);
		len = recvfrom(s, (void*)sldns_buffer_begin(pkt),
			sldns_buffer_capacity(pkt), 0,
			(struct sockaddr*)&from, &from_len);
		if(len < 0) {
#ifndef USE_WINSOCK
			/* nothing more queued (or interrupted): done */
			if(errno == EAGAIN || errno == EINTR)
				return;
			fatal_exit("recvfrom: %s", strerror(errno));
#else
			/* winsock equivalents of EAGAIN/EINPROGRESS */
			if(WSAGetLastError() == WSAEWOULDBLOCK ||
				WSAGetLastError() == WSAEINPROGRESS)
				return;
			fatal_exit("recvfrom: %s",
				wsa_strerror(WSAGetLastError()));
#endif
		}
		sldns_buffer_set_limit(pkt, (size_t)len);
		/* find its proxy element */
		p = find_create_proxy(&from, from_len, rorig, max, proxies,
			addr_is_ip6(srv_addr, srv_len), now, reuse);
		if(!p)
			fatal_exit("error: cannot find or create proxy");
		p->lastuse = *now;
		/* queue the packet for (possibly delayed) forwarding */
		ring_add(ring, pkt, now, delay, p);
		p->numwait++;
		log_addr(1, "recv from client", &p->addr, p->addr_len);
	}
}
static int dquot_f( int argc, char **argv) { bmap_ext_t bm; int c; int dogrp; int doprj; xfs_dqid_t id; xfs_ino_t ino; int nex; char *p; int perblock; xfs_fileoff_t qbno; int qoff; char *s; dogrp = doprj = optind = 0; while ((c = getopt(argc, argv, "gpu")) != EOF) { switch (c) { case 'g': dogrp = 1; doprj = 0; break; case 'p': doprj = 1; dogrp = 0; break; case 'u': dogrp = doprj = 0; break; default: dbprintf(_("bad option for dquot command\n")); return 0; } } s = doprj ? _("project") : dogrp ? _("group") : _("user"); if (optind != argc - 1) { dbprintf(_("dquot command requires one %s id argument\n"), s); return 0; } ino = mp->m_sb.sb_uquotino; if (doprj) ino = mp->m_sb.sb_pquotino; else if (dogrp) ino = mp->m_sb.sb_gquotino; if (ino == 0 || ino == NULLFSINO) { dbprintf(_("no %s quota inode present\n"), s); return 0; } id = (xfs_dqid_t)strtol(argv[optind], &p, 0); if (*p != '\0') { dbprintf(_("bad %s id for dquot %s\n"), s, argv[optind]); return 0; } perblock = (int)(mp->m_sb.sb_blocksize / sizeof(xfs_dqblk_t)); qbno = (xfs_fileoff_t)id / perblock; qoff = (int)(id % perblock); push_cur(); set_cur_inode(ino); nex = 1; bmap(qbno, 1, XFS_DATA_FORK, &nex, &bm); pop_cur(); if (nex == 0) { dbprintf(_("no %s quota data for id %d\n"), s, id); return 0; } set_cur(&typtab[TYP_DQBLK], XFS_FSB_TO_DADDR(mp, bm.startblock), blkbb, DB_RING_IGN, NULL); off_cur(qoff * (int)sizeof(xfs_dqblk_t), sizeof(xfs_dqblk_t)); ring_add(); return 0; }
int main(int argc, char *argv[]) { struct xnet_type_ops ops = { .buf_alloc = __mds_buf_alloc, .buf_free = NULL, .recv_handler = mds_spool_dispatch, .dispatcher = mds_fe_dispatch, }; int err = 0; int self, sport = -1, i, j; int memonly, memlimit, mode, plot_method; char *value; char *ring_ip = NULL; char profiling_fname[256]; hvfs_info(xnet, "MDS Unit Testing...\n"); hvfs_info(xnet, "Mode is 0/1 (no ring/with ring)\n"); if (argc < 2) { hvfs_err(xnet, "Self ID is not provided.\n"); err = EINVAL; return err; } else { self = atoi(argv[1]); hvfs_info(xnet, "Self type+ID is mds:%d.\n", self); if (argc == 4) { ring_ip = argv[2]; sport = atoi(argv[3]); } else if (argc == 3) ring_ip = argv[2]; } value = getenv("memonly"); if (value) { memonly = atoi(value); } else memonly = 1; value = getenv("memlimit"); if (value) { memlimit = atoi(value); } else memlimit = 0; value = getenv("mode"); if (value) { mode = atoi(value); } else mode = 0; value = getenv("fsid"); if (value) { fsid = atoi(value); } else fsid = 0; value = getenv("plot"); if (value) { plot_method = atoi(value); } else plot_method = MDS_PROF_PLOT; st_init(); mds_pre_init(); hmo.prof.xnet = &g_xnet_prof; hmo.conf.itbid_check = 1; hmo.conf.prof_plot = plot_method; hmo.conf.option |= HVFS_MDS_NOSCRUB; hmo.cb_branch_init = mds_cb_branch_init; hmo.cb_branch_destroy = mds_cb_branch_destroy; mds_init(11); /* set the uuid base! 
*/ hmi.uuid_base = (u64)self << 45; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { xnet_update_ipaddr(HVFS_TYPE(i, j), 1, &ipaddr[i], (short *)(&port[i][j])); } } xnet_update_ipaddr(HVFS_CLIENT(12), 1, &ipaddr[4], (short *)(&port[4][0])); xnet_update_ipaddr(HVFS_MDS(4), 1, &ipaddr[0], (short *)(&port[4][1])); /* prepare the ring address */ if (!ring_ip) { xnet_update_ipaddr(HVFS_RING(0), 1, &ipaddr[3], (short *)(&port[3][0])); if (sport == -1) sport = port[TYPE_MDS][self]; } else { xnet_update_ipaddr(HVFS_RING(0), 1, &ring_ip, (short *)(&port[3][0])); if (sport == -1) sport = port[TYPE_MDS][0]; } /* setup the profiling file */ memset(profiling_fname, 0, sizeof(profiling_fname)); sprintf(profiling_fname, "./CP-BACK-mds.%d", self); hmo.conf.pf_file = fopen(profiling_fname, "w+"); if (!hmo.conf.pf_file) { hvfs_err(xnet, "fopen() profiling file %s faield %d\n", profiling_fname, errno); return EINVAL; } self = HVFS_MDS(self); hmo.xc = xnet_register_type(0, sport, self, &ops); if (IS_ERR(hmo.xc)) { err = PTR_ERR(hmo.xc); return err; } hmo.site_id = self; if (mode == 0) { hmi.gdt_salt = 0; hvfs_info(xnet, "Select GDT salt to %lx\n", hmi.gdt_salt); hmi.root_uuid = 1; hmi.root_salt = 0xdfeadb0; hvfs_info(xnet, "Select root salt to %lx\n", hmi.root_salt); #if 0 ring_add(&hmo.chring[CH_RING_MDS], HVFS_MDS(0)); ring_add(&hmo.chring[CH_RING_MDS], HVFS_MDS(1)); ring_add(&hmo.chring[CH_RING_MDS], HVFS_MDS(2)); ring_add(&hmo.chring[CH_RING_MDS], HVFS_MDS(3)); #else ring_add(&hmo.chring[CH_RING_MDS], HVFS_MDS(4)); #endif ring_add(&hmo.chring[CH_RING_MDSL], HVFS_MDSL(0)); ring_add(&hmo.chring[CH_RING_MDSL], HVFS_MDSL(1)); ring_resort_nolock(hmo.chring[CH_RING_MDS]); ring_resort_nolock(hmo.chring[CH_RING_MDSL]); ring_dump(hmo.chring[CH_RING_MDS]); ring_dump(hmo.chring[CH_RING_MDSL]); /* insert the GDT DH */ dh_insert(hmi.gdt_uuid, hmi.gdt_uuid, hmi.gdt_salt); bitmap_insert(0, 0); } else { hmo.cb_exit = mds_cb_exit; hmo.cb_hb = mds_cb_hb; hmo.cb_ring_update = mds_cb_ring_update; 
/* use ring info to init the mds */ err = r2cli_do_reg(self, HVFS_RING(0), fsid, 0); if (err) { hvfs_err(xnet, "reg self %x w/ r2 %x failed w/ %d\n", self, HVFS_RING(0), err); goto out; } hvfs_info(xnet, "HMI gdt uuid %ld salt %lx txg %ld\n", hmi.gdt_uuid, hmi.gdt_salt, atomic64_read(&hmi.mi_txg)); } err = mds_verify(); if (err) { hvfs_err(xnet, "Verify MDS configration failed!\n"); goto out; } // SET_TRACING_FLAG(xnet, HVFS_DEBUG); // SET_TRACING_FLAG(mds, HVFS_DEBUG | HVFS_VERBOSE); hvfs_info(xnet, "MDS is UP for serving requests now.\n"); msg_wait(); xnet_unregister_type(hmo.xc); st_destroy(); mds_destroy(); return 0; out: st_destroy(); mds_destroy(); return err; }