/*
 * Test driver: create a measurement context, take five readings, print
 * each one (or a failure notice), then release the context.
 * Returns 0 on success, 1 if the context cannot be created or released.
 */
int main(void)
{
	context_t *ctx = alloc_context();
	measurement_t m;
	int i;

	if (ctx == NULL) {
		printf("T: Failed to create new context.\n");
		return 1;
	}

	for (i = 0; i < 5; i++) {
		if (measure(ctx, &m) != 0) {
			printf("T: Failed to obtain measurement.\n");
			continue;
		}
		printf("T: Time = %.1u, Value = %.3f\n", m.time, m.value);
	}

	if (release_context(ctx) != 0) {
		printf("T: Failed to release context.\n");
		return 1;
	}

	return 0;
}
/* called on entry into a compiled callback */
void factor_vm::nest_stacks(stack_frame *magic_frame)
{
	context *new_ctx = alloc_context();

	/* (stack_frame *)-1 marks the callstack bounds as not yet
	   established for the freshly nested context. */
	new_ctx->callstack_bottom = (stack_frame *)-1;
	new_ctx->callstack_top = (stack_frame *)-1;

	/* note that these register values are not necessarily valid stack
	   pointers. they are merely saved non-volatile registers, and are
	   restored in unnest_stacks(). consider this scenario:
	   - factor code calls C function
	   - C function saves ds/cs registers (since they're non-volatile)
	   - C function clobbers them
	   - C function calls Factor callback
	   - Factor callback returns
	   - C function restores registers
	   - C function returns to Factor code */
	new_ctx->datastack_save = ds;
	new_ctx->retainstack_save = rs;

	new_ctx->magic_frame = magic_frame;

	/* save per-callback special_objects */
	new_ctx->current_callback_save = special_objects[OBJ_CURRENT_CALLBACK];
	new_ctx->catchstack_save = special_objects[OBJ_CATCHSTACK];

	/* Push the new context onto the chain and make it current;
	   unnest_stacks() is expected to pop it. */
	new_ctx->next = ctx;
	ctx = new_ctx;

	/* The callback starts with empty data and retain stacks. */
	reset_datastack();
	reset_retainstack();
}
/* Join a group. The group context is returned in *contextp.
 *
 * The context is allocated and published under the critical section,
 * then a DN_JOIN downcall is streamed to the outboard process.  The
 * exact order and number of write_*() calls below constitute the wire
 * format of the downcall and must match the reader on the other end. */
hot_err_t hot_ens_Join(
	hot_ens_JoinOps_t *jops,
	hot_context_t *contextp /*OUT*/
) {
	hot_err_t err = HOT_OK ;
	hot_context_t s ;

	/* Initialize global state if not done so already. */
	if (!g.initialized) {
		err = hot_ens_Init(jops->outboard, jops->argv);
		if (err != HOT_OK)
			return err;
	}

	begin_write(); {
		begin_critical(); {
			/* Allocate a new group context
			 * Initialize the group record.
			 * NOTE(review): the alloc_context() result is not
			 * NULL-checked here -- presumably it aborts on
			 * allocation failure; confirm against its definition. */
			s = alloc_context();
			s->joining = 1 ;
			s->leaving = 0 ;
			s->conf = jops->conf;
			s->env = jops->env;
			s->view = NULL ;
			*contextp = s ;
		} end_critical();

		/* Write the downcall. */
		write_hdr(s,DN_JOIN);
		write_int(jops->heartbeat_rate);
		write_string(jops->transports);
		write_string(jops->protocol);
		write_string(jops->group_name);
		write_string(jops->properties);
		write_bool(jops->use_properties);
		write_bool(jops->groupd);
		write_string(jops->params);
		write_bool(jops->client);
		write_bool(jops->debug);

		/* Outboard mode cannot honour a caller-supplied endpoint
		   name: warn once and clear it before continuing. */
		if (jops->endpt.name[0] != 0x0) {
			hot_sys_Warning("HOT_OUTBOARD does not support 'endpt' in join ops") ;
			jops->endpt.name[0] = 0x0;
		}
		write_string(jops->princ);
		write_string(jops->key);
		write_bool(jops->secure);
	} end_write();

	return HOT_OK;
}
/*
 * Entry point: parse command line options, optionally daemonize, set up
 * syslog and privileges, then run the DDNS main loop until it finishes
 * or requests a restart (RC_RESTART re-enters the do/while loop with a
 * freshly parsed configuration).
 */
int main(int argc, char *argv[])
{
	int c, rc = 0, restart;
	/* Long options; the 4th field maps each to its short option.
	   --pidfile uses the out-of-band value 100 (no short form). */
	struct option opt[] = {
		{ "once", 0, 0, '1' },
		{ "continue-on-error", 0, 0, 'c' },
		{ "exec", 1, 0, 'e' },
		{ "config", 1, 0, 'f' },
		{ "iface", 1, 0, 'i' },
		{ "loglevel", 1, 0, 'l' },
		{ "help", 0, 0, 'h' },
		{ "foreground", 0, 0, 'n' },
		{ "pidfile", 1, 0, 100 },
		{ "drop-privs", 1, 0, 'p' },
		{ "syslog", 0, 0, 's' },
		{ "startup-delay", 1, 0, 't' },
		{ "version", 0, 0, 'v' },
		{ NULL, 0, 0, 0 }
	};
	ddns_t *ctx = NULL;

	while ((c = getopt_long(argc, argv, "1ce:f:h?i:l:np:st:v", opt, NULL)) != EOF) {
		switch (c) {
		case '1':	/* --once */
			once = 1;
			break;

		case 'c':	/* --continue-on-error */
			ignore_errors = 1;
			break;

		case 'e':	/* --exec=CMD */
			script_exec = strdup(optarg);
			break;

		case 'f':	/* --config=FILE */
			config = strdup(optarg);
			break;

		case 'i':	/* --iface=IFNAME */
			iface = strdup(optarg);
			break;

		case 'l':	/* --loglevel=LEVEL */
			loglevel = loglvl(optarg);
			if (-1 == loglevel)
				return usage(1);
			break;

		case 'n':	/* --foreground */
			background = 0;
			break;

		case 100:	/* --pidfile=BASENAME */
			pidfile_name = strdup(optarg);
			break;

		case 'p':	/* --drop-privs=USER[:GROUP] */
			parse_privs(optarg);
			break;

		case 's':	/* --syslog */
			use_syslog = 1;
			break;

		case 't':	/* --startup-delay=SEC */
			startup_delay = atoi(optarg);
			break;

		case 'v':	/* --version */
			puts(VERSION);
			return 0;

		case 'h':	/* --help */
		case ':':	/* Missing parameter for option. */
		case '?':	/* Unknown option. */
		default:
			return usage(0);
		}
	}

	if (background) {
		if (daemon(0, 0) < 0) {
			fprintf(stderr, "Failed daemonizing %s: %m\n", __progname);
			return RC_OS_FORK_FAILURE;
		}
		/* A detached daemon has no stderr: force syslog. */
		use_syslog = 1;
	}

	if (use_syslog) {
		openlog(NULL, LOG_PID, LOG_USER);
		setlogmask(LOG_UPTO(loglevel));
	}

	if (drop_privs()) {
		logit(LOG_WARNING, "Failed dropping privileges: %s", strerror(errno));
		return RC_OS_CHANGE_PERSONA_FAILURE;
	}

	/* "Hello!" Let user know we've started up OK */
	logit(LOG_NOTICE, "%s", VERSION_STRING);

	if (!config)
		config = strdup(DEFAULT_CONFIG_FILE);

	/* Prepare SSL library, if enabled */
	ssl_init();

	/* Main lifecycle: allocate context, parse config, run; repeat on
	   RC_RESTART, tearing everything down in between. */
	do {
		restart = 0;

		rc = alloc_context(&ctx);
		if (rc != RC_OK)
			break;

		if (os_install_signal_handler(ctx))
			return RC_OS_INSTALL_SIGHANDLER_FAILED;

		cfg = conf_parse_file(config, ctx);
		if (!cfg) {
			free_context(ctx);
			return RC_FILE_IO_MISSING_FILE;
		}

		rc = ddns_main_loop(ctx);
		if (rc == RC_RESTART)
			restart = 1;

		free_context(ctx);
		cfg_free(cfg);
	} while (restart);

	if (use_syslog)
		closelog();

	free(config);
	ssl_exit();

	return rc;
}
static void process_accept(struct conn_context * listen_ctx) { int client_fd, listen_fd; int events = listen_ctx->events; struct epoll_event evt; struct context_pool *pool; struct conn_context *client_ctx; int cpu_id = listen_ctx->cpu_id; int ret = 0; int i; listen_fd = listen_ctx->fd; //TODO: What else should I do. if (events & (EPOLLHUP | EPOLLERR)) return; for (i = 0; i < ACCEPT_PER_LISTEN_EVENT; i++) { int flags; client_fd = accept(listen_fd, NULL, NULL); if (client_fd < 0) { wdata[cpu_id].accept_cnt++; goto back; } flags = fcntl(client_fd, F_GETFL, 0); flags |= O_NONBLOCK; fcntl(client_fd, F_SETFL, flags); print_d("Accept socket %d from %d\n", client_fd, listen_fd); } pool = listen_ctx->pool; client_ctx = alloc_context(pool); assert(client_ctx); client_ctx->fd = client_fd; if (enable_proxy) client_ctx->handler = process_read_frontend; else client_ctx->handler = process_read; client_ctx->cpu_id = listen_ctx->cpu_id; client_ctx->ep_fd = listen_ctx->ep_fd; evt.events = EPOLLIN | EPOLLHUP | EPOLLERR; evt.data.ptr = client_ctx; ret = epoll_ctl(client_ctx->ep_fd, EPOLL_CTL_ADD, client_ctx->fd, &evt); if (ret < 0) { perror("Unable to add client socket read event to epoll"); goto free_back; } client_ctx->fd_added = 1; goto back; free_back: print_d("cpu[%d] close socket %d\n", cpu_id, client_ctx->fd); process_close(client_ctx); free_context(client_ctx); back: return; }
void *process_clients(void *arg) { int ret; struct worker_data *mydata = (struct worker_data *)arg; struct context_pool *pool; struct epoll_event evt; struct epoll_event evts[EVENTS_PER_BATCH]; int cpu_id; int ep_fd; int i; struct conn_context *ctx; if (enable_keepalive) http_response = http_200_keepalive; else http_response = http_200; http_response_len = strlen(http_response); ret = bind_process_cpu(mydata->cpu_id); if (ret < 0) { perror("Unable to Bind worker on CPU"); exit_cleanup(); } pool = init_pool(MAX_CONNS_PER_WORKER); if ((ep_fd = epoll_create(MAX_CONNS_PER_WORKER)) < 0) { perror("Unable to create epoll FD"); exit_cleanup(); } for (i = 0; i < la_num; i++) { ctx = alloc_context(pool); ctx->fd = la[i].listen_fd; ctx->handler = process_accept; cpu_id = mydata->cpu_id; ctx->cpu_id = cpu_id; ctx->ep_fd = ep_fd; evt.events = EPOLLIN | EPOLLHUP | EPOLLERR; evt.data.ptr = ctx; if (epoll_ctl(ctx->ep_fd, EPOLL_CTL_ADD, ctx->fd, &evt) < 0) { perror("Unable to add Listen Socket to epoll"); exit_cleanup(); } } wdata[cpu_id].polls_min = EVENTS_PER_BATCH; while (1) { int num_events; int i; int events; num_events = epoll_wait(ep_fd, evts, EVENTS_PER_BATCH, -1); if (num_events < 0) { if (errno == EINTR) continue; perror("epoll_wait() error"); } if (!num_events) wdata[cpu_id].polls_mpt++; else if (num_events < wdata[cpu_id].polls_min) wdata[cpu_id].polls_min = num_events; if (num_events > wdata[cpu_id].polls_max) wdata[cpu_id].polls_max = num_events; wdata[cpu_id].polls_sum += num_events; wdata[cpu_id].polls_cnt++; wdata[cpu_id].polls_avg = wdata[cpu_id].polls_sum / wdata[cpu_id].polls_cnt; wdata[cpu_id].polls_lst = num_events; for (i = 0 ; i < num_events; i++) { int active_fd; events = evts[i].events; ctx = evts[i].data.ptr; ctx->events = events; if (ctx->flags & PROXY_BACKEND_EVENT) active_fd = ctx->end_fd; else active_fd = ctx->fd; print_d("%dth event[0x%x] at fd %d\n", i, events, active_fd); ctx->handler(ctx); } } return NULL; }
/*
 * Ensure 'mm' has an MMU context, allocating one lazily the first time
 * the mm is used (i.e. while its context is still NO_CONTEXT).
 */
void get_mmu_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT)
		return;

	alloc_context(mm);
}
/* * Construct a striped mapping. * <number of stripes> <chunk size> [<dev_path> <offset>]+ */ static int vm_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct vm_c *vc; sector_t width, tmp_len; uint32_t vms; uint32_t chunk_size; int r; unsigned long long i; if (argc < 2) { ti->error = "Not enough arguments"; return -EINVAL; } if (kstrtouint(argv[0], 10, &vms) || !vms) { ti->error = "Invalid stripe count"; return -EINVAL; } if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) { ti->error = "Invalid chunk_size"; return -EINVAL; } width = ti->len; if (sector_div(width, vms)) { ti->error = "Target length not divisible by " "number of stripes"; return -EINVAL; } tmp_len = width; if (sector_div(tmp_len, chunk_size)) { ti->error = "Target length not divisible by " "chunk size"; return -EINVAL; } /* * Do we have enough arguments for that many stripes ? */ if (argc != (2 + 2 * vms)) { ti->error = "Not enough destinations " "specified"; return -EINVAL; } vc = alloc_context(vms); if (!vc) { ti->error = "Memory allocation for striped context " "failed"; return -ENOMEM; } INIT_WORK(&vc->trigger_event, trigger_event); /* Set pointer to dm target; used in trigger_event */ vc->ti = ti; vc->vms = vms; vc->vm_width = width; if (vms & (vms - 1)) vc->vms_shift = -1; else vc->vms_shift = __ffs(vms); r = dm_set_target_max_io_len(ti, chunk_size); if (r) { kfree(vc); return r; } ti->num_flush_bios = vms; ti->num_discard_bios = vms; ti->num_write_same_bios = vms; vc->chunk_size = chunk_size; if (chunk_size & (chunk_size - 1)) vc->chunk_size_shift = -1; else vc->chunk_size_shift = __ffs(chunk_size); /* * Get the stripe destinations. 
*/ for (i = 0; i < vms; i++) { argv += 2; r = get_vm(ti, vc, i, argv); if (r < 0) { ti->error = "Couldn't parse stripe destination"; while (i--) dm_put_device(ti, vc->vm[i].dev); kfree(vc); return r; } atomic_set(&(vc->vm[i].error_count), 0); } /*volume manager initialize*/ vc->wp = 0;//////current 0 is NVMe //vc->wp = 1; vc->ws = kmalloc(sizeof(unsigned long long) * vc->vms, GFP_KERNEL); for(i = 0; i<vc->vms; i++) vc->ws[i] = 0; vc->gp_list = kmalloc(sizeof(char) * vc->vms, GFP_KERNEL); vc->num_gp = 0; vc->io_client = dm_io_client_create(); vc->gs = NULL; vc->overload = 0; for(i=0; i<vc->vms; i++) vc->gp_list[i] = Clean_Weight;//0 is clean { unsigned long long tem, disk_size; tem = 0; for(i = 0; i<vms; i++){ struct block_device *cur_bdev = vc->vm[i].dev->bdev; vc->vm[i].end_sector = i_size_read(cur_bdev->bd_inode)>>9;//unit of sector printk("vm%llu start_sector %llu, end_sector %llu, target_offset %llu\n", i, (unsigned long long) vc->vm[i].physical_start, (unsigned long long) vc->vm[i].end_sector, (unsigned long long)dm_target_offset(ti, vc->ws[i])); disk_size = vc->vm[i].end_sector * 512; do_div(disk_size, (unsigned long long) vc->vm[i].dev->bdev->bd_block_size); tem += disk_size; } vc->num_entry = tem;//num entry is blk num } printk("num entry is %llu, node size is %lu, req mem is %llu\n", vc->num_entry, sizeof(struct flag_nodes), sizeof(struct flag_nodes) * vc->num_entry); //flag set initialize vc->fs = (struct flag_set *) kmalloc(sizeof(struct flag_set), GFP_KERNEL); vc->fs->node_buf = kmem_cache_create("dirty_data_buf", sizeof(struct flag_nodes), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), NULL); vc->fs->table = (struct flag_nodes **)vmalloc(sizeof(struct flag_nodes*) * vc->num_entry); for(i=0; i<vc->num_entry; i++){ //vc->fs->table[i] = NULL;//late alloc code vc->fs->table[i] = kmem_cache_alloc(vc->fs->node_buf, GFP_KERNEL);//pre alloc start vc->fs->table[i]->msector = -1; vc->fs->table[i]->wp = -1;//pre alloc end } vc->num_map_block = 0;//vc->num_entry * 
sizeof(struct flag_nodes) / 4096; //vc->ws[0] += vc->num_map_block; vc->fs->reverse_table = vmalloc(sizeof(struct reverse_nodes*) * vc->vms); vc->d_num = kmalloc(sizeof(unsigned long long) * vc->vms, GFP_KERNEL); for(i=0; i<vc->vms; i++){ unsigned long long j; unsigned long long r_table_size = (vc->vm[i].end_sector + 7); unsigned long long phy_sect = vc->vm[i].physical_start; do_div(phy_sect, 8); do_div(r_table_size, 8); printk("r_table_size = %llu\n", r_table_size); vc->vm[i].num_dirty = r_table_size - phy_sect; vc->d_num[i] = vc->vm[i].num_dirty; vc->fs->reverse_table[i] = vmalloc(sizeof(struct reverse_nodes) * r_table_size); for(j=0; j<r_table_size; j++){ vc->fs->reverse_table[i][j].index = -1; vc->fs->reverse_table[i][j].dirty = 1; vc->fs->reverse_table[i][j].size = -1; } //printk("%u's first ptr is %p, final ptr is %p\n", i, &(vc->fs->reverse_table[i][0]), &(vc->fs->reverse_table[i][j])); } for(i=0; i<vc->vms; i++){ unsigned int minor = atom(vc->vm[i].dev->name); unsigned int major = atoj(vc->vm[i].dev->name); printk("dev name is %s\t", vc->vm[i].dev->name); if(major != 2600) vc->vm[i].main_dev = minor >> minor_shift; else vc->vm[i].main_dev = minor - 1; vc->vm[i].maj_dev = major; printk("main %u, maj %u\n", vc->vm[i].main_dev, vc->vm[i].maj_dev); }