/*
 * get_user_page() - resolve (and if absent, populate) the page backing one
 * user virtual address, Linux get_user_pages()-style.
 * @p:        process whose page directory (env_pgdir) is walked.
 * @uvastart: user virtual address; only the single page containing it is
 *            resolved.
 * @write:    nonzero if the caller intends to write; fails if the PTE is
 *            present but not user-read-write.
 * @force:    currently unused (see TODO below about Linux's semantics).
 * @plist:    out parameter; plist[0] receives the resolved struct page.
 *
 * Returns 1 (the number of pages resolved) on success, -1 on any failure.
 * The whole walk/allocate/check sequence runs under p->pte_lock.
 */
int get_user_page(struct proc *p, unsigned long uvastart, int write,
		  int force, struct page **plist)
{
	pte_t pte;
	int ret = -1;
	struct page *pp;

	spin_lock(&p->pte_lock);
	/* TRUE: create intermediate page-table levels as needed */
	pte = pgdir_walk(p->env_pgdir, (void*)uvastart, TRUE);
	if (!pte_walk_okay(pte))
		goto err1;
	if (!pte_is_present(pte)) {
		/* No backing page yet: allocate one and install a fully
		 * permissive user RW mapping. */
		unsigned long prot = PTE_P | PTE_U | PTE_A | PTE_W | PTE_D;
#if 0
		printk("[akaros]: get_user_page() uva=0x%llx pte absent\n",
		       uvastart);
#endif
		/*
		 * TODO: ok to allocate with pte_lock? "prot" needs to be
		 * based on VMR writability, refer to pgprot_noncached().
		 */
		if (upage_alloc(p, &pp, 0))
			goto err1;
		pte_write(pte, page2pa(pp), prot);
	} else {
		pp = pa2page(pte_get_paddr(pte));
		/* __vmr_free_pgs() refcnt's pagemap pages differently, so
		 * refuse to hand those out here. */
		if (atomic_read(&pp->pg_flags) & PG_PAGEMAP) {
			printk("[akaros]: get_user_page(): uva=0x%llx\n",
			       uvastart);
			goto err1;
		}
	}
	/* Writers need a user-RW PTE; note this also rejects the page we may
	 * have just mapped only if pte_has_perm_urw() disagrees with prot. */
	if (write && (!pte_has_perm_urw(pte))) {
		/* TODO: How is Linux using the "force" parameter */
		printk("[akaros]: get_user_page() uva=0x%llx pte ro\n",
		       uvastart);
		goto err1;
	}
	/* TODO (GUP): change the interface such that devices provide the
	 * memory and the user mmaps it, instead of trying to pin arbitrary
	 * user memory. */
	warn_once("Extremely unsafe, unpinned memory mapped! If your process dies, you might scribble on RAM!");
	plist[0] = pp;
	ret = 1;
err1:
	spin_unlock(&p->pte_lock);
	return ret;
}
/*
 * perfmon_do_cores_alloc() - per-core worker that claims and programs one
 * hardware performance counter for the event described in @opaque (a
 * struct perfmon_alloc).  The outcome for this core is recorded in
 * pa->cores_counters[core_id()]: the counter index on success, or a
 * negative errno (-EINVAL, -EBUSY, -ENOSPC) on failure.
 *
 * All counter-state inspection and MSR programming happens under this
 * core's cctx->lock with IRQs saved.
 */
static void perfmon_do_cores_alloc(void *opaque)
{
	struct perfmon_alloc *pa = (struct perfmon_alloc *) opaque;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	int i;

	spin_lock_irqsave(&cctx->lock);
	if (perfmon_is_fixed_event(&pa->ev)) {
		uint64_t fxctrl_value = read_msr(MSR_CORE_PERF_FIXED_CTR_CTRL),
			tmp;

		/* For fixed events, the event code selects which fixed
		 * counter must be used. */
		i = PMEV_GET_EVENT(pa->ev.event);
		if (i >= (int) cpu_caps.fix_counters_x_proc) {
			i = -EINVAL;
		} else if (fxctrl_value & (FIXCNTR_MASK << i)) {
			/* That fixed counter is already enabled/in use. */
			i = -EBUSY;
		} else {
			cctx->fixed_counters[i] = pa->ev;
			PMEV_SET_EN(cctx->fixed_counters[i].event, 1);
			tmp = perfmon_get_fixevent_mask(&pa->ev, i,
							fxctrl_value);
			perfmon_enable_fix_event(i, TRUE);
			/* Counter is loaded with -(trigger_count) so it
			 * overflows after trigger_count events. */
			write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
				  -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, tmp);
		}
	} else {
		/* General-purpose counters: scan for one that is both free
		 * (event == 0) and actually available on the hardware. */
		for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
			if (cctx->counters[i].event == 0) {
				if (!perfmon_event_available(i))
					warn_once("Counter %d is free but not available", i);
				else
					break;
			}
		}
		if (i < (int) cpu_caps.counters_x_proc) {
			cctx->counters[i] = pa->ev;
			PMEV_SET_EN(cctx->counters[i].event, 1);
			perfmon_enable_event(i, TRUE);
			/* Same negative-preload trick as the fixed case. */
			write_msr(MSR_IA32_PERFCTR0 + i,
				  -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i,
				  cctx->counters[i].event);
		} else {
			i = -ENOSPC;
		}
	}
	spin_unlock_irqsave(&cctx->lock);
	pa->cores_counters[core_id()] = (counter_t) i;
}
/*
 * packed_format_of() - compute (and memoize) the bit-packed layout of the
 * members of struct type @stype.
 *
 * Sub-word members are bucketed by bit size and packed tightly by
 * pack_items(); word-sized and larger members are appended whole after the
 * packed words.  Returns NULL when the structure cannot fit in 63 words.
 * The returned object is owned by packed_cache (keyed by sdecl_name());
 * callers must not free it.
 */
const struct packed_format *packed_format_of(IDL_tree stype)
{
	static bool first = true;

	if(first) {
		first = false;
		strmap_init(&packed_cache);
	}

	const char *s_id = sdecl_name(stype);
	struct packed_format *ret = strmap_get(&packed_cache, s_id);
	if(ret != NULL) return ret;

	/* NULL-terminated flat member list, sorted largest-first */
	struct member_item *items = expand_member_list(
		IDL_TYPE_STRUCT(stype).member_list);
	int num_items = 0;
	while(items[num_items].type != NULL) num_items++;
	qsort(items, num_items, sizeof(struct member_item),
		&item_by_bitsize_cmp);

	/* packing of small (sub-word) items.
	 * BUGFIX: a sub-word item can be up to BITS_PER_WORD - 1 bits, so
	 * the bucket array needs BITS_PER_WORD slots; the previous
	 * BITS_PER_WORD - 1 sized array was written out of bounds at
	 * items_by_size[BITS_PER_WORD - 1].
	 */
	GList *items_by_size[BITS_PER_WORD];
	for(int i=0; i < BITS_PER_WORD; i++) items_by_size[i] = NULL;
	int num_small = 0;
	for(int i=0; i<num_items; i++) {
		struct member_item *item = &items[i];
		/* TODO: produce N items for arrays where bits_each <
		 * BITS_PER_WORD, so that smaller items can be packed after
		 * e.g. an array member that leaves 11 bits unused in each
		 * word.
		 */
		int bits = MEMBER_BITS(item);
		if(bits >= BITS_PER_WORD) break;
		items_by_size[bits] = g_list_prepend(items_by_size[bits], item);
		num_small++;
	}
	/* undo the prepend-reversal so each bucket keeps sorted order */
	for(int i=0; i < BITS_PER_WORD; i++) {
		items_by_size[i] = g_list_reverse(items_by_size[i]);
	}

	GPtrArray *packed = g_ptr_array_new();
	int num_words = pack_items(packed, items_by_size, num_small,
		NULL, 0, 64);
	/* the buckets are not needed past pack_items(); freeing them here
	 * covers the failure path below as well */
	for(int i=0; i < BITS_PER_WORD; i++) {
		g_list_free(items_by_size[i]);
	}
	if(num_words > 63) {
		warn_once("structure `%s' can't be bit-packed\n", s_id);
		/* BUGFIX: this early exit used to leak @items and @packed.
		 * NOTE(review): items already placed into @packed by
		 * pack_items() may still leak; no destructor for
		 * struct packed_item is visible here — confirm. */
		g_ptr_array_free(packed, TRUE);
		g_free(items);
		return NULL;
	}
	assert(num_words < 64);

	/* packing of word-length, and longer, items: append as-is after the
	 * bit-packed words */
	for(int i=0; i<num_items; i++) {
		struct member_item *item = &items[i];
		int nbits = MEMBER_BITS(item);
		if(nbits < BITS_PER_WORD) continue;
		g_ptr_array_add(packed,
			new_packed_item(num_words, 0, nbits, item));
		int words = (nbits + BITS_PER_WORD - 1) / BITS_PER_WORD;
		num_words += words;
	}

	/* every member must have been placed exactly once */
	assert(packed->len == num_items);
	g_free(items);
	items = NULL;

	/* flatten the pointer array into the cached flexible-array result */
	ret = g_malloc(sizeof(struct packed_format)
		+ sizeof(struct packed_item *) * packed->len);
	ret->num_words = num_words;
	ret->num_items = packed->len;
	memcpy(ret->items, &g_ptr_array_index(packed, 0),
		packed->len * sizeof(void *));
	g_ptr_array_free(packed, TRUE);
	ret->num_bits = 0;
	for(int i=0; i<ret->num_items; i++) {
		ret->num_bits += ret->items[i]->len;
	}

	bool ok = strmap_add(&packed_cache, s_id, ret);
	assert(ok || errno != EEXIST);

	return ret;
}
/*
 * get_cpu_cache_details()
 * @cpu: cpu to fill in.
 * @cpu_path: Full /sys path to cpu which will be represented by @cpu.
 * Populate @cpu with details from @cpu_path.
 *
 * Returns: EXIT_FAILURE or EXIT_SUCCESS.
 */
static int get_cpu_cache_details(cpu_t *cpu, const char *cpu_path)
{
	uint32_t i;
	size_t len = cpu_path ? strlen(cpu_path) : 0;
	size_t len2 = strlen(SYS_CPU_CACHE_DIR) + 1;
	glob_t globbuf;
	/*
	 * BUGFIX: the buffer must also hold the glob pattern appended below.
	 * The previous size of len + len2 was exactly filled by
	 * cpu_path + SYS_CPU_CACHE_DIR, so the later strncat() of
	 * GLOB_PATTERN_INDEX_PREFIX (whose bound underflowed to SIZE_MAX)
	 * wrote past the end of the array.
	 */
	char glob_path[len + len2 + strlen(GLOB_PATTERN_INDEX_PREFIX) + 1];
	char **results;
	int ret = EXIT_FAILURE;
	int ret2;

	(void)memset(glob_path, 0, sizeof(glob_path));
	(void)memset(&globbuf, 0, sizeof(globbuf));

	if (!cpu) {
		pr_dbg("%s: invalid cpu parameter\n", __func__);
		return ret;
	}
	if (!cpu_path) {
		pr_dbg("%s: invalid cpu path parameter\n", __func__);
		return ret;
	}

	/* glob_path = "<cpu_path><SYS_CPU_CACHE_DIR>" */
	(void)snprintf(glob_path, sizeof(glob_path), "%s%s",
		cpu_path, SYS_CPU_CACHE_DIR);

	ret2 = file_exists(glob_path);
	if (!ret2) {
		/*
		 * Not an error since some platforms don't provide cache
		 * details via /sys (ARM).
		 */
		/*
		if (warn_once(WARN_ONCE_NO_CACHE))
			pr_dbg("%s does not exist\n", glob_path);
		*/
		return ret;
	}
	if (ret2 != S_IFDIR) {
		if (warn_once(WARN_ONCE_NO_CACHE))
			pr_err("file %s is not a directory\n", glob_path);
		return ret;
	}

	(void)strncat(glob_path, GLOB_PATTERN_INDEX_PREFIX,
		sizeof(glob_path) - strlen(glob_path) - 1);
	ret2 = glob(glob_path, SHIM_GLOB_ONLYDIR, NULL, &globbuf);
	if (ret2 != 0) {
		if (warn_once(WARN_ONCE_NO_CACHE))
			/* BUGFIX: report glob's status ret2, not ret (which
			 * is always EXIT_FAILURE here) */
			pr_err("glob on regex \"%s\" failed: %d\n",
				glob_path, ret2);
		/* BUGFIX: globfree() any partial allocation (safe on the
		 * zeroed struct) instead of returning directly */
		goto err;
	}

	results = globbuf.gl_pathv;
	cpu->cache_count = globbuf.gl_pathc;
	if (!cpu->cache_count) {
		if (warn_once(WARN_ONCE_NO_CACHE))
			pr_err("no CPU caches found\n");
		goto err;
	}

	cpu->caches = calloc(cpu->cache_count, sizeof(cpu_cache_t));
	if (!cpu->caches) {
		size_t cache_bytes = cpu->cache_count * sizeof(cpu_cache_t);

		pr_err("failed to allocate %zu bytes for cpu caches\n",
			cache_bytes);
		goto err;
	}

	for (i = 0; i < cpu->cache_count; i++) {
		ret2 = add_cpu_cache_detail(&cpu->caches[i], results[i]);
		if (ret2 != EXIT_SUCCESS)
			goto err;
	}
	ret = EXIT_SUCCESS;
err:
	globfree(&globbuf);
	/* (dead "glob_path[0] = '\0'" reset of the local buffer removed) */
	return ret;
}
/*
 * main() - UDP frame receiver for an LEDscape matrix.
 *
 * Parses options, opens a UDP socket, and loops forever: each received
 * packet carries a 1-byte frame-part tag (0 = top half, 1 = bottom half)
 * followed by width*32 RGB triples, which are expanded into a 32-bit
 * framebuffer and drawn once both halves have arrived.  Returns -1 on bad
 * options, EXIT_FAILURE on init errors; the receive loop never returns
 * normally.
 */
int main( int argc, char ** argv )
{
	/* getopt_long stores the option index here. */
	int option_index = 0;
	int port = 9999;
	const char * config_file = NULL;
	const char * startup_message = "";
	int timeout = 60;		/* seconds to wait before "timeout" screen */
	unsigned width = 512;
	unsigned height = 64;
	int no_init = 0;

	while (1)
	{
		const int c = getopt_long( argc, argv, "vp:c:t:W:H:m:n",
			long_options, &option_index );
		if (c == -1)
			break;
		switch (c)
		{
		case 'v': verbose++; break;
		case 'n': no_init++; break;
		case 'c': config_file = optarg; break;
		case 't': timeout = atoi(optarg); break;
		case 'W': width = atoi(optarg); break;
		case 'H': height = atoi(optarg); break;
		case 'm': startup_message = optarg; break;
		default: usage(); return -1;
		}
	}

	/* NOTE(review): port is initialized to 9999 but there is no 'p'
	 * case above even though "p:" is in the optstring — confirm whether
	 * -p was meant to set it. */
	const int sock = udp_socket(port);
	if (sock < 0)
		die("socket port %d failed: %s\n", port, strerror(errno));

	const size_t image_size = width * height * 3;	/* RGB payload bytes */
	/* NOTE(review): buf_size is sized from width*height*4 split over
	 * packets_per_frame, yet the length check below expects
	 * image_size + 1 per packet — these look inconsistent; confirm the
	 * intended packet framing. */
	const size_t buf_size = (width*height*4)/packets_per_frame + 1;
	// largest possible UDP packet
	uint8_t *buf = malloc(buf_size);
#if 0
	if (sizeof(buf) < image_size + 1)
		die("%u x %u too large for UDP\n", width, height);
#endif

	fprintf(stderr, "%u x %u, UDP port %u\n", width, height, port);

	ledscape_config_t * config = &ledscape_matrix_default;
	if (config_file)
	{
		config = ledscape_config(config_file);
		if (!config)
			return EXIT_FAILURE;
	}
	if (config->type == LEDSCAPE_MATRIX)
	{
		config->matrix_config.width = width;
		config->matrix_config.height = height;
	}

	ledscape_t * const leds = ledscape_init(config, no_init);
	if (!leds)
		return EXIT_FAILURE;

	const unsigned report_interval = 10;	/* seconds between fps reports */
	unsigned last_report = 0;
	unsigned long delta_sum = 0;
	unsigned frames = 0;

	/* 4 bytes per pixel: 0x00RRGGBB, see the copy loop below */
	uint32_t * const fb = calloc(width*height,4);
	ledscape_printf(fb, width, 0xFF0000, "%s", startup_message);
	ledscape_printf(fb+16*width, width, 0x00FF00, "%dx%d UDP port %d",
		width, height, port);
	ledscape_draw(leds, fb);

	while (1)
	{
		int rc = wait_socket(sock, timeout*1000);
		if (rc < 0)
		{
			// something failed
			memset(fb, 0, width*height*4);
			ledscape_printf(fb, width, 0xFF0000, "read failed?");
			ledscape_draw(leds, fb);
			exit(EXIT_FAILURE);
		}
		if (rc == 0)
		{
			// go into timeout mode
			memset(fb, 0, width*height*4);
			ledscape_printf(fb, width, 0xFF0000, "timeout");
			ledscape_draw(leds, fb);
			continue;
		}

		const ssize_t rlen = recv(sock, buf, buf_size, 0);
		if (rlen < 0)
			die("recv failed: %s\n", strerror(errno));
		/* NOTE(review): %zu expects size_t but rlen is ssize_t —
		 * harmless on common ABIs, strictly a format mismatch. */
		warn_once("received %zu bytes\n", rlen);

		/*
		if (buf[0] == 2)
		{
			// image type
			printf("image type: %.*s\n", (int) rlen - 1, &buf[1] );
			continue;
		}
		if (buf[0] != 1)
		{
			// What is it?
			warn_once("Unknown image type '%c' (%02x)\n",
				buf[0], buf[0] );
			continue;
		}
		*/

		/* first byte selects which half of the panel this packet
		 * carries: 0 = rows 0..31, 1 = rows 32..63 */
		const unsigned frame_part = buf[0];
		if (frame_part != 0 && frame_part != 1)
		{
			printf("bad type %d\n", frame_part);
			continue;
		}

		if ((size_t) rlen != image_size + 1)
		{
			warn_once("WARNING: Received packet %zu bytes, expected %zu\n",
				rlen, image_size + 1 );
		}

		struct timeval start_tv, stop_tv, delta_tv;
		gettimeofday(&start_tv, NULL);

		const unsigned frame_num = 0;	/* NOTE(review): unused */

		// copy the 3-byte values into the 4-byte framebuffer
		// and turn onto the side
		for (unsigned x = 0 ; x < width ; x++) // 256
		{
			for (unsigned y = 0 ; y < 32 ; y++) // 64
			{
				uint32_t * out = (void*) &fb[(y+32*frame_part)*width + x];
				const uint8_t * const in = &buf[1 + 3*(y*width + x)];
				uint32_t r = in[0];
				uint32_t g = in[1];
				uint32_t b = in[2];
				*out = (r << 16) | (g << 8) | (b << 0);
			}
		}

		// only draw after the second frame
		if (frame_part == 1)
			ledscape_draw(leds, fb);

		gettimeofday(&stop_tv, NULL);
		timersub(&stop_tv, &start_tv, &delta_tv);

		frames++;
		delta_sum += delta_tv.tv_usec;
		if (stop_tv.tv_sec - last_report < report_interval)
			continue;
		last_report = stop_tv.tv_sec;

		const unsigned delta_avg = delta_sum / frames;
		printf("%6u usec avg, max %.2f fps, actual %.2f fps (over %u frames)\n",
			delta_avg,
			report_interval * 1.0e6 / delta_avg,
			frames * 1.0 / report_interval,
			frames
		);

		frames = delta_sum = 0;
	}

	return 0;
}