/* * Read packet header from file */ int pkt_get_hdr(FILE *fp, Packet *pkt) { long val; struct tm t; int ozone, dzone; int cw, swap; int retVal; char xpkt[4]; struct tm *tm; TIMEINFO ti; retVal = OK; GetTimeInfo(&ti); tm = localtime(&ti.time); node_clear(&pkt->from); node_clear(&pkt->to); pkt->time = -1; pkt->baud = 0; pkt->version = 0; pkt->product_l = 0; pkt->product_h = 0; pkt->rev_min = 0; pkt->rev_maj = 0; pkt->passwd[0] = 0; pkt->capword = 0; /* Set zone to default, i.e. use the zone from your FIRST aka * specified in fidogate.conf */ pkt->from.zone = pkt->to.zone = cf_defzone(); /* Orig node */ if((val = pkt_get_int16(fp)) == ERROR) return ERROR; pkt->from.node = val; /* Dest node */ if((val = pkt_get_int16(fp)) == ERROR) return ERROR; pkt->to.node = val; /* Year */ if((val = pkt_get_int16(fp)) == ERROR) return ERROR; if(val == 0 || val < 1900 || val > 2099) #ifdef FIX_BAD_PKT_YEAR ; #else retVal = ERROR; #endif else
/*
 * Tear down the proximity-domain <-> node translation for 'node':
 * invalidate both direction maps and return the node ID to the pool
 * of unused nodes.
 */
void __cpuinit acpi_unmap_pxm_to_node(int node)
{
	int unmapped_pxm = node_to_pxm_map[node];

	node_clear(node, nodes_found_map);
	node_to_pxm_map[node] = PXM_INVAL;
	pxm_to_node_map[unmapped_pxm] = NID_INVAL;
}
/* Use the information discovered above to actually set up the nodes. */ int __init acpi_scan_nodes(unsigned long start, unsigned long end) { int i; if (acpi_numa <= 0) return -1; /* First clean up the node list */ for_each_node_mask(i, nodes_parsed) { cutoff_node(i, start, end); if (nodes[i].start == nodes[i].end) node_clear(i, nodes_parsed); }
// used to initialise an invalid node structure. The values currently in the // structure are unknown. We will assign a handle, because the only time we // ever need to initiate a newly created struct is when we have received a // socket, and the void node_init(node_t *node) { assert(node != NULL); node->handle = INVALID_HANDLE; node->filehandle = INVALID_HANDLE; node->storepath = NULL; node->stats = NULL; node->risp = NULL; expbuf_init(&node->in, DEFAULT_BUFFSIZE); expbuf_init(&node->out, DEFAULT_BUFFSIZE); expbuf_init(&node->filebuf, 0); data_init(&node->data); node_clear(node); node->active = false; }
/*
 * Callback for parsing of the Proximity Domain <-> Memory Area mappings
 * (one SRAT memory-affinity entry).  Validates the entry, merges the
 * [start, end) range into nodes[] / nodes_parsed, and registers the range
 * with e820.  Malformed or conflicting entries discard the whole SRAT via
 * bad_srat().
 */
void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	/* A size mismatch means the table is corrupt: reject the whole SRAT. */
	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	/* Skip entries the firmware marked as not enabled. */
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;
	/* Hotpluggable ranges are only honoured when hot-add info is kept. */
	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
		return;
	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	/* Overlap with this node itself only warns; overlap with another
	 * PXM invalidates the table. */
	i = conflicting_nodes(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
		       nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	/* Saved so a failed hot-add reservation below can undo the merge. */
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		/* First range seen for this node. */
		nd->start = start;
		nd->end = end;
	} else {
		/* Extend the node's span to cover the new range. */
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
	       nd->start, nd->end);
	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
				     nd->end >> PAGE_SHIFT);
	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
			     nd->end >> PAGE_SHIFT);

	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
	    (reserve_hotadd(node, start, end) < 0)) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		/* If the node is now empty, forget we ever parsed it. */
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
}
/* * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr * to max_addr. The return value is the number of nodes allocated. */ static int __init split_nodes_interleave(struct numa_meminfo *ei, struct numa_meminfo *pi, u64 addr, u64 max_addr, int nr_nodes) { nodemask_t physnode_mask = NODE_MASK_NONE; u64 size; int big; int nid = 0; int i, ret; if (nr_nodes <= 0) return -1; if (nr_nodes > MAX_NUMNODES) { pr_info("numa=fake=%d too large, reducing to %d\n", nr_nodes, MAX_NUMNODES); nr_nodes = MAX_NUMNODES; } #ifdef XEN_HETEROMEM_FAKENUMA printk(KERN_ALERT "Trying to emualte nr_nodes %d \n",nr_nodes); #endif /* * Calculate target node size. x86_32 freaks on __udivdi3() so do * the division in ulong number of pages and convert back. */ size = max_addr - addr - mem_hole_size(addr, max_addr); #ifdef XEN_HETEROMEM_FAKENUMA printk(KERN_ALERT "max_addr %lu, " "addr %lu, " "mem_hole_size(addr, max_addr) %lu, " "size %lu, \n", max_addr, addr, mem_hole_size(addr, max_addr), size); #endif size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); #ifdef XEN_HETEROMEM_FAKENUMA printk(KERN_ALERT "size %lu, " "(unsigned long)(size >> PAGE_SHIFT) %lu, " "PFN_PHYS((unsigned long)(size >> PAGE_SHIFT)/nr_nodes) %lu \n ", size, (unsigned long)(size >> PAGE_SHIFT), PFN_PHYS((unsigned long)(size >> PAGE_SHIFT)/nr_nodes)); #endif /* * Calculate the number of big nodes that can be allocated as a result * of consolidating the remainder. */ big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / FAKE_NODE_MIN_SIZE; #ifdef XEN_HETEROMEM_FAKENUMA printk(KERN_ALERT "Trying to emualte big nodes %d, " "Pages %lu, " "FAKE_NODE_MIN_HASH_MASK %u, " "size & ~FAKE_NODE_MIN_HASH_MASK %u, " "FAKE_NODE_MIN_SIZE %u ," "nr_nodes %u \n", big, size, FAKE_NODE_MIN_HASH_MASK, size & ~FAKE_NODE_MIN_HASH_MASK, FAKE_NODE_MIN_SIZE, nr_nodes); #endif size &= FAKE_NODE_MIN_HASH_MASK; if (!size) { pr_err("Not enough memory for each node. 
" "NUMA emulation disabled.\n"); return -1; } for (i = 0; i < pi->nr_blks; i++) node_set(pi->blk[i].nid, physnode_mask); /* * Continue to fill physical nodes with fake nodes until there is no * memory left on any of them. */ while (nodes_weight(physnode_mask)) { for_each_node_mask(i, physnode_mask) { u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); u64 start, limit, end; int phys_blk; phys_blk = emu_find_memblk_by_nid(i, pi); if (phys_blk < 0) { node_clear(i, physnode_mask); continue; } start = pi->blk[phys_blk].start; limit = pi->blk[phys_blk].end; end = start + size; if (nid < big) end += FAKE_NODE_MIN_SIZE; /* * Continue to add memory to this fake node if its * non-reserved memory is less than the per-node size. */ while (end - start - mem_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; if (end > limit) { end = limit; break; } } /* * If there won't be at least FAKE_NODE_MIN_SIZE of * non-reserved memory in ZONE_DMA32 for the next node, * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* * If there won't be enough non-reserved memory for the * next node, this one must extend to the end of the * physical node. */ if (limit - end - mem_hole_size(end, limit) < size) end = limit; ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, phys_blk, min(end, limit) - start); if (ret < 0) return ret; } }
// Libevent callback for a client node's socket.  Invoked with EV_READ when
// data is available and/or EV_WRITE when the socket can accept more output.
// Incoming bytes are buffered in node->in and handed to the RISP protocol
// processor; outgoing bytes are drained from node->out (topped up from the
// file being sent, if any).  On EOF or a hard error the connection state is
// reset via node_clear().
static void node_event_handler(int hid, short flags, void *data)
{
	node_t *node;
	unsigned int avail;
	int res;

	assert(hid >= 0);
	node = (node_t *) data;
	assert(node != NULL);
	assert(node->handle == hid);
	assert(node->stats != NULL);
	assert(node->active == true);
	assert(node->event.ev_base != NULL);

	if (flags & EV_READ) {
		assert(node->in.max >= DEFAULT_BUFFSIZE);
		avail = node->in.max - node->in.length;
		if (avail < DEFAULT_BUFFSIZE) {
			// we dont have much space left in the buffer, lets double its size.
			// NOTE(review): despite its name, expbuf_shrink() is used here to
			// ENLARGE the buffer — confirm its semantics against expbuf.c.
			expbuf_shrink(&node->in, node->in.max * 2);
			avail = node->in.max - node->in.length;
		}

		// for performance reasons, we will read the data in directly into
		// the expanding buffer.
		assert(avail >= DEFAULT_BUFFSIZE);
		node->stats->reads++;
		res = read(hid, node->in.data+node->in.length, avail);
		if (res > 0) {
			node->stats->in_bytes += res;
			node->in.length += res;
			assert(node->in.length <= node->in.max);

			// if we pulled out the max we had avail in our buffer, that means
			// we can pull out more at a time.
			if (res == avail) {
				expbuf_shrink(&node->in, node->in.max * 2);
			}

			assert(node->active);
			if (node->in.length > 0) {
				// Hand everything buffered so far to the protocol parser;
				// it returns how many bytes it consumed.
				assert(node->risp != NULL);
				node->stats->cycles ++;
				res = risp_process(node->risp, node, node->in.length, (unsigned char *) node->in.data);
				assert(res <= node->in.length);
				assert(res >= 0);
				if (res < node->in.length) {
					// Unparsed tail stays buffered for the next event.
					node->stats->undone += (node->in.length - res);
				}
				if (res > 0) {
					expbuf_purge(&node->in, res);
				}
			}
		}
		else if (res == 0) {
			// read() returned 0: the peer closed the connection.
			// NOTE(review): hid itself is not close()d here (only the file
			// handle is), unlike the error path below — verify the socket
			// descriptor is closed elsewhere, e.g. by node_clear(), or this
			// leaks a descriptor.
			node->handle = INVALID_HANDLE;
			if (node->filehandle != INVALID_HANDLE) {
				close(node->filehandle);
				node->filehandle = INVALID_HANDLE;
			}
			node_clear(node);
			assert(node->active == false);
			printf("Node[%d] closed while reading.\n", hid);
		}
		else {
			assert(res == -1);
			// EAGAIN/EWOULDBLOCK just means "try again later"; anything else
			// is a hard error and the connection is torn down.
			if (errno != EAGAIN && errno != EWOULDBLOCK) {
				close(node->handle);
				node->handle = INVALID_HANDLE;
				if (node->filehandle != INVALID_HANDLE) {
					close(node->filehandle);
					node->filehandle = INVALID_HANDLE;
				}
				node_clear(node);
				assert(node->active == false);
				printf("Node[%d] closed while reading- because of error: %d\n", hid, errno);
			}
		}
	}

	if (flags & EV_WRITE && node->active) {
		// we've requested the event, so we should have data to process.
		assert(node->out.length > 0);
		assert(node->out.length <= node->out.max);
		node->stats->writes ++;
		res = send(hid, node->out.data, node->out.length, 0);
		if (res > 0) {
			// we managed to send some, or maybe all....
			assert(res <= node->out.length);
			node->stats->out_bytes += res;
			expbuf_purge(&node->out, res);

			// if we are in the process of transmitting a file, then we need
			// to get more file data and put in the buffer, since we depleted
			// some of it.
			if (node->sending) {
				sendFileData(node);
			}
		}
		else if (res == 0) {
			// Peer closed the connection during the write.
			node->handle = INVALID_HANDLE;
			if (node->filehandle != INVALID_HANDLE) {
				close(node->filehandle);
				node->filehandle = INVALID_HANDLE;
			}
			node_clear(node);
			assert(node->active == false);
			printf("Node[%d] closed while writing.\n", hid);
		}
		else {
			assert(res == -1);
			// Same EAGAIN/EWOULDBLOCK tolerance as the read path.
			if (errno != EAGAIN && errno != EWOULDBLOCK) {
				close(node->handle);
				node->handle = INVALID_HANDLE;
				if (node->filehandle != INVALID_HANDLE) {
					close(node->filehandle);
					node->filehandle = INVALID_HANDLE;
				}
				node_clear(node);
				assert(node->active == false);
				printf("Node[%d] closed while writing - because of error: %d\n", hid, errno);
			}
		}

		// if we have sent everything, then we dont need to wait for a WRITE
		// event anymore, so we need to re-establish the events.
		if (node->active && node->out.length == 0) {
			if (event_del(&node->event) != -1) {
				event_set(&node->event, hid, EV_READ | EV_PERSIST, node_event_handler, (void *)node);
				event_base_set(node->event.ev_base, &node->event);
				event_add(&node->event, 0);
			}
		}
	}
}
/*
 * Callback for parsing of the Proximity Domain <-> Memory Area mappings
 * (one SRAT memory-affinity entry).  Validates the entry, merges the
 * [start, end) range into nodes[] / nodes_parsed, and records the memblk
 * in node_memblk_range[] / memblk_nodeid[].  Malformed or conflicting
 * entries discard the whole SRAT via bad_srat().
 */
void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	/* A size mismatch means the table is corrupt: reject the whole SRAT. */
	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	/* Skip entries the firmware marked as not enabled. */
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;
	/* Hotpluggable ranges are only honoured when hot-add info is kept. */
	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
		return;
	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	/* Overlap with this node itself only warns; overlap with another
	 * PXM invalidates the table. */
	i = conflicting_memblks(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
		       nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	/* Saved so the hot-plug path below can restore nodes[node]. */
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		/* First range seen for this node. */
		nd->start = start;
		nd->end = end;
	} else {
		/* Extend the node's span to cover the new range. */
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
	       start, end);

	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
		/* Hot-pluggable memory is tracked separately... */
		update_nodes_add(node, start, end);
		/* restore nodes[node] */
		*nd = oldnode;
		/* ...and if the node is now empty, forget it was parsed. */
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}

	/* Record the memblk regardless, so it can be mapped back to 'node'. */
	node_memblk_range[num_node_memblks].start = start;
	node_memblk_range[num_node_memblks].end = end;
	memblk_nodeid[num_node_memblks] = node;
	num_node_memblks++;
}
void octree_insert(octree_t* tree, point_t* point, int index) { if (tree->root == NULL) // Empty tree { octree_node_t* node = leaf_new(point, index); tree->root = node; ++tree->num_points; } else if (tree->root->type == OCTREE_LEAF_NODE) { point_t center = {.x = 0.5 * (tree->bbox.x1 + tree->bbox.x2), .y = 0.5 * (tree->bbox.y1 + tree->bbox.y2), .z = 0.5 * (tree->bbox.z1 + tree->bbox.z2)}; // The tree consists of a single node. octree_node_t* root = tree->root; // Does the given point already exist here? if (point_distance(&root->leaf_node.point, point) == 0.0) return; // We need to create a branch node here. octree_node_t* node = root; tree->root = branch_new(); int slot = find_slot(¢er, point); tree->root->branch_node.children[slot] = node; } // Now we proceed with the normal logic, given that the root node // is a branch node. ASSERT(tree->root->type == OCTREE_BRANCH_NODE); octree_node_t* node = tree->root; point_t center = {.x = 0.5 * (tree->bbox.x1 + tree->bbox.x2), .y = 0.5 * (tree->bbox.y1 + tree->bbox.y2), .z = 0.5 * (tree->bbox.z1 + tree->bbox.z2)}; real_t lx = tree->bbox.x2 - tree->bbox.x1; real_t ly = tree->bbox.y2 - tree->bbox.y1; real_t lz = tree->bbox.z2 - tree->bbox.z1; int slot = find_slot(¢er, point); static real_t xf[] = {-0.25, -0.25, -0.25, -0.25, +0.25, +0.25, +0.25, +0.25}; static real_t yf[] = {-0.25, -0.25, +0.25, +0.25, -0.25, -0.25, +0.25, +0.25}; static real_t zf[] = {-0.25, +0.25, -0.25, +0.25, -0.25, +0.25, -0.25, +0.25}; while ((node->branch_node.children[slot] != NULL) && (node->branch_node.children[slot]->type == OCTREE_BRANCH_NODE)) { node = node->branch_node.children[slot]; center.x += xf[slot]*lx; lx *= 0.5; center.y += yf[slot]*ly; ly *= 0.5; center.z += zf[slot]*lz; lz *= 0.5; slot = find_slot(¢er, point); } octree_node_t* leaf = node->branch_node.children[slot]; if (leaf == NULL) { // No leaf here, so we create a new one! 
leaf = leaf_new(point, index); node->branch_node.children[slot] = leaf; ++tree->num_points; } else { // Is the point already in this node? if (point_distance(&leaf->leaf_node.point, point) == 0.0) return; else { // We have to make a new branch. int old_slot, new_slot; do { node->branch_node.children[slot] = branch_new(); node = node->branch_node.children[slot]; center.x += xf[slot]*lx; lx *= 0.5; center.y += yf[slot]*ly; ly *= 0.5; center.z += zf[slot]*lz; lz *= 0.5; new_slot = find_slot(¢er, point); old_slot = find_slot(¢er, &leaf->leaf_node.point); } while (new_slot == old_slot); node->branch_node.children[old_slot] = leaf; octree_node_t* new_leaf = leaf_new(point, index); node->branch_node.children[new_slot] = new_leaf; ++tree->num_points; } } } void octree_delete(octree_t* tree, point_t* point, int index) { // FIXME } int octree_size(octree_t* tree) { return tree->num_points; } static void node_clear(octree_node_t* node) { if (node == NULL) { return; } else if (node->type == OCTREE_LEAF_NODE) { polymec_free(node); } else { ASSERT(node->type == OCTREE_BRANCH_NODE); for (int i = 0; i < 8; ++i) node_clear(node->branch_node.children[i]); } } void octree_clear(octree_t* tree) { node_clear(tree->root); tree->root = NULL; tree->num_points = 0; }