/*
 * Toggle the bracket at index `point` of the global bracket string `arr`
 * and update the segment tree rooted at `curr` over [low, high].
 * Each node stores counts of unmatched close brackets (`left`) and
 * unmatched open brackets (`right`).  Returns the updated node for
 * this segment.  `diff` is currently unused but kept for the callers'
 * existing signature.
 *
 * Fix: the original wrote `if (cond) node a = pointupdate(...);` — a
 * declaration cannot be the sole statement of an unbraced `if` body in
 * C (C11 6.8.4), and the declared variables were never used.  The
 * recursive calls are made for their side effect on `tree[]` only.
 */
node pointupdate(int low, int high, int point, int curr, int diff)
{
    /* Index outside this segment: return a neutral (empty) node. */
    if (point < low || point > high) {
        node c;
        c.right = 0;
        c.left = 0;
        return c;
    }
    /* Leaf: flip the stored bracket and record the new character. */
    if (low == high) {
        tree[curr].left = 0;
        tree[curr].right = 0;
        if (arr[low] == '(') {
            tree[curr].left = 1;   /* after the flip the leaf holds ')' */
            arr[low] = ')';
        } else {
            tree[curr].right = 1;  /* after the flip the leaf holds '(' */
            arr[low] = '(';
        }
        return tree[curr];
    }
    /* Internal node: recurse into the half containing `point` (we
     * already know point >= low here), then recombine the children. */
    int mid = low + (high - low) / 2;
    if (point <= mid)
        pointupdate(low, mid, point, 2 * curr + 1, diff);
    else
        pointupdate(mid + 1, high, point, 2 * curr + 2, diff);
    tree[curr] = mergenode(tree[2 * curr + 1], tree[2 * curr + 2]);
    return tree[curr];
}
node constructor(char arr[],int low,int high,int curr){ if(low==high){ tree[curr].left=0; tree[curr].right=0; if(arr[low]=='(')tree[curr].right=1; else tree[curr].left=1; // printf("curr:%d left:%d right:%d\n",curr,tree[curr].left,tree[curr].right); return tree[curr]; } int mid=low+(high-low)/2; tree[curr]=mergenode(constructor(arr,low,mid,2*curr+1),constructor(arr,mid+1,high,2*curr+2)); //printf("curr:%d left:%d right:%d\n",curr,tree[curr].left,tree[curr].right); return tree[curr]; }
/* Free the memory allocation described by node `n` in allocation head `h`.
 * If `i` (the caller's allocation information) is non-NULL then the node is
 * kept on the freed queue/tree so that its details -- and possibly its
 * contents -- remain inspectable; otherwise the node is returned to the
 * free tree and becomes available for reuse.
 */
MP_GLOBAL void __mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p=NULL;
    size_t l, s=0;

    /* If we are keeping the details (and possibly the contents) of a specified
     * number of recently freed memory allocations then we may have to recycle
     * the oldest freed allocation if the length of the queue would extend past
     * the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree. */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* p/s cover the whole page-aligned region containing the block;
         * needed below both for watch-point removal and for restoring the
         * node to full pages before reuse.
         */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages. */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being both read or written to, or prevent the
         * allocation from being written to.  If not then we may optionally
         * preserve its contents, otherwise it will be filled with the free
         * byte.
         */
        n->info = i;
        /* NOTE: the first `else` below binds to the inner if (FLG_PRESERVE),
         * the final `else if` to the outer if (FLG_PAGEALLOC).
         */
        if (h->flags & FLG_PAGEALLOC)
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on some
                     * systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory,
                                      (char *) n->block + n->size, l,
                                      MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            /* Grow the node to cover its whole pages before reuse. */
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        /* Reclaim the two overflow buffers into the free node. */
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        /* Coalesce with any bordering free nodes. */
        mergenode(h, n);
    }
}
/* Recycle the oldest entry on the freed-allocations queue of `h`, moving it
 * from the freed list/tree back onto the free tree so its memory becomes
 * available for reuse.  Called when the freed queue has reached its
 * user-specified limit.  The tail of this function deliberately mirrors the
 * free-tree path of __mp_freealloc().
 */
MP_GLOBAL void __mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p;
    size_t l, s;

    /* Recover the allocnode from its embedded fnode at the head (oldest
     * entry) of the freed list.
     */
    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree. */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* p/s cover the whole page-aligned region containing the block. */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages. */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        /* Grow the node to cover its whole pages before reuse. */
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_READWRITE);
    }
    /* Reclaim the two overflow buffers into the free node. */
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    /* Coalesce with any bordering free nodes. */
    mergenode(h, n);
}
/* Obtain an allocation node for a new block of at least `l` bytes with
 * alignment `a` (0 selects the default alignment), recording allocation
 * information `i`.  A suitable free node is found on the free tree, or new
 * memory is requested from the heap manager; the chosen node is then split
 * to the requested size.  Returns NULL on failure.
 */
MP_GLOBAL allocnode *__mp_getalloc(allochead *h, size_t l, size_t a, void *i)
{
    allocnode *n, *r, *s;
    heapnode *p;
    treenode *t;
    size_t b, m;

    /* b is the total overhead of the two overflow buffers surrounding a
     * block; every request must also accommodate it.
     */
    b = h->oflow << 1;
    if (l == 0)
        l = 1;
    /* Clamp the alignment to [default, page size] and round it to a power
     * of two.
     */
    if (a == 0)
        a = h->heap.memory.align;
    else if (a > h->heap.memory.page)
        a = h->heap.memory.page;
    else
        a = __mp_poweroftwo(a);
    /* If all allocations are not pages then we must add more bytes to the
     * allocation request to account for alignment.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = 0;
    else
        m = a - 1;
    /* If we have no suitable space for this allocation then we must allocate
     * memory via the heap manager.
     */
    if ((t = __mp_searchhigher(h->ftree.root, l + b + m)) == NULL)
    {
        if ((n = getnode(h)) == NULL)
            return NULL;
        /* If all allocations are pages then we must specify that we want our
         * heap allocation to be page-aligned.
         */
        if (h->flags & FLG_PAGEALLOC)
            m = h->heap.memory.page;
        else
            m = a;
        if ((p = __mp_heapalloc(&h->heap,
                                __mp_roundup(l + b, h->heap.memory.page), m,
                                0)) == NULL)
        {
            /* Release the node slot acquired above before failing. */
            __mp_freeslot(&h->table, n);
            return NULL;
        }
        /* Initialise the free memory.  If all allocations are pages then we
         * prevent any free memory from being both read from and written to.
         */
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, p->size, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, p->size);
        /* Insert the new memory block into the correct position in the
         * memory block list.  This is vital for merging free nodes.
         */
        if ((t = __mp_searchlower(h->atree.root, (unsigned long) p->block)) ||
            (t = __mp_searchlower(h->gtree.root, (unsigned long) p->block)))
            r = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        else
            r = (allocnode *) &h->list;
        /* Advance r to the last node whose block address precedes the new
         * block, keeping the list sorted by address.
         */
        while (((s = (allocnode *) r->lnode.next)->lnode.next != NULL) &&
               (s->block < p->block))
            r = s;
        __mp_insert(&h->list, &r->lnode, &n->lnode);
        __mp_treeinsert(&h->ftree, &n->tnode, p->size);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->fsize += p->size;
        /* Merge the memory block with any bordering free nodes.  This
         * is also vital to maintain the property that the memory block
         * list does not ever contain two bordering free nodes.
         */
        n = mergenode(h, n);
    }
    else
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    /* Split the free node as requested. */
    return splitnode(h, n, l, a, i);
}