/*
 * place - Mark the free block at bp as allocated for asize bytes.
 *
 * If the remainder after carving out asize is at least 2*DSIZE (the
 * minimum usable block), the block is split: the front part is
 * allocated and the tail is written up as a new free block and handed
 * to add_block().  Otherwise the whole block is allocated (the slack
 * becomes internal fragmentation).  remove_block/add_block maintain
 * the free list; their second argument is the block size used for
 * list bookkeeping.
 */
static void place(void *bp, size_t asize)
{
    //printf ("in place \n");
    REQUIRES (bp!=NULL);
    REQUIRES ((size_t)(bp)%8 == 0);
    size_t csize = GET_SIZE(HDRP(bp));
    /* Split: remainder is big enough to stand on its own. */
    if ((csize - asize) >= (2*DSIZE)) {
        //printf("needs to split block\n");
        remove_block (bp, csize);
        PUT(HDRP(bp), PACK(asize, 1));
        PUT(FTRP(bp), PACK(asize, 1));
        bp = NEXT_BLKP(bp);
        //printf ("setting bp to next block %p\n", bp);
        //printf ("ready to add block\n");
        PUT(HDRP(bp), PACK(csize-asize, 0));
        //printf ("added header\n");
        PUT(FTRP(bp), PACK(csize-asize, 0));
        //printf ("added footer \n");
        add_block(bp,csize-asize);
    }
    /* No split: hand out the whole block. */
    else {
        //printf("no need to split block\n");
        PUT(HDRP(bp), PACK(csize, 1));
        PUT(FTRP(bp), PACK(csize, 1));
        remove_block(bp,csize);
    }
    //printf ("returning from place \n");
}
/*
 * change_size - Resize the file referenced by oi to new_size bytes,
 * adding or removing whole data blocks as needed.
 *
 * Returns 0 on success.  -EIO is propagated immediately.  On -ENOSPC
 * every block added so far is removed and the old size restored, so a
 * failed grow leaves the file unchanged.
 */
static int change_size(ospfs_inode_t *oi, uint32_t new_size)
{
    uint32_t old_size = oi->oi_size;
    int r = 0;
    // grow
    while (ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size)) {
        r = add_block(oi);
        if (r < 0)
            break;
    }
    if (r == -EIO)
        return -EIO;
    else if (r == -ENOSPC) {
        // shrink back to old size
        while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(old_size))
            r = remove_block(oi);
        oi->oi_size = old_size;
        return -ENOSPC;
    }
    /* Shrink path: drop blocks until the block count matches. */
    while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size)) {
        r = remove_block(oi);
        if (r < 0)
            return r;
    }
    // update to exact size
    oi->oi_size = new_size;
    return 0;
}
/*
 * change_size - Grow or shrink the file referenced by oi so it spans
 * exactly enough blocks for new_size bytes, then record the new size
 * in the inode.
 *
 *   oi       - inode whose data blocks are resized
 *   new_size - requested size in bytes
 *
 * Returns 0 on success.  -EIO is returned immediately.  On -ENOSPC
 * the blocks added so far are released and the inode size restored,
 * so a failed grow leaves the file unchanged.
 *
 * Fixes two bugs in the original: the rollback loop compared against
 * new_size (a no-op mid-grow) instead of old_size, and the restore
 * assigned old_size to the local variable new_size rather than to
 * oi->oi_size, leaving the inode size untouched.
 */
static int change_size(ospfs_inode_t *oi, uint32_t new_size)
{
	uint32_t old_size = oi->oi_size;
	int r = 0;

	/* Grow: add whole blocks until enough are present. */
	while (ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size)) {
		r = add_block(oi);
		if (r == -EIO)
			return r;
		if (r == -ENOSPC) {
			/* Roll back the partial grow so the file is
			 * exactly as large as before this call. */
			while (ospfs_size2nblocks(oi->oi_size)
			       > ospfs_size2nblocks(old_size))
				remove_block(oi);
			oi->oi_size = old_size;
			return -ENOSPC;
		}
	}

	/* Shrink: drop blocks that are no longer needed. */
	while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size)) {
		r = remove_block(oi);
		if (r < 0)
			return r;
	}

	/* Record the exact byte size in the inode. */
	oi->oi_size = new_size;
	return 0;
}
/*
 * change_size - Resize the file referenced by oi to new_size bytes,
 * adding or removing whole data blocks as needed.
 *
 * Returns 0 on success or a negative error code.  If adding blocks
 * fails partway through a grow, the partially-added blocks are freed
 * and the inode size restored, so the file is left unchanged.
 *
 * Fixes in this revision: removed an unreachable duplicate
 * `if (r < 0) break;` that followed an unconditional return, and on a
 * failed grow the already-added blocks are now released instead of
 * merely resetting oi_size (which leaked them).
 */
static int change_size(ospfs_inode_t *oi, uint32_t new_size)
{
	uint32_t old_size = oi->oi_size;
	int r = 0;

	/* Grow the file one block at a time. */
	while (ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size)) {
		r = add_block(oi);
		if (r < 0) {
			/* Roll back the blocks added so far. */
			while (ospfs_size2nblocks(oi->oi_size)
			       > ospfs_size2nblocks(old_size))
				remove_block(oi);
			oi->oi_size = old_size;
			return r;
		}
	}

	/* Shrink the file one block at a time. */
	while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size)) {
		r = remove_block(oi);
		if (r < 0) {
			oi->oi_size = old_size;
			return r;
		}
	}

	/* Record the exact requested size. */
	oi->oi_size = new_size;
	return 0;
}
/*
 * ws_realloc_i - Debug-tracked realloc: grow the allocation behind
 * ptr to at least size bytes, recording file/line for leak tracking.
 *
 * NULL ptr degenerates to ws_malloc_i.  If the existing block is
 * already big enough it is returned unchanged (no shrinking).
 * Otherwise a new block is allocated, the old contents copied, and
 * the old block unregistered and freed.  Returns NULL (old block
 * intact) if the new allocation fails.
 *
 * Fix: the original computed the header pointer from ptr *before* the
 * NULL check — pointer arithmetic on NULL is undefined behavior.  The
 * header is now derived only after ptr is known to be non-NULL.
 */
void *ws_realloc_i(void *ptr, size_t size, const char *file, int line)
{
    WsMemBlockHdr *b;
    void *n;

    if (ptr == NULL)
        return ws_malloc_i(size, file, line);

    /* The tracking header sits immediately before the user data. */
    b = ((WsMemBlockHdr *) ptr) - 1;

    if (b->size >= size)
        /* We can use the old block. */
        return ptr;

    /* Allocate a bigger block. */
    n = ws_malloc_i(size, file, line);
    if (n == NULL)
        return NULL;
    memcpy(n, ptr, b->size);   /* old size < new size, so this fits */

    /* Free old block. */
    remove_block(b);
    free(b);

    return n;
}
/*
 * run - Main driver of the script(1)-style recorder: open the
 * transcript file, spawn a shell on a pseudo-terminal, and relay I/O
 * between the user's terminal, the shell and the transcript until the
 * shell exits.
 *
 * params->file    transcript path (appended to or truncated per
 *                 params->append)
 * params->shell   shell binary exec'd in the child
 */
void run(t_params const* params)
{
    int fd;
    pid_t master;
    pid_t ptyfk;
    struct termios term;

    /* Open transcript: append or truncate depending on the flag. */
    fd = open(params->file, O_WRONLY | O_CREAT | (params->append == 1 ? O_APPEND : O_TRUNC), 0644);
    if (fd == -1)
        xperror();
    ptyfk = my_forkpty(&master, NULL);
    if (ptyfk == -1)
        xperror();
    if (ptyfk == 0) {
        /* Child: wire stdio to the pty and exec the shell. */
        dup2_fd(master);
        execl(params->shell, params->shell, NULL);
        exit(0);
    }
    /* Parent: mirror window size and follow future resizes. */
    update_win_size(master);
    signal(SIGWINCH, handle_resize);
    /* NOTE(review): remove_block(master, 0) presumably clears the
       blocking mode on the pty master fd — the name suggests free-list
       code; confirm against its definition. */
    remove_block(master, 0);
    init_term(&term);
    printf("Script started, file is %s\n", params->file);
    read_input(ptyfk, master, fd);
    restore_term(&term);
    close(fd);
    printf("Script done, file is %s\n", params->file);
}
/* Tear down the allocator: release every storage block, then the
   block table and the allocator itself.  remove_block is invoked at
   least once, matching the original do/while semantics. */
void isba_destroy(isb_allocator_t buf)
{
    remove_block(buf);
    while (buf->num_block > 0)
        remove_block(buf);
    RTfree(buf->blocks);
    RTfree(buf);
}
/*
 * coalesce - Boundary-tag coalescing of the free block at bp with its
 * free neighbours.  Free neighbours are first unlinked from the free
 * list (remove_block takes the block's size for bookkeeping), the
 * merged header/footer are written, and the resulting block is linked
 * back in via insert_free_list.  Returns the payload pointer of the
 * (possibly moved) merged block.
 *
 * The `PREV_BLKP(bp) == bp` term treats the heap-start case, where
 * the previous-block computation wraps to bp itself, as "allocated".
 */
static void *coalesce(void *bp)
{
    size_t prev_alloc = GET_ALLOC(FTRP(PREV_BLKP(bp))) || (PREV_BLKP(bp) == bp);
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(bp)));
    size_t size = GET_SIZE(HDRP(bp));

    /* Case 1: both neighbours allocated — nothing to merge. */
    if (prev_alloc && next_alloc) {
        // Do nothing
    }
    /* Case 2: next block free — absorb it. */
    else if (prev_alloc && !next_alloc) {
        size += GET_SIZE(HDRP(NEXT_BLKP(bp)));
        remove_block(NEXT_BLKP(bp),GET_SIZE(HDRP(NEXT_BLKP(bp))));
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size,0));
    }
    /* Case 3: previous block free — merge backwards. */
    else if (!prev_alloc && next_alloc) {
        size += GET_SIZE(HDRP(PREV_BLKP(bp)));
        bp = PREV_BLKP(bp);
        remove_block(bp,GET_SIZE(HDRP(bp)));
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size, 0));
    }
    /* Case 4: both neighbours free — merge all three. */
    else {
        size += GET_SIZE(HDRP(PREV_BLKP(bp))) + GET_SIZE(HDRP(NEXT_BLKP(bp)));
        void *pbp = PREV_BLKP(bp);
        remove_block(pbp, GET_SIZE(HDRP(pbp)));
        void *nbp = NEXT_BLKP(bp);
        remove_block(nbp, GET_SIZE(HDRP(nbp)));
        bp = PREV_BLKP(bp);
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size, 0));
    }
    insert_free_list(bp,size);
    return bp;
}
/*
 * ws_free_i - Debug-tracked free: unregister the allocation behind
 * ptr from the tracking list, then release it.  A NULL ptr is a
 * no-op, matching free(NULL) semantics.
 *
 * Fix: the original computed the header pointer from ptr *before* the
 * NULL check — pointer arithmetic on NULL is undefined behavior.  The
 * header is now derived only after ptr is known to be non-NULL.
 */
void ws_free_i(void *ptr)
{
    WsMemBlockHdr *b;

    if (ptr == NULL)
        return;

    /* The tracking header sits immediately before the user data. */
    b = ((WsMemBlockHdr *) ptr) - 1;

    remove_block(b);
    free(b);
}
/*
 * change_size - Resize the file referenced by oi to new_size bytes.
 * This variant updates oi_size manually after each add/remove (it
 * assumes add_block/remove_block do NOT adjust oi_size themselves —
 * confirm against their definitions).  On a partial grow failure the
 * added blocks are removed and the old size restored; the error from
 * add_block is returned via r.
 */
static int change_size(ospfs_inode_t *oi, uint32_t new_size)
{
    uint32_t old_size = oi->oi_size;
    int r = 0;
    /* EXERCISE: Your code here */
    if (ospfs_size2nblocks(old_size) < ospfs_size2nblocks(new_size)) {
        /* Grow: add blocks, tracking size in whole-block units. */
        while (ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size)) {
            if ((r = add_block(oi))) {
                break;
            }
            oi->oi_size = (ospfs_size2nblocks(oi->oi_size) + 1) * OSPFS_BLKSIZE;
        }
        if (ospfs_size2nblocks(oi->oi_size) == ospfs_size2nblocks(new_size)) {
            /* Grow succeeded: record the exact byte size. */
            oi->oi_size = new_size;
            goto out;
        }
        /* Grow failed partway: roll back to the old block count. */
        while (ospfs_size2nblocks(old_size) != ospfs_size2nblocks(oi->oi_size)) {
            remove_block(oi);
            oi->oi_size = (ospfs_size2nblocks(oi->oi_size) - 1) * OSPFS_BLKSIZE;
        }
        oi->oi_size = old_size;
    } else if (ospfs_size2nblocks(old_size) > ospfs_size2nblocks(new_size)) {
        /* Shrink: drop blocks until the counts match. */
        while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size)) {
            /* EXERCISE: Your code here */
            remove_block(oi);
            oi->oi_size = (ospfs_size2nblocks(oi->oi_size) - 1) * OSPFS_BLKSIZE;
        }
        oi->oi_size = new_size;
    } else {
        /* Same block count: only the byte size changes. */
        oi->oi_size = new_size;
    }
out:
    return r;
}
/* \brief Virtually pop the top element from the stack.
   Nothing is physically removed: a pointer to the top element in the
   backing block is returned.  The referenced element is guaranteed to
   stay valid until the next pop call, and may live longer.  Returns
   NULL when the stack is empty. */
int *
isba_pop_int(isb_allocator_t buf)
{
    if (buf->cur_index == 0) {
        /* Current block exhausted: the top element is the last slot
           of the previous block. */
        if (buf->num_block == 1)
            return NULL;
        remove_block(buf);
        buf->cur_index = BLOCK_ELT_SIZE;
    }
    buf->cur_index--;
    return buf->blocks[buf->num_block - 1] + buf->cur_index * buf->el_size;
}
void BlockchainDB::pop_block(block& blk, std::vector<transaction>& txs)
{
  // Fetch the block being removed, then drop it from the store.
  blk = get_top_block();
  remove_block();

  // Hand the block's transactions back to the caller and delete them,
  // walking the hash list back-to-front.
  for (auto it = blk.tx_hashes.rbegin(); it != blk.tx_hashes.rend(); ++it)
  {
    txs.push_back(get_tx(*it));
    remove_transaction(*it);
  }

  // The coinbase transaction is stored separately from tx_hashes.
  remove_transaction(get_transaction_hash(blk.miner_tx));
}
/** \brief pops and discards a number of elements on the stack */ void isba_discard_int(isb_allocator_t buf, size_t amount) { if (amount > isba_size_int ( buf ) ) Warning(info, "too high discard: %zu > %zu", amount, isba_size_int( buf ) ); if (buf->cur_index < amount) { size_t blocks = amount>>BLOCK_ELT_POW; if (buf->num_block == 1 || buf->num_block <= blocks) Abort("Discard %zu on buffer of size %zu elements", amount, isba_size_int(buf)); size_t x; for (x = 0; x <= blocks; x++) remove_block(buf); buf->cur_index = BLOCK_ELT_SIZE-(amount&(BLOCK_ELT_SIZE-1))+buf->cur_index; } else {
/* Invalidate the cache line holding ADDR, if one is present: unlink
   it from the lookup tree and the LRU list, park it on the free list
   and shrink the line count.  */
static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db == NULL)
    return;

  splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
  remove_block (&dcache->oldest, db);
  append_block (&dcache->freelist, db);
  --dcache->size;
}
/*
 * change_size - Resize the file referenced by oi to new_size bytes,
 * adding or removing whole blocks; assumes add_block/remove_block
 * adjust oi->oi_size themselves (TODO confirm — the loop conditions
 * depend on it).
 *
 * NOTE(review): the rollback loop condition compares a block count,
 * ospfs_size2nblocks(oi->oi_size), against a byte quantity,
 * old_size + OSPFS_BLKSIZE - 1 — and the "subtract excess" line
 * removes a full OSPFS_BLKSIZE when old_size is block-aligned.  Both
 * look suspect and should be verified against add_block's rounding
 * behavior.
 */
static int change_size(ospfs_inode_t *oi, uint32_t new_size)
{
    uint32_t old_size = oi->oi_size;
    int r = 0;
    /* EXERCISE: Your code here */
    //Grow file
    //eprintk("size:%d, size2: %d\n", oi->oi_size, new_size);
    if(ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size)){
        while (ospfs_size2nblocks(oi->oi_size) < ospfs_size2nblocks(new_size) && r >= 0){
            r = add_block(oi);
        }
        //Shrink file back to original if there is an error
        if(r < 0){
            //Only shrink till old_size + the blocksize - 1, avoids the extra time when add
            //rounds up
            while(ospfs_size2nblocks(oi->oi_size) > old_size + OSPFS_BLKSIZE - 1){
                remove_block(oi);
            }
            oi->oi_size = old_size;
            return r;
        }
        //Subtract excess size if add rounded up
        oi->oi_size -= OSPFS_BLKSIZE - old_size % OSPFS_BLKSIZE;
        //Shrink file
    }else if(ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size)){
        while (ospfs_size2nblocks(oi->oi_size) > ospfs_size2nblocks(new_size) && r >= 0) {
            r = remove_block(oi);
        }
        //If error return error and exit
        if(r < 0)
            return r;
    }
    /* Record the exact requested byte size. */
    oi->oi_size = new_size;
    return 0;
}
/*
 * coalesce - boundary tag coalescing. Return ptr to coalesced block.
 *
 * Free neighbours are first unlinked from the explicit free list with
 * remove_block(), then the merged header/footer are written.
 * NOTE(review): the merged block is not re-inserted into the free
 * list here (the Case-1 insert is commented out) — presumably the
 * caller links it back in; confirm against mm_free/extend_heap.
 */
static void *coalesce(void *bp)
{
    size_t prev_alloc = GET_ALLOC(FTRP(PREV_BLKP(bp)));
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(bp)));
    size_t size = GET_SIZE(HDRP(bp));

    if (prev_alloc && next_alloc) {            // Case 1
        //insert_block(bp);
        return bp;
    }
    else if (prev_alloc && !next_alloc) {      // Case 2
        // if the next block is free, take it out of the free list
        remove_block(NEXT_BLKP(bp));
        size += GET_SIZE(HDRP(NEXT_BLKP(bp)));
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size,0));
    }
    else if (!prev_alloc && next_alloc) {      // Case 3
        // if the previous block is free, take it out of the free list
        remove_block(PREV_BLKP(bp));
        size += GET_SIZE(HDRP(PREV_BLKP(bp)));
        PUT(FTRP(bp), PACK(size, 0));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        bp = PREV_BLKP(bp);
    }
    else {                                     // Case 4
        // if both next and previous blocks are free, take them out of the free list
        remove_block(PREV_BLKP(bp));
        remove_block(NEXT_BLKP(bp));
        size += GET_SIZE(HDRP(PREV_BLKP(bp))) + GET_SIZE(FTRP(NEXT_BLKP(bp)));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        PUT(FTRP(NEXT_BLKP(bp)), PACK(size, 0));
        bp = PREV_BLKP(bp);
    }
    return bp;
}
/*
 * alloc - Allocates block of req_size bytes at start of free block
 * and split if free block is larger.
 *
 * If the remainder after allocation is at least HEADER_SIZE, the tail
 * is written up as a new free block and coalesced with its
 * neighbours; otherwise the whole block is allocated.  remove_block's
 * second argument is the block's original size, used for free-list
 * bookkeeping (the header has already been rewritten by then).
 */
static void alloc(void *free_block, size_t req_size)
{
    void *next_bp;
    size_t csize = GET_SIZE(HDRP(free_block));

    //Split the free block into allocated and free.
    if ((csize - req_size) >= HEADER_SIZE) {
        PUT(HDRP(free_block), PACK(req_size, 1)); //Allocating the block
        PUT(FTRP(free_block), PACK(req_size, 1));
        remove_block(free_block,csize);
        next_bp = NEXT_BLKP(free_block);
        PUT(HDRP(next_bp), PACK(csize-req_size, 0));//Resetting the size of the free block
        PUT(FTRP(next_bp), PACK(csize-req_size, 0));
        coalesce(next_bp); //Coalesce of the newly resized free block
    }
    else {
        /* Remainder too small to split: allocate the whole block. */
        PUT(HDRP(free_block), PACK(csize, 1));
        PUT(FTRP(free_block), PACK(csize, 1));
        remove_block(free_block,csize);
    }
}
/* Obtain a cache line for ADDR: evict the least recently allocated
   line when the cache is full, otherwise reuse a free-list entry or
   allocate a fresh one.  The line is registered in the splay tree and
   appended at the newest end of the LRU list.  Returns the line,
   never NULL (xmalloc aborts on OOM).  */
static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= dcache_size)
    {
      /* Evict the least recently allocated line.  */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      /* Prefer a recycled line from the free list before growing.  */
      db = dcache->freelist;
      if (db)
        remove_block (&dcache->freelist, db);
      else
        db = xmalloc (offsetof (struct dcache_block, data) + dcache->line_size);
      dcache->size++;
    }

  db->addr = MASK (dcache, addr);
  db->refs = 0;

  /* Put DB at the end of the list, it's the newest.  */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr, (splay_tree_value) db);

  return db;
}
/*
 * find_fit - Find a fit for a block with asize bytes.
 *
 * First-fit walk of the explicit free list via successor pointers.
 * NOTE(review): the block IS unlinked here with remove_block(), yet
 * the original (Icelandic) comment said "rather remove it in the
 * place function" — verify that place() does not unlink it a second
 * time.
 */
static void *find_fit(size_t asize)
{
    // first fit search
    void *bp = heap_listp;
    while(bp != 0){
        if (!GET_ALLOC(HDRP(bp)) && (asize <= GET_SIZE(HDRP(bp)))) {
            // take the block that was found out of the free list
            remove_block(bp); // removal could instead be done in the place function
            return bp;
        }
        bp = (void*)GET(SUCC(bp));
    }
    return NULL; // no fit
}
/* Recursively delete the folder stored in block `number`: every
   positive entry in its content table is removed as a child block,
   then the folder block itself is destroyed and marked free.
   Returns the status-update result, or -1 if the block can't be
   loaded. */
int remove_folder(int number)
{
    inode_t *folder = (inode_t *)get_block(number);

    if (folder == NULL)
        return -1;

    int *entry = (int *)folder->content;
    int *limit = (int *)((void *)folder + size_of_block);

    for (; entry < limit; entry++) {
        if (*entry > 0)
            remove_block(*entry);
    }

    destroy_block(folder);
    return set_block_status(number, BLOCK_STATUS_FREE);
}
/*
 * hippo_stack_manager_free - Tear down the stack manager: stop
 * pending notification timeouts, detach from the data model, drop all
 * tracked blocks, hide and release both windows and their canvas
 * items, release the helper objects, and finally free the struct.
 * Pointers are NULLed after unref as a use-after-free guard.
 */
void hippo_stack_manager_free(HippoStackManager *manager)
{
    stop_notification_timeout(manager);
    g_signal_handlers_disconnect_by_func(manager->model, G_CALLBACK(on_ready), manager);
    manager_set_stack(manager, NULL);

    /* remove_block unlinks the head each iteration, so this drains
       the list. */
    while (manager->blocks != NULL) {
        remove_block(manager->blocks->data, manager);
    }

    /* Notification window and its items. */
    hippo_window_set_visible(manager->notification_window, FALSE);
    g_object_unref(manager->notification_window);
    manager->notification_window = NULL;
    manager->notification_base_item = NULL;
    manager->notification_item = NULL;

    /* Browser window and its items. */
    hippo_window_set_visible(manager->browser_window, FALSE);
    g_object_unref(manager->browser_window);
    manager->browser_window = NULL;
    manager->browser_base_item = NULL;
    manager->browser_scroll_item = NULL;
    manager->browser_item = NULL;
    manager->browser_resize_grip = NULL;

    /* Shared helper objects. */
    g_object_unref(manager->actions);
    manager->actions = NULL;
    g_object_unref(manager->platform);
    manager->platform = NULL;
    g_object_unref(manager->model);
    manager->model = NULL;

    g_free(manager);
}
/* Feed callback: a resource vanished from the stack feed.  Look up
   the HippoBlock we created for it, detach our sort-change handler,
   drop the block from the manager and forget the item->block
   mapping. */
static void
on_item_removed(DDMFeed *stack, DDMDataResource *item, StackManager *manager)
{
    HippoBlock *removed;

    g_debug("Block removed, resource_id=%s", ddm_data_resource_get_resource_id(item));

    removed = g_hash_table_lookup(manager->item_to_block, item);
    if (removed == NULL) {
        g_warning("Block removed that we don't know about");
        return;
    }

    g_signal_handlers_disconnect_by_func(G_OBJECT(removed), G_CALLBACK(on_block_sort_changed), manager);
    remove_block(removed, manager);
    g_hash_table_remove(manager->item_to_block, item);
}
bool BNode::remove_digest(const char* digest, LRU* cache){ if( size_b + size_c == 0 ) return false; bool flag = false; if( leaf ){ int pos_b = get_block_pos_s( digest ); Block* current_b = blocks[ pos_b ]; cache->add( current_b ); flag = current_b->remove(digest); if( current_b->is_empty()) remove_block( pos_b, cache ); else if( current_b->is_underloaded()){ if( pos_b < size_b-1 && !blocks[pos_b+1]->is_half() ){ //ie blocks[pos_b+1].size >= d+1 Block* sibling = blocks[pos_b+1]; cache->add( sibling ); const char* tmp = sibling->pick_smallest(); cache->add( current_b ); current_b->add( tmp ); cache->add( sibling ); sibling->remove( tmp ); //no rebalance needed because sibling.size >= d+1 }else if( pos_b > 0 && !blocks[pos_b-1]->is_half() ){ //ie blocks[pos_b-1].size >= d+1 Block* sibling = blocks[pos_b-1]; cache->add( sibling ); const char* tmp = sibling->pick_greatest(); cache->add( current_b ); current_b->add( tmp ); cache->add( sibling ); sibling->remove( tmp ); //no rebalance needed because sibling.size >= d+1 }else{//merge if( pos_b < size_b-1 ){ //right fusion cache->add( blocks[pos_b+1] ); current_b->merge( blocks[pos_b+1] ); remove_block( pos_b+1, cache ); }else if( pos_b > 0){ //left fusion pos_b>0 because d>=2 cache->add( blocks[pos_b-1] ); blocks[pos_b-1]->merge( current_b ); remove_block( pos_b, cache ); } //when we have only one block on the whole tree, do nothing } } if( memcmp( id, digest, DIGEST_LENGTH) ==0 && size_b>0) memcpy( id, (blocks[size_b-1])->get_id(), DIGEST_LENGTH); }else{ int pos_c = get_child_pos_s( digest ); BNode* current_c = children[ pos_c ]; bool flag = current_c->remove_digest(digest, cache); rebalance( current_c, pos_c ); if( memcmp( id, digest, DIGEST_LENGTH) ==0 ) memcpy( id, (children[size_c-1])->get_id(), DIGEST_LENGTH); //size_c>=1 because d>1 } return flag; }
/*
 * undoRedo - Walk the undo stack one edit-block at a time, replaying
 * each command in reverse (undo) or forward (redo) and flipping its
 * command tag so the same entry serves for the opposite direction
 * next time.  Undo recursion is suppressed via undoEnabled while the
 * stack is replayed.  Returns the resulting cursor/edit position, or
 * -1 if nothing could be (un)done.
 */
int QTextDocumentPrivate::undoRedo(bool undo)
{
    PMDEBUG("%s, undoState=%d, undoStack size=%d", undo ? "undo:" : "redo:", undoState, undoStack.size());
    if (!undoEnabled || (undo && undoState == 0) || (!undo && undoState == undoStack.size()))
        return -1;

    undoEnabled = false;
    beginEditBlock();
    while (1) {
        if (undo)
            --undoState;
        QTextUndoCommand &c = undoStack[undoState];
        int resetBlockRevision = c.pos;

        switch(c.command) {
        case QTextUndoCommand::Inserted:
            remove(c.pos, c.length, (QTextUndoCommand::Operation)c.operation);
            PMDEBUG(" erase: from %d, length %d", c.pos, c.length);
            c.command = QTextUndoCommand::Removed;
            break;
        case QTextUndoCommand::Removed:
            PMDEBUG(" insert: format %d (from %d, length %d, strpos=%d)", c.format, c.pos, c.length, c.strPos);
            insert_string(c.pos, c.strPos, c.length, c.format, (QTextUndoCommand::Operation)c.operation);
            c.command = QTextUndoCommand::Inserted;
            break;
        case QTextUndoCommand::BlockInserted:
        case QTextUndoCommand::BlockAdded:
            remove_block(c.pos, &c.blockFormat, c.command, (QTextUndoCommand::Operation)c.operation);
            PMDEBUG(" blockremove: from %d", c.pos);
            if (c.command == QTextUndoCommand::BlockInserted)
                c.command = QTextUndoCommand::BlockRemoved;
            else
                c.command = QTextUndoCommand::BlockDeleted;
            break;
        case QTextUndoCommand::BlockRemoved:
        case QTextUndoCommand::BlockDeleted:
            PMDEBUG(" blockinsert: charformat %d blockformat %d (pos %d, strpos=%d)", c.format, c.blockFormat, c.pos, c.strPos);
            insert_block(c.pos, c.strPos, c.format, c.blockFormat, (QTextUndoCommand::Operation)c.operation, c.command);
            resetBlockRevision += 1;
            if (c.command == QTextUndoCommand::BlockRemoved)
                c.command = QTextUndoCommand::BlockInserted;
            else
                c.command = QTextUndoCommand::BlockAdded;
            break;
        case QTextUndoCommand::CharFormatChanged: {
            resetBlockRevision = -1; // ## TODO
            PMDEBUG(" charFormat: format %d (from %d, length %d)", c.format, c.pos, c.length);
            FragmentIterator it = find(c.pos);
            Q_ASSERT(!it.atEnd());
            int oldFormat = it.value()->format;
            setCharFormat(c.pos, c.length, formats.charFormat(c.format));
            c.format = oldFormat;
            break;
        }
        case QTextUndoCommand::BlockFormatChanged: {
            resetBlockRevision = -1; // ## TODO
            PMDEBUG(" blockformat: format %d pos %d", c.format, c.pos);
            QTextBlock it = blocksFind(c.pos);
            Q_ASSERT(it.isValid());
            int oldFormat = block(it)->format;
            block(it)->format = c.format;
            QTextBlockGroup *oldGroup = qobject_cast<QTextBlockGroup *>(objectForFormat(formats.blockFormat(oldFormat)));
            QTextBlockGroup *group = qobject_cast<QTextBlockGroup *>(objectForFormat(formats.blockFormat(c.format)));
            c.format = oldFormat;
            /* Moving a block between groups fires remove/insert;
               staying in the same group fires a format change. */
            if (group != oldGroup) {
                if (oldGroup)
                    oldGroup->blockRemoved(it);
                if (group)
                    group->blockInserted(it);
            } else if (group) {
                group->blockFormatChanged(it);
            }
            documentChange(it.position(), it.length());
            break;
        }
        case QTextUndoCommand::GroupFormatChange: {
            resetBlockRevision = -1; // ## TODO
            PMDEBUG(" group format change");
            QTextObject *object = objectForIndex(c.objectIndex);
            int oldFormat = formats.objectFormatIndex(c.objectIndex);
            changeObjectFormat(object, c.format);
            c.format = oldFormat;
            break;
        }
        case QTextUndoCommand::Custom:
            resetBlockRevision = -1; // ## TODO
            if (undo)
                c.custom->undo();
            else
                c.custom->redo();
            break;
        default:
            Q_ASSERT(false);
        }
        /* Restore the revision of the block touched by this command. */
        if (resetBlockRevision >= 0) {
            int b = blocks.findNode(resetBlockRevision);
            QTextBlockData *B = blocks.fragment(b);
            B->revision = c.revision;
        }
        /* Stop at the boundary of the current edit block. */
        if (undo) {
            if (undoState == 0 || !undoStack[undoState-1].block)
                break;
        } else {
            ++undoState;
            if (undoState == undoStack.size() || !undoStack[undoState-1].block)
                break;
        }
    }
    undoEnabled = true;
    int editPos = -1;
    if (docChangeFrom >= 0) {
        editPos = qMin(docChangeFrom + docChangeLength, length() - 1);
    }
    endEditBlock();
    emitUndoAvailable(isUndoAvailable());
    emitRedoAvailable(isRedoAvailable());
    return editPos;
}
/*
 * move - Move `length` characters starting at `pos` to position `to`
 * (to == -1 means pure removal).  Works fragment by fragment: plain
 * text is removed/reinserted with remove_string/insert_string, block
 * separators with remove_block/insert_block, and every step is
 * recorded as a pair of undo commands.  Debug-only assertions verify
 * the span respects frame boundaries.
 */
void QTextDocumentPrivate::move(int pos, int to, int length, QTextUndoCommand::Operation op)
{
    Q_ASSERT(to <= fragments.length() && to <= pos);
    Q_ASSERT(pos >= 0 && pos+length <= fragments.length());
    Q_ASSERT(blocks.length() == fragments.length());

    if (pos == to)
        return;

    const bool needsInsert = to != -1;

#if !defined(QT_NO_DEBUG)
    const bool startAndEndInSameFrame = (frameAt(pos) == frameAt(pos + length - 1));
    const bool endIsEndOfChildFrame = (isAncestorFrame(frameAt(pos), frameAt(pos + length - 1)) && text.at(find(pos + length - 1)->stringPosition) == QTextEndOfFrame);
    const bool startIsStartOfFrameAndEndIsEndOfFrameWithCommonParent = (text.at(find(pos)->stringPosition) == QTextBeginningOfFrame && text.at(find(pos + length - 1)->stringPosition) == QTextEndOfFrame && frameAt(pos)->parentFrame() == frameAt(pos + length - 1)->parentFrame());
    const bool isFirstTableCell = (qobject_cast<QTextTable *>(frameAt(pos + length - 1)) && frameAt(pos + length - 1)->parentFrame() == frameAt(pos));
    Q_ASSERT(startAndEndInSameFrame || endIsEndOfChildFrame || startIsStartOfFrameAndEndIsEndOfFrameWithCommonParent || isFirstTableCell);
#endif

    beginEditBlock();

    /* Align fragment boundaries with the span being moved. */
    split(pos);
    split(pos+length);

    uint dst = needsInsert ? fragments.findNode(to) : 0;
    uint dstKey = needsInsert ? fragments.position(dst) : 0;

    uint x = fragments.findNode(pos);
    uint end = fragments.findNode(pos+length);
    uint w = 0;
    while (x != end) {
        uint n = fragments.next(x);
        uint key = fragments.position(x);
        uint b = blocks.findNode(key+1);
        QTextBlockData *B = blocks.fragment(b);
        int blockRevision = B->revision;

        QTextFragmentData *X = fragments.fragment(x);
        QTextUndoCommand c = { QTextUndoCommand::Removed, true, op, X->format, X->stringPosition, key, { X->size }, blockRevision };
        QTextUndoCommand cInsert = { QTextUndoCommand::Inserted, true, op, X->format, X->stringPosition, dstKey, { X->size }, blockRevision };

        if (key+1 != blocks.position(b)) {
            // qDebug("remove_string from %d length %d", key, X->size);
            Q_ASSERT(noBlockInString(text.mid(X->stringPosition, X->size)));
            w = remove_string(key, X->size, op);
            if (needsInsert) {
                insert_string(dstKey, X->stringPosition, X->size, X->format, op);
                dstKey += X->size;
            }
        } else {
            // qDebug("remove_block at %d", key);
            Q_ASSERT(X->size == 1 && isValidBlockSeparator(text.at(X->stringPosition)));
            b = blocks.previous(b);
            B = 0;
            c.command = blocks.size(b) == 1 ? QTextUndoCommand::BlockDeleted : QTextUndoCommand::BlockRemoved;
            w = remove_block(key, &c.blockFormat, QTextUndoCommand::BlockAdded, op);
            if (needsInsert) {
                insert_block(dstKey++, X->stringPosition, X->format, c.blockFormat, op, QTextUndoCommand::BlockRemoved);
                cInsert.command = blocks.size(b) == 1 ? QTextUndoCommand::BlockAdded : QTextUndoCommand::BlockInserted;
                cInsert.blockFormat = c.blockFormat;
            }
        }
        appendUndoItem(c);
        if (B)
            B->revision = undoState;
        x = n;

        if (needsInsert)
            appendUndoItem(cInsert);
    }
    if (w)
        unite(w);

    Q_ASSERT(blocks.length() == fragments.length());

    endEditBlock();
}
/*
 * coalesce - Boundary-tag coalescing of the free block at bp with any
 * free neighbours.  Neighbours are first unlinked from the free list
 * (remove_block takes the block size for list bookkeeping), the
 * merged header/footer are written, and the resulting free block is
 * linked back in via add_block.  Returns the payload pointer of the
 * (possibly moved) merged block.
 */
static void *coalesce(void *bp)
{
    //printf ("in coalesce\n");
    REQUIRES (bp!=NULL);
    REQUIRES ((size_t)(bp)%8 == 0);
    //mm_checkheap(1);
    size_t prev_alloc = GET_ALLOC(FTRP(PREV_BLKP(bp)));
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(bp)));
    size_t size = GET_SIZE(HDRP(bp));
    //printf("coalescing");
    //Case 1 - both allocated
    /*if (size < 16) { add_block(bp,size); return bp; }*/
    if (prev_alloc && next_alloc) {
        // printf ("case 1 \n ") ;
        add_block(bp, size);
    }
    //Case 3 - prev allocated but next block is free
    else if (prev_alloc && !next_alloc) {
        // printf ("case 3 \n");
        remove_block( NEXT_BLKP(bp),GET_SIZE(HDRP(NEXT_BLKP(bp))) );
        size += GET_SIZE(HDRP(NEXT_BLKP(bp)));
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size,0));
        add_block(bp, size);
    }
    //Case 2 - prev is free but next is allocated
    else if (!prev_alloc && next_alloc) {
        remove_block( PREV_BLKP(bp), GET_SIZE(HDRP(PREV_BLKP(bp))) );
        size += GET_SIZE(HDRP(PREV_BLKP(bp)));
        PUT(FTRP(bp), PACK(size, 0));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        bp = PREV_BLKP(bp);
        add_block(bp, size);
    }
    // Case 4 - both prev and next are free
    else {
        remove_block( PREV_BLKP(bp),GET_SIZE(HDRP(PREV_BLKP(bp))) );
        remove_block( NEXT_BLKP(bp),GET_SIZE(FTRP(NEXT_BLKP(bp))) );
        size += GET_SIZE(HDRP(PREV_BLKP(bp))) + GET_SIZE(FTRP(NEXT_BLKP(bp)));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        PUT(FTRP(NEXT_BLKP(bp)), PACK(size, 0));
        bp = PREV_BLKP(bp);
        add_block(bp, size);
    }
    ENSURES ( (size_t)(bp)%8 == 0);
    //printf ("returning from coalesce\n");
    return bp;
}
/*
 * Free block identified with <page, offset>
 *
 * The user-visible offset is XV_ALIGN past the block header, so it is
 * rewound first.  The freed block is merged with free neighbours
 * (which must be unlinked from their size-class free lists before
 * their sizes change); if the whole page becomes free it is returned
 * to the system, otherwise the block is marked free, re-inserted if
 * large enough to be listed, and the following block's PREV_FREE
 * back-reference is updated.  Runs entirely under pool->lock.
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
    void *page_start;
    struct block_header *block, *tmpblock;

    offset -= XV_ALIGN;

    spin_lock(&pool->lock);

    page_start = get_ptr_atomic(page, 0);
    block = (struct block_header *)((char *)page_start + offset);

    /* Catch double free bugs */
    BUG_ON(test_flag(block, BLOCK_FREE));

    block->size = ALIGN(block->size, XV_ALIGN);

    tmpblock = BLOCK_NEXT(block);
    if (offset + block->size + XV_ALIGN == PAGE_SIZE)
        tmpblock = NULL;

    /* Merge next block if its free */
    if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
        /*
         * Blocks smaller than XV_MIN_ALLOC_SIZE
         * are not inserted in any free list.
         */
        if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
            remove_block(pool, page, offset + block->size + XV_ALIGN, tmpblock, get_index_for_insert(tmpblock->size));
        }
        block->size += tmpblock->size + XV_ALIGN;
    }

    /* Merge previous block if its free */
    if (test_flag(block, PREV_FREE)) {
        tmpblock = (struct block_header *)((char *)(page_start) + get_blockprev(block));
        offset = offset - tmpblock->size - XV_ALIGN;
        if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
            remove_block(pool, page, offset, tmpblock, get_index_for_insert(tmpblock->size));
        tmpblock->size += block->size + XV_ALIGN;
        block = tmpblock;
    }

    /* No used objects in this page. Free it. */
    if (block->size == PAGE_SIZE - XV_ALIGN) {
        put_ptr_atomic(page_start);
        spin_unlock(&pool->lock);
        __free_page(page);
        stat_dec(&pool->total_pages);
        return;
    }

    set_flag(block, BLOCK_FREE);
    if (block->size >= XV_MIN_ALLOC_SIZE)
        insert_block(pool, page, offset, block);

    /* Tell the following block that its predecessor is now free. */
    if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
        tmpblock = BLOCK_NEXT(block);
        set_flag(tmpblock, PREV_FREE);
        set_blockprev(tmpblock, offset);
    }

    put_ptr_atomic(page_start);
    spin_unlock(&pool->lock);
}
/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page no. that holds the object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies block allocated
 * and 0 is returned. On failure, <page, offset> is set to
 * 0 and -ENOMEM is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, u32 *offset, gfp_t flags)
{
    int error;
    u32 index, tmpsize, origsize, tmpoffset;
    struct block_header *block, *tmpblock;

    *page = NULL;
    *offset = 0;
    origsize = size;   /* unrounded size is recorded in the header */

    if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
        return -ENOMEM;

    size = ALIGN(size, XV_ALIGN);

    spin_lock(&pool->lock);

    index = find_block(pool, size, page, offset);

    /* No free block large enough: grow the pool (allowed to sleep,
       so drop the lock) and retry once. */
    if (!*page) {
        spin_unlock(&pool->lock);
        if (flags & GFP_NOWAIT)
            return -ENOMEM;
        error = grow_pool(pool, flags);
        if (unlikely(error))
            return error;
        spin_lock(&pool->lock);
        index = find_block(pool, size, page, offset);
    }

    if (!*page) {
        spin_unlock(&pool->lock);
        return -ENOMEM;
    }

    block = get_ptr_atomic(*page, *offset);

    remove_block(pool, *page, *offset, block, index);

    /* Split the block if required */
    tmpoffset = *offset + size + XV_ALIGN;
    tmpsize = block->size - size;
    tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
    if (tmpsize) {
        tmpblock->size = tmpsize - XV_ALIGN;
        set_flag(tmpblock, BLOCK_FREE);
        clear_flag(tmpblock, PREV_FREE);
        set_blockprev(tmpblock, *offset);
        if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
            insert_block(pool, *page, tmpoffset, tmpblock);
        if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
            tmpblock = BLOCK_NEXT(tmpblock);
            set_blockprev(tmpblock, tmpoffset);
        }
    } else {
        /* This block is exact fit */
        if (tmpoffset != PAGE_SIZE)
            clear_flag(tmpblock, PREV_FREE);
    }

    block->size = origsize;
    clear_flag(block, BLOCK_FREE);

    put_ptr_atomic(block);
    spin_unlock(&pool->lock);

    /* Hand back the offset of the payload, past the header. */
    *offset += XV_ALIGN;

    return 0;
}
/* * create_dynamic_block - create new block(s) to be used for a new * job allocation. * RET - a list of created block(s) or NULL on failure errno is set. */ extern List create_dynamic_block(List block_list, select_ba_request_t *request, List my_block_list, bool track_down_nodes) { int rc = SLURM_SUCCESS; ListIterator itr, itr2; bg_record_t *bg_record = NULL, *found_record = NULL; List results = NULL; List new_blocks = NULL; bitstr_t *my_bitmap = NULL; select_ba_request_t blockreq; int cnodes = request->procs / bg_conf->cpu_ratio; uint16_t start_geo[SYSTEM_DIMENSIONS]; if (cnodes < bg_conf->smallest_block) { error("Can't create this size %d " "on this system ionodes_per_mp is %d", request->procs, bg_conf->ionodes_per_mp); goto finished; } memset(&blockreq, 0, sizeof(select_ba_request_t)); memcpy(start_geo, request->geometry, sizeof(start_geo)); /* We need to lock this just incase a blocks_overlap is called which will in turn reset and set the system as it sees fit. */ slurm_mutex_lock(&block_state_mutex); if (my_block_list) { reset_ba_system(track_down_nodes); itr = list_iterator_create(my_block_list); while ((bg_record = list_next(itr))) { if (bg_record->magic != BLOCK_MAGIC) { /* This should never happen since we only call this on copies of blocks and we check on this during the copy. 
*/ error("create_dynamic_block: " "got a block with bad magic?"); continue; } if (bg_record->free_cnt) { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) { int dim; char start_geo[SYSTEM_DIMENSIONS+1]; char geo[SYSTEM_DIMENSIONS+1]; for (dim=0; dim<SYSTEM_DIMENSIONS; dim++) { start_geo[dim] = alpha_num[ bg_record->start[dim]]; geo[dim] = alpha_num[ bg_record->geo[dim]]; } start_geo[dim] = '\0'; geo[dim] = '\0'; info("not adding %s(%s) %s %s %s %u " "(free_cnt)", bg_record->bg_block_id, bg_record->mp_str, bg_block_state_string( bg_record->state), start_geo, geo, bg_record->cnode_cnt); } continue; } if (!my_bitmap) { my_bitmap = bit_alloc(bit_size(bg_record->bitmap)); } if (!bit_super_set(bg_record->bitmap, my_bitmap)) { bit_or(my_bitmap, bg_record->bitmap); if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) { int dim; char start_geo[SYSTEM_DIMENSIONS+1]; char geo[SYSTEM_DIMENSIONS+1]; for (dim=0; dim<SYSTEM_DIMENSIONS; dim++) { start_geo[dim] = alpha_num[ bg_record->start[dim]]; geo[dim] = alpha_num[ bg_record->geo[dim]]; } start_geo[dim] = '\0'; geo[dim] = '\0'; info("adding %s(%s) %s %s %s %u", bg_record->bg_block_id, bg_record->mp_str, bg_block_state_string( bg_record->state), start_geo, geo, bg_record->cnode_cnt); } if (check_and_set_mp_list( bg_record->ba_mp_list) == SLURM_ERROR) { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("something happened in " "the load of %s", bg_record->bg_block_id); list_iterator_destroy(itr); FREE_NULL_BITMAP(my_bitmap); rc = SLURM_ERROR; goto finished; } } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) { int dim; char start_geo[SYSTEM_DIMENSIONS+1]; char geo[SYSTEM_DIMENSIONS+1]; for (dim=0; dim<SYSTEM_DIMENSIONS; dim++) { start_geo[dim] = alpha_num[ bg_record->start[dim]]; geo[dim] = alpha_num[ bg_record->geo[dim]]; } start_geo[dim] = '\0'; geo[dim] = '\0'; info("not adding %s(%s) %s %s %s %u ", bg_record->bg_block_id, bg_record->mp_str, bg_block_state_string( bg_record->state), start_geo, geo, 
bg_record->cnode_cnt); } /* just so we don't look at it later */ bg_record->free_cnt = -1; } } list_iterator_destroy(itr); FREE_NULL_BITMAP(my_bitmap); } else { reset_ba_system(false); if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("No list was given"); } if (request->avail_mp_bitmap) ba_set_removable_mps(request->avail_mp_bitmap, 1); if (request->size==1 && cnodes < bg_conf->mp_cnode_cnt) { switch(cnodes) { #ifdef HAVE_BGL case 32: blockreq.small32 = 4; blockreq.small128 = 3; break; case 128: blockreq.small128 = 4; break; #else case 16: blockreq.small16 = 2; blockreq.small32 = 1; blockreq.small64 = 1; blockreq.small128 = 1; blockreq.small256 = 1; break; case 32: blockreq.small32 = 2; blockreq.small64 = 1; blockreq.small128 = 1; blockreq.small256 = 1; break; case 64: blockreq.small64 = 2; blockreq.small128 = 1; blockreq.small256 = 1; break; case 128: blockreq.small128 = 2; blockreq.small256 = 1; break; case 256: blockreq.small256 = 2; break; #endif default: error("This size %d is unknown on this system", cnodes); goto finished; break; } /* Sort the list so the small blocks are in the order * of ionodes. */ list_sort(block_list, (ListCmpF)bg_record_cmpf_inc); request->conn_type[0] = SELECT_SMALL; new_blocks = list_create(destroy_bg_record); /* check only blocks that are free and small */ if (_breakup_blocks(block_list, new_blocks, request, my_block_list, true, true) == SLURM_SUCCESS) goto finished; /* check only blocks that are free and any size */ if (_breakup_blocks(block_list, new_blocks, request, my_block_list, true, false) == SLURM_SUCCESS) goto finished; /* check usable blocks that are small with any state */ if (_breakup_blocks(block_list, new_blocks, request, my_block_list, false, true) == SLURM_SUCCESS) goto finished; /* check all usable blocks */ if (_breakup_blocks(block_list, new_blocks, request, my_block_list, false, false) == SLURM_SUCCESS) goto finished; /* Re-sort the list back to the original order. 
*/ list_sort(block_list, (ListCmpF)bg_record_sort_aval_inc); list_destroy(new_blocks); new_blocks = NULL; if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("small block not able to be placed inside others"); } if (request->conn_type[0] == SELECT_NAV) request->conn_type[0] = SELECT_TORUS; //debug("going to create %d", request->size); if (!new_ba_request(request)) { if (request->geometry[0] != (uint16_t)NO_VAL) { char *geo = give_geo(request->geometry); error("Problems with request for size %d geo %s", request->size, geo); xfree(geo); } else { error("Problems with request for size %d. " "No geo given.", request->size); } rc = ESLURM_INTERCONNECT_FAILURE; goto finished; } /* try on free midplanes */ rc = SLURM_SUCCESS; if (results) list_flush(results); else { #ifdef HAVE_BGQ results = list_create(destroy_ba_mp); #else results = list_create(NULL); #endif } rc = allocate_block(request, results); /* This could be changed in allocate_block so set it back up */ memcpy(request->geometry, start_geo, sizeof(start_geo)); if (rc) { rc = SLURM_SUCCESS; goto setup_records; } if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("allocate failure for size %d base " "partitions of free midplanes", request->size); rc = SLURM_ERROR; if (!list_count(my_block_list) || !my_block_list) goto finished; /*Try to put block starting in the smallest of the exisiting blocks*/ itr = list_iterator_create(my_block_list); itr2 = list_iterator_create(my_block_list); while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { bool is_small = 0; /* never check a block with a job running */ if (bg_record->free_cnt || bg_record->job_running != NO_JOB_RUNNING) continue; /* Here we are only looking for the first block on the midplane. So either the count is greater or equal than bg_conf->mp_cnode_cnt or the first bit is set in the ionode_bitmap. 
*/ if (bg_record->cnode_cnt < bg_conf->mp_cnode_cnt) { bool found = 0; if (bit_ffs(bg_record->ionode_bitmap) != 0) continue; /* Check to see if we have other blocks in this midplane that have jobs running. */ while ((found_record = list_next(itr2))) { if (!found_record->free_cnt && (found_record->job_running != NO_JOB_RUNNING) && bit_overlap(bg_record->bitmap, found_record->bitmap)) { found = 1; break; } } list_iterator_reset(itr2); if (found) continue; is_small = 1; } if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("removing %s(%s) for request %d", bg_record->bg_block_id, bg_record->mp_str, request->size); remove_block(bg_record->ba_mp_list, is_small); rc = SLURM_SUCCESS; if (results) list_flush(results); else { #ifdef HAVE_BGQ results = list_create(destroy_ba_mp); #else results = list_create(NULL); #endif } rc = allocate_block(request, results); /* This could be changed in allocate_block so set it back up */ memcpy(request->geometry, start_geo, sizeof(start_geo)); if (rc) { rc = SLURM_SUCCESS; break; } if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) info("allocate failure for size %d base partitions", request->size); rc = SLURM_ERROR; } list_iterator_destroy(itr); list_iterator_destroy(itr2); setup_records: if (rc == SLURM_SUCCESS) { /*set up bg_record(s) here */ new_blocks = list_create(destroy_bg_record); blockreq.save_name = request->save_name; #ifdef HAVE_BGL blockreq.blrtsimage = request->blrtsimage; #endif blockreq.linuximage = request->linuximage; blockreq.mloaderimage = request->mloaderimage; blockreq.ramdiskimage = request->ramdiskimage; memcpy(blockreq.conn_type, request->conn_type, sizeof(blockreq.conn_type)); add_bg_record(new_blocks, &results, &blockreq, 0, 0); } finished: if (request->avail_mp_bitmap && (bit_ffc(request->avail_mp_bitmap) == -1)) ba_reset_all_removed_mps(); slurm_mutex_unlock(&block_state_mutex); /* reset the ones we mucked with */ itr = list_iterator_create(my_block_list); while ((bg_record = (bg_record_t *) 
list_next(itr))) { if (bg_record->free_cnt == -1) bg_record->free_cnt = 0; } list_iterator_destroy(itr); xfree(request->save_name); if (results) list_destroy(results); errno = rc; return new_blocks; }
/*
 * output_flush() takes an offset and a size of part of the output file, known
 * in the comments as the new area, and causes any fully flushed pages to be
 * written to the output file the new area in combination with previous areas
 * creates.  The data structure output_blocks has ordered blocks of areas that
 * have been flushed which are maintained by this routine.  Any area can only
 * be flushed once and an error will result if the new area overlaps with a
 * previously flushed area.
 *
 * The goal of this is to again minimize the number of dirty pages the link
 * editor has and hopefully improve performance in a memory starved system and
 * to prevent these pages to be written to the swap area when they could just be
 * written to the output file (if only external pagers worked well ...).
 *
 * NOTE(review): flush, output_size, output_addr, output_blocks, fd and
 * host_pagesize are file-scope state defined outside this chunk; round() and
 * trunc() here are the project's page-alignment macros (round up / truncate
 * down to a page boundary), not the libm functions — confirm against the
 * enclosing file's headers.
 */
extern void output_flush(
unsigned long offset,
unsigned long size)
{
    unsigned long write_offset, write_size;
    struct block **p, *block, *before, *after;
    kern_return_t r;

	/* Flushing may be disabled globally; then this routine is a no-op. */
	if(flush == FALSE)
	    return;

/*
if(offset == 588824 && size != 0)
printf("in output_flush() offset = %lu size = %lu\n", offset, size);
*/

	/* The new area must lie entirely inside the output file. */
	if(offset + size > output_size)
	    fatal("internal error: output_flush(offset = %lu, size = %lu) out "
		  "of range for output_size = %lu", offset, size, output_size);

#ifdef DEBUG
	if(debug & (1 << 12))
	    print_block_list();
	if(debug & (1 << 11))
	    print("output_flush(offset = %lu, size %lu)", offset, size);
#endif /* DEBUG */

	/* A zero-sized flush changes nothing; just honor the debug trace. */
	if(size == 0){
#ifdef DEBUG
	if(debug & (1 << 11))
	    print("\n");
#endif /* DEBUG */
	    return;
	}

	/*
	 * Search through the ordered output blocks to find the block before
	 * the new area and the block after the new area, if any exist.  The
	 * list is kept sorted by ->offset, so the first block whose offset is
	 * past the new area terminates the scan.
	 */
	before = NULL;
	after = NULL;
	p = &(output_blocks);
	while(*p){
	    block = *p;
	    if(offset < block->offset){
		after = block;
		break;
	    }
	    else{
		before = block;
	    }
	    p = &(block->next);
	}

	/*
	 * Check for overlap of the new area with the block before and after
	 * the new area if there are such blocks.  Each area may be flushed
	 * only once, so any overlap is an internal error and aborts.
	 */
	if(before != NULL){
	    if(before->offset + before->size > offset){
		warning("internal error: output_flush(offset = %lu, size = %lu) "
		      "overlaps with flushed block(offset = %lu, size = %lu)",
		      offset, size, before->offset, before->size);
		printf("calling abort()\n");
		abort();
	    }
	}
	if(after != NULL){
	    if(offset + size > after->offset){
		warning("internal error: output_flush(offset = %lu, size = %lu) "
		      "overlaps with flushed block(offset = %lu, size = %lu)",
		      offset, size, after->offset, after->size);
		printf("calling abort()\n");
		abort();
	    }
	}

	/*
	 * Now see how the new area fits in with the blocks before and after it
	 * (that is does it touch both, one or the other or neither blocks).
	 * For each case first the offset and size to write (write_offset and
	 * write_size) are set for the area of full pages that can now be
	 * written from the block.  Then the area written in the block
	 * (->written_offset and ->written_size) are set to reflect the total
	 * area in the block now written.  Then the offset and size the block
	 * refers to (->offset and ->size) are set to the total area of the
	 * block.  Finally the links to other blocks in the list are adjusted
	 * if a block is added or removed.
	 *
	 * See if there is a block before the new area and the new area
	 * starts at the end of that block.
	 */
	if(before != NULL && before->offset + before->size == offset){
	    /*
	     * See if there is also a block after the new area and the new area
	     * ends at the start of that block.
	     */
	    if(after != NULL && offset + size == after->offset){
		/*
		 * This is the case where the new area exactly fills the area
		 * between two existing blocks.  The total area is folded into
		 * the block before the new area and the block after the new
		 * area is removed from the list.
		 */
		if(before->offset == 0 && before->written_size == 0){
		    write_offset = 0;
		    before->written_offset = 0;
		}
		else
		    write_offset =before->written_offset + before->written_size;
		/* Write up to the after-block's written area (or its end if
		 * it has written nothing), truncated to whole pages. */
		if(after->written_size == 0)
		    write_size = trunc(after->offset + after->size -
				       write_offset, host_pagesize);
		else
		    write_size = trunc(after->written_offset - write_offset,
				       host_pagesize);
		if(write_size != 0){
		    before->written_size += write_size;
		}
		if(after->written_size != 0)
		    before->written_size += after->written_size;
		before->size += size + after->size;

		/* remove the block after the new area */
		before->next = after->next;
		remove_block(after);
	    }
	    else{
		/*
		 * This is the case where the new area starts at the end of the
		 * block just before it but does not end where the block after
		 * it (if any) starts.  The new area is folded into the block
		 * before the new area.
		 */
		write_offset = before->written_offset + before->written_size;
		write_size = trunc(offset + size - write_offset,
				   host_pagesize);
		if(write_size != 0)
		    before->written_size += write_size;
		before->size += size;
	    }
	}
	/*
	 * See if the new area ends at the start of the block after it
	 * (if any).
	 */
	else if(after != NULL && offset + size == after->offset){
	    /*
	     * This is the case where the new area ends at the beginning of the
	     * block just after it but does not start where the block before it
	     * (if any) ends.  The new area is folded into the block after the
	     * new area.
	     */
	    write_offset = round(offset, host_pagesize);
	    if(after->written_size == 0)
		write_size = trunc(after->offset + after->size - write_offset,
				   host_pagesize);
	    else
		write_size = trunc(after->written_offset - write_offset,
				   host_pagesize);
	    if(write_size != 0){
		after->written_offset = write_offset;
		after->written_size += write_size;
	    }
	    else if(write_offset != after->written_offset){
		/* No whole page became writable, but the written area now
		 * starts at the (rounded-up) start of the new area. */
		after->written_offset = write_offset;
	    }
	    after->offset = offset;
	    after->size += size;
	}
	else{
	    /*
	     * This is the case where the new area neither starts at the end of
	     * the block just before it (if any) nor ends where the block after
	     * it (if any) starts.  A new block is created and the new area is
	     * placed in it.
	     */
	    write_offset = round(offset, host_pagesize);
	    write_size = trunc(offset + size - write_offset, host_pagesize);
	    block = get_block();
	    block->offset = offset;
	    block->size = size;
	    block->written_offset = write_offset;
	    block->written_size = write_size;
	    /*
	     * Insert this block in the ordered list in the correct place.
	     */
	    if(before != NULL){
		block->next = before->next;
		before->next = block;
	    }
	    else{
		block->next = output_blocks;
		output_blocks = block;
	    }
	}

	/*
	 * Now if there are full pages to write write them to the output file
	 * and release the buffer pages back to the kernel.
	 */
	if(write_size != 0){
#ifdef DEBUG
	    if((debug & (1 << 11)) || (debug & (1 << 10)))
		print(" writing (write_offset = %lu write_size = %lu)\n",
		      write_offset, write_size);
#endif /* DEBUG */
	    lseek(fd, write_offset, L_SET);
	    if(write(fd, output_addr + write_offset, write_size) !=
	       (int)write_size)
		system_fatal("can't write to output file");
	    /* The flushed pages are done with; give them back to the kernel
	     * so they are never dirtied again or pushed to swap. */
	    if((r = vm_deallocate(mach_task_self(),
				  (vm_address_t)(output_addr + write_offset),
				  write_size)) != KERN_SUCCESS)
		mach_fatal(r, "can't vm_deallocate() buffer for output file");
	}
#ifdef DEBUG
	else{
	    if(debug & (1 << 11))
		print(" no write\n");
	}
#endif /* DEBUG */
}