/*
 * Parse lci's own command-line flags and strip them out of the argument
 * vector in place, so the remaining entries can be treated as inputs.
 *
 * cnt - pointer to argc; decremented by remove_index() for each consumed flag
 * vec - argv; consumed flags are removed (remaining entries shift down)
 *
 * Recognized flags toggle the module-level option globals (show_banner,
 * run_compiler, force_lint, run_lint) or adjust log verbosity. --help and
 * --version print and exit immediately. Parsing stops at the first argument
 * that matches no flag (see the final `break`).
 *
 * NOTE(review): the third argument to parse_bool_flag() looks like a
 * minimum-prefix-match length for the long form (-1 = exact match for the
 * short form) -- confirm against its definition. remove_index(&i, cnt, vec)
 * presumably deletes vec[i] and rewinds i so the shifted-down successor is
 * examined on the next iteration -- verify, since the loop correctness
 * depends on it.
 */
void lci_options(int *cnt, char *vec[])
{
	int i;

	/* Require at least one argument beyond the program name. */
	if (*cnt < 2) {
		print_usage_on(stderr);
		exit(EXIT_FAILURE);
	}

	for (i = 1; i != *cnt; ++i) {
		if (parse_bool_flag(vec[i], "-b", -1)
		    || parse_bool_flag(vec[i], "--no-banner", 6)) {
			log_puts(LCI_SEV_DEBUG, "no banner\n");
			show_banner = 0;
			remove_index(&i, cnt, &vec[0]);
			continue;
		}
		if (parse_bool_flag(vec[i], "-c", -1)
		    || parse_bool_flag(vec[i], "--no-compiler", 6)) {
			log_puts(LCI_SEV_DEBUG, "no compiler\n");
			run_compiler = 0;
			remove_index(&i, cnt, &vec[0]);
			continue;
		}
		if (parse_bool_flag(vec[i], "-f", -1)
		    || parse_bool_flag(vec[i], "--force-lint", 3)) {
			log_puts(LCI_SEV_DEBUG, "force lint\n");
			force_lint = 1;
			remove_index(&i, cnt, &vec[0]);
			continue;
		}
		if (parse_bool_flag(vec[i], "-l", -1)
		    || parse_bool_flag(vec[i], "--no-lint", 6)) {
			log_puts(LCI_SEV_DEBUG, "no lint\n");
			run_lint = 0;
			remove_index(&i, cnt, &vec[0]);
			continue;
		}
		if (parse_bool_flag(vec[i], "-v", -1)
		    || parse_bool_flag(vec[i], "--verbose", 6)) {
			log_puts(LCI_SEV_DEBUG, "verbose\n");
			inc_severity_ceiling();
			remove_index(&i, cnt, &vec[0]);
			continue;
		}
		if (parse_bool_flag(vec[i], "--help", 3)) {
			log_puts(LCI_SEV_DEBUG, "help\n");
			print_usage_on(stdout);
			exit(EXIT_SUCCESS);
		}
		if (parse_bool_flag(vec[i], "--version", 6)) {
			log_puts(LCI_SEV_DEBUG, "version\n");
			print_version_on(stdout);
			exit(EXIT_SUCCESS);
		}
		/* First unrecognized argument ends option parsing. */
		break;
	}
}
/*
 * Ad-hoc driver for the linked-list helpers: builds a 10-element list,
 * then exercises remove_index() / remove_byValue(), including an
 * out-of-range removal (index 5 on a one-element list).
 *
 * Fixes vs. the previous version:
 *  - main() returns int ("void main" is non-standard C).
 *  - The malloc() result is checked before use.
 *  - remove_index() is no longer invoked while head->next still held
 *    uninitialized garbage (walking that pointer was undefined behavior);
 *    the node is fully initialized first.
 */
int main(void)
{
	node_t *head = malloc(sizeof(node_t));
	int i;

	if (head == NULL)
		return EXIT_FAILURE;

	head->val = 0;
	head->next = NULL;

	/* Out-of-range removal on a one-element list: exercises the
	 * "index past end" path of remove_index(). */
	remove_index(5, &head);

	for (i = 1; i < 10; i++) {
		push(head, i);
		print_list(head);
	}

	/* Remove the node at index 5, then the first node holding value 2. */
	remove_index(5, &head);
	remove_byValue(&head, 2);
	print_list(head);

	return EXIT_SUCCESS;
}
// Returns the number of bytes remaining. This means a 0 if everything // goes OK, and a positive number if some of the bytes couldn't be // written. Returns -1 if the connection was reset and reconnection failed. int send_with_reconnect(int index, int size) { int result = 0; int bytes_remaining = size; int done = 0; int error = 0; int sockfd = rcvdb[index].sockfd; if (bytes_remaining <= 0) { bytes_remaining = 1; } while (!done && bytes_remaining > 0) { int retry = 0; int write_size = bytes_remaining; if (write_size > MAX_PAYLOAD_SIZE) { write_size = MAX_PAYLOAD_SIZE; } error = send(sockfd, random_buffer, write_size, MSG_DONTWAIT); logWrite(PEER_WRITE, NULL, "Wrote %d bytes", error); // Handle failed connection while (error == -1 && errno == ECONNRESET && retry < 3) { reconnect_receiver(index); sockfd= rcvdb[index].sockfd; error = send(sockfd, random_buffer, size, MSG_DONTWAIT); logWrite(PEER_WRITE, NULL, "Wrote %d reconnected bytes", error); retry++; } //if still disconnected, reset if (error == -1 && errno == ECONNRESET) { remove_index(index, &write_fds); printf("Error: send_receiver() - failed send to %s three times. \n", ipToString(rcvdb[index].ip)); result = -1; } else if (error == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) { result = bytes_remaining; done = 1; } else if (error == -1) { perror("send_receiver: send"); clean_exit(1); } else { total_size += error; bytes_remaining -= error; if (error < write_size) { done = 1; } result = bytes_remaining; // printf("Total: %d, Pending: %d\n", total_size, rcvdb[index].pending); } } return result; }
/**
 * Remove the first occurrence of pointer @p from the array.
 *
 * @param p the pointer value to search for
 * @return true if the pointer was found and removed, false otherwise
 */
bool PtrArray::remove(void *p)
{
    for (unsigned int idx = 0; idx < len; ++idx) {
        if (pdata[idx] != p)
            continue;
        remove_index(idx);
        return true;
    }
    return false;
}
/**
 * Delete the mail with the given id and report the outcome, together with
 * the number of mails remaining, on the console.
 *
 * @param id identifier of the mail to remove
 */
void remove_mail(int id)
{
    const bool removed = remove_index(id);

    if (!removed) {
        p("Mail does not exist, you have ");
        put_int(mail_set.size());
        p(" mails");
        puts("");
        return;
    }

    p("Mail ");
    put_int(id);
    p(" removed, you have ");
    put_int(mail_set.size());
    p(" mails\n");
}
/*
 * Allocate an index value for every varbind in an AgentX IndexAllocate PDU.
 *
 * session - the master-agent session the request arrived on
 * pdu     - the IndexAllocate request; pdu->sessid identifies the subagent
 *
 * Returns AGENTX_ERR_NOERROR on success, AGENTX_ERR_NOT_OPEN if the
 * subagent session is unknown, or AGENTX_ERR_INDEX_NONE_AVAILABLE if any
 * single allocation fails (in which case all allocations made earlier in
 * this request are rolled back via remove_index()).
 */
int
allocate_idx_list(netsnmp_session * session, netsnmp_pdu *pdu)
{
    netsnmp_session *sp;
    netsnmp_variable_list *vp, *vp2, *next, *res;
    int             flags = 0;

    sp = find_agentx_session(session, pdu->sessid);
    if (sp == NULL)
        return AGENTX_ERR_NOT_OPEN;

    /* Map AgentX PDU flags onto the index-registry allocation flags. */
    if (pdu->flags & AGENTX_MSG_FLAG_ANY_INSTANCE)
        flags |= ALLOCATE_ANY_INDEX;
    if (pdu->flags & AGENTX_MSG_FLAG_NEW_INSTANCE)
        flags |= ALLOCATE_NEW_INDEX;

    /*
     * XXX - what about errors?
     *
     *  If any allocations fail, then we need to
     *    *fully* release the earlier ones.
     *  (i.e. remove them completely from the index registry,
     *    not simply mark them as available for re-use)
     *
     * For now - assume they all succeed.
     */
    for (vp = pdu->variables; vp != NULL; vp = next) {
        /* Save the successor: snmp_clone_var() below overwrites vp's
         * fields (including next_variable), which we restore afterwards. */
        next = vp->next_variable;
        res = register_index(vp, flags, session);
        if (res == NULL) {
            /*
             * If any allocations fail, we need to *fully* release
             *   all previous ones (i.e. remove them completely
             *   from the index registry)
             */
            for (vp2 = pdu->variables; vp2 != vp; vp2 = vp2->next_variable) {
                remove_index(vp2, session);
            }
            return AGENTX_ERR_INDEX_NONE_AVAILABLE;    /* XXX */
        } else {
            /* Copy the allocated value back into the request varbind and
             * free the clone returned by register_index(). */
            (void) snmp_clone_var(res, vp);
            free(res);
        }
        /* Re-link the list: clone_var clobbered vp->next_variable. */
        vp->next_variable = next;
    }
    return AGENTX_ERR_NOERROR;
}
/** remove_data
 *
 * Removes ALL nodes whose data is EQUAL to the data you passed in, i.e.
 * whenever the comparison function returns true (!0).
 * @warning Note the data the node is pointing to is also freed. If you have
 * any pointers to this node's data it will be freed!
 *
 * @param llist a pointer to the list
 * @param data data to compare to.
 * @param compare_func a pointer to a function that when it returns true it will remove the element from the list and do nothing otherwise @see equal_op.
 * @param free_func a pointer to a function that is responsible for freeing the node's data
 * @return the number of nodes that were removed.
 */
int remove_data(list* llist, const void* data, equal_op compare_func, list_op free_func)
{
    int count = 0;
    int index = 0;
    int remaining;
    node* current;
    node* next;

    if (llist == NULL) {
        return 0;
    }

    current = llist->head;
    /* Snapshot the node count: remove_index() shrinks llist->size as we
     * remove, which previously made the loop terminate early. */
    for (remaining = llist->size; remaining > 0; remaining--) {
        /* Save the successor before a removal can free current. */
        next = current->next;
        if (compare_func(data, current->data)) {
            remove_index(llist, index, free_func);
            count++;
            /* BUGFIX: do NOT advance the index after a removal -- the
             * following node has shifted into slot `index`. The old code
             * incremented i unconditionally, so it skipped the node after
             * every match. */
        } else {
            index++;
        }
        current = next;
    }
    return count;
}
/** remove_if
 *
 * Removes all nodes whose data, when passed into the predicate function,
 * returns true.
 *
 * @param llist a pointer to the list
 * @param pred_func a pointer to a function that when it returns true it will remove the element from the list and do nothing otherwise @see list_pred.
 * @param free_func a pointer to a function that is responsible for freeing the node's data
 * @return the number of nodes that were removed.
 */
int remove_if(list* llist, list_pred pred_func, list_op free_func)
{
    int count = 0;
    int index = 0;
    int remaining;
    node* current;
    node* next;

    if (llist == NULL) {
        return 0;
    }

    current = llist->head;
    /* Snapshot the node count: remove_index() shrinks llist->size as we
     * remove, which previously made the loop terminate early. */
    for (remaining = llist->size; remaining > 0; remaining--) {
        /* Save the successor before a removal can free current. */
        next = current->next;
        if (pred_func(current->data)) {
            remove_index(llist, index, free_func);
            count++;
            /* BUGFIX: do NOT advance the index after a removal -- the
             * following node has shifted into slot `index`. The old code
             * incremented i unconditionally, so it skipped the node after
             * every match. */
        } else {
            index++;
        }
        current = next;
    }
    return count;
}
/*
 * This is range deletion. So, instead of adjusting balance of the
 * space on sibling nodes for each change, this just removes the range
 * and merges from right to left even if it is not same parent.
 *
 *              +--------------- (A, B, C)--------------------+
 *              |                    |                        |
 *     +-- (AA, AB, AC) -+       +- (BA, BB, BC) -+    + (CA, CB, CC) +
 *     |        |        |       |      |         |    |      |       |
 * (AAA,AAB)(ABA,ABB)(ACA,ACB) (BAA,BAB)(BBA)(BCA,BCB) (CAA)(CBA,CBB)(CCA)
 *
 * [less : A, AA, AAA, AAB, AB, ABA, ABB, AC, ACA, ACB, B, BA ... : greater]
 *
 * If we merged from cousin (or re-distributed), we may have to update
 * the index until common parent. (e.g. removed (ACB), then merged
 * from (BAA,BAB) to (ACA), we have to adjust B in root node to BB)
 *
 * See, adjust_parent_sep().
 *
 * FIXME: no re-distribute. so, we don't guarantee above than 50%
 * space efficiency. And if range is end of key (truncate() case), we
 * don't need to merge, and adjust_parent_sep().
 *
 * FIXME2: we may want to split chop work for each step. instead of
 * blocking for a long time.
 */
int btree_chop(struct btree *btree, tuxkey_t start, u64 len)
{
	if(DEBUG_MODE_K==1)
	{
		printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__);
	}
	struct sb *sb = btree->sb;
	struct btree_ops *ops = btree->ops;
	struct buffer_head **prev, *leafprev = NULL;
	struct chopped_index_info *cii;
	struct cursor *cursor;
	tuxkey_t limit;
	int ret, done = 0;

	/* Nothing to chop in an empty tree. */
	if (!has_root(btree))
		return 0;

	/* Chop all range if len >= TUXKEY_LIMIT */
	limit = (len >= TUXKEY_LIMIT) ? TUXKEY_LIMIT : start + len;

	/* prev[level] holds the left-hand merge candidate at each level. */
	prev = malloc(sizeof(*prev) * btree->root.depth);
	if (prev == NULL)
		return -ENOMEM;
	memset(prev, 0, sizeof(*prev) * btree->root.depth);

	/* Per-level record of chopped index entries, for logging below. */
	cii = malloc(sizeof(*cii) * btree->root.depth);
	if (cii == NULL) {
		ret = -ENOMEM;
		goto error_cii;
	}
	memset(cii, 0, sizeof(*cii) * btree->root.depth);

	cursor = alloc_cursor(btree, 0);
	if (!cursor) {
		ret = -ENOMEM;
		goto error_alloc_cursor;
	}

	down_write(&btree->lock);
	ret = btree_probe(cursor, start);
	if (ret)
		goto error_btree_probe;

	/* Walk leaves */
	while (1) {
		struct buffer_head *leafbuf;
		tuxkey_t this_key;

		/*
		 * FIXME: If leaf was merged and freed later, we don't
		 * need to redirect leaf and leaf_chop()
		 */
		if ((ret = cursor_redirect(cursor)))
			goto out;
		leafbuf = cursor_pop(cursor);

		/* Adjust start and len for this leaf */
		this_key = cursor_level_this_key(cursor);
		if (start < this_key) {
			if (limit < TUXKEY_LIMIT)
				len -= this_key - start;
			start = this_key;
		}

		/* Chop the [start, start+len) range out of this leaf;
		 * a positive return means the leaf changed and is dirty. */
		ret = ops->leaf_chop(btree, start, len, bufdata(leafbuf));
		if (ret) {
			if (ret < 0) {
				blockput(leafbuf);
				goto out;
			}
			mark_buffer_dirty_non(leafbuf);
		}

		/* Try to merge this leaf with prev */
		if (leafprev) {
			if (try_leaf_merge(btree, leafprev, leafbuf)) {
				trace(">>> can merge leaf %p into leaf %p", leafbuf, leafprev);
				/* Drop the parent's pointer to the absorbed leaf. */
				remove_index(cursor, cii);
				mark_buffer_dirty_non(leafprev);
				blockput_free(sb, leafbuf);
				goto keep_prev_leaf;
			}
			blockput(leafprev);
		}
		leafprev = leafbuf;

keep_prev_leaf:
		if (cursor_level_next_key(cursor) >= limit)
			done = 1;
		/* Pop and try to merge finished nodes */
		while (done || cursor_level_finished(cursor)) {
			struct buffer_head *buf;
			int level = cursor->level;
			struct chopped_index_info *ciil = &cii[level];

			/* Get merge src buffer, and go parent level */
			buf = cursor_pop(cursor);

			/*
			 * Logging chopped indexes
			 * FIXME: If node is freed later (e.g. merged),
			 * we dont't need to log this
			 */
			if (ciil->count) {
				log_bnode_del(sb, bufindex(buf), ciil->start, ciil->count);
			}
			memset(ciil, 0, sizeof(*ciil));

			/* Try to merge node with prev */
			if (prev[level]) {
				/* The root level has no left sibling. */
				assert(level);
				if (try_bnode_merge(sb, prev[level], buf)) {
					trace(">>> can merge node %p into node %p", buf, prev[level]);
					remove_index(cursor, cii);
					mark_buffer_unify_non(prev[level]);
					blockput_free_unify(sb, buf);
					goto keep_prev_node;
				}
				blockput(prev[level]);
			}
			prev[level] = buf;
keep_prev_node:

			if (!level)
				goto chop_root;
		}

		/* Push back down to leaf level */
		do {
			ret = cursor_advance_down(cursor);
			if (ret < 0)
				goto out;
		} while (ret);
	}

chop_root:
	/* Remove depth if possible */
	while (btree->root.depth > 1 && bcount(bufdata(prev[0])) == 1) {
		trace("drop btree level");
		btree->root.block = bufindex(prev[1]);
		btree->root.depth--;
		tux3_mark_btree_dirty(btree);

		/*
		 * We know prev[0] is redirected and dirty. So, in
		 * here, we can just cancel bnode_redirect by bfree(),
		 * instead of defered_bfree()
		 * FIXME: we can optimize freeing bnode without
		 * bnode_redirect, and if we did, this is not true.
		 */
		bfree(sb, bufindex(prev[0]), 1);
		log_bnode_free(sb, bufindex(prev[0]));
		blockput_free_unify(sb, prev[0]);

		vecmove(prev, prev + 1, btree->root.depth);
	}
	ret = 0;

out:
	/* Release any buffers still pinned as merge candidates. */
	if (leafprev)
		blockput(leafprev);
	for (int i = 0; i < btree->root.depth; i++) {
		if (prev[i])
			blockput(prev[i]);
	}
	release_cursor(cursor);
error_btree_probe:
	up_write(&btree->lock);

	free_cursor(cursor);
error_alloc_cursor:
	free(cii);
error_cii:
	free(prev);

	return ret;
}
// Exercise Column::contains() / Column::find_one() for value type T through
// four phases: all rows present, N/A rows removed, a trailing N/A row added,
// and finally only N/A rows left. Each phase is checked both without and
// (when the column type supports it) with a tree index.
// NOTE(review): T appears to be a template parameter declared outside this
// view (uses T::type(), T::na()) -- confirm against the enclosing template.
void test_contains_and_find_one() {
  constexpr size_t NUM_ROWS = 1 << 10;
  // Create a table and fill a column with NUM_ROWS random values.
  auto db = grnxx::open_db("");
  auto table = db->create_table("Table");
  auto column = table->create_column("Column", T::type());
  grnxx::Array<T> values;
  values.resize(NUM_ROWS);
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    generate_random_value(&values[i]);
    grnxx::Int row_id = table->insert_row();
    column->set(row_id, values[i]);
  }

  // Test all the values.
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    assert(column->contains(values[i]));
    grnxx::Int row_id = column->find_one(values[i]);
    assert(!row_id.is_na());
    // find_one() may return any matching row, so compare values, not ids.
    assert(values[i].match(values[row_id.raw()]));
  }

  // Test all the values with index if available.
  // (create_index() throws for types without index support; the catch
  // silently skips the indexed variant in that case.)
  try {
    column->create_index("Index", GRNXX_TREE_INDEX);
    for (size_t i = 0; i < NUM_ROWS; ++i) {
      assert(column->contains(values[i]));
      grnxx::Int row_id = column->find_one(values[i]);
      assert(!row_id.is_na());
      assert(values[i].match(values[row_id.raw()]));
    }
    column->remove_index("Index");
  } catch (...) {
  }

  // Remove N/A values.
  // NOTE(review): this assumes row ids correspond to the insertion index i
  // (i.e. ids are 0-based and dense) -- confirm against insert_row().
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    if (values[i].is_na()) {
      table->remove_row(grnxx::Int(i));
    }
  }

  // Test all the values.
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    if (!values[i].is_na()) {
      assert(column->contains(values[i]));
      grnxx::Int row_id = column->find_one(values[i]);
      assert(!row_id.is_na());
      assert(values[i].match(values[row_id.raw()]));
    }
  }
  // With every N/A row gone, N/A must no longer be found.
  assert(!column->contains(T::na()));
  assert(column->find_one(T::na()).is_na());

  // Test all the values with index if available.
  try {
    column->create_index("Index", GRNXX_TREE_INDEX);
    for (size_t i = 0; i < NUM_ROWS; ++i) {
      if (!values[i].is_na()) {
        assert(column->contains(values[i]));
        grnxx::Int row_id = column->find_one(values[i]);
        assert(!row_id.is_na());
        assert(values[i].match(values[row_id.raw()]));
      }
    }
    assert(!column->contains(T::na()));
    assert(column->find_one(T::na()).is_na());
    column->remove_index("Index");
  } catch (...) {
  }

  // Insert a trailing N/A value.
  // (A freshly inserted row has no value set, so the column reads N/A.)
  table->insert_row_at(grnxx::Int(NUM_ROWS));
  assert(column->contains(T::na()));
  assert(column->find_one(T::na()).match(grnxx::Int(NUM_ROWS)));
  try {
    column->create_index("Index", GRNXX_TREE_INDEX);
    assert(column->contains(T::na()));
    assert(column->find_one(T::na()).match(grnxx::Int(NUM_ROWS)));
    column->remove_index("Index");
  } catch (...) {
  }

  // Remove non-N/A values, leaving only the trailing N/A row.
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    if (!values[i].is_na()) {
      table->remove_row(grnxx::Int(i));
    }
  }

  // Test all the values.
  for (size_t i = 0; i < NUM_ROWS; ++i) {
    if (!values[i].is_na()) {
      assert(!column->contains(values[i]));
      assert(column->find_one(values[i]).is_na());
    }
  }
  assert(column->contains(T::na()));
  assert(column->find_one(T::na()).match(grnxx::Int(NUM_ROWS)));

  // Test all the values with index if available.
  try {
    column->create_index("Index", GRNXX_TREE_INDEX);
    for (size_t i = 0; i < NUM_ROWS; ++i) {
      if (!values[i].is_na()) {
        assert(!column->contains(values[i]));
        assert(column->find_one(values[i]).is_na());
      }
    }
    assert(column->contains(T::na()));
    assert(column->find_one(T::na()).match(grnxx::Int(NUM_ROWS)));
    column->remove_index("Index");
  } catch (...) {
  }
}
// Enumerate the timing-graph neighbors of one (kind, event) node: the events
// it depends on (direction == -1) or the events that depend on it
// (direction == +1). The switch dispatches on direction*kind, which relies
// on distinct kinds never being negatives of each other (see static_assert).
// NOTE(review): event encoding (section/block/dimension bit fields) is
// defined by the parse_* helpers elsewhere; the comments below describe only
// what this function visibly does with the parsed pieces.
static Array<Tuple<time_kind_t,event_t>> dependencies(const int direction, const time_kind_t kind, const event_t event) {
  GEODE_ASSERT(abs(direction)==1);
  static_assert(compress_kind==0,"Verify that -kind != kind for kinds we care about");

  // Parse event
  const section_t section = parse_section(event);
  const auto block = parse_block(event);
  // dimensions packs a symmetry (upper bits) and a dimension index (low 2 bits).
  const uint8_t dimensions = parse_dimensions(event),
                parent_to_child_symmetry = dimensions>>2,
                dimension = dimensions&3;
  const auto ekind = event&ekind_mask;

  // See mpi/graph for summarized explanation
  Array<Tuple<time_kind_t,event_t>> deps;
  switch (direction*kind) {
    case -allocate_line_kind: {
      // Line allocation has no predecessor.
      GEODE_ASSERT(ekind==line_ekind);
      break; }
    case response_recv_kind:
    case -request_send_kind: {
      // Map a block-lines event back to the parent section's line event.
      GEODE_ASSERT(ekind==block_lines_ekind);
      const auto other_kind = kind==response_recv_kind ? schedule_kind : allocate_line_kind;
      const auto parent_section = section.parent(dimension).transform(symmetry_t::invert_global(parent_to_child_symmetry));
      const auto permutation = section_t::quadrant_permutation(parent_to_child_symmetry);
      const uint8_t parent_dimension = permutation.find(dimension);
      const auto block_base = Vector<uint8_t,4>(block.subset(permutation)).remove_index(parent_dimension);
      deps.append(tuple(other_kind,line_event(parent_section,parent_dimension,block_base)));
      break; }
    case request_send_kind: {
      GEODE_ASSERT(ekind==block_lines_ekind);
      deps.append(tuple(response_send_kind,event));
      break; }
    case -response_send_kind:
    case response_send_kind: {
      GEODE_ASSERT(ekind==block_lines_ekind);
      deps.append(tuple(direction<0?request_send_kind:response_recv_kind,event));
      break; }
    case -response_recv_kind: {
      GEODE_ASSERT(ekind==block_lines_ekind);
      deps.append(tuple(response_send_kind,event));
      break; }
    case allocate_line_kind:
    case -schedule_kind: {
      // Fan out to the child section's block-lines events, one per block
      // along the child dimension.
      // NOTE(review): section.sum()==35 presumably marks a terminal section
      // with no children -- confirm against section_t.
      GEODE_ASSERT(ekind==line_ekind);
      if (section.sum()!=35) {
        const auto other_kind = kind==allocate_line_kind ? request_send_kind : response_recv_kind;
        const auto child_section = section.child(dimension).standardize<8>();
        const auto permutation = section_t::quadrant_permutation(symmetry_t::invert_global(child_section.y));
        const uint8_t child_dimension = permutation.find(dimension);
        const dimensions_t dimensions(child_section.y,child_dimension);
        auto child_block = Vector<uint8_t,4>(block.slice<0,3>().insert(0,dimension).subset(permutation));
        for (const uint8_t b : range(section_blocks(child_section.x)[child_dimension])) {
          child_block[child_dimension] = b;
          deps.append(tuple(other_kind,block_lines_event(child_section.x,dimensions,child_block)));
        }
      }
      break; }
    case schedule_kind: {
      GEODE_ASSERT(ekind==line_ekind);
      deps.append(tuple(compute_kind,event)); // Corresponds to many different microline compute events
      break; }
    case -compute_kind: // Note: all microline compute events have the same line event
    case compute_kind: {
      GEODE_ASSERT(ekind==line_ekind);
      deps.append(tuple(direction<0?schedule_kind:wakeup_kind,event));
      break; }
    case -wakeup_kind: {
      GEODE_ASSERT(ekind==line_ekind);
      deps.append(tuple(compute_kind,event)); // Corresponds to many different microline compute events
      break; }
    case wakeup_kind: {
      // Fan out to one output-send event per block along this dimension.
      GEODE_ASSERT(ekind==line_ekind);
      const auto block_base = block.slice<0,3>();
      for (const uint8_t b : range(section_blocks(section)[dimension]))
        deps.append(tuple(output_send_kind,block_line_event(section,dimension,block_base.insert(b,dimension))));
      break; }
    case -output_send_kind:
    case output_send_kind: {
      GEODE_ASSERT(ekind==block_line_ekind);
      if (direction<0)
        deps.append(tuple(wakeup_kind,line_event(section,dimension,block.remove_index(dimension))));
      else
        deps.append(tuple(output_recv_kind,event));
      break; }
    case -output_recv_kind:
    case output_recv_kind: {
      GEODE_ASSERT(ekind==block_line_ekind);
      deps.append(tuple(direction<0?output_send_kind:snappy_kind,event));
      break; }
    case -snappy_kind:
    case snappy_kind: {
      // snappy (compression) is a sink in the forward direction.
      GEODE_ASSERT(ekind==block_line_ekind);
      if (direction<0)
        deps.append(tuple(output_recv_kind,event));
      break; }
    default:
      break;
  }
  return deps;
}
int tree_chop(struct btree *btree, struct delete_info *info, millisecond_t deadline) { int depth = btree->root.depth, level = depth - 1, suspend = 0; struct cursor *cursor; struct buffer_head *leafbuf, **prev, *leafprev = NULL; struct btree_ops *ops = btree->ops; struct sb *sb = btree->sb; int ret; cursor = alloc_cursor(btree, 0); prev = malloc(sizeof(*prev) * depth); memset(prev, 0, sizeof(*prev) * depth); down_write(&btree->lock); probe(btree, info->resume, cursor); leafbuf = level_pop(cursor); /* leaf walk */ while (1) { ret = (ops->leaf_chop)(btree, info->key, bufdata(leafbuf)); if (ret) { mark_buffer_dirty(leafbuf); if (ret < 0) goto error_leaf_chop; } /* try to merge this leaf with prev */ if (leafprev) { struct vleaf *this = bufdata(leafbuf); struct vleaf *that = bufdata(leafprev); /* try to merge leaf with prev */ if ((ops->leaf_need)(btree, this) <= (ops->leaf_free)(btree, that)) { trace(">>> can merge leaf %p into leaf %p", leafbuf, leafprev); (ops->leaf_merge)(btree, that, this); remove_index(cursor, level); mark_buffer_dirty(leafprev); brelse_free(btree, leafbuf); //dirty_buffer_count_check(sb); goto keep_prev_leaf; } brelse(leafprev); } leafprev = leafbuf; keep_prev_leaf: //nanosleep(&(struct timespec){ 0, 50 * 1000000 }, NULL); //printf("time remaining: %Lx\n", deadline - gettime()); // if (deadline && gettime() > deadline) // suspend = -1; if (info->blocks && info->freed >= info->blocks) suspend = -1; /* pop and try to merge finished nodes */ while (suspend || level_finished(cursor, level)) { /* try to merge node with prev */ if (prev[level]) { assert(level); /* node has no prev */ struct bnode *this = cursor_node(cursor, level); struct bnode *that = bufdata(prev[level]); trace_off("check node %p against %p", this, that); trace_off("this count = %i prev count = %i", bcount(this), bcount(that)); /* try to merge with node to left */ if (bcount(this) <= sb->entries_per_node - bcount(that)) { trace(">>> can merge node %p into node %p", this, that); 
merge_nodes(that, this); remove_index(cursor, level - 1); mark_buffer_dirty(prev[level]); brelse_free(btree, level_pop(cursor)); //dirty_buffer_count_check(sb); goto keep_prev_node; } brelse(prev[level]); } prev[level] = level_pop(cursor); keep_prev_node: /* deepest key in the cursor is the resume address */ if (suspend == -1 && !level_finished(cursor, level)) { suspend = 1; /* only set resume once */ info->resume = from_be_u64((cursor->path[level].next)->key); } if (!level) { /* remove depth if possible */ while (depth > 1 && bcount(bufdata(prev[0])) == 1) { trace("drop btree level"); btree->root.block = bufindex(prev[1]); mark_btree_dirty(btree); brelse_free(btree, prev[0]); //dirty_buffer_count_check(sb); depth = --btree->root.depth; vecmove(prev, prev + 1, depth); //set_sb_dirty(sb); } //sb->snapmask &= ~snapmask; delete_snapshot_from_disk(); //set_sb_dirty(sb); //save_sb(sb); ret = suspend; goto out; } level--; trace_off(printf("pop to level %i, block %Lx, %i of %i nodes\n", level, bufindex(cursor->path[level].buffer), cursor->path[level].next - cursor_node(cursor, level)->entries, bcount(cursor_node(cursor, level)));); } /* push back down to leaf level */ while (level < depth - 1) { struct buffer_head *buffer = sb_bread(vfs_sb(sb), from_be_u64(cursor->path[level++].next++->block)); if (!buffer) { ret = -EIO; goto out; } level_push(cursor, buffer, ((struct bnode *)bufdata(buffer))->entries); trace_off(printf("push to level %i, block %Lx, %i nodes\n", level, bufindex(buffer), bcount(cursor_node(cursor, level)));); }