/*
  Check bitmap_is_subset() and bitmap_is_overlapping() by exercising all
  four set/unset combinations of one randomly chosen bit in two maps of
  the same size.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_compare(MY_BITMAP *map, uint bitsize)
{
  MY_BITMAP map2;
  uint32 map2buf[MAX_TESTED_BITMAP_SIZE];
  uint i, test_bit;
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  if (bitmap_init(&map2, map2buf, bitsize, FALSE))
  {
    /* %u: bitsize is unsigned (was %d). */
    diag("init error for bitsize %u", bitsize);
    return TRUE;
  }
  /* Test all 4 possible combinations of set/unset bits. */
  for (i=0; i < no_loops; i++)
  {
    test_bit=get_rand_bit(bitsize);
    bitmap_clear_bit(map, test_bit);
    bitmap_clear_bit(&map2, test_bit);
    if (!bitmap_is_subset(map, &map2))
      goto error_is_subset;
    bitmap_set_bit(map, test_bit);
    if (bitmap_is_subset(map, &map2))
      goto error_is_subset;
    bitmap_set_bit(&map2, test_bit);
    if (!bitmap_is_subset(map, &map2))
      goto error_is_subset;
    bitmap_clear_bit(map, test_bit);
    if (!bitmap_is_subset(map, &map2))
      goto error_is_subset;
    /* Note that test_bit is not cleared in map2. */
  }
  bitmap_clear_all(map);
  bitmap_clear_all(&map2);
  /* Test all 4 possible combinations of set/unset bits. */
  for (i=0; i < no_loops; i++)
  {
    test_bit=get_rand_bit(bitsize);
    if (bitmap_is_overlapping(map, &map2))
      goto error_is_overlapping;
    bitmap_set_bit(map, test_bit);
    if (bitmap_is_overlapping(map, &map2))
      goto error_is_overlapping;
    bitmap_set_bit(&map2, test_bit);
    if (!bitmap_is_overlapping(map, &map2))
      goto error_is_overlapping;
    bitmap_clear_bit(map, test_bit);
    if (bitmap_is_overlapping(map, &map2))
      goto error_is_overlapping;
    bitmap_clear_bit(&map2, test_bit);
    /* Note that test_bit is not cleared in map2. */
  }
  return FALSE;
error_is_subset:
  diag("is_subset error bitsize = %u", bitsize);
  return TRUE;
error_is_overlapping:
  diag("is_overlapping error bitsize = %u", bitsize);
  return TRUE;
}
/*
  Check bitmap_set_prefix()/bitmap_is_prefix(): build the same prefix of
  length test_bit three different ways (set_prefix; setting bits 0..
  test_bit-1 individually; set_all then clearing the tail) and verify
  bitmap_is_prefix() accepts each.  Then grow a prefix one bit at a time
  and verify the predicate both accepts valid prefixes and rejects maps
  with a stray bit above the prefix.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_prefix(MY_BITMAP *map, uint bitsize)
{
  uint i, j, test_bit;
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  for (i=0; i < no_loops; i++)
  {
    test_bit=get_rand_bit(bitsize);
    /* Way 1: the dedicated prefix call. */
    bitmap_set_prefix(map, test_bit);
    if (!bitmap_is_prefix(map, test_bit))
      goto error1;
    bitmap_clear_all(map);
    /* Way 2: set bits 0..test_bit-1 one by one. */
    for (j=0; j < test_bit; j++)
      bitmap_set_bit(map, j);
    if (!bitmap_is_prefix(map, test_bit))
      goto error2;
    /* Way 3: set everything, then clear from the top down.  The
       condition ~(j-test_bit) is non-zero until j wraps to
       test_bit-1 (unsigned arithmetic), so bits bitsize-1 down to
       test_bit inclusive get cleared — also correct for test_bit==0. */
    bitmap_set_all(map);
    for (j=bitsize - 1; ~(j-test_bit); j--)
      bitmap_clear_bit(map, j);
    if (!bitmap_is_prefix(map, test_bit))
      goto error3;
    bitmap_clear_all(map);
  }
  /* Grow a prefix bit by bit; at each step also set a random bit and
     check the predicate only accepts it if it falls inside the prefix. */
  for (i=0; i < bitsize; i++)
  {
    if (bitmap_is_prefix(map, i + 1))
      goto error4;
    bitmap_set_bit(map, i);
    if (!bitmap_is_prefix(map, i + 1))
      goto error5;
    test_bit=get_rand_bit(bitsize);
    bitmap_set_bit(map, test_bit);
    if (test_bit <= i && !bitmap_is_prefix(map, i + 1))
      goto error5;
    else if (test_bit > i)
    {
      if (bitmap_is_prefix(map, i + 1))
        goto error4;
      bitmap_clear_bit(map, test_bit);
    }
  }
  return FALSE;
error1:
  diag("prefix1 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
  return TRUE;
error2:
  diag("prefix2 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
  return TRUE;
error3:
  diag("prefix3 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
  return TRUE;
error4:
  diag("prefix4 error bitsize = %u, i = %u", bitsize,i);
  return TRUE;
error5:
  diag("prefix5 error bitsize = %u, i = %u", bitsize,i);
  return TRUE;
}
/* Remove VERSION from the kill list of partition P in TAB.  If that
   leaves the kill list empty, partition P has no pending kills left:
   drop it from the partition_in_use bitmap and release the kill-list
   bitmap (BITMAP_FREE presumably also nulls tab->kill_list[p] — the
   macro is not visible here; confirm before relying on it).  */
static inline void remove_from_partition_kill_list (temp_expr_table_p tab, int p, int version)
{
#ifdef ENABLE_CHECKING
  gcc_assert (tab->kill_list[p]);
#endif
  bitmap_clear_bit (tab->kill_list[p], version);
  if (bitmap_empty_p (tab->kill_list[p]))
    {
      bitmap_clear_bit (tab->partition_in_use, p);
      BITMAP_FREE (tab->kill_list[p]);
    }
}
/*
  Check bitmap_intersect(), both on maps of (possibly) different sizes
  with a few random bits set, and against an all-clear second map.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_intersect(MY_BITMAP *map, uint bitsize)
{
  uint bitsize2 = 1 + get_rand_bit(MAX_TESTED_BITMAP_SIZE - 1);
  MY_BITMAP map2;
  uint32 map2buf[MAX_TESTED_BITMAP_SIZE];
  uint i, test_bit1, test_bit2, test_bit3;
  if (bitmap_init(&map2, map2buf, bitsize2, FALSE))
  {
    /* %u: bitsize2 is unsigned (was %d). */
    diag("init error for bitsize %u", bitsize2);
    return TRUE;
  }
  /*
    map gets bits test_bit1 and test_bit2; map2 gets test_bit3 and,
    when it fits in the smaller map, test_bit2.  After the intersect
    only bits present in both maps may remain.
  */
  test_bit1= get_rand_bit(bitsize);
  test_bit2= get_rand_bit(bitsize);
  bitmap_set_bit(map, test_bit1);
  bitmap_set_bit(map, test_bit2);
  test_bit3= get_rand_bit(bitsize2);
  bitmap_set_bit(&map2, test_bit3);
  if (test_bit2 < bitsize2)
    bitmap_set_bit(&map2, test_bit2);
  bitmap_intersect(map, &map2);
  if (test_bit2 < bitsize2)
  {
    if (!bitmap_is_set(map, test_bit2))
      goto error;
    bitmap_clear_bit(map, test_bit2);
  }
  /*
    If test_bit1 == test_bit3 the bit survives the intersection.  But
    when the same bit is also test_bit2 (and test_bit2 < bitsize2), the
    branch above has already verified and cleared it; re-checking it
    here raised a spurious error in the original code, so skip that
    case.
  */
  if (test_bit1 == test_bit3 &&
      !(test_bit2 < bitsize2 && test_bit2 == test_bit1))
  {
    if (!bitmap_is_set(map, test_bit1))
      goto error;
    bitmap_clear_bit(map, test_bit1);
  }
  /* Everything that could survive has been cleared by now. */
  if (!bitmap_is_clear_all(map))
    goto error;
  /* Intersecting with an all-clear map must clear everything. */
  bitmap_set_all(map);
  bitmap_set_all(&map2);
  for (i=0; i < bitsize2; i++)
    bitmap_clear_bit(&map2, i);
  bitmap_intersect(map, &map2);
  if (!bitmap_is_clear_all(map))
    goto error;
  return FALSE;
error:
  diag("intersect error bitsize = %u, bit1 = %u, bit2 = %u, bit3 = %u",
       bitsize, test_bit1, test_bit2, test_bit3);
  return TRUE;
}
/* Mark register REGNO in MODE as dead at program point POINT. Update BB_GEN_PSEUDOS and BB_KILLED_PSEUDOS. Return TRUE if the liveness tracking sets were modified, or FALSE if nothing changed. */
static bool mark_regno_dead (int regno, machine_mode mode, int point)
{
  int last;
  bool changed = false;
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      /* Hard register: kill each component hard reg it occupies in
	 MODE.  NOTE(review): this path never sets CHANGED, so the
	 function always returns FALSE for hard registers even though
	 make_hard_regno_dead may modify state — confirm callers only
	 care about pseudo-register changes.  */
      for (last = regno + hard_regno_nregs[regno][mode];
	   regno < last;
	   regno++)
	make_hard_regno_dead (regno);
    }
  else
    {
      /* Pseudo: only report a change when it was actually live.  */
      if (sparseset_bit_p (pseudos_live, regno))
	{
	  mark_pseudo_dead (regno, point);
	  changed = true;
	}
      /* A dead pseudo is no longer generated by this BB and is killed
	 here; these updates are unconditional and do not affect
	 CHANGED.  */
      bitmap_clear_bit (bb_gen_pseudos, regno);
      bitmap_set_bit (bb_killed_pseudos, regno);
    }
  return changed;
}
/* Return continuation C to its allocator.  Clears C's bit in its page
   bitmap B (at index IDX); if C has a supermap S (i.e. it was not
   allocated from the first page), also clear the corresponding
   "page is full" bit B_IDX there.  When the page bitmap just became
   empty and a supermap exists, the page is a candidate for being
   returned to the kernel via madvise(2).  */
DISPATCH_ALLOC_NOINLINE static void _dispatch_alloc_continuation_free(dispatch_continuation_t c)
{
	bitmap_t *b, *s;
	unsigned int b_idx, idx;
	get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx);
	/* bitmap_clear_bit returns whether the bitmap became empty
	   (per the use below) — TODO confirm against its definition.  */
	bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY);
	if (slowpath(s)) {
		(void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY);
	}
	// We only try to madvise(2) pages outside of the first page.
	// (Allocations in the first page do not have a supermap entry.)
	if (slowpath(bitmap_now_empty) && slowpath(s)) {
		return _dispatch_alloc_maybe_madvise_page(c);
	}
}
/* Mark PAGE_Q consecutive pages, starting at START_PAGE, as free in the
   allocator's bitmap and credit the freed bytes to its free counter. */
static void mark_n_free(struct page_allocator *allocator, unsigned int start_page, unsigned int page_q)
{
	unsigned int idx;
	unsigned int end;

	assert(allocator);
	end = start_page + page_q;
	assert(end <= allocator->pages_n);

	for (idx = start_page; idx < end; idx++)
		bitmap_clear_bit(allocator->bitmap, idx);

	allocator->free += page_q * allocator->page_size;
}
/* Check that every byte covered by the store MEM (at stack offset OFF)
   lies inside [MIN_SP_OFF, MAX_SP_OFF) and was still pending in
   SP_BYTES; each covered byte is cleared from SP_BYTES as it is
   checked.  Return false as soon as any byte fails either test.  */
static bool
check_argument_store (rtx mem, HOST_WIDE_INT off, HOST_WIDE_INT min_sp_off,
		      HOST_WIDE_INT max_sp_off, bitmap sp_bytes)
{
  HOST_WIDE_INT limit = off + GET_MODE_SIZE (GET_MODE (mem));
  HOST_WIDE_INT byte;

  for (byte = off; byte < limit; byte++)
    {
      if (byte < min_sp_off || byte >= max_sp_off)
	return false;
      if (!bitmap_clear_bit (sp_bytes, byte - min_sp_off))
	return false;
    }
  return true;
}
/* Transition TSK to STATE, keeping the per-priority run queues and the
   priority bitmap in sync.  Only transitions into or out of
   TASK_RUNNING touch the queues; the bitmap bit for TSK's priority is
   cleared only when its run queue becomes empty.  Always returns 0.  */
int set_task_state(struct task_struct *tsk, int state)
{
	int was_running = (tsk->state == TASK_RUNNING);
	int now_running = (state == TASK_RUNNING);

	if (now_running && !was_running) {
		/* Became runnable: enqueue and flag its priority level. */
		list_add_tail(&tsk->rq, &pri_runq[tsk->prio]);
		bitmap_set_bit(&pri_bitmap, tsk->prio);
	} else if (!now_running && was_running) {
		/* Left the run queue: dequeue; drop the priority bit
		   only if no other task remains at this level. */
		list_del_init(&tsk->rq);
		if (list_empty(&pri_runq[tsk->prio]))
			bitmap_clear_bit(&pri_bitmap, tsk->prio);
	}
	tsk->state = state;
	return 0;
}
/*
  Check bitmap_get_first_set() (position of the first 1-bit) and
  bitmap_get_first() (position of the first 0-bit): first on maps with
  no bit to find (expect MY_BIT_NONE), then on maps where exactly one
  randomly chosen bit differs from the rest.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize)
{
  uint idx, test_bit= 0;
  uint no_loops= bitsize > 128 ? 128 : bitsize;

  /* Clearing every bit one by one must leave no set bit to find. */
  bitmap_set_all(map);
  for (idx= 0; idx < bitsize; idx++)
    bitmap_clear_bit(map, idx);
  if (bitmap_get_first_set(map) != MY_BIT_NONE)
    goto error1;

  /* Setting every bit one by one must leave no clear bit to find. */
  bitmap_clear_all(map);
  for (idx= 0; idx < bitsize; idx++)
    bitmap_set_bit(map, idx);
  if (bitmap_get_first(map) != MY_BIT_NONE)
    goto error2;

  bitmap_clear_all(map);
  for (idx= 0; idx < no_loops; idx++)
  {
    test_bit= get_rand_bit(bitsize);

    /* Lone set bit in an otherwise clear map. */
    bitmap_set_bit(map, test_bit);
    if (bitmap_get_first_set(map) != test_bit)
      goto error1;

    /* Lone clear bit in an otherwise set map. */
    bitmap_set_all(map);
    bitmap_clear_bit(map, test_bit);
    if (bitmap_get_first(map) != test_bit)
      goto error2;

    bitmap_clear_all(map);
  }
  return FALSE;
error1:
  diag("get_first_set error bitsize=%u,prefix_size=%u",bitsize,test_bit);
  return TRUE;
error2:
  diag("get_first error bitsize= %u, prefix_size= %u",bitsize,test_bit);
  return TRUE;
}
/* Check that SIZE bytes starting at stack offset OFF all lie inside
   [MIN_SP_OFF, MAX_SP_OFF) and are still marked pending in SP_BYTES,
   clearing each byte's bit as it is consumed.  Return false on the
   first out-of-range or already-cleared byte.  */
static bool
check_argument_store (HOST_WIDE_INT size, HOST_WIDE_INT off,
		      HOST_WIDE_INT min_sp_off, HOST_WIDE_INT max_sp_off,
		      bitmap sp_bytes)
{
  HOST_WIDE_INT byte = off;
  HOST_WIDE_INT limit = off + size;

  while (byte < limit)
    {
      if (byte < min_sp_off || byte >= max_sp_off)
	return false;
      /* bitmap_clear_bit returns false if the bit was already clear,
	 i.e. this byte was not pending.  */
      if (!bitmap_clear_bit (sp_bytes, byte - min_sp_off))
	return false;
      byte++;
    }
  return true;
}
/* Pop and return the basic block at the head of the CFG worklist,
   advancing the circular head index and clearing the block's
   "on the list" bit in BB_IN_LIST.  */
static basic_block cfg_blocks_get (void)
{
  basic_block bb;

  /* Verify the worklist is non-empty *before* reading its head
     element; the original read cfg_blocks[cfg_blocks_head] first,
     accessing the vector before the invariant was checked.  */
  gcc_assert (!cfg_blocks_empty_p ());

  bb = cfg_blocks[cfg_blocks_head];
  gcc_assert (bb);

  /* Circular buffer: wrap the head index around the vector.  */
  cfg_blocks_head = ((cfg_blocks_head + 1) % cfg_blocks.length ());
  --cfg_blocks_num;
  bitmap_clear_bit (bb_in_list, bb->index);

  return bb;
}
/* One sieving pass for prime P: clear (mark composite) every bitmap
   position corresponding to a multiple of P, for values up to LIMIT,
   resuming after the sieve's previously processed limit S->limit so
   already-sieved positions are skipped.  VAL_TO_BIT/BIT_TO_VAL/
   LIMIT_TO_NBITS are macros not visible here; presumably the bitmap
   encodes odd numbers only — TODO confirm.  */
static void eratosthenes_once(struct eratosthenes *s, unsigned long limit, unsigned long p)
{
	/* Start at 3*p, the first composite multiple of p worth
	   clearing (p itself must stay marked prime).  */
	unsigned long n = VAL_TO_BIT(3*p);
	unsigned long obits = LIMIT_TO_NBITS(s->limit);
	if (obits > n) {
		/* Resuming past the old limit: advance n to the first
		   position >= obits that is congruent to n modulo p,
		   so no multiple is skipped or cleared twice.  */
		n = obits + p - 1 - ((obits - n - 1) % p);
	}
	/* Sanity: n still denotes a proper multiple of p (> p).  */
	assert((BIT_TO_VAL(n) % p) == 0);
	assert((BIT_TO_VAL(n) / p) > 1);
	while (n < LIMIT_TO_NBITS(limit)) {
		bitmap_clear_bit(s->b, n);
		n += p;
	}
}
/*
  Exercise the whole-map operations: set_all/clear_all, the predicates
  bitmap_is_set_all()/bitmap_is_clear_all(), prefix checks at both
  extremes (full-length and zero-length prefixes), and the bit-by-bit
  equivalents of set_all/clear_all.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_get_all_bits(MY_BITMAP *map, uint bitsize)
{
  uint bit;

  /* set_all must satisfy both predicates for a full map. */
  bitmap_set_all(map);
  if (!bitmap_is_set_all(map))
    goto error1;
  if (!bitmap_is_prefix(map, bitsize))
    goto error5;

  /* clear_all must satisfy both predicates for an empty map. */
  bitmap_clear_all(map);
  if (!bitmap_is_clear_all(map))
    goto error2;
  if (!bitmap_is_prefix(map, 0))
    goto error6;

  /* Setting each bit individually must be equivalent to set_all. */
  for (bit= 0; bit < bitsize; bit++)
    bitmap_set_bit(map, bit);
  if (!bitmap_is_set_all(map))
    goto error3;

  /* Clearing each bit individually must be equivalent to clear_all. */
  for (bit= 0; bit < bitsize; bit++)
    bitmap_clear_bit(map, bit);
  if (!bitmap_is_clear_all(map))
    goto error4;

  return FALSE;
error1:
  diag("Error in set_all, bitsize = %u", bitsize);
  return TRUE;
error2:
  diag("Error in clear_all, bitsize = %u", bitsize);
  return TRUE;
error3:
  diag("Error in bitmap_is_set_all, bitsize = %u", bitsize);
  return TRUE;
error4:
  diag("Error in bitmap_is_clear_all, bitsize = %u", bitsize);
  return TRUE;
error5:
  diag("Error in set_all through set_prefix, bitsize = %u", bitsize);
  return TRUE;
error6:
  diag("Error in clear_all through set_prefix, bitsize = %u", bitsize);
  return TRUE;
}
/*
  Check single-bit round trips: a randomly chosen bit must read back as
  set after bitmap_set_bit() and as clear after bitmap_clear_bit().

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_set_get_clear_bit(MY_BITMAP *map, uint bitsize)
{
  uint i, test_bit;
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  for (i=0; i < no_loops; i++)
  {
    test_bit= get_rand_bit(bitsize);
    bitmap_set_bit(map, test_bit);
    if (!bitmap_is_set(map, test_bit))
      goto error1;
    bitmap_clear_bit(map, test_bit);
    if (bitmap_is_set(map, test_bit))
      goto error2;
  }
  return FALSE;
error1:
  /* Report via diag() like every other test in this file; the original
     printf() bypassed the TAP output stream and lacked a newline. */
  diag("Error in set bit, bit %u, bitsize = %u", test_bit, bitsize);
  return TRUE;
error2:
  diag("Error in clear bit, bit %u, bitsize = %u", test_bit, bitsize);
  return TRUE;
}
/* Walk the defs in DEF_REC whose DF_REF_AT_TOP flag matches TOP_FLAG,
   updating the current-definition table REG_DEFS and the "multiple
   defs seen" bitmap LOCAL_MD.  The previous value of each touched
   reg_defs entry is recorded on reg_defs_stack so the traversal can be
   undone when leaving the block (presumably by a leave_block callback,
   per the comment below — not visible here).  */
static void process_defs (df_ref *def_rec, int top_flag)
{
  df_ref def;
  while ((def = *def_rec++) != NULL)
    {
      df_ref curr_def = reg_defs[DF_REF_REGNO (def)];
      unsigned int dregno;
      /* Skip defs that belong to the other end of the block.  */
      if ((DF_REF_FLAGS (def) & DF_REF_AT_TOP) != top_flag)
        continue;
      dregno = DF_REF_REGNO (def);
      if (curr_def)
        reg_defs_stack.safe_push (curr_def);
      else
        {
          /* Do not store anything if "transitioning" from NULL to NULL. But otherwise, push a special entry on the stack to tell the leave_block callback that the entry in reg_defs was NULL. */
          if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
            ;
          else
            reg_defs_stack.safe_push (def);
        }
      if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
        {
          /* "May" def: the reaching definition becomes ambiguous.  */
          bitmap_set_bit (local_md, dregno);
          reg_defs[dregno] = NULL;
        }
      else
        {
          /* Ordinary def: this def now reaches.  */
          bitmap_clear_bit (local_md, dregno);
          reg_defs[dregno] = def;
        }
    }
}
static bool try_unroll_loop_completely (struct loop *loop, edge exit, tree niter, enum unroll_level ul, HOST_WIDE_INT maxiter, location_t locus) { unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns; struct loop_size size; bool n_unroll_found = false; edge edge_to_cancel = NULL; int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS; /* See if we proved number of iterations to be low constant. EXIT is an edge that will be removed in all but last iteration of the loop. EDGE_TO_CACNEL is an edge that will be removed from the last iteration of the unrolled sequence and is expected to make the final loop not rolling. If the number of execution of loop is determined by standard induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving from the iv test. */ if (tree_fits_uhwi_p (niter)) { n_unroll = tree_to_uhwi (niter); n_unroll_found = true; edge_to_cancel = EDGE_SUCC (exit->src, 0); if (edge_to_cancel == exit) edge_to_cancel = EDGE_SUCC (exit->src, 1); } /* We do not know the number of iterations and thus we can not eliminate the EXIT edge. */ else exit = NULL; /* See if we can improve our estimate by using recorded loop bounds. */ if (maxiter >= 0 && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll)) { n_unroll = maxiter; n_unroll_found = true; /* Loop terminates before the IV variable test, so we can not remove it in the last iteration. 
*/ edge_to_cancel = NULL; } if (!n_unroll_found) return false; if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d " "(--param max-completely-peeled-times limit reached).\n", loop->num); return false; } if (!edge_to_cancel) edge_to_cancel = loop_edge_to_cancel (loop); if (n_unroll) { sbitmap wont_exit; edge e; unsigned i; bool large; vec<edge> to_remove = vNULL; if (ul == UL_SINGLE_ITER) return false; large = tree_estimate_loop_size (loop, exit, edge_to_cancel, &size, PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)); ninsns = size.overall; if (large) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: it is too large.\n", loop->num); return false; } unr_insns = estimated_unrolled_size (&size, n_unroll); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Loop size: %d\n", (int) ninsns); fprintf (dump_file, " Estimated size after unrolling: %d\n", (int) unr_insns); } /* If the code is going to shrink, we don't need to be extra cautious on guessing if the unrolling is going to be profitable. */ if (unr_insns /* If there is IV variable that will become constant, we save one instruction in the loop prologue we do not account otherwise. */ <= ninsns + (size.constant_iv != false)) ; /* We unroll only inner loops, because we do not consider it profitable otheriwse. We still can cancel loopback edge of not rolling loop; this is always a good idea. */ else if (ul == UL_NO_GROWTH) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: size would grow.\n", loop->num); return false; } /* Outer loops tend to be less interesting candidates for complete unrolling unless we can do a lot of propagation into the inner loop body. For now we disable outer loop unrolling when the code would grow. 
*/ else if (loop->inner) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: " "it is not innermost and code would grow.\n", loop->num); return false; } /* If there is call on a hot path through the loop, then there is most probably not much to optimize. */ else if (size.num_non_pure_calls_on_hot_path) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: " "contains call and code would grow.\n", loop->num); return false; } /* If there is pure/const call in the function, then we can still optimize the unrolled loop body if it contains some other interesting code than the calls and code storing or cumulating the return value. */ else if (size.num_pure_calls_on_hot_path /* One IV increment, one test, one ivtmp store and one useful stmt. That is about minimal loop doing pure call. */ && (size.non_call_stmts_on_hot_path <= 3 + size.num_pure_calls_on_hot_path)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: " "contains just pure calls and code would grow.\n", loop->num); return false; } /* Complette unrolling is major win when control flow is removed and one big basic block is created. If the loop contains control flow the optimization may still be a win because of eliminating the loop overhead but it also may blow the branch predictor tables. Limit number of branches on the hot path through the peeled sequence. 
*/ else if (size.num_branches_on_hot_path * (int)n_unroll > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: " " number of branches on hot path in the unrolled sequence" " reach --param max-peel-branches limit.\n", loop->num); return false; } else if (unr_insns > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Not unrolling loop %d: " "(--param max-completely-peeled-insns limit reached).\n", loop->num); return false; } dump_printf_loc (report_flags, locus, "loop turned into non-loop; it never loops.\n"); initialize_original_copy_tables (); wont_exit = sbitmap_alloc (n_unroll + 1); bitmap_ones (wont_exit); bitmap_clear_bit (wont_exit, 0); if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), n_unroll, wont_exit, exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ | DLTHE_FLAG_COMPLETTE_PEEL)) { free_original_copy_tables (); free (wont_exit); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Failed to duplicate the loop\n"); return false; } FOR_EACH_VEC_ELT (to_remove, i, e) { bool ok = remove_path (e); gcc_assert (ok); } to_remove.release (); free (wont_exit); free_original_copy_tables (); }
/*
  Unpack a row image in the old (pre-5.1.18) binlog row format into
  TABLE->record[0] (or another record buffer given via RECORD).

  Copies the master's null-byte block, then unpacks each of the first
  COLCNT fields whose bit is set in COLS; fields not sent by the master
  are cleared from RW_SET.  Remaining slave-only columns get their
  defaults, except that for WRITE_ROWS_EVENT a NOT NULL column without
  a default raises ER_NO_DEFAULT_FOR_FIELD (reported via RLI).

  ROW_END is set to the first byte after the consumed row data, and
  MASTER_RECLENGTH (if non-NULL) to the master's record length.

  Returns 0 on success or the last error code encountered.
*/
int unpack_row_old(Relay_log_info *rli,
                   TABLE *table, uint const colcnt,
                   uchar *record, uchar const *row,
                   MY_BITMAP const *cols,
                   uchar const **row_end, ulong *master_reclength,
                   MY_BITMAP* const rw_set, Log_event_type const event_type)
{
  DBUG_ASSERT(record && row);
  /* Offset from table->record[0] to the target buffer; fields are
     temporarily shifted by this amount while unpacking. */
  my_ptrdiff_t const offset= record - (uchar*) table->record[0];
  size_t master_null_bytes= table->s->null_bytes;

  if (colcnt != table->s->fields)
  {
    /* Master sent fewer columns than the slave has: find how many null
       bytes the master's row image carries by scanning backwards for
       the last common column with a defined null byte. */
    Field **fptr= &table->field[colcnt-1];
    do
      master_null_bytes= (*fptr)->last_null_byte();
    while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF &&
           fptr-- > table->field);

    /*
      If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time,
      there were no nullable fields nor BIT fields at all in the
      columns that are common to the master and the slave. In that
      case, there is only one null byte holding the X bit.

      OBSERVE! There might still be nullable columns following the
      common columns, so table->s->null_bytes might be greater than 1.
    */
    if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF)
      master_null_bytes= 1;
  }

  DBUG_ASSERT(master_null_bytes <= table->s->null_bytes);
  memcpy(record, row, master_null_bytes);            // [1]
  int error= 0;

  /* Assume all columns are written/read; bits are cleared below for
     columns the master did not send. */
  bitmap_set_all(rw_set);

  Field **const begin_ptr = table->field;
  Field **field_ptr;
  uchar const *ptr= row + master_null_bytes;
  Field **const end_ptr= begin_ptr + colcnt;
  for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr)
  {
    Field *const f= *field_ptr;

    if (bitmap_is_set(cols, field_ptr - begin_ptr))
    {
      /* Shift the field to point into RECORD, unpack, shift back. */
      f->move_field_offset(offset);
      ptr= f->unpack(f->ptr, ptr);
      f->move_field_offset(-offset);
      /* Field...::unpack() cannot return 0 */
      DBUG_ASSERT(ptr != NULL);
    }
    else
      bitmap_clear_bit(rw_set, field_ptr - begin_ptr);
  }

  *row_end = ptr;
  if (master_reclength)
  {
    if (*field_ptr)
      *master_reclength = (*field_ptr)->ptr - table->record[0];
    else
      *master_reclength = table->s->reclength;
  }

  /*
    Set properties for remaining columns, if there are any. We let the
    corresponding bit in the write_set be set, to write the value if
    it was not there already.

    We iterate over all remaining columns, even if there were an
    error, to get as many error messages as possible.  We are still
    able to return a pointer to the next row, so redo that.

    This generation of error messages is only relevant when inserting
    new rows.
   */
  for ( ; *field_ptr ; ++field_ptr)
  {
    uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;

    DBUG_PRINT("debug", ("flags = 0x%x, mask = 0x%x, flags & mask = 0x%x",
                         (*field_ptr)->flags, mask,
                         (*field_ptr)->flags & mask));

    if (event_type == WRITE_ROWS_EVENT &&
        ((*field_ptr)->flags & mask) == mask)
    {
      rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD,
                  "Field `%s` of table `%s`.`%s` "
                  "has no default value and cannot be NULL",
                  (*field_ptr)->field_name, table->s->db.str,
                  table->s->table_name.str);
      error = ER_NO_DEFAULT_FOR_FIELD;
    }
    else
      (*field_ptr)->set_default();
  }

  return error;
}
/*
 * Cross-check struct bitmap against OpenSSL's BIGNUM: for every
 * combination of up to three bit positions i/j/k in [0, NTESTS)
 * (-1 meaning "skip this position"), set the same bits in both
 * representations and verify bit membership, bit/byte lengths,
 * serialisation, deserialisation, and clearing all agree.
 * The whole combinatorial loop is compiled out under wolfSSL, which
 * lacks BN_clear()/BN_clear_bit().
 */
void tests(void)
{
#ifndef USING_WOLFSSL
	struct bitmap *b;
	BIGNUM *bn;
	size_t len;
	int i, j, k, n;
	u_char bbuf[1024], bnbuf[1024];
	int r;
#else
	struct bitmap *b;
	BIGNUM *bn;
#endif

	TEST_START("bitmap_new");
	b = bitmap_new();
	ASSERT_PTR_NE(b, NULL);
	bn = BN_new();
	ASSERT_PTR_NE(bn, NULL);
	TEST_DONE();

	TEST_START("bitmap_set_bit / bitmap_test_bit");
#ifndef USING_WOLFSSL
	for (i = -1; i < NTESTS; i++) {
		for (j = -1; j < NTESTS; j++) {
			for (k = -1; k < NTESTS; k++) {
				bitmap_zero(b);
				/* wolfSSL does not have support for BN_clear at this time */
				BN_clear(bn);
				test_subtest_info("set %d/%d/%d", i, j, k);
				/* Set bits */
				if (i >= 0) {
					ASSERT_INT_EQ(bitmap_set_bit(b, i), 0);
					ASSERT_INT_EQ(BN_set_bit(bn, i), 1);
				}
				if (j >= 0) {
					ASSERT_INT_EQ(bitmap_set_bit(b, j), 0);
					ASSERT_INT_EQ(BN_set_bit(bn, j), 1);
				}
				if (k >= 0) {
					ASSERT_INT_EQ(bitmap_set_bit(b, k), 0);
					ASSERT_INT_EQ(BN_set_bit(bn, k), 1);
				}

				/* Check perfect match between bitmap and bn */
				test_subtest_info("match %d/%d/%d", i, j, k);
				for (n = 0; n < NTESTS; n++) {
					ASSERT_INT_EQ(BN_is_bit_set(bn, n),
					    bitmap_test_bit(b, n));
				}

				/* Test length calculations */
				test_subtest_info("length %d/%d/%d", i, j, k);
				ASSERT_INT_EQ(BN_num_bits(bn),
				    (int)bitmap_nbits(b));
				ASSERT_INT_EQ(BN_num_bytes(bn),
				    (int)bitmap_nbytes(b));

				/* Test serialisation */
				test_subtest_info("serialise %d/%d/%d",
				    i, j, k);
				len = bitmap_nbytes(b);
				memset(bbuf, 0xfc, sizeof(bbuf));
				ASSERT_INT_EQ(bitmap_to_string(b, bbuf,
				    sizeof(bbuf)), 0);
				/* Bytes past the serialised length must be
				   untouched.  NOTE(review): "n = len" mixes
				   int and size_t; fine while len fits in
				   int, which holds for NTESTS-sized maps. */
				for (n = len; n < (int)sizeof(bbuf); n++)
					ASSERT_U8_EQ(bbuf[n], 0xfc);
				r = BN_bn2bin(bn, bnbuf);
				ASSERT_INT_GE(r, 0);
				ASSERT_INT_EQ(r, (int)len);
				ASSERT_MEM_EQ(bbuf, bnbuf, len);

				/* Test deserialisation */
				test_subtest_info("deserialise %d/%d/%d",
				    i, j, k);
				bitmap_zero(b);
				ASSERT_INT_EQ(bitmap_from_string(b, bnbuf,
				    len), 0);
				for (n = 0; n < NTESTS; n++) {
					ASSERT_INT_EQ(BN_is_bit_set(bn, n),
					    bitmap_test_bit(b, n));
				}

				/* Test clearing bits */
				test_subtest_info("clear %d/%d/%d",
				    i, j, k);
				for (n = 0; n < NTESTS; n++) {
					ASSERT_INT_EQ(bitmap_set_bit(b, n), 0);
					ASSERT_INT_EQ(BN_set_bit(bn, n), 1);
				}
				if (i >= 0) {
					bitmap_clear_bit(b, i);
					/* wolfSSL does not have support for BN_clear_bit at this time */
					BN_clear_bit(bn, i);
				}
				if (j >= 0) {
					bitmap_clear_bit(b, j);
					/* wolfSSL does not have support for BN_clear_bit at this time */
					BN_clear_bit(bn, j);
				}
				if (k >= 0) {
					bitmap_clear_bit(b, k);
					/* wolfSSL does not have support for BN_clear_bit at this time */
					BN_clear_bit(bn, k);
				}
				for (n = 0; n < NTESTS; n++) {
					ASSERT_INT_EQ(BN_is_bit_set(bn, n),
					    bitmap_test_bit(b, n));
				}
			}
		}
	}
#endif /* USING_WOLFSSL */
	bitmap_free(b);
	BN_free(bn);
	TEST_DONE();
}
/*
  Check bitmap_intersect(), bitmap_union(), bitmap_xor(),
  bitmap_subtract() and bitmap_invert() using prefix maps, whose
  results can be constructed independently: for prefixes of lengths a
  and b, intersect == prefix(min(a,b)), union == prefix(max(a,b)),
  xor == prefix(max) with the first min bits cleared, subtract ==
  prefix(a) with the first b bits cleared (empty when b >= a), and
  invert(prefix(a)) == all bits except the first a.

  RETURN
    FALSE  ok
    TRUE   error (diagnostic printed with diag())
*/
my_bool test_compare_operators(MY_BITMAP *map, uint bitsize)
{
  uint i, j, test_bit1, test_bit2, test_bit3,test_bit4;
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  MY_BITMAP map2_obj, map3_obj;
  MY_BITMAP *map2= &map2_obj, *map3= &map3_obj;
  uint32 map2buf[MAX_TESTED_BITMAP_SIZE];
  uint32 map3buf[MAX_TESTED_BITMAP_SIZE];
  bitmap_init(&map2_obj, map2buf, bitsize, FALSE);
  bitmap_init(&map3_obj, map3buf, bitsize, FALSE);
  bitmap_clear_all(map2);
  bitmap_clear_all(map3);
  for (i=0; i < no_loops; i++)
  {
    /* intersect of two prefixes == the shorter prefix */
    test_bit1=get_rand_bit(bitsize);
    bitmap_set_prefix(map, test_bit1);
    test_bit2=get_rand_bit(bitsize);
    bitmap_set_prefix(map2, test_bit2);
    bitmap_intersect(map, map2);
    test_bit3= test_bit2 < test_bit1 ? test_bit2 : test_bit1;
    bitmap_set_prefix(map3, test_bit3);
    if (!bitmap_cmp(map, map3))
      goto error1;
    bitmap_clear_all(map);
    bitmap_clear_all(map2);
    bitmap_clear_all(map3);
    /* union of two prefixes == the longer prefix */
    test_bit1=get_rand_bit(bitsize);
    test_bit2=get_rand_bit(bitsize);
    test_bit3=get_rand_bit(bitsize);
    bitmap_set_prefix(map, test_bit1);
    bitmap_set_prefix(map2, test_bit2);
    test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1;
    bitmap_set_prefix(map3, test_bit3);
    bitmap_union(map, map2);
    if (!bitmap_cmp(map, map3))
      goto error2;
    bitmap_clear_all(map);
    bitmap_clear_all(map2);
    bitmap_clear_all(map3);
    /* xor of two prefixes == longer prefix minus the shorter prefix */
    test_bit1=get_rand_bit(bitsize);
    test_bit2=get_rand_bit(bitsize);
    test_bit3=get_rand_bit(bitsize);
    bitmap_set_prefix(map, test_bit1);
    bitmap_set_prefix(map2, test_bit2);
    bitmap_xor(map, map2);
    test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1;
    test_bit4= test_bit2 < test_bit1 ?
    test_bit2 : test_bit1;
    bitmap_set_prefix(map3, test_bit3);
    for (j=0; j < test_bit4; j++)
      bitmap_clear_bit(map3, j);
    if (!bitmap_cmp(map, map3))
      goto error3;
    bitmap_clear_all(map);
    bitmap_clear_all(map2);
    bitmap_clear_all(map3);
    /* subtract: longer prefix minus shorter; empty when b >= a,
       so map3 stays cleared in that case */
    test_bit1=get_rand_bit(bitsize);
    test_bit2=get_rand_bit(bitsize);
    test_bit3=get_rand_bit(bitsize);
    bitmap_set_prefix(map, test_bit1);
    bitmap_set_prefix(map2, test_bit2);
    bitmap_subtract(map, map2);
    if (test_bit2 < test_bit1)
    {
      bitmap_set_prefix(map3, test_bit1);
      for (j=0; j < test_bit2; j++)
        bitmap_clear_bit(map3, j);
    }
    if (!bitmap_cmp(map, map3))
      goto error4;
    bitmap_clear_all(map);
    bitmap_clear_all(map2);
    bitmap_clear_all(map3);
    /* invert of a prefix == everything above the prefix */
    test_bit1=get_rand_bit(bitsize);
    bitmap_set_prefix(map, test_bit1);
    bitmap_invert(map);
    bitmap_set_all(map3);
    for (j=0; j < test_bit1; j++)
      bitmap_clear_bit(map3, j);
    if (!bitmap_cmp(map, map3))
      goto error5;
    bitmap_clear_all(map);
    bitmap_clear_all(map3);
  }
  return FALSE;
error1:
  diag("intersect error bitsize=%u,size1=%u,size2=%u", bitsize,
  test_bit1,test_bit2);
  return TRUE;
error2:
  diag("union error bitsize=%u,size1=%u,size2=%u", bitsize,
  test_bit1,test_bit2);
  return TRUE;
error3:
  diag("xor error bitsize=%u,size1=%u,size2=%u", bitsize,
  test_bit1,test_bit2);
  return TRUE;
error4:
  diag("subtract error bitsize=%u,size1=%u,size2=%u", bitsize,
  test_bit1,test_bit2);
  return TRUE;
error5:
  diag("invert error bitsize=%u,size=%u", bitsize,
  test_bit1);
  return TRUE;
}