/**
 * Record an access of type access_type at the four addresses a .. a + 3 in
 * bitmap bm. The address a must be 4-byte aligned, so all four addresses
 * fall inside the same second-level bitmap.
 */
static __inline__
void bm_access_4_aligned(struct bitmap* const bm,
                         const Addr a,
                         const BmAccessTypeT access_type)
{
   struct bitmap2* p2;
   struct bitmap1* p1;
   UWord* p0;
   SPLIT_ADDRESS(a);

   tl_assert(bm);

   p2 = bm2_lookup_or_insert(bm, a1);
   p1 = &p2->bm1;
   /* Select the load or the store bitmap of the second-level node. */
   p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
   bm0_set(p0, a0 + 0);
   bm0_set(p0, a0 + 1);
   bm0_set(p0, a0 + 2);
   bm0_set(p0, a0 + 3);
}
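/*
 * Illustrative sketch only, not part of the DRD API: roughly how a
 * bm0_set()-style helper could set bit a0 in the UWord array bm0. The name
 * bm0_set_sketch is hypothetical; the real bm0_set() is defined in
 * drd_bitmap.h and may differ in detail.
 */
static __inline__
void bm0_set_sketch(UWord* const bm0, const UWord a0)
{
   const UWord bits_per_uword = 8 * sizeof(UWord);
   /* Select the word that holds bit a0 and OR in the corresponding mask. */
   bm0[a0 / bits_per_uword] |= (UWord)1 << (a0 % bits_per_uword);
}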
/**
 * Record an access of type access_type at addresses a1 .. a2 - 1 in
 * bitmap bm.
 */
void bm_access_range(struct bitmap* const bm,
                     const Addr a1, const Addr a2,
                     const BmAccessTypeT access_type)
{
   Addr b, b_next;

   tl_assert(bm);
   tl_assert(a1 < a2);
   /* The current implementation of bm_access_range does not work for the */
   /* ADDR0_COUNT highest addresses in the address range. At least on     */
   /* Linux this is not a problem since the upper part of the address     */
   /* space is reserved for the kernel.                                   */
   tl_assert(a2 + ADDR0_COUNT > a2);

   for (b = a1; b < a2; b = b_next)
   {
      Addr b_start;
      Addr b_end;
      struct bitmap2* bm2;
      SPLIT_ADDRESS(b);

      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
      if (b_next > a2)
      {
         b_next = a2;
      }

      bm2 = bm2_lookup_or_insert_exclusive(bm, b1);
      tl_assert(bm2);

      /* Clip the subrange covered by this second-level bitmap to [a1, a2). */
      if ((bm2->addr << ADDR0_BITS) < a1)
         b_start = a1;
      else if ((bm2->addr << ADDR0_BITS) < a2)
         b_start = (bm2->addr << ADDR0_BITS);
      else
         break;
      tl_assert(a1 <= b_start && b_start <= a2);

      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
         b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
      else
         b_end = a2;
      tl_assert(a1 <= b_end && b_end <= a2);
      tl_assert(b_start < b_end);
      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));

      if (access_type == eLoad)
      {
         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
         {
            bm0_set(bm2->bm1.bm0_r, b0);
         }
      }
      else
      {
         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
         {
            bm0_set(bm2->bm1.bm0_w, b0);
         }
      }
   }
}
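/*
 * Illustrative sketch only: how the SPLIT_ADDRESS() macro used above is
 * assumed to divide an address into a second-level bitmap index (a1) and an
 * offset inside that bitmap (a0). split_address_sketch is a hypothetical
 * name; the real macro is defined in drd_bitmap.h.
 */
static __inline__
void split_address_sketch(const Addr a, UWord* const a1, UWord* const a0)
{
   *a0 = a & ADDR0_MASK;   /* offset of the address inside the bitmap2     */
   *a1 = a >> ADDR0_BITS;  /* index used to look up or insert the bitmap2  */
}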