/* Registers a single file in its size bucket.
 *
 * Empty files are dropped when ignore_empty_flag is set.  When
 * physical_flag is set, a file whose (device, inode) pair already
 * appears in the target bucket is skipped, so each physical file
 * (hard-link group) is counted only once.
 *
 * NOTE: Check for duplicate arguments?
 * TODO: the linear duplicate scan below is O(n) per file - make this
 * less pessimal. */
static void process_file(const char *path, struct stat *sb)
{
    size_t bucket = BUCKET_INDEX(sb->st_size);

    if (ignore_empty_flag && sb->st_size == 0)
        return;

    if (physical_flag) {
        size_t idx;

        for (idx = 0; idx < buckets[bucket].allocated; idx++) {
            int same_dev = buckets[bucket].files[idx].device == sb->st_dev;
            int same_ino = buckets[bucket].files[idx].inode == sb->st_ino;

            if (same_dev && same_ino)
                return; /* already tracked via another hard link */
        }
    }

    init_file(alloc_file(&buckets[bucket]), path, sb);
}
/*! Allocates memory from the heap \param size - number of bytes required \return starting address of the memory on success Null on failure */ void * AllocateFromHeap(int size) { int bucket_size, bucket_index; CACHE_PTR cache_ptr; HEAP_DATA_PTR heap_data_ptr; bucket_size = ALIGN_UP(size + sizeof(HEAP_DATA), 2); if ( bucket_size > MAX_HEAP_BUCKET_SIZE ) { // if the requested size is greater than PAGE_SIZE then allocate from VM UINT32 page_size = ALIGN_UP(bucket_size, VM_PAGE_SHIFT); heap_data_ptr = VM_ALLOC(page_size); if ( heap_data_ptr == NULL ) return NULL; heap_data_ptr->bucket_index = VM_BUCKET; *((UINT32 *)&heap_data_ptr->buffer[0]) = page_size; return &heap_data_ptr->buffer[1]; } bucket_index = BUCKET_INDEX(bucket_size); cache_ptr = &CACHE_FROM_INDEX(bucket_index); heap_data_ptr = (HEAP_DATA_PTR) AllocateBuffer(cache_ptr, CACHE_ALLOC_SLEEP); if ( heap_data_ptr == NULL ) return NULL; heap_data_ptr->bucket_index = bucket_index; return heap_data_ptr->buffer; }
/**
 * Collapse self-intersecting inner loops of a mask spline's feather outline.
 *
 * Feather segments are binned into a 2D grid of buckets spanning the
 * outline's bounding box; each segment is then tested for intersection
 * only against segments sharing its bucket (plus the end/diagonal buckets
 * when a segment crosses bucket boundaries), avoiding an O(n^2) all-pairs
 * test.
 *
 * \param spline             the mask spline (cyclic flag controls wrap-around)
 * \param feather_points     array of 2D feather coordinates, modified in place
 *                           by feather_bucket_check_intersect()
 * \param tot_feather_point  number of entries in \a feather_points
 */
void BKE_mask_spline_feather_collapse_inner_loops(
    MaskSpline *spline, float (*feather_points)[2], const unsigned int tot_feather_point)
{
#define BUCKET_INDEX(co) \
  feather_bucket_index_from_coord(co, min, bucket_scale, buckets_per_side)

  int buckets_per_side, tot_bucket;
  float bucket_size, bucket_scale[2];

  FeatherEdgesBucket *buckets;

  unsigned int i;
  float min[2], max[2];
  float max_delta_x = -1.0f, max_delta_y = -1.0f, max_delta;

  if (tot_feather_point < 4) {
    /* self-intersection works only for quads at least,
     * in other cases polygon can't be self-intersecting anyway */
    return;
  }

  /* find min/max corners of mask to build buckets in that space */
  INIT_MINMAX2(min, max);

  for (i = 0; i < tot_feather_point; i++) {
    unsigned int next = i + 1;
    float delta;

    minmax_v2v2_v2(min, max, feather_points[i]);

    if (next == tot_feather_point) {
      if (spline->flag & MASK_SPLINE_CYCLIC) {
        next = 0;
      }
      else {
        break;
      }
    }

    delta = fabsf(feather_points[i][0] - feather_points[next][0]);
    if (delta > max_delta_x) {
      max_delta_x = delta;
    }

    delta = fabsf(feather_points[i][1] - feather_points[next][1]);
    if (delta > max_delta_y) {
      max_delta_y = delta;
    }
  }

  /* prevent divisions by zero by ensuring bounding box is not collapsed */
  if (max[0] - min[0] < FLT_EPSILON) {
    max[0] += 0.01f;
    min[0] -= 0.01f;
  }

  if (max[1] - min[1] < FLT_EPSILON) {
    max[1] += 0.01f;
    min[1] -= 0.01f;
  }

  /* use dynamically calculated buckets per side, so we likely wouldn't
   * run into a situation when segment doesn't fit two buckets which is
   * pain collecting candidates for intersection */
  max_delta_x /= max[0] - min[0];
  max_delta_y /= max[1] - min[1];

  max_delta = MAX2(max_delta_x, max_delta_y);

  buckets_per_side = min_ii(512, 0.9f / max_delta);

  if (buckets_per_side == 0) {
    /* happens when some segment fills the whole bounding box across some of dimension */
    buckets_per_side = 1;
  }

  tot_bucket = buckets_per_side * buckets_per_side;
  bucket_size = 1.0f / buckets_per_side;

  /* pre-compute multipliers, to save mathematical operations in loops */
  bucket_scale[0] = 1.0f / ((max[0] - min[0]) * bucket_size);
  bucket_scale[1] = 1.0f / ((max[1] - min[1]) * bucket_size);

  /* fill in buckets' edges */
  buckets = MEM_callocN(sizeof(FeatherEdgesBucket) * tot_bucket, "feather buckets");

  for (i = 0; i < tot_feather_point; i++) {
    int start = i, end = i + 1;
    int start_bucket_index, end_bucket_index;

    if (end == tot_feather_point) {
      if (spline->flag & MASK_SPLINE_CYCLIC) {
        end = 0;
      }
      else {
        break;
      }
    }

    start_bucket_index = BUCKET_INDEX(feather_points[start]);
    end_bucket_index = BUCKET_INDEX(feather_points[end]);

    feather_bucket_add_edge(&buckets[start_bucket_index], start, end);

    if (start_bucket_index != end_bucket_index) {
      FeatherEdgesBucket *end_bucket = &buckets[end_bucket_index];
      FeatherEdgesBucket *diagonal_bucket_a, *diagonal_bucket_b;

      feather_bucket_get_diagonal(buckets,
                                  start_bucket_index,
                                  end_bucket_index,
                                  buckets_per_side,
                                  &diagonal_bucket_a,
                                  &diagonal_bucket_b);

      feather_bucket_add_edge(end_bucket, start, end);
      feather_bucket_add_edge(diagonal_bucket_a, start, end);
      /* BUGFIX: the edge was previously added to diagonal_bucket_a twice,
       * leaving diagonal_bucket_b without it, so intersections falling in
       * the second diagonal bucket were silently missed. */
      feather_bucket_add_edge(diagonal_bucket_b, start, end);
    }
  }

  /* check all edges for intersection with edges from their buckets */
  for (i = 0; i < tot_feather_point; i++) {
    int cur_a = i, cur_b = i + 1;
    int start_bucket_index, end_bucket_index;

    FeatherEdgesBucket *start_bucket;

    if (cur_b == tot_feather_point) {
      cur_b = 0;
    }

    start_bucket_index = BUCKET_INDEX(feather_points[cur_a]);
    end_bucket_index = BUCKET_INDEX(feather_points[cur_b]);

    start_bucket = &buckets[start_bucket_index];
    feather_bucket_check_intersect(feather_points, tot_feather_point, start_bucket, cur_a, cur_b);

    if (start_bucket_index != end_bucket_index) {
      FeatherEdgesBucket *end_bucket = &buckets[end_bucket_index];
      FeatherEdgesBucket *diagonal_bucket_a, *diagonal_bucket_b;

      feather_bucket_get_diagonal(buckets,
                                  start_bucket_index,
                                  end_bucket_index,
                                  buckets_per_side,
                                  &diagonal_bucket_a,
                                  &diagonal_bucket_b);

      feather_bucket_check_intersect(feather_points, tot_feather_point, end_bucket, cur_a, cur_b);
      feather_bucket_check_intersect(
          feather_points, tot_feather_point, diagonal_bucket_a, cur_a, cur_b);
      feather_bucket_check_intersect(
          feather_points, tot_feather_point, diagonal_bucket_b, cur_a, cur_b);
    }
  }

  /* free buckets */
  /* cast avoids a signed/unsigned comparison warning: tot_bucket is a
   * non-negative int, i is unsigned */
  for (i = 0; i < (unsigned int)tot_bucket; i++) {
    if (buckets[i].segments) {
      MEM_freeN(buckets[i].segments);
    }
  }

  MEM_freeN(buckets);

#undef BUCKET_INDEX
}