Example #1
0
// Return a deep copy of buf: identical shape and strides, with freshly
// allocated host storage holding a copy of the source data.
WEAK buffer_t copy_of_buffer(void *user_context, const buffer_t &buf) {
    buffer_t result = buf;
    size_t buffer_size = full_extent(result);
    // TODO: ERROR RETURN
    result.host = (uint8_t *)halide_malloc(user_context, buffer_size * result.elem_size);
    copy_from_to(user_context, buf, result);
    return result;
}
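
copy_of_buffer above (and the other snippets below) relies on a full_extent() helper to size host data. The following is a minimal sketch of what such a helper could compute for the four-dimensional buffer_t, assuming the allocation has to cover the largest extent * |stride| span over any dimension; the runtime's actual implementation may differ.

// Sketch (assumption): elements spanned by a 4-D buffer_t, taken as the
// largest extent * |stride| over its dimensions. Multiply by elem_size to
// get a byte count, as copy_of_buffer does above.
WEAK size_t full_extent(const buffer_t &buf) {
    size_t result = 1;
    for (int i = 0; i < 4; i++) {
        int32_t stride = buf.stride[i];
        if (stride < 0) {
            stride = -stride;
        }
        size_t span = (size_t)buf.extent[i] * (size_t)stride;
        if (span > result) {
            result = span;
        }
    }
    return result;
}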
Example #2
0
// Walk the recency chain from least to most recently used, evicting entries
// that are not currently in use until the cache fits under max_cache_size.
WEAK void prune_cache() {
#if CACHE_DEBUGGING
    validate_cache();
#endif
    CacheEntry *prune_candidate = least_recently_used;
    while (current_cache_size > max_cache_size &&
           prune_candidate != NULL) {
        CacheEntry *more_recent = prune_candidate->more_recent;
        
        if (prune_candidate->in_use_count == 0) {
            uint32_t h = prune_candidate->hash;
            uint32_t index = h % kHashTableSize;

            // Remove from hash table
            CacheEntry *prev_hash_entry = cache_entries[index];
            if (prev_hash_entry == prune_candidate) {
                cache_entries[index] = prune_candidate->next;
            } else {
                while (prev_hash_entry != NULL && prev_hash_entry->next != prune_candidate) {
                    prev_hash_entry = prev_hash_entry->next;
                }
                halide_assert(NULL, prev_hash_entry != NULL);
                prev_hash_entry->next = prune_candidate->next;
            }

            // Remove from less recent chain.
            if (least_recently_used == prune_candidate) {
                least_recently_used = more_recent;
            }
            if (more_recent != NULL) {
                more_recent->less_recent = prune_candidate->less_recent;
            }

            // Remove from more recent chain.
            if (most_recently_used == prune_candidate) {
                most_recently_used = prune_candidate->less_recent;
            }
            if (prune_candidate->less_recent != NULL) {
                prune_candidate->less_recent->more_recent = more_recent;
            }

            // Decrease cache used amount.
            for (int32_t i = 0; i < prune_candidate->tuple_count; i++) {
                current_cache_size -= full_extent(prune_candidate->buffer(i));
            }

            // Deallocate the entry.
            prune_candidate->destroy();
            halide_free(NULL, prune_candidate);
        }

        prune_candidate = more_recent;
    }
#if CACHE_DEBUGGING
    validate_cache();
#endif
}
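
prune_cache above assumes a CacheEntry that sits in two intrusive lists at once: a singly linked hash-bucket chain (next) and a doubly linked recency chain (less_recent / more_recent). A minimal sketch of the fields the pruning code touches, under that assumption, might look like this; the real entry would also carry the memoized key and its cached buffers.

// Sketch (assumption): only the members referenced by prune_cache are shown.
struct CacheEntry {
    CacheEntry *next;         // next entry in the same hash bucket
    CacheEntry *more_recent;  // towards most_recently_used
    CacheEntry *less_recent;  // towards least_recently_used
    uint32_t hash;            // hash of the entry's key
    uint32_t in_use_count;    // nonzero while a realization still references the entry
    int32_t tuple_count;      // number of cached output buffers

    buffer_t &buffer(int32_t i);  // i-th cached output buffer
    void destroy();               // release storage owned by the entry
};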
Example #3
0
// Copy the host contents of one buffer_t into another of identical shape;
// the asserts require matching element size, extents, and strides.
WEAK void copy_from_to(void *user_context, const buffer_t &from, buffer_t &to) {
    size_t buffer_size = full_extent(from);
    halide_assert(user_context, from.elem_size == to.elem_size);
    for (int i = 0; i < 4; i++) {
        halide_assert(user_context, from.extent[i] == to.extent[i]);
        halide_assert(user_context, from.stride[i] == to.stride[i]);
    }
    memcpy(to.host, from.host, buffer_size * from.elem_size);
}
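
Taken together, copy_of_buffer and copy_from_to form a snapshot/restore pair. A minimal usage sketch, assuming a hypothetical buffer named scratch whose contents should survive being overwritten:

// Sketch (assumption): snapshot a buffer, let other work clobber it,
// then restore the saved contents and free the snapshot's storage.
void with_snapshot(void *user_context, buffer_t &scratch) {
    buffer_t saved = copy_of_buffer(user_context, scratch);
    // ... scratch.host may be overwritten by other work here ...
    copy_from_to(user_context, saved, scratch);
    halide_free(user_context, saved.host);
}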
Example #4
0
// Evict entries from the least recently used end of the recency chain until
// the cache fits under max_cache_size.
WEAK void prune_cache() {
#if CACHE_DEBUGGING
    validate_cache();
#endif
    while (current_cache_size > max_cache_size &&
            least_recently_used != NULL) {
        CacheEntry *lru_entry = least_recently_used;
        uint32_t h = lru_entry->hash;
        uint32_t index = h % kHashTableSize;

        // Remove from hash table.
        CacheEntry *entry = cache_entries[index];
        if (entry == lru_entry) {
            cache_entries[index] = lru_entry->next;
        } else {
            while (entry != NULL && entry->next != lru_entry) {
                entry = entry->next;
            }
            halide_assert(NULL, entry != NULL);
            entry->next = lru_entry->next;
        }
        // Detach the evicted entry from the recency chain.
        least_recently_used = lru_entry->more_recent;
        if (least_recently_used != NULL) {
            least_recently_used->less_recent = NULL;
        }
        if (most_recently_used == lru_entry) {
            most_recently_used = NULL;
        }

        // Decrease cache used amount.
        for (int32_t i = 0; i < lru_entry->tuple_count; i++) {
            current_cache_size -= full_extent(lru_entry->buffer(i));
        }

        // Deallocate the entry.
        lru_entry->destroy();
        halide_free(NULL, lru_entry);
    }
#if CACHE_DEBUGGING
    validate_cache();
#endif
}
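
Both pruning variants bracket their work with validate_cache() when CACHE_DEBUGGING is defined. A sketch of the kind of consistency check such a helper could perform, under the CacheEntry layout assumed above and with cache size accounted in full_extent units, as the pruning loops do:

// Sketch (assumption): walk the recency chain, check that the back-links
// mirror the forward walk, and re-derive the size total that prune_cache
// maintains incrementally.
WEAK void validate_cache() {
    size_t computed_size = 0;
    CacheEntry *entry = least_recently_used;
    CacheEntry *prev = NULL;
    while (entry != NULL) {
        halide_assert(NULL, entry->less_recent == prev);
        for (int32_t i = 0; i < entry->tuple_count; i++) {
            computed_size += full_extent(entry->buffer(i));
        }
        prev = entry;
        entry = entry->more_recent;
    }
    halide_assert(NULL, prev == most_recently_used);
    halide_assert(NULL, computed_size == current_cache_size);
}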