/* Release an array, or hand it over to the application-wide cache instead of
 * freeing it. An array is cache-eligible only when caching is enabled, it
 * carries the CCV_REUSABLE flag, and it has a non-zero signature. */
void ccv_array_free(ccv_array_t* array)
{
	int cacheable = ccv_cache_opt && (array->type & CCV_REUSABLE) && array->sig != 0;
	if (cacheable) {
		/* Cached footprint: the header plus the backing store. */
		size_t size = sizeof(ccv_array_t) + array->size * array->rsize;
		ccv_cache_put(&ccv_cache, array->sig, array, size, 1 /* type 1 */);
	} else {
		array->refcount = 0;
		ccfree(array->data);
		ccfree(array);
	}
}
/* Free a managed matrix (dense, sparse, or compressed sparse), or move a
 * dense matrix into the application-wide cache when it is cache-eligible.
 * NOTE(review): this file contains a second, conflicting definition of
 * ccv_matrix_free (with a different sparse-matrix layout) — duplicate
 * symbol; only one of the two revisions should be kept. */
void ccv_matrix_free(ccv_matrix_t* mat)
{
	int type = *(int*)mat;
	/* Unmanaged matrices must be released by their owner, never here. */
	assert(!(type & CCV_UNMANAGED));
	if (type & CCV_MATRIX_DENSE) {
		ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
		dmt->refcount = 0;
		if (!ccv_cache_opt || // we don't enable cache
			!(dmt->type & CCV_REUSABLE) || // or this is not a reusable piece
			dmt->sig == 0 || // or this doesn't have valid signature
			(dmt->type & CCV_NO_DATA_ALLOC)) // or this matrix is allocated as header-only, therefore we cannot cache it
			ccfree(dmt);
		else {
			/* Only these element types have a well-defined cached size. */
			assert(CCV_GET_DATA_TYPE(dmt->type) == CCV_8U ||
				CCV_GET_DATA_TYPE(dmt->type) == CCV_32S ||
				CCV_GET_DATA_TYPE(dmt->type) == CCV_32F ||
				CCV_GET_DATA_TYPE(dmt->type) == CCV_64S ||
				CCV_GET_DATA_TYPE(dmt->type) == CCV_64F);
			size_t size = ccv_compute_dense_matrix_size(dmt->rows, dmt->cols, dmt->type);
			ccv_cache_put(&ccv_cache, dmt->sig, dmt, size, 0 /* type 0 */);
		}
	} else if (type & CCV_MATRIX_SPARSE) {
		ccv_sparse_matrix_t* smt = (ccv_sparse_matrix_t*)mat;
		int i;
		/* Walk every hash bucket; index == -1 marks an empty head slot. */
		for (i = 0; i < CCV_GET_SPARSE_PRIME(smt->prime); i++)
			if (smt->vector[i].index != -1) {
				ccv_dense_vector_t* iter = &smt->vector[i];
				/* The head node lives inside the smt->vector array itself, so
				 * only its data buffer is freed here; chained nodes were
				 * heap-allocated individually and are freed node-by-node. */
				ccfree(iter->data.u8);
				iter = iter->next;
				while (iter != 0) {
					ccv_dense_vector_t* iter_next = iter->next;
					ccfree(iter->data.u8);
					ccfree(iter);
					iter = iter_next;
				}
			}
		ccfree(smt->vector);
		ccfree(smt);
	} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
		/* Compressed sparse matrices are a single allocation. */
		ccv_compressed_sparse_matrix_t* csm = (ccv_compressed_sparse_matrix_t*)mat;
		csm->refcount = 0;
		ccfree(csm);
	}
}
/* Free a managed matrix (dense, sparse, or compressed sparse), or park a
 * dense matrix in the application-wide cache when it is cache-eligible. */
void ccv_matrix_free(ccv_matrix_t* mat)
{
	int type = *(int*)mat;
	/* Unmanaged matrices must be released by their owner, never here. */
	assert(!(type & CCV_UNMANAGED));
	if (type & CCV_MATRIX_DENSE) {
		ccv_dense_matrix_t* dense = (ccv_dense_matrix_t*)mat;
		dense->refcount = 0;
		/* Cache-eligible only when caching is enabled, the matrix is flagged
		 * reusable, it carries a valid signature, and it owns its data
		 * (a header-only matrix cannot be resurrected from the cache). */
		int cacheable = ccv_cache_opt &&
			(dense->type & CCV_REUSABLE) &&
			dense->sig != 0 &&
			!(dense->type & CCV_NO_DATA_ALLOC);
		if (!cacheable)
			ccfree(dense);
		else {
			/* Only these element types have a well-defined cached size. */
			int data_type = CCV_GET_DATA_TYPE(dense->type);
			assert(data_type == CCV_8U || data_type == CCV_32S ||
				data_type == CCV_32F || data_type == CCV_64S ||
				data_type == CCV_64F);
			size_t size = ccv_compute_dense_matrix_size(dense->rows, dense->cols, dense->type);
			ccv_cache_put(&ccv_cache, dense->sig, dense, size, 0 /* type 0 */);
		}
	} else if (type & CCV_MATRIX_SPARSE) {
		ccv_sparse_matrix_t* sparse = (ccv_sparse_matrix_t*)mat;
		int i;
		for (i = 0; i < sparse->size; i++)
			if (sparse->index[i].ifbit > 1)
				// It is a union of index / data, can just free them.
				ccfree(sparse->vector[i].index);
		ccfree(sparse->index);
		ccfree(sparse->vector);
		ccfree(sparse);
	} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
		/* Compressed sparse matrices are a single allocation. */
		ccv_compressed_sparse_matrix_t* compressed = (ccv_compressed_sparse_matrix_t*)mat;
		compressed->refcount = 0;
		ccfree(compressed);
	}
}