ccv_dense_matrix_t* ccv_dense_matrix_new(int rows, int cols, int type, void* data, uint64_t sig) { ccv_dense_matrix_t* mat; if (ccv_cache_opt && sig != 0 && !data && !(type & CCV_NO_DATA_ALLOC)) { uint8_t type; mat = (ccv_dense_matrix_t*)ccv_cache_out(&ccv_cache, sig, &type); if (mat) { assert(type == 0); mat->type |= CCV_GARBAGE; // set the flag so the upper level function knows this is from recycle-bin mat->refcount = 1; return mat; } } if (type & CCV_NO_DATA_ALLOC) { mat = (ccv_dense_matrix_t*)ccmalloc(sizeof(ccv_dense_matrix_t)); mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE | CCV_NO_DATA_ALLOC) & ~CCV_GARBAGE; mat->data.u8 = data; } else { mat = (ccv_dense_matrix_t*)(data ? data : ccmalloc(ccv_compute_dense_matrix_size(rows, cols, type))); mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE) & ~CCV_GARBAGE; mat->type |= data ? CCV_UNMANAGED : CCV_REUSABLE; // it still could be reusable because the signature could be derived one. mat->data.u8 = (unsigned char*)(mat + 1); } mat->sig = sig; mat->rows = rows; mat->cols = cols; mat->step = (cols * CCV_GET_DATA_TYPE_SIZE(type) * CCV_GET_CHANNEL(type) + 3) & -4; mat->refcount = 1; return mat; }
/* Free any ccv matrix (dense, sparse, or compressed sparse).  The first int
 * of every ccv matrix struct is its type word, which identifies the concrete
 * kind.  Dense matrices with a valid signature may be recycled into the
 * application cache instead of being released. */
void ccv_matrix_free(ccv_matrix_t* mat)
{
	int type = *(int*)mat;
	// unmanaged matrices wrap caller-owned memory; freeing them here would be a bug
	assert(!(type & CCV_UNMANAGED));
	if (type & CCV_MATRIX_DENSE)
	{
		ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
		dmt->refcount = 0;
		if (!ccv_cache_opt || // we don't enable cache
			!(dmt->type & CCV_REUSABLE) || // or this is not a reusable piece
			dmt->sig == 0 || // or this doesn't have valid signature
			(dmt->type & CCV_NO_DATA_ALLOC)) // or this matrix is allocated as header-only, therefore we cannot cache it
			ccfree(dmt);
		else {
			// only these element types are accepted by the cache
			assert(CCV_GET_DATA_TYPE(dmt->type) == CCV_8U ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_32S ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_32F ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_64S ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_64F);
			size_t size = ccv_compute_dense_matrix_size(dmt->rows, dmt->cols, dmt->type);
			ccv_cache_put(&ccv_cache, dmt->sig, dmt, size, 0 /* type 0 */);
		}
	} else if (type & CCV_MATRIX_SPARSE) {
		ccv_sparse_matrix_t* smt = (ccv_sparse_matrix_t*)mat;
		int i;
		// walk every hash bucket; index == -1 marks an empty slot
		for (i = 0; i < CCV_GET_SPARSE_PRIME(smt->prime); i++)
			if (smt->vector[i].index != -1)
			{
				// the head vector is embedded in the bucket array: free only its
				// payload, then free each chained vector (payload + node)
				ccv_dense_vector_t* iter = &smt->vector[i];
				ccfree(iter->data.u8);
				iter = iter->next;
				while (iter != 0)
				{
					ccv_dense_vector_t* iter_next = iter->next;
					ccfree(iter->data.u8);
					ccfree(iter);
					iter = iter_next;
				}
			}
		ccfree(smt->vector);
		ccfree(smt);
	} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
		// compressed sparse matrices are a single allocation
		ccv_compressed_sparse_matrix_t* csm = (ccv_compressed_sparse_matrix_t*)mat;
		csm->refcount = 0;
		ccfree(csm);
	}
}
ccv_dense_matrix_t* ccv_dense_matrix_new(int rows, int cols, int type, void* data, uint64_t sig) { ccv_dense_matrix_t* mat; if (ccv_cache_opt && sig != 0 && !data && !(type & CCV_NO_DATA_ALLOC)) { uint8_t type; mat = (ccv_dense_matrix_t*)ccv_cache_out(&ccv_cache, sig, &type); if (mat) { assert(type == 0); mat->type |= CCV_GARBAGE; // set the flag so the upper level function knows this is from recycle-bin mat->refcount = 1; return mat; } } if (type & CCV_NO_DATA_ALLOC) { mat = (ccv_dense_matrix_t*)ccmalloc(sizeof(ccv_dense_matrix_t)); mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE | CCV_NO_DATA_ALLOC) & ~CCV_GARBAGE; mat->data.u8 = data; } else { const size_t hdr_size = (sizeof(ccv_dense_matrix_t) + 15) & -16; mat = (ccv_dense_matrix_t*)(data ? data : ccmalloc(ccv_compute_dense_matrix_size(rows, cols, type))); mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE) & ~CCV_GARBAGE; mat->type |= data ? CCV_UNMANAGED : CCV_REUSABLE; // it still could be reusable because the signature could be derived one. mat->data.u8 = (unsigned char*)mat + hdr_size; } mat->sig = sig; #if CCV_NNC_TENSOR_TFB mat->resides = CCV_TENSOR_CPU_MEMORY; mat->format = CCV_TENSOR_FORMAT_NHWC; mat->datatype = CCV_GET_DATA_TYPE(type); mat->channels = CCV_GET_CHANNEL(type); mat->reserved = 0; #endif mat->rows = rows; mat->cols = cols; mat->step = CCV_GET_STEP(cols, type); mat->refcount = 1; return mat; }
/* Free any ccv matrix (dense, sparse, or compressed sparse).  The first int
 * of every ccv matrix struct is its type word, which identifies the concrete
 * kind.  Dense matrices with a valid signature may be recycled into the
 * application cache instead of being released. */
void ccv_matrix_free(ccv_matrix_t* mat)
{
	int type = *(int*)mat;
	// unmanaged matrices wrap caller-owned memory; freeing them here would be a bug
	assert(!(type & CCV_UNMANAGED));
	if (type & CCV_MATRIX_DENSE)
	{
		ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
		dmt->refcount = 0;
		if (!ccv_cache_opt || // we don't enable cache
			!(dmt->type & CCV_REUSABLE) || // or this is not a reusable piece
			dmt->sig == 0 || // or this doesn't have valid signature
			(dmt->type & CCV_NO_DATA_ALLOC)) // or this matrix is allocated as header-only, therefore we cannot cache it
			ccfree(dmt);
		else {
			// only these element types are accepted by the cache
			assert(CCV_GET_DATA_TYPE(dmt->type) == CCV_8U ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_32S ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_32F ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_64S ||
				   CCV_GET_DATA_TYPE(dmt->type) == CCV_64F);
			size_t size = ccv_compute_dense_matrix_size(dmt->rows, dmt->cols, dmt->type);
			ccv_cache_put(&ccv_cache, dmt->sig, dmt, size, 0 /* type 0 */);
		}
	} else if (type & CCV_MATRIX_SPARSE) {
		ccv_sparse_matrix_t* smt = (ccv_sparse_matrix_t*)mat;
		int i;
		// walk the open-addressed vector table; ifbit > 1 appears to mark an
		// occupied slot — NOTE(review): confirm against the sparse matrix
		// implementation, which is not visible in this file
		for (i = 0; i < smt->size; i++)
		{
			if (smt->index[i].ifbit > 1)
				ccfree(smt->vector[i].index); // It is a union of index / data, can just free them.
		}
		ccfree(smt->index);
		ccfree(smt->vector);
		ccfree(smt);
	} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
		// compressed sparse matrices are a single allocation
		ccv_compressed_sparse_matrix_t* csm = (ccv_compressed_sparse_matrix_t*)mat;
		csm->refcount = 0;
		ccfree(csm);
	}
}