bool IndexPseudoPyramidTree::remove(const Point& point) { // Find bucket the point would belong to int searchKey = computePseudoPyramidValue(numDimensions, point, minPoint, maxPoint, scaleFactors, cumulativeSFProducts); OneDMap::iterator keyValue = hashMap.find(searchKey); // Bucket has been found, point MIGHT be stored in structure if (keyValue != hashMap.end()) { // Search through points in bucket to see if it contains the given point IndexList& indices = keyValue->second; IndexList::iterator pointIt = indices.begin(); for (pointIt; (pointIt != indices.end()); pointIt++) { if (point == points[*pointIt]) { break; } } // If the point was found if (pointIt != indices.end()) { // Add the index to the lsit emptyElementIndices.push_back(*pointIt); // Remove index pointing to the point in the bucket removeElementAtIterator(indices, pointIt); // If the amount of empty elements of the point array is // higher than a certain threshold, clean up unused points if (maxEmptyElements != -1 && // NOTE: -1 means the list should NOT be cleaned emptyElementIndices.size() >= maxEmptyElements) { if (cleanupProcedure == CLEANUP_PROC_REBUILD) { rebuild(); } else { defragment(); } } return true; } // Point is not contained in bucket -- cannot remove else { return false; } } // No bucket found, so point is not being stored in the structure else { return false; } }
int h_rmdir(const char *path) { /*Remove a directory ищем папку по указанному пути дефрагментация */ printf("rmdir\n"); int cnt = sizeof(path)/sizeof(char); char* newstr; for (int i = 0; i < N; i++) { if ((fs+i)->name != NULL) { newstr = strncpy(newstr,(fs+i)->name,cnt); if (!strcmp(newstr,path)) { //if dir then delete from fs //else delete from bytes then delete from fs if (!fs[i].isdir) { int start = fs[i].first_cluster*4000; for (int j = start; j < start + fs[i].size; j++) { bytes[j] = NULL; } for (int k = fs[i].first_cluster; k <= fs[i].last_cluster; k++) { //free(pointers[k]); memset(pointers+k,'\0',sizeof(file)); } } memset(fs+i,'\0',sizeof(file)); //free(fs[i]); //fs[i] = NULL; } } } defragment(); }
int h_write(const char *path, const char *buf, size_t size,
            off_t offset, struct fuse_file_info *fi) {
    /* Write data to an open file.
     *
     * FUSE contract: write should return exactly the number of bytes
     * requested except on error (unless 'direct_io' is set).
     *
     * Strategy here: if the file already has contents, its old table
     * entry and clusters are released and the data is rewritten at the
     * current free-cluster frontier, followed by a defragment pass.
     *
     * NOTE(review): `offset` and `fi` are ignored, as in the original --
     * every write replaces the file from the start; confirm against the
     * mount options before relying on partial writes. */
    printf("write\n");

    file* g = get_file(path);
    int needsDefrag = 0;
    if (g->size > 0) {
        needsDefrag = 1;
        /* Re-register the file as a fresh, empty entry and release the
         * clusters the old contents occupied. */
        fs[first_free_file].isdir = g->isdir;
        fs[first_free_file].name = g->name;
        fs[first_free_file].size = 0;
        first_free_file++;
        for (int i = g->first_cluster; i <= g->last_cluster; i++) {
            memset(pointers + i, '\0', sizeof(file));
            bytes[i * 4000] = '\0';  /* was NULL: bytes[] holds chars */
        }
        free(g);
    }

    file* f = get_file(path);
    f->size += size;
    f->first_cluster = first_free_cluster;

    /* Number of 4000-byte clusters needed, rounded up.
     * BUG FIX: the original destructively did `size -= clust` (so the
     * function later returned size-4000 instead of the requested count,
     * violating the FUSE contract) and its round-up check used INTEGER
     * division assigned to a float, so the ceiling never triggered. */
    const size_t clust = 4000;
    size_t cnt_clusters = (size + clust - 1) / clust;
    if (cnt_clusters == 0) {
        cnt_clusters = 1;  /* even an empty write occupies one cluster */
    }
    f->last_cluster = f->first_cluster + (int)cnt_clusters - 1;

    /* BUG FIX: the original looped memcpy(bytes[j], buf[j], 1), passing
     * char VALUES where memcpy expects pointers, and indexed buf with the
     * destination offset. Copy the payload in one call instead. */
    memcpy(bytes + f->first_cluster * 4000, buf, size);

    /* Advance the free-cluster frontier past this file. */
    first_free_cluster = f->last_cluster + 1;

    if (needsDefrag) {
        defragment();
    }
    return (int)size;  /* exactly the number of bytes requested */
}