/******************************************************************************
 *                                                                            *
 * Purpose: log (at DEBUG level) a summary of the allocator's free lists:     *
 *          per-bucket free-chunk counts, min/max free chunk size, and the    *
 *          overall used/free chunk statistics                                *
 *                                                                            *
 * Parameters: info - [IN] the shared memory allocator descriptor             *
 *                                                                            *
 * Comments: Fix: all zbx_uint64_t values were previously printed with        *
 *           %u/%d, which is undefined behavior on 64-bit values; they are    *
 *           now printed with ZBX_FS_UI64.                                    *
 *                                                                            *
 ******************************************************************************/
void	zbx_mem_dump_stats(zbx_mem_info_t *info)
{
	void		*chunk;
	int		index;
	zbx_uint64_t	counter, total, total_free = 0;
	zbx_uint64_t	min_size = __UINT64_C(0xffffffffffffffff), max_size = __UINT64_C(0);

	LOCK_INFO;

	zabbix_log(LOG_LEVEL_DEBUG, "=== memory statistics for %s ===", info->mem_descr);

	for (index = 0; index < MEM_BUCKET_COUNT; index++)
	{
		counter = 0;
		chunk = info->buckets[index];

		/* walk the free list of this bucket counting chunks and */
		/* tracking the global min/max free chunk size           */
		while (NULL != chunk)
		{
			counter++;
			min_size = MIN(min_size, CHUNK_SIZE(chunk));
			max_size = MAX(max_size, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		if (counter > 0)
		{
			total_free += counter;
			zabbix_log(LOG_LEVEL_DEBUG, "free chunks of size %2s %3d bytes: " ZBX_FS_UI64,
					index == MEM_BUCKET_COUNT - 1 ? ">=" : "",
					MEM_MIN_BUCKET_SIZE + 8 * index, counter);
		}
	}

	zabbix_log(LOG_LEVEL_DEBUG, "min chunk size: " ZBX_FS_UI64 " bytes", min_size);
	zabbix_log(LOG_LEVEL_DEBUG, "max chunk size: " ZBX_FS_UI64 " bytes", max_size);

	/* every chunk carries 2 size fields of overhead, so the byte difference */
	/* between total and (used + free) payload yields the chunk count       */
	total = (info->total_size - info->used_size - info->free_size) / (2 * MEM_SIZE_FIELD) + 1;

	zabbix_log(LOG_LEVEL_DEBUG, "memory of total size " ZBX_FS_UI64 " bytes fragmented into "
			ZBX_FS_UI64 " chunks", info->total_size, total);
	zabbix_log(LOG_LEVEL_DEBUG, "of those, " ZBX_FS_UI64 " bytes are in " ZBX_FS_UI64 " free chunks",
			info->free_size, total_free);
	zabbix_log(LOG_LEVEL_DEBUG, "of those, " ZBX_FS_UI64 " bytes are in " ZBX_FS_UI64 " used chunks",
			info->used_size, total - total_free);

	zabbix_log(LOG_LEVEL_DEBUG, "================================");

	UNLOCK_INFO;
}
/* Concurrently sweep one abnormal (large-object) chunk: keep it if any mark
 * bit of the current cycle survives, otherwise hand it back as a free chunk. */
static inline void collector_sweep_abnormal_chunk_con(Conclctor *sweeper, Wspace *wspace, Chunk_Header *chunk)
{
  POINTER_SIZE_INT *mark_table;

  assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));

  /* keep only the mark bits belonging to the current allocation cycle */
  mark_table = chunk->table;
  mark_table[0] &= cur_alloc_mask;

  if(mark_table[0] != 0){
    /* the object is live: register it and account its size */
    wspace_reg_live_abnormal_chunk(wspace, chunk);
    sweeper->live_obj_size += CHUNK_SIZE(chunk);
    sweeper->live_obj_num++;
  } else {
    /* dead object: recycle the whole chunk */
    collector_add_free_chunk(sweeper, (Free_Chunk*)chunk);
  }
}
/* Insert a free chunk at the head of the doubly linked free list for */
/* the bucket matching its size.                                      */
static void	mem_link_chunk(zbx_mem_info_t *info, void *chunk)
{
	int	index = mem_bucket_by_size(CHUNK_SIZE(chunk));
	void	*head = info->buckets[index];

	/* the new chunk becomes the list head */
	mem_set_prev_chunk(chunk, NULL);
	mem_set_next_chunk(chunk, head);

	if (NULL != head)
		mem_set_prev_chunk(head, chunk);

	info->buckets[index] = chunk;
}
/* Remove a chunk from the doubly linked free list of its size bucket. */
static void	mem_unlink_chunk(zbx_mem_info_t *info, void *chunk)
{
	int	index = mem_bucket_by_size(CHUNK_SIZE(chunk));
	void	*prev = mem_get_prev_chunk(chunk);
	void	*next = mem_get_next_chunk(chunk);
	void	**prev_next_field, **next_prev_field;

	/* field that pointed at 'chunk': either the previous chunk's next */
	/* pointer or, when 'chunk' was the list head, the bucket slot     */
	prev_next_field = mem_ptr_to_next_field(prev, &info->buckets[index]);
	*prev_next_field = next;

	next_prev_field = mem_ptr_to_prev_field(next);
	if (NULL != next_prev_field)
		*next_prev_field = prev;
}
size_t malloc_usable_size(void* p) { return p ? CHUNK_SIZE(MEM_TO_CHUNK(p)) - OVERHEAD : 0; }
/**
 * Extract metadata from a PNG file into 'info'.
 *
 * Reads the IHDR chunk for the "Technical" group (dimensions, bit depth,
 * color mode, compression, interlace mode) and — unless only the fastest
 * info was requested — walks the chunk list collecting tEXt/zTXt comment
 * chunks into a "Comment" group. zTXt payloads are zlib-inflated with a
 * size cap as DoS protection.
 *
 * @param info metadata collector; its path() names the file to parse
 * @param what bitmask of requested info classes (KFileMetaInfo flags)
 * @return false when the file is remote, unopenable or shorter than the
 *         29-byte IHDR prefix; true otherwise (even on a truncated walk)
 */
bool KPngPlugin::readInfo( KFileMetaInfo& info, uint what)
{
    if ( info.path().isEmpty() ) // remote file
        return false;

    QFile f(info.path());
    if ( !f.open(IO_ReadOnly) )
        return false;

    QIODevice::Offset fileSize = f.size();
    if (fileSize < 29) return false;
    // the technical group will be read from the first 29 bytes. If the file
    // is smaller, we can't even read this.

    bool readComments = false;
    if (what & (KFileMetaInfo::Fastest | KFileMetaInfo::DontCare | KFileMetaInfo::ContentInfo))
        readComments = true;
    else
        fileSize = 29; // No need to read more

    // one extra byte so the buffer never ends exactly on a key byte
    uchar *data = new uchar[fileSize+1];
    f.readBlock(reinterpret_cast<char*>(data), fileSize);
    data[fileSize]='\n';

    // find the start: the 8-byte PNG signature
    if (data[0] == 137 && data[1] == 80 && data[2] == 78 && data[3] == 71 &&
        data[4] == 13 && data[5] == 10 && data[6] == 26 && data[7] == 10 )
    {
        // ok
        // the IHDR chunk should be the first
        if (!strncmp((char*)&data[12], "IHDR", 4))
        {
            // we found it, get the dimensions (big-endian 32-bit values)
            ulong x,y;
            x = (data[16]<<24) + (data[17]<<16) + (data[18]<<8) + data[19];
            y = (data[20]<<24) + (data[21]<<16) + (data[22]<<8) + data[23];

            uint type = data[25];
            uint bpp = data[24];

            kdDebug(7034) << "dimensions " << x << "*" << y << endl;

            // the bpp are only per channel, so we need to multiply the with
            // the channel count
            switch (type)
            {
                case 0: break;           // Grayscale
                case 2: bpp *= 3; break; // RGB
                case 3: break;           // palette
                case 4: bpp *= 2; break; // grayscale w. alpha
                case 6: bpp *= 4; break; // RGBA
                default: // we don't get any sensible value here
                    bpp = 0;
            }

            KFileMetaInfoGroup techgroup = appendGroup(info, "Technical");

            appendItem(techgroup, "Dimensions", QSize(x, y));
            appendItem(techgroup, "BitDepth", bpp);
            appendItem(techgroup, "ColorMode",
                       (type < sizeof(colors)/sizeof(colors[0]))
                       ? i18n(colors[data[25]]) : i18n("Unknown"));
            appendItem(techgroup, "Compression",
                       (data[26] < sizeof(compressions)/sizeof(compressions[0]))
                       ? i18n(compressions[data[26]]) : i18n("Unknown"));
            appendItem(techgroup, "InterlaceMode",
                       (data[28] < sizeof(interlaceModes)/sizeof(interlaceModes[0]))
                       ? i18n(interlaceModes[data[28]]) : i18n("Unknown"));
        }

        // look for a tEXt chunk
        if (readComments)
        {
            uint index = 8;
            index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
            KFileMetaInfoGroup commentGroup = appendGroup(info, "Comment");

            while(index<fileSize-12)
            {
                // skip forward to the next tEXt/zTXt chunk, stopping at IEND
                while (index < fileSize - 12 &&
                       strncmp((char*)CHUNK_TYPE(data,index), "tEXt", 4) &&
                       strncmp((char*)CHUNK_TYPE(data,index), "zTXt", 4))
                {
                    if (!strncmp((char*)CHUNK_TYPE(data,index), "IEND", 4))
                        goto end;

                    index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
                }

                if (index < fileSize - 12)
                {
                    // we found a tEXt or zTXt field
                    // get the key, it's a null terminated string at the
                    // chunk start
                    uchar* key = &CHUNK_DATA(data,index,0);

                    int keysize=0;
                    for (;key[keysize]!=0; keysize++)
                        // look if we reached the end of the file
                        // (it might be corrupted)
                        if (8+index+keysize>=fileSize)
                            goto end;

                    QByteArray arr;
                    if(!strncmp((char*)CHUNK_TYPE(data,index), "zTXt", 4)) {
                        kdDebug(7034) << "We found a zTXt field\n";
                        // we get the compression method after the key
                        uchar* compressionMethod = &CHUNK_DATA(data,index,keysize+1);
                        if ( *compressionMethod != 0x00 ) {
                            // then it isn't zlib compressed and we are sunk
                            kdDebug(7034) << "Non-standard compression method." << endl;
                            goto end;
                        }
                        // compressed string after the compression technique spec
                        uchar* compressedText = &CHUNK_DATA(data, index, keysize+2);
                        uint compressedTextSize = CHUNK_SIZE(data, index)-keysize-2;

                        // security check, also considering overflow wraparound from the addition --
                        // we may endup with a /smaller/ index if we wrap all the way around
                        uint firstIndex = (uint)(compressedText - data);
                        uint onePastLastIndex = firstIndex + compressedTextSize;

                        if ( onePastLastIndex > fileSize || onePastLastIndex <= firstIndex)
                            goto end;

                        uLongf uncompressedLen = compressedTextSize * 2; // just a starting point
                        int zlibResult;
                        do {
                            arr.resize(uncompressedLen);
                            zlibResult = uncompress((Bytef*)arr.data(), &uncompressedLen,
                                                    compressedText, compressedTextSize);
                            if (Z_OK == zlibResult) {
                                // then it is all OK
                                arr.resize(uncompressedLen);
                            } else if (Z_BUF_ERROR == zlibResult) {
                                // the uncompressedArray needs to be larger
                                // kdDebug(7034) << "doubling size for decompression" << endl;
                                uncompressedLen *= 2;

                                // DoS protection: cap the inflated size at
                                // 131072 bytes (128 KiB)
                                if ( uncompressedLen > 131072 )
                                    break;
                            } else {
                                // something bad happened
                                goto end;
                            }
                        } while (Z_BUF_ERROR == zlibResult);

                        if (Z_OK != zlibResult)
                            goto end;
                    } else if (!strncmp((char*)CHUNK_TYPE(data,index), "tEXt", 4)) {
                        kdDebug(7034) << "We found a tEXt field\n";
                        // the text comes after the key, but isn't null terminated
                        uchar* text = &CHUNK_DATA(data,index, keysize+1);
                        uint textsize = CHUNK_SIZE(data, index)-keysize-1;

                        // security check, also considering overflow wraparound from the addition --
                        // we may endup with a /smaller/ index if we wrap all the way around
                        uint firstIndex = (uint)(text - data);
                        uint onePastLastIndex = firstIndex + textsize;

                        if ( onePastLastIndex > fileSize || onePastLastIndex <= firstIndex)
                            goto end;

                        arr.resize(textsize);
                        arr = QByteArray(textsize).duplicate((const char*)text, textsize);
                    } else {
                        kdDebug(7034) << "We found a field, not expected though\n";
                        goto end;
                    }

                    appendItem(commentGroup,
                               QString(reinterpret_cast<char*>(key)),
                               QString(arr));

                    kdDebug(7034) << "adding " << key << " / " << QString(arr) << endl;

                    index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
                }
            }
        }
    }
end:
    delete[] data;
    return true;
}
/* Return the chunk backing 'ptr' to the free lists, coalescing it with   */
/* its immediate neighbours when they are free. Each chunk is framed by a */
/* size field of MEM_SIZE_FIELD bytes on both sides, which is what the    */
/* pointer arithmetic below steps over. Note: pointer arithmetic on       */
/* void * relies on the GCC extension treating it like char *.            */
static void	__mem_free(zbx_mem_info_t *info, void *ptr)
{
	void		*chunk;
	void		*prev_chunk, *next_chunk;
	zbx_uint64_t	chunk_size;
	int		prev_free, next_free;

	/* step back over the leading size field to reach the chunk itself */
	chunk = ptr - MEM_SIZE_FIELD;
	chunk_size = CHUNK_SIZE(chunk);

	info->used_size -= chunk_size;
	info->free_size += chunk_size;

	/* see if we can merge with previous and next chunks */

	next_chunk = chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD;

	/* bounds-check against the pool limits before touching neighbours */
	prev_free = (info->lo_bound < chunk && FREE_CHUNK(chunk - MEM_SIZE_FIELD));
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (prev_free && next_free)
	{
		/* merge all three chunks: the two pairs of size fields */
		/* between them become free payload                     */
		info->free_size += 4 * MEM_SIZE_FIELD;

		prev_chunk = chunk - MEM_SIZE_FIELD - CHUNK_SIZE(chunk - MEM_SIZE_FIELD) - MEM_SIZE_FIELD;

		chunk_size += 4 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk) + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, prev_chunk);
		mem_unlink_chunk(info, next_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (prev_free)
	{
		/* merge with the previous chunk only */
		info->free_size += 2 * MEM_SIZE_FIELD;

		prev_chunk = chunk - MEM_SIZE_FIELD - CHUNK_SIZE(chunk - MEM_SIZE_FIELD) - MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk);

		mem_unlink_chunk(info, prev_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (next_free)
	{
		/* merge with the next chunk only */
		info->free_size += 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else
	{
		/* no neighbours to merge with: just mark free and relink */
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
}
/* Resize the allocation whose payload is 'old' to at least 'size' bytes.  */
/* Returns a pointer to the (possibly moved) chunk header, or NULL if no   */
/* memory could be found — mirroring __mem_malloc(), which also returns a  */
/* chunk pointer rather than a payload pointer.                            */
/*                                                                         */
/* Strategy, in order: shrink in place (splitting off the surplus), grow   */
/* in place by absorbing a free right-hand neighbour, allocate-copy-free,  */
/* and finally a last-resort path that frees first and then reallocates,   */
/* undoing a left-merge performed by __mem_free() if that allocation still */
/* fails.                                                                  */
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	/* step back over the leading size field to the chunk header */
	chunk = old - MEM_SIZE_FIELD;
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD;
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge the shrink surplus with the free next chunk */
			info->used_size -= chunk_size;
			info->used_size += size;
			info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;
			info->free_size -= size + 2 * MEM_SIZE_FIELD;

			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */
			info->used_size -= chunk_size;
			info->used_size += size;
			info->free_size += chunk_size;
			info->free_size -= size + 2 * MEM_SIZE_FIELD;

			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		/* grow in place by absorbing the free chunk to the right */
		info->used_size -= chunk_size;
		info->free_size += chunk_size;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */
		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		/* simple path: allocate elsewhere, copy the payload, free old */
		memcpy(new_chunk + MEM_SIZE_FIELD, chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		/* last resort: free the old chunk first (which may coalesce   */
		/* it with neighbours) hoping the merged space satisfies the   */
		/* request; the payload is preserved in a heap-allocated copy  */
		void	*tmp = NULL;

		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		new_chunk = __mem_malloc(info, size);

		if (NULL != new_chunk)
		{
			memcpy(new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);
		}
		else
		{
			/* reallocation failed even after freeing: restore the */
			/* old payload in place so the caller's data survives  */
			int	index;
			void	*last_chunk;

			/* NOTE(review): this assumes the freed chunk is still at */
			/* the head of its bucket — true only because nothing     */
			/* allocated between __mem_free() and here. Verify if the */
			/* allocator is ever made concurrent.                     */
			index = mem_bucket_by_size(chunk_size);
			last_chunk = info->buckets[index];

			mem_unlink_chunk(info, last_chunk);

			if (chunk != last_chunk)
			{
				/* The chunk was merged with a free space on the left during */
				/* __mem_free() operation. The left chunk must be restored to */
				/* its previous state to avoid memory leaks. */
				/* We can safely ignore if the chunk was merged on the right */
				/* as it will just increase the size of allocated chunk. */
				zbx_uint64_t	left_size;

				left_size = chunk - last_chunk - 2 * MEM_SIZE_FIELD;

				mem_set_chunk_size(chunk, CHUNK_SIZE(chunk) - left_size - 2 * MEM_SIZE_FIELD);
				mem_set_chunk_size(last_chunk, left_size);

				mem_link_chunk(info, last_chunk);
			}

			memcpy(chunk + MEM_SIZE_FIELD, tmp, chunk_size);
		}

		zbx_free(tmp);

		/* NULL here signals out-of-memory to the caller */
		return new_chunk;
	}
}
/* Allocate a chunk of at least 'size' bytes from the shared memory pool.  */
/* Small requests are served from the exact-size buckets; anything that    */
/* maps to the last bucket is satisfied first-fit from its list. Returns   */
/* the chunk header pointer (payload starts MEM_SIZE_FIELD bytes in), or   */
/* NULL when no chunk is large enough.                                     */
/*                                                                         */
/* Fix: the zabbix_log() calls printed zbx_uint64_t values with %u, which  */
/* is undefined behavior on 64-bit values; they now use ZBX_FS_UI64.       */
static void	*__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size)
{
	int		index;
	void		*chunk;
	zbx_uint64_t	chunk_size;

	size = mem_proper_alloc_size(size);

	/* try to find an appropriate chunk in special buckets */

	index = mem_bucket_by_size(size);

	while (index < MEM_BUCKET_COUNT - 1 && NULL == info->buckets[index])
		index++;

	chunk = info->buckets[index];

	if (index == MEM_BUCKET_COUNT - 1)
	{
		/* otherwise, find a chunk big enough according to first-fit strategy */

		int		counter = 0;
		zbx_uint64_t	skip_min = __UINT64_C(0xffffffffffffffff), skip_max = __UINT64_C(0);

		while (NULL != chunk && CHUNK_SIZE(chunk) < size)
		{
			counter++;
			skip_min = MIN(skip_min, CHUNK_SIZE(chunk));
			skip_max = MAX(skip_max, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		/* don't log errors if malloc can return null in low memory situations */
		if (0 == info->allow_oom)
		{
			if (NULL == chunk)
				zabbix_log(LOG_LEVEL_CRIT, "__mem_malloc: skipped %d asked " ZBX_FS_UI64
						" skip_min " ZBX_FS_UI64 " skip_max " ZBX_FS_UI64,
						counter, size, skip_min, skip_max);
			else if (counter >= 100)
				zabbix_log(LOG_LEVEL_DEBUG, "__mem_malloc: skipped %d asked " ZBX_FS_UI64
						" skip_min " ZBX_FS_UI64 " skip_max " ZBX_FS_UI64
						" size " ZBX_FS_UI64,
						counter, size, skip_min, skip_max, CHUNK_SIZE(chunk));
		}
	}

	if (NULL == chunk)
		return NULL;

	chunk_size = CHUNK_SIZE(chunk);
	mem_unlink_chunk(info, chunk);

	/* either use the full chunk or split it */

	if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
	{
		/* remainder would be too small to be a chunk of its own */
		info->used_size += chunk_size;
		info->free_size -= chunk_size;

		mem_set_used_chunk_size(chunk, chunk_size);
	}
	else
	{
		void		*new_chunk;
		zbx_uint64_t	new_chunk_size;

		new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
		new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
		mem_set_chunk_size(new_chunk, new_chunk_size);
		mem_link_chunk(info, new_chunk);

		info->used_size += size;
		info->free_size -= chunk_size;
		info->free_size += new_chunk_size;

		mem_set_used_chunk_size(chunk, size);
	}

	return chunk;
}
/* Resize the allocation whose payload is 'old' to at least 'size' bytes.  */
/* Returns the (possibly moved) chunk header pointer, or NULL when the     */
/* pool provably cannot satisfy the request even after freeing 'old'.      */
/*                                                                         */
/* Strategy, in order: shrink in place (splitting off the surplus), grow   */
/* in place by absorbing a free right-hand neighbour, allocate-copy-free,  */
/* and finally free-then-reallocate, which is attempted only after         */
/* checking that the coalesced space will be large enough — so the inner   */
/* __mem_malloc() there must succeed.                                      */
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	/* step back over the leading size field to the chunk header */
	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge the shrink surplus with the free next chunk */
			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */
			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		/* grow in place by absorbing the free chunk to the right */
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */
		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		/* simple path: allocate elsewhere, copy the payload, free old */
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one                */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;

		/* preserve the payload in a heap copy across the free/malloc */
		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		/* guaranteed to succeed by the feasibility check above */
		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}
int main (int argc, char *argv[]) { int i, n, fd, c; unsigned long chunk_size[2]; int rank, noProcessors, done; int error; off_t offset; char **chunk_buf; char *read_buf; struct stat stat_buf; ssize_t ret; char *filename = "/mnt/lustre/write_disjoint"; int numloops = 1000; int random = 0; error = MPI_Init(&argc, &argv); if (error != MPI_SUCCESS) rprintf(-1, -1, "MPI_Init failed: %d\n", error); /* Parse command line options */ while ((c = getopt(argc, argv, "f:n:")) != EOF) { switch (c) { case 'f': filename = optarg; break; case 'n': numloops = strtoul(optarg, NULL, 0); break; } } MPI_Comm_size(MPI_COMM_WORLD, &noProcessors); MPI_Comm_rank(MPI_COMM_WORLD, &rank); chunk_buf = malloc(noProcessors * sizeof(chunk_buf[0])); for (i=0; i < noProcessors; i++) { chunk_buf[i] = malloc(CHUNK_MAX_SIZE); memset(chunk_buf[i], 'A'+ i, CHUNK_MAX_SIZE); } read_buf = malloc(noProcessors * CHUNK_MAX_SIZE); if (rank == 0) { fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0666); if (fd < 0) rprintf(rank, -1, "open() returned %s\n", strerror(errno)); } MPI_Barrier(MPI_COMM_WORLD); fd = open(filename, O_RDWR); if (fd < 0) rprintf(rank, -1, "open() returned %s\n", strerror(errno)); for (n = 0; n < numloops; n++) { /* reset the environment */ if (rank == 0) { ret = truncate(filename, 0); if (ret != 0) rprintf(rank, n, "truncate() returned %s\n", strerror(errno) ); random = rand(); } MPI_Bcast(&random, 1, MPI_INT, 0, MPI_COMM_WORLD); CHUNK_SIZE(n) = random % CHUNK_MAX_SIZE; if (n % 1000 == 0 && rank == 0) printf("loop %d: chunk_size %lu\n", n, CHUNK_SIZE(n)); if (stat(filename, &stat_buf) < 0) rprintf(rank, n, "error stating %s: %s\n", filename, strerror(errno)); if (stat_buf.st_size != 0) rprintf(rank, n, "filesize = %lu. 
" "Should be zero after truncate\n", stat_buf.st_size); MPI_Barrier(MPI_COMM_WORLD); /* Do the race */ offset = rank * CHUNK_SIZE(n); lseek(fd, offset, SEEK_SET); done = 0; do { ret = write(fd, chunk_buf[rank] + done, CHUNK_SIZE(n) - done); if (ret < 0 && errno != EINTR) rprintf(rank, n, "write() returned %s\n", strerror(errno)); if (ret > 0) done += ret; } while (done != CHUNK_SIZE(n)); MPI_Barrier(MPI_COMM_WORLD); /* Check the result */ if (stat(filename, &stat_buf) < 0) rprintf(rank, n, "error stating %s: %s\n", filename, strerror(errno)); if (stat_buf.st_size != CHUNK_SIZE(n) * noProcessors) { if (n > 0) printf("loop %d: chunk_size %lu, " "file size was %lu\n", n - 1, CHUNK_SIZE(n - 1), CHUNK_SIZE(n - 1) *noProcessors); rprintf(rank, n, "invalid file size %lu" " instead of %lu = %lu * %u\n", (unsigned long)stat_buf.st_size, CHUNK_SIZE(n) * noProcessors, CHUNK_SIZE(n), noProcessors); } if (rank == 0) { if (lseek(fd, 0, SEEK_SET) < 0) rprintf(rank, n, "error seeking to 0: %s\n", strerror(errno)); done = 0; do { ret = read(fd, read_buf + done, CHUNK_SIZE(n) * noProcessors - done); if (ret < 0) rprintf(rank, n, "read returned %s\n", strerror(errno)); done += ret; } while (done != CHUNK_SIZE(n) * noProcessors); for (i = 0; i < noProcessors; i++) { char command[4096]; int j, rc; if (!memcmp(read_buf + (i * CHUNK_SIZE(n)), chunk_buf[i], CHUNK_SIZE(n))) continue; /* print out previous chunk sizes */ if (n > 0) printf("loop %d: chunk_size %lu\n", n - 1, CHUNK_SIZE(n - 1)); printf("loop %d: chunk %d corrupted " "with chunk_size %lu, page_size %d\n", n, i, CHUNK_SIZE(n), getpagesize()); printf("ranks:\tpage boundry\tchunk boundry\t" "page boundry\n"); for (j = 1 ; j < noProcessors; j++) { int b = j * CHUNK_SIZE(n); printf("%c -> %c:\t%d\t%d\t%d\n", 'A' + j - 1, 'A' + j, b & ~(getpagesize()-1), b, (b + getpagesize()) & ~(getpagesize()-1)); } sprintf(command, "od -Ad -a %s", filename); rc = system(command); rprintf(0, n, "data check error - exiting\n"); } } 
MPI_Barrier(MPI_COMM_WORLD); } printf("Finished after %d loops\n", n); MPI_Finalize(); return 0; }