/*
 * Zoltan_Block: simple BLOCK load-balancing method.
 *
 * Assigns consecutive objects to partitions in blocks (via block_part) and
 * builds export lists describing which objects must move.  Only export
 * lists are produced; *num_import is set to -1 to signal that no import
 * lists are computed.
 *
 * Returns a Zoltan error code (ZOLTAN_OK on success).  On success the
 * export arrays are allocated with Zoltan_Special_Malloc and ownership
 * passes to the caller.
 */
int Zoltan_Block(
  ZZ *zz,                           /* The Zoltan structure.                */
  float *part_sizes,                /* Input: Array of size
                                       zz->LB.Num_Global_Parts containing
                                       the percentage of work to be
                                       assigned to each partition.         */
  int *num_import,                  /* Return -1. We use only export lists. */
  ZOLTAN_ID_PTR *import_global_ids, /* Not used.                            */
  ZOLTAN_ID_PTR *import_local_ids,  /* Not used.                            */
  int **import_procs,               /* Not used.                            */
  int **import_to_part,             /* Not used.                            */
  int *num_export,                  /* Output: Number of objects to export. */
  ZOLTAN_ID_PTR *export_global_ids, /* Output: GIDs to export.              */
  ZOLTAN_ID_PTR *export_local_ids,  /* Output: LIDs to export.              */
  int **export_procs,               /* Output: Processors to export to.     */
  int **export_to_part              /* Output: Partitions to export to.     */
)
{
  int ierr = ZOLTAN_OK;
  int i, count, num_obj;
  int wtflag = 0;
  ZOLTAN_ID_PTR global_ids = NULL;
  ZOLTAN_ID_PTR local_ids = NULL;
  int *parts = NULL;
  int *newparts = NULL;
  float *wgts = NULL;
  static char *yo = "Zoltan_Block";

  ZOLTAN_TRACE_ENTER(zz, yo);

  /* No import lists computed. */
  *num_import = -1;
  *export_global_ids = *export_local_ids = NULL;
  *export_procs = *export_to_part = NULL;

  /* Get list of local objects. */
  if (zz->Obj_Weight_Dim > 1) {
    ierr = ZOLTAN_FATAL;
    ZOLTAN_PRINT_ERROR(zz->Proc, yo,
                       "OBJ_WEIGHT_DIM > 1 not supported by LB_METHOD BLOCK.");
    goto End;
  }
  wtflag = (zz->Obj_Weight_Dim > 0 ? 1 : 0);
  ierr = Zoltan_Get_Obj_List(zz, &num_obj, &global_ids, &local_ids, wtflag,
                             &wgts, &parts);
  /* BUG FIX: the return value was previously ignored; on failure the code
   * went on to use possibly-invalid num_obj/parts. */
  if (ierr != ZOLTAN_OK) {
    ZOLTAN_PRINT_ERROR(zz->Proc, yo,
                       "Error returned from Zoltan_Get_Obj_List.");
    goto End;
  }

  /* Compute the new partition numbers. */
  newparts = (int *) ZOLTAN_MALLOC(num_obj * sizeof(int));
  if (num_obj && (!newparts)) {
    ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Memory error.");
    ierr = ZOLTAN_MEMERR;
    goto End;
  }
  block_part(zz, num_obj, wtflag, wgts, part_sizes, newparts);

  /* Check how many partition numbers changed. */
  count = 0;
  for (i = 0; i < num_obj; i++) {
    if (newparts[i] != parts[i])
      ++count;
  }
  (*num_export) = count;

  /* Allocate export lists. */
  if ((*num_export) > 0) {
    if (!Zoltan_Special_Malloc(zz, (void **)export_global_ids, (*num_export),
                               ZOLTAN_SPECIAL_MALLOC_GID)
     || !Zoltan_Special_Malloc(zz, (void **)export_local_ids, (*num_export),
                               ZOLTAN_SPECIAL_MALLOC_LID)
     || !Zoltan_Special_Malloc(zz, (void **)export_procs, (*num_export),
                               ZOLTAN_SPECIAL_MALLOC_INT)
     || !Zoltan_Special_Malloc(zz, (void **)export_to_part, (*num_export),
                               ZOLTAN_SPECIAL_MALLOC_INT)) {
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Memory error.");
      ierr = ZOLTAN_MEMERR;
      goto End;
    }
  }

  /* Loop over objects and fill export lists. */
  count = 0;
  for (i = 0; i < num_obj; i++) {
    if (newparts[i] != parts[i]) {
      /* export_global_ids[count] = global_ids[i]; */
      ZOLTAN_SET_GID(zz, &((*export_global_ids)[count * zz->Num_GID]),
                     &global_ids[i * zz->Num_GID]);
      if (local_ids)
        /* export_local_ids[count] = local_ids[i]; */
        ZOLTAN_SET_LID(zz, &((*export_local_ids)[count * zz->Num_LID]),
                       &local_ids[i * zz->Num_LID]);
      /* Set new partition number. */
      (*export_to_part)[count] = newparts[i];
      /* Processor is derived from partition number. */
      (*export_procs)[count] =
          Zoltan_LB_Part_To_Proc(zz, (*export_to_part)[count],
                                 &global_ids[i * zz->Num_GID]);
      ++count;
    }
  }

End:
  /* Free local memory, but not export lists. */
  ZOLTAN_FREE(&global_ids);
  ZOLTAN_FREE(&local_ids);
  ZOLTAN_FREE(&parts);
  ZOLTAN_FREE(&newparts);
  if (wtflag)
    ZOLTAN_FREE(&wgts);
  ZOLTAN_TRACE_EXIT(zz, yo);
  return ierr;
}
/*===========================================================================*
 *                              block_transfer                               *
 *===========================================================================*/
/*
 * Perform a read or write request on a block device minor.
 *
 * Validates that the starting position and every iovec size are multiples
 * of the card's block size, then copies data between the caller's grants
 * and an internal bounce buffer (copybuff) one block at a time, invoking
 * the host controller's read/write callbacks.
 *
 * Returns the number of bytes transferred, 0 when starting past the end of
 * the device, or a negative-style errno value (ENXIO/EINVAL) on error.
 */
static int block_transfer(
    dev_t minor,            /* minor device number */
    int do_write,           /* read or write? */
    u64_t position,         /* offset on device to read or write */
    endpoint_t endpt,       /* process doing the request */
    iovec_t * iov,          /* pointer to read or write request vector */
    unsigned int nr_req,    /* length of request vector */
    int flags               /* transfer flags */
)
{
	unsigned long counter;
	iovec_t *ciov;		/* Current IO Vector */
	struct device *dev;	/* The device used */
	struct sd_slot *slot;	/* The sd slot the request is pointed to */
	vir_bytes io_size;	/* Size to read/write to/from the iov */
	vir_bytes io_offset;	/* Offset to read/write to/from the iov */
	vir_bytes bytes_written;
	int r, blk_size, i;

	/* Get the current "device" geometry */
	dev = block_part(minor);
	if (dev == NULL) {
		mmc_log_warn(&log,
		    "Transfer requested on unknown device minor(%d)\n", minor);
		/* Unknown device */
		return ENXIO;
	}
	mmc_log_trace(&log, "I/O on minor(%d) %s at 0x%016llx\n", minor,
	    (do_write) ? "Write" : "Read", position);

	slot = get_slot(minor);
	assert(slot);

	if (slot->card.blk_size == 0) {
		mmc_log_warn(&log, "Request on a card with block size of 0\n");
		return EINVAL;
	}
	if (slot->card.blk_size > COPYBUFF_SIZE) {
		mmc_log_warn(&log,
		    "Card block size (%d) exceeds internal buffer size %d\n",
		    slot->card.blk_size, COPYBUFF_SIZE);
		return EINVAL;
	}

	/* It is fully up to the driver to decide on restrictions for the
	 * parameters of transfers, in those cases we return EINVAL */
	if (position % slot->card.blk_size != 0) {
		/* Starting at a block boundary */
		mmc_log_warn(&log, "Requests must start at a block boundary"
		    "(start,block size)=(%016llx,%08x)\n", position,
		    slot->card.blk_size);
		return EINVAL;
	}
	blk_size = slot->card.blk_size;
	bytes_written = 0;

	/* Are we trying to start reading past the end */
	if (position >= dev->dv_size) {
		mmc_log_warn(&log, "start reading past drive size\n");
		return 0;
	}

	ciov = iov;
	/* do some more validation */
	for (counter = 0; counter < nr_req; counter++) {
		assert(ciov != NULL);
		if (ciov->iov_size % blk_size != 0) {
			/* transfer a multiple of blk_size */
			/* BUG FIX: message previously said "must start at a
			 * block boundary" (copy-pasted from the position
			 * check above); this check is about the iov size. */
			mmc_log_warn(&log,
			    "Request sizes must be a multiple of the block "
			    "size (size,block size)=(%08x,%08x)\n",
			    ciov->iov_size, slot->card.blk_size);
			return EINVAL;
		}
		/* iov_size is unsigned; this rejects zero-length vectors. */
		if (ciov->iov_size <= 0) {
			/* BUG FIX: format string had two conversion
			 * specifiers but three arguments were passed. */
			mmc_log_warn(&log,
			    "Invalid iov size for iov %d of %d size %d\n",
			    counter, nr_req, ciov->iov_size);
			return EINVAL;
		}
		ciov++;
	}

	ciov = iov;
	for (counter = 0; counter < nr_req; counter++) {
		/* Assume we are to transfer the amount of data given in the
		 * input/output vector but ensure we are not doing i/o past
		 * our own boundaries */
		io_size = ciov->iov_size;
		io_offset = position + bytes_written;

		/* Check we are not reading/writing past the end */
		if (position + bytes_written + io_size > dev->dv_size) {
			io_size = dev->dv_size - (position + bytes_written);
		}

		mmc_log_trace(&log,
		    "I/O %s request(%d/%d) iov(grant,size,iosize,"
		    "offset)=(%d,%d,%d,%d)\n",
		    (do_write) ? "write" : "read", counter + 1, nr_req,
		    ciov->iov_addr, ciov->iov_size, io_size, io_offset);

		/* transfer max one block at the time */
		for (i = 0; i < io_size / blk_size; i++) {
			if (do_write) {
				/* Read io_size bytes from i/o vector starting
				 * at 0 and write it to out buffer at the
				 * correct offset */
				r = copyfrom(endpt, ciov->iov_addr,
				    i * blk_size, (vir_bytes) copybuff,
				    blk_size);
				if (r != OK) {
					mmc_log_warn(&log,
					    "I/O write error: %s iov(base,size)=(%d,%d)"
					    " at offset=%d\n",
					    strerror(_SIGN r), ciov->iov_addr,
					    ciov->iov_size, io_offset);
					return EINVAL;
				}

				/* write a single block */
				slot->host->write(&slot->card,
				    (dev->dv_base / blk_size) +
				    (io_offset / blk_size) + i, 1, copybuff);
				bytes_written += blk_size;
			} else {
				/* read a single block into copybuff */
				slot->host->read(&slot->card,
				    (dev->dv_base / blk_size) +
				    (io_offset / blk_size) + i, 1, copybuff);

				/* Read io_size bytes from our data at the
				 * correct offset and write it to the output
				 * buffer at 0 */
				r = copyto(endpt, ciov->iov_addr,
				    i * blk_size, (vir_bytes) copybuff,
				    blk_size);
				if (r != OK) {
					mmc_log_warn(&log,
					    "I/O read error: %s iov(base,size)=(%d,%d)"
					    " at offset=%d\n",
					    strerror(_SIGN r), ciov->iov_addr,
					    ciov->iov_size, io_offset);
					return EINVAL;
				}
				bytes_written += blk_size;
			}
		}
		ciov++;
	}
	return bytes_written;
}