  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type dstride[GFC_MAX_DIMENSIONS];
  const GFC_INTEGER_16 * restrict base;
  GFC_INTEGER_16 * restrict dest;
  index_type rank;
  index_type n;
  index_type len;
  index_type delta;
  index_type dim;
  int continue_loop;

  /* Make dim zero based to avoid confusion.  */
  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;

  len = GFC_DESCRIPTOR_EXTENT(array,dim);
  if (len < 0)
    len = 0;
  delta = GFC_DESCRIPTOR_STRIDE(array,dim);

  for (n = 0; n < dim; n++)
    {
      sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
      extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);

      if (extent[n] < 0)
        extent[n] = 0;
    }
  for (n = dim; n < rank; n++)
    {
      sstride[n] = GFC_DESCRIPTOR_STRIDE(array, n + 1);
void
pack_c8 (gfc_array_c8 *ret, const gfc_array_c8 *array,
         const gfc_array_l1 *mask, const gfc_array_c8 *vector)
{
  /* r.* indicates the return array.  */
  index_type rstride0;
  GFC_COMPLEX_8 * restrict rptr;
  /* s.* indicates the source array.  */
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type sstride0;
  const GFC_COMPLEX_8 *sptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  int zero_sized;
  index_type n;
  index_type dim;
  index_type nelem;
  index_type total;
  int mask_kind;

  dim = GFC_DESCRIPTOR_RANK (array);

  mptr = mask->base_addr;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Do not convert a NULL pointer as we use test for NULL below.  */
      if (mptr)
        mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");

  zero_sized = 0;
  for (n = 0; n < dim; n++)
    {
      count[n] = 0;
      extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
      if (extent[n] <= 0)
        zero_sized = 1;
      sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
      mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
    }
  if (sstride[0] == 0)
    sstride[0] = 1;
  if (mstride[0] == 0)
    mstride[0] = mask_kind;

  if (zero_sized)
    sptr = NULL;
  else
    sptr = array->base_addr;

  if (ret->base_addr == NULL || unlikely (compile_options.bounds_check))
    {
      /* Count the elements, either for allocating memory or
         for bounds checking.  */

      if (vector != NULL)
        {
          /* The return array will have as many
             elements as there are in VECTOR.  */
          total = GFC_DESCRIPTOR_EXTENT(vector,0);
          if (total < 0)
            {
              total = 0;
              vector = NULL;
            }
        }
      else
        {
          /* We have to count the true elements in MASK.  */
          total = count_0 (mask);
        }

      if (ret->base_addr == NULL)
        {
          /* Setup the array descriptor.  */
          GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1);

          ret->offset = 0;

          /* xmalloc allocates a single byte for zero size.  */
          ret->base_addr = xmalloc (sizeof (GFC_COMPLEX_8) * total);

          if (total == 0)
            return;
        }
      else
        {
          /* We come here because of range checking.  */
          index_type ret_extent;

          ret_extent = GFC_DESCRIPTOR_EXTENT(ret,0);
          if (total != ret_extent)
            runtime_error ("Incorrect extent in return value of PACK intrinsic;"
                           " is %ld, should be %ld", (long int) total,
                           (long int) ret_extent);
        }
    }

  rstride0 = GFC_DESCRIPTOR_STRIDE(ret,0);
  if (rstride0 == 0)
    rstride0 = 1;
  sstride0 = sstride[0];
  mstride0 = mstride[0];
  rptr = ret->base_addr;

  while (sptr && mptr)
    {
      /* Test this element.  */
      if (*mptr)
        {
          /* Add it.  */
          *rptr = *sptr;
          rptr += rstride0;
        }
      /* Advance to the next element.  */
      sptr += sstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          sptr -= sstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= dim)
            {
              /* Break out of the loop.  */
              sptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              sptr += sstride[n];
              mptr += mstride[n];
            }
        }
    }

  /* Add any remaining elements from VECTOR.  */
  if (vector)
    {
      n = GFC_DESCRIPTOR_EXTENT(vector,0);
      nelem = ((rptr - ret->base_addr) / rstride0);
      if (n > nelem)
        {
          sstride0 = GFC_DESCRIPTOR_STRIDE(vector,0);
          if (sstride0 == 0)
            sstride0 = 1;

          sptr = vector->base_addr + sstride0 * nelem;
          n -= nelem;
          while (n--)
            {
              *rptr = *sptr;
              rptr += rstride0;
              sptr += sstride0;
            }
        }
    }
}
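/* Illustrative sketch (not from libgfortran): the core PACK semantics that
   pack_c8 implements, reduced to a contiguous rank-1 double _Complex array
   with unit strides.  Elements where the mask is true are copied first; if a
   VECTOR argument is longer than the number of true elements, its tail pads
   the result, exactly as the loop above does after the main copy.  The
   function name, parameters, and caller-provided buffers are hypothetical.  */
#include <complex.h>
#include <stddef.h>

static size_t
pack_rank1_sketch (double complex *ret, const double complex *src,
                   const _Bool *mask, size_t n,
                   const double complex *vector, size_t vlen)
{
  size_t nout = 0;

  /* Copy the elements selected by MASK.  */
  for (size_t i = 0; i < n; i++)
    if (mask[i])
      ret[nout++] = src[i];

  /* Pad from VECTOR, if it supplies more elements than were packed.  */
  for (size_t i = nout; i < vlen; i++)
    ret[i] = vector[i];

  return vlen > nout ? vlen : nout;  /* Number of elements written.  */
}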
void
internal_unpack_1 (gfc_array_i1 * d, const GFC_INTEGER_1 * src)
{
  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type stride[GFC_MAX_DIMENSIONS];
  index_type stride0;
  index_type dim;
  index_type dsize;
  GFC_INTEGER_1 * restrict dest;
  int n;

  dest = d->base_addr;
  if (src == dest || !src)
    return;

  dim = GFC_DESCRIPTOR_RANK (d);
  dsize = 1;
  for (n = 0; n < dim; n++)
    {
      count[n] = 0;
      stride[n] = GFC_DESCRIPTOR_STRIDE(d,n);
      extent[n] = GFC_DESCRIPTOR_EXTENT(d,n);
      if (extent[n] <= 0)
        return;

      if (dsize == stride[n])
        dsize *= extent[n];
      else
        dsize = 0;
    }

  if (dsize != 0)
    {
      memcpy (dest, src, dsize * sizeof (GFC_INTEGER_1));
      return;
    }

  stride0 = stride[0];

  while (dest)
    {
      /* Copy the data.  */
      *dest = *(src++);
      /* Advance to the next element.  */
      dest += stride0;
      count[0]++;
      /* Advance to the next source element.  */
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          dest -= stride[n] * extent[n];
          n++;
          if (n == dim)
            {
              dest = NULL;
              break;
            }
          else
            {
              count[n]++;
              dest += stride[n];
            }
        }
    }
}
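/* Illustrative sketch (not from libgfortran): the contiguity test used above.
   If the stride of each dimension equals the product of the extents of the
   lower dimensions, the array occupies one dense block and a single memcpy
   suffices; otherwise the element-by-element loop runs.  The descriptor
   struct here is a hypothetical stand-in for the real gfc descriptor.  */
#include <stddef.h>

struct dim_sketch { ptrdiff_t stride; ptrdiff_t extent; };

static ptrdiff_t
dense_size_sketch (const struct dim_sketch *dims, int rank)
{
  ptrdiff_t dsize = 1;
  for (int n = 0; n < rank; n++)
    {
      if (dims[n].extent <= 0)
        return 0;               /* Empty array: nothing to copy.  */
      if (dsize != dims[n].stride)
        return 0;               /* Gap or reordering: not contiguous.  */
      dsize *= dims[n].extent;
    }
  return dsize;                 /* Number of elements in the dense block.  */
}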
extern GFC_INTEGER_4 minloc2_4_s4 (gfc_array_s4 * const restrict,
                                   GFC_LOGICAL_4 back, gfc_charlen_type);
export_proto(minloc2_4_s4);

GFC_INTEGER_4
minloc2_4_s4 (gfc_array_s4 * const restrict array, GFC_LOGICAL_4 back,
              gfc_charlen_type len)
{
  index_type ret;
  index_type sstride;
  index_type extent;
  const GFC_INTEGER_4 *src;
  const GFC_INTEGER_4 *minval;
  index_type i;

  extent = GFC_DESCRIPTOR_EXTENT(array,0);
  if (extent <= 0)
    return 0;

  sstride = GFC_DESCRIPTOR_STRIDE(array,0) * len;
  ret = 1;
  src = array->base_addr;
  minval = NULL;
  for (i=1; i<=extent; i++)
    {
      if (minval == NULL || (back ? compare_fcn (src, minval, len) <= 0
                                  : compare_fcn (src, minval, len) < 0))
        {
          ret = i;
          minval = src;
void
spread_r4 (gfc_array_r4 *ret, const gfc_array_r4 *source,
           const index_type along, const index_type pncopies)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rdelta = 0;
  index_type rrank;
  index_type rs;
  GFC_REAL_4 *rptr;
  GFC_REAL_4 * restrict dest;
  /* s.* indicates the source array.  */
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type sstride0;
  index_type srank;
  const GFC_REAL_4 *sptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;
  index_type ncopies;

  srank = GFC_DESCRIPTOR_RANK(source);

  rrank = srank + 1;
  if (rrank > GFC_MAX_DIMENSIONS)
    runtime_error ("return rank too large in spread()");

  if (along > rrank)
    runtime_error ("dim outside of rank in spread()");

  ncopies = pncopies;

  if (ret->base_addr == NULL)
    {
      size_t ub, stride;

      /* The front end has signalled that we need to populate the
         return array descriptor.  */
      ret->dtype = (source->dtype & ~GFC_DTYPE_RANK_MASK) | rrank;
      dim = 0;
      rs = 1;
      for (n = 0; n < rrank; n++)
        {
          stride = rs;
          if (n == along - 1)
            {
              ub = ncopies - 1;
              rdelta = rs;
              rs *= ncopies;
            }
          else
            {
              count[dim] = 0;
              extent[dim] = GFC_DESCRIPTOR_EXTENT(source,dim);
              sstride[dim] = GFC_DESCRIPTOR_STRIDE(source,dim);
              rstride[dim] = rs;

              ub = extent[dim] - 1;
              rs *= extent[dim];
              dim++;
            }
          GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride);
        }
      ret->offset = 0;

      /* xmallocarray allocates a single byte for zero size.  */
      ret->base_addr = xmallocarray (rs, sizeof(GFC_REAL_4));
      if (rs <= 0)
        return;
    }
  else
    {
      int zero_sized;

      zero_sized = 0;

      dim = 0;
      if (GFC_DESCRIPTOR_RANK(ret) != rrank)
        runtime_error ("rank mismatch in spread()");

      if (unlikely (compile_options.bounds_check))
        {
          for (n = 0; n < rrank; n++)
            {
              index_type ret_extent;

              ret_extent = GFC_DESCRIPTOR_EXTENT(ret,n);
              if (n == along - 1)
                {
                  rdelta = GFC_DESCRIPTOR_STRIDE(ret,n);

                  if (ret_extent != ncopies)
                    runtime_error("Incorrect extent in return value of SPREAD"
                                  " intrinsic in dimension %ld: is %ld,"
                                  " should be %ld", (long int) n+1,
                                  (long int) ret_extent, (long int) ncopies);
                }
              else
                {
                  count[dim] = 0;
                  extent[dim] = GFC_DESCRIPTOR_EXTENT(source,dim);
                  if (ret_extent != extent[dim])
                    runtime_error("Incorrect extent in return value of SPREAD"
                                  " intrinsic in dimension %ld: is %ld,"
                                  " should be %ld", (long int) n+1,
                                  (long int) ret_extent,
                                  (long int) extent[dim]);

                  if (extent[dim] <= 0)
                    zero_sized = 1;
                  sstride[dim] = GFC_DESCRIPTOR_STRIDE(source,dim);
                  rstride[dim] = GFC_DESCRIPTOR_STRIDE(ret,n);
                  dim++;
                }
            }
        }
      else
        {
          for (n = 0; n < rrank; n++)
            {
              if (n == along - 1)
                {
                  rdelta = GFC_DESCRIPTOR_STRIDE(ret,n);
                }
              else
                {
                  count[dim] = 0;
                  extent[dim] = GFC_DESCRIPTOR_EXTENT(source,dim);
                  if (extent[dim] <= 0)
                    zero_sized = 1;
                  sstride[dim] = GFC_DESCRIPTOR_STRIDE(source,dim);
                  rstride[dim] = GFC_DESCRIPTOR_STRIDE(ret,n);
                  dim++;
                }
            }
        }

      if (zero_sized)
        return;

      if (sstride[0] == 0)
        sstride[0] = 1;
    }
  sstride0 = sstride[0];
  rstride0 = rstride[0];
  rptr = ret->base_addr;
  sptr = source->base_addr;

  while (sptr)
    {
      /* Spread this element.  */
      dest = rptr;
      for (n = 0; n < ncopies; n++)
        {
          *dest = *sptr;
          dest += rdelta;
        }
      /* Advance to the next element.  */
      sptr += sstride0;
      rptr += rstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          sptr -= sstride[n] * extent[n];
          rptr -= rstride[n] * extent[n];
          n++;
          if (n >= srank)
            {
              /* Break out of the loop.  */
              sptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              sptr += sstride[n];
              rptr += rstride[n];
            }
        }
    }
}
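/* Illustrative sketch (not from libgfortran): what spread_r4 computes, shown
   for the simplest case of a contiguous rank-1 source spread along DIM=1 into
   a column-major rank-2 result of shape (ncopies, n).  Each source element is
   written ncopies times at unit stride, which corresponds to the inner copy
   loop with rdelta above.  Function and parameter names are hypothetical.  */
#include <stddef.h>

static void
spread_dim1_sketch (float *ret, const float *source, size_t n, size_t ncopies)
{
  for (size_t j = 0; j < n; j++)          /* Walk the source elements.  */
    for (size_t i = 0; i < ncopies; i++)  /* Replicate along the new dim.  */
      ret[j * ncopies + i] = source[j];
}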
void
unpack0_i8 (gfc_array_i8 *ret, const gfc_array_i8 *vector,
            const gfc_array_l1 *mask, const GFC_INTEGER_8 *fptr)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rs;
  GFC_INTEGER_8 * restrict rptr;
  /* v.* indicates the vector array.  */
  index_type vstride0;
  GFC_INTEGER_8 *vptr;
  /* Value for field, this is constant.  */
  const GFC_INTEGER_8 fval = *fptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;

  int empty;
  int mask_kind;

  empty = 0;

  mptr = mask->base_addr;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Do not convert a NULL pointer as we use test for NULL below.  */
      if (mptr)
        mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");

  if (ret->base_addr == NULL)
    {
      /* The front end has signalled that we need to populate the
         return array descriptor.  */
      dim = GFC_DESCRIPTOR_RANK (mask);
      rs = 1;
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          GFC_DIMENSION_SET(ret->dim[n], 0,
                            GFC_DESCRIPTOR_EXTENT(mask,n) - 1, rs);
          extent[n] = GFC_DESCRIPTOR_EXTENT(ret,n);
          empty = empty || extent[n] <= 0;
          rstride[n] = GFC_DESCRIPTOR_STRIDE(ret,n);
          mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
          rs *= extent[n];
        }
      ret->offset = 0;
      ret->base_addr = xmallocarray (rs, sizeof (GFC_INTEGER_8));
    }
  else
    {
      dim = GFC_DESCRIPTOR_RANK (ret);
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          extent[n] = GFC_DESCRIPTOR_EXTENT(ret,n);
          empty = empty || extent[n] <= 0;
          rstride[n] = GFC_DESCRIPTOR_STRIDE(ret,n);
          mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
        }
      if (rstride[0] == 0)
        rstride[0] = 1;
    }

  if (empty)
    return;

  if (mstride[0] == 0)
    mstride[0] = 1;

  vstride0 = GFC_DESCRIPTOR_STRIDE(vector,0);
  if (vstride0 == 0)
    vstride0 = 1;
  rstride0 = rstride[0];
  mstride0 = mstride[0];
  rptr = ret->base_addr;
  vptr = vector->base_addr;

  while (rptr)
    {
      if (*mptr)
        {
          /* From vector.  */
          *rptr = *vptr;
          vptr += vstride0;
        }
      else
        {
          /* From field.  */
          *rptr = fval;
        }
      /* Advance to the next element.  */
      rptr += rstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= dim)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              mptr += mstride[n];
            }
        }
    }
}
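/* Illustrative sketch (not from libgfortran): the UNPACK(VECTOR, MASK, FIELD)
   semantics that unpack0_i8 implements for a scalar FIELD, reduced to a
   contiguous rank-1 case.  True mask positions consume successive VECTOR
   elements; false positions receive the constant field value.  Names and
   parameters are hypothetical.  */
#include <stddef.h>
#include <stdint.h>

static void
unpack_scalar_field_sketch (int64_t *ret, const int64_t *vector,
                            const _Bool *mask, size_t n, int64_t field)
{
  size_t v = 0;                 /* Next unconsumed VECTOR element.  */
  for (size_t i = 0; i < n; i++)
    ret[i] = mask[i] ? vector[v++] : field;
}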
void
date_and_time (char *__date, char *__time, char *__zone,
               gfc_array_i4 *__values, GFC_INTEGER_4 __date_len,
               GFC_INTEGER_4 __time_len, GFC_INTEGER_4 __zone_len)
{
  int i;
  char date[DATE_LEN + 1];
  char timec[TIME_LEN + 1];
  char zone[ZONE_LEN + 1];
  GFC_INTEGER_4 values[VALUES_SIZE];
  time_t lt;
  struct tm local_time;
  struct tm UTC_time;

  long usecs;

  if (!gf_gettime (&lt, &usecs))
    {
      values[7] = usecs / 1000;

      localtime_r (&lt, &local_time);
      gmtime_r (&lt, &UTC_time);

      /* All arguments can be derived from VALUES.  */
      values[0] = 1900 + local_time.tm_year;
      values[1] = 1 + local_time.tm_mon;
      values[2] = local_time.tm_mday;
      values[3] = (local_time.tm_min - UTC_time.tm_min
                   + 60 * (local_time.tm_hour - UTC_time.tm_hour
                           + 24 * (local_time.tm_yday - UTC_time.tm_yday)));
      values[4] = local_time.tm_hour;
      values[5] = local_time.tm_min;
      values[6] = local_time.tm_sec;

      if (__date)
        snprintf (date, DATE_LEN + 1, "%04d%02d%02d",
                  values[0], values[1], values[2]);
      if (__time)
        snprintf (timec, TIME_LEN + 1, "%02d%02d%02d.%03d",
                  values[4], values[5], values[6], values[7]);

      if (__zone)
        snprintf (zone, ZONE_LEN + 1, "%+03d%02d",
                  values[3] / 60, abs (values[3] % 60));
    }
  else
    {
      memset (date, ' ', DATE_LEN);
      date[DATE_LEN] = '\0';

      memset (timec, ' ', TIME_LEN);
      timec[TIME_LEN] = '\0';

      memset (zone, ' ', ZONE_LEN);
      zone[ZONE_LEN] = '\0';

      for (i = 0; i < VALUES_SIZE; i++)
        values[i] = - GFC_INTEGER_4_HUGE;
    }

  /* Copy the values into the arguments.  */
  if (__values)
    {
      index_type len, delta, elt_size;

      elt_size = GFC_DESCRIPTOR_SIZE (__values);
      len = GFC_DESCRIPTOR_EXTENT(__values,0);
      delta = GFC_DESCRIPTOR_STRIDE(__values,0);
      if (delta == 0)
        delta = 1;

      if (unlikely (len < VALUES_SIZE))
        runtime_error ("Incorrect extent in VALUE argument to"
                       " DATE_AND_TIME intrinsic: is %ld, should"
                       " be >=%ld", (long int) len, (long int) VALUES_SIZE);

      /* Cope with different type kinds.  */
      if (elt_size == 4)
        {
          GFC_INTEGER_4 *vptr4 = __values->base_addr;

          for (i = 0; i < VALUES_SIZE; i++, vptr4 += delta)
            *vptr4 = values[i];
        }
      else if (elt_size == 8)
        {
          GFC_INTEGER_8 *vptr8 = (GFC_INTEGER_8 *)__values->base_addr;

          for (i = 0; i < VALUES_SIZE; i++, vptr8 += delta)
            {
              if (values[i] == - GFC_INTEGER_4_HUGE)
                *vptr8 = - GFC_INTEGER_8_HUGE;
              else
                *vptr8 = values[i];
            }
        }
      else
        abort ();
    }

  if (__zone)
    fstrcpy (__zone, __zone_len, zone, ZONE_LEN);

  if (__time)
    fstrcpy (__time, __time_len, timec, TIME_LEN);

  if (__date)
    fstrcpy (__date, __date_len, date, DATE_LEN);
}
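/* Illustrative sketch (not from libgfortran): how the VALUES(4) entry above,
   the offset of local time from UTC in minutes, follows from the two
   broken-down times.  This mirrors the minute/hour/yday arithmetic in
   date_and_time; it assumes the POSIX localtime_r/gmtime_r functions are
   available, and the function name is hypothetical.  */
#include <time.h>

static int
utc_offset_minutes_sketch (time_t t)
{
  struct tm local_time, utc_time;

  localtime_r (&t, &local_time);
  gmtime_r (&t, &utc_time);

  /* Minute difference, plus 60 per hour difference, plus 1440 per day
     difference (the day term handles offsets that cross midnight).  */
  return local_time.tm_min - utc_time.tm_min
         + 60 * (local_time.tm_hour - utc_time.tm_hour
                 + 24 * (local_time.tm_yday - utc_time.tm_yday));
}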
static void
stat_i4_sub_0 (char *name, gfc_array_i4 *sarray, GFC_INTEGER_4 *status,
               gfc_charlen_type name_len,
               int is_lstat __attribute__ ((unused)))
{
  int val;
  char *str;
  struct stat sb;

  /* If the rank of the array is not 1, abort.  */
  if (GFC_DESCRIPTOR_RANK (sarray) != 1)
    runtime_error ("Array rank of SARRAY is not 1.");

  /* If the array is too small, abort.  */
  if (GFC_DESCRIPTOR_EXTENT(sarray,0) < 13)
    runtime_error ("Array size of SARRAY is too small.");

  /* Trim trailing spaces from name.  */
  while (name_len > 0 && name[name_len - 1] == ' ')
    name_len--;

  /* Make a null terminated copy of the string.  */
  str = gfc_alloca (name_len + 1);
  memcpy (str, name, name_len);
  str[name_len] = '\0';

  /* On platforms that don't provide lstat(), we use stat() instead.  */
#ifdef HAVE_LSTAT
  if (is_lstat)
    val = lstat(str, &sb);
  else
#endif
    val = stat(str, &sb);

  if (val == 0)
    {
      index_type stride = GFC_DESCRIPTOR_STRIDE(sarray,0);

      /* Device ID  */
      sarray->base_addr[0 * stride] = sb.st_dev;
      /* Inode number  */
      sarray->base_addr[1 * stride] = sb.st_ino;
      /* File mode  */
      sarray->base_addr[2 * stride] = sb.st_mode;
      /* Number of (hard) links  */
      sarray->base_addr[3 * stride] = sb.st_nlink;
      /* Owner's uid  */
      sarray->base_addr[4 * stride] = sb.st_uid;
      /* Owner's gid  */
      sarray->base_addr[5 * stride] = sb.st_gid;
      /* ID of device containing directory entry for file (0 if not
         available)  */
#if HAVE_STRUCT_STAT_ST_RDEV
      sarray->base_addr[6 * stride] = sb.st_rdev;
#else
      sarray->base_addr[6 * stride] = 0;
#endif
      /* File size (bytes)  */
      sarray->base_addr[7 * stride] = sb.st_size;
      /* Last access time  */
      sarray->base_addr[8 * stride] = sb.st_atime;
      /* Last modification time  */
      sarray->base_addr[9 * stride] = sb.st_mtime;
      /* Last file status change time  */
      sarray->base_addr[10 * stride] = sb.st_ctime;
      /* Preferred I/O block size (-1 if not available)  */
#if HAVE_STRUCT_STAT_ST_BLKSIZE
      sarray->base_addr[11 * stride] = sb.st_blksize;
#else
      sarray->base_addr[11 * stride] = -1;
#endif
      /* Number of blocks allocated (-1 if not available)  */
#if HAVE_STRUCT_STAT_ST_BLOCKS
      sarray->base_addr[12 * stride] = sb.st_blocks;
#else
      sarray->base_addr[12 * stride] = -1;
#endif
    }

  if (status != NULL)
    *status = (val == 0) ? 0 : errno;
}
GFC_INTEGER_1 *
internal_pack_1 (gfc_array_i1 * source)
{
  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type stride[GFC_MAX_DIMENSIONS];
  index_type stride0;
  index_type dim;
  index_type ssize;
  const GFC_INTEGER_1 *src;
  GFC_INTEGER_1 * restrict dest;
  GFC_INTEGER_1 *destptr;
  int n;
  int packed;

  /* TODO: Investigate how we can figure out if this is a temporary
     since the stride=0 thing has been removed from the frontend.  */

  dim = GFC_DESCRIPTOR_RANK (source);
  ssize = 1;
  packed = 1;
  for (n = 0; n < dim; n++)
    {
      count[n] = 0;
      stride[n] = GFC_DESCRIPTOR_STRIDE(source,n);
      extent[n] = GFC_DESCRIPTOR_EXTENT(source,n);
      if (extent[n] <= 0)
        {
          /* Do nothing.  */
          packed = 1;
          break;
        }

      if (ssize != stride[n])
        packed = 0;

      ssize *= extent[n];
    }

  if (packed)
    return source->base_addr;

  /* Allocate storage for the destination.  */
  destptr = xmallocarray (ssize, sizeof (GFC_INTEGER_1));
  dest = destptr;
  src = source->base_addr;
  stride0 = stride[0];

  while (src)
    {
      /* Copy the data.  */
      *(dest++) = *src;
      /* Advance to the next element.  */
      src += stride0;
      count[0]++;
      /* Advance to the next source element.  */
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          src -= stride[n] * extent[n];
          n++;
          if (n == dim)
            {
              src = NULL;
              break;
            }
          else
            {
              count[n]++;
              src += stride[n];
            }
        }
    }
  return destptr;
}
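/* Illustrative sketch (not from libgfortran): the "odometer" traversal used by
   internal_pack_1 and by most of the loops in this section.  count[] tracks
   the position in each dimension; when a dimension overflows, the pointer is
   rewound by stride*extent for that dimension and the next dimension is
   advanced.  The descriptor layout, the MAX_RANK_SKETCH limit, and the visit
   callback are hypothetical stand-ins.  */
#include <stddef.h>

#define MAX_RANK_SKETCH 7

static void
walk_strided_sketch (const char *base, size_t elem_size,
                     const ptrdiff_t *stride, const ptrdiff_t *extent,
                     int rank, void (*visit) (const void *))
{
  ptrdiff_t count[MAX_RANK_SKETCH] = { 0 };
  const char *p = base;

  if (rank <= 0 || rank > MAX_RANK_SKETCH)
    return;
  for (int n = 0; n < rank; n++)
    if (extent[n] <= 0)
      return;                       /* Empty array: nothing to visit.  */

  while (p)
    {
      visit (p);                    /* Process the current element.  */
      p += stride[0] * elem_size;   /* Step along the innermost dimension.  */
      count[0]++;
      int n = 0;
      while (count[n] == extent[n])
        {
          count[n] = 0;
          p -= stride[n] * extent[n] * elem_size;  /* Rewind this dimension.  */
          n++;
          if (n == rank)
            {
              p = NULL;             /* Past the last element: stop.  */
              break;
            }
          count[n]++;
          p += stride[n] * elem_size;
        }
    }
}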
void
fstat_i8_sub (GFC_INTEGER_8 *unit, gfc_array_i8 *sarray, GFC_INTEGER_8 *status)
{
  int val;
  struct stat sb;

  /* If the rank of the array is not 1, abort.  */
  if (GFC_DESCRIPTOR_RANK (sarray) != 1)
    runtime_error ("Array rank of SARRAY is not 1.");

  /* If the array is too small, abort.  */
  if (GFC_DESCRIPTOR_EXTENT(sarray,0) < 13)
    runtime_error ("Array size of SARRAY is too small.");

  /* Convert Fortran unit number to C file descriptor.  */
  val = unit_to_fd ((int) *unit);
  if (val >= 0)
    val = fstat(val, &sb);

  if (val == 0)
    {
      index_type stride = GFC_DESCRIPTOR_STRIDE(sarray,0);

      /* Device ID  */
      sarray->base_addr[0] = sb.st_dev;
      /* Inode number  */
      sarray->base_addr[stride] = sb.st_ino;
      /* File mode  */
      sarray->base_addr[2 * stride] = sb.st_mode;
      /* Number of (hard) links  */
      sarray->base_addr[3 * stride] = sb.st_nlink;
      /* Owner's uid  */
      sarray->base_addr[4 * stride] = sb.st_uid;
      /* Owner's gid  */
      sarray->base_addr[5 * stride] = sb.st_gid;
      /* ID of device containing directory entry for file (0 if not
         available)  */
#if HAVE_STRUCT_STAT_ST_RDEV
      sarray->base_addr[6 * stride] = sb.st_rdev;
#else
      sarray->base_addr[6 * stride] = 0;
#endif
      /* File size (bytes)  */
      sarray->base_addr[7 * stride] = sb.st_size;
      /* Last access time  */
      sarray->base_addr[8 * stride] = sb.st_atime;
      /* Last modification time  */
      sarray->base_addr[9 * stride] = sb.st_mtime;
      /* Last file status change time  */
      sarray->base_addr[10 * stride] = sb.st_ctime;
      /* Preferred I/O block size (-1 if not available)  */
#if HAVE_STRUCT_STAT_ST_BLKSIZE
      sarray->base_addr[11 * stride] = sb.st_blksize;
#else
      sarray->base_addr[11 * stride] = -1;
#endif
      /* Number of blocks allocated (-1 if not available)  */
#if HAVE_STRUCT_STAT_ST_BLOCKS
      sarray->base_addr[12 * stride] = sb.st_blocks;
#else
      sarray->base_addr[12 * stride] = -1;
#endif
    }

  if (status != NULL)
    *status = (val == 0) ? 0 : errno;
}
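/* Illustrative sketch (not from libgfortran): the mapping from struct stat to
   the 13-element SARRAY filled by fstat_i8_sub and the stat/lstat variants,
   shown for a unit-stride rank-1 buffer.  The optional st_rdev, st_blksize
   and st_blocks members are assumed present here; the real code guards them
   with configure macros as shown above.  The function name is hypothetical.  */
#include <stdint.h>
#include <sys/stat.h>

static void
fill_stat_values_sketch (int64_t v[13], const struct stat *sb)
{
  v[0]  = sb->st_dev;      /* Device ID.  */
  v[1]  = sb->st_ino;      /* Inode number.  */
  v[2]  = sb->st_mode;     /* File mode.  */
  v[3]  = sb->st_nlink;    /* Number of hard links.  */
  v[4]  = sb->st_uid;      /* Owner's uid.  */
  v[5]  = sb->st_gid;      /* Owner's gid.  */
  v[6]  = sb->st_rdev;     /* Device ID for special files.  */
  v[7]  = sb->st_size;     /* File size in bytes.  */
  v[8]  = sb->st_atime;    /* Last access time.  */
  v[9]  = sb->st_mtime;    /* Last modification time.  */
  v[10] = sb->st_ctime;    /* Last status change time.  */
  v[11] = sb->st_blksize;  /* Preferred I/O block size.  */
  v[12] = sb->st_blocks;   /* Blocks allocated.  */
}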
static void
cshift0 (gfc_array_char * ret, const gfc_array_char * array, ssize_t shift,
         int which, index_type size)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type roffset;
  char *rptr;

  /* s.* indicates the source array.  */
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type sstride0;
  index_type soffset;
  const char *sptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type dim;
  index_type len;
  index_type n;
  index_type arraysize;
  index_type type_size;

  if (which < 1 || which > GFC_DESCRIPTOR_RANK (array))
    runtime_error ("Argument 'DIM' is out of range in call to 'CSHIFT'");

  arraysize = size0 ((array_t *) array);

  if (ret->data == NULL)
    {
      int i;

      ret->offset = 0;
      ret->dtype = array->dtype;
      for (i = 0; i < GFC_DESCRIPTOR_RANK (array); i++)
        {
          index_type ub, str;

          ub = GFC_DESCRIPTOR_EXTENT(array,i) - 1;

          if (i == 0)
            str = 1;
          else
            str = GFC_DESCRIPTOR_EXTENT(ret,i-1)
                  * GFC_DESCRIPTOR_STRIDE(ret,i-1);

          GFC_DIMENSION_SET(ret->dim[i], 0, ub, str);
        }

      if (arraysize > 0)
        ret->data = internal_malloc_size (size * arraysize);
      else
        ret->data = internal_malloc_size (1);
    }
  else if (unlikely (compile_options.bounds_check))
    {
      bounds_equal_extents ((array_t *) ret, (array_t *) array,
                            "return value", "CSHIFT");
    }

  if (arraysize == 0)
    return;

  type_size = GFC_DTYPE_TYPE_SIZE (array);

  switch(type_size)
    {
    case GFC_DTYPE_LOGICAL_1:
    case GFC_DTYPE_INTEGER_1:
    case GFC_DTYPE_DERIVED_1:
      cshift0_i1 ((gfc_array_i1 *)ret, (gfc_array_i1 *) array, shift, which);
      return;

    case GFC_DTYPE_LOGICAL_2:
    case GFC_DTYPE_INTEGER_2:
      cshift0_i2 ((gfc_array_i2 *)ret, (gfc_array_i2 *) array, shift, which);
      return;

    case GFC_DTYPE_LOGICAL_4:
    case GFC_DTYPE_INTEGER_4:
      cshift0_i4 ((gfc_array_i4 *)ret, (gfc_array_i4 *) array, shift, which);
      return;

    case GFC_DTYPE_LOGICAL_8:
    case GFC_DTYPE_INTEGER_8:
      cshift0_i8 ((gfc_array_i8 *)ret, (gfc_array_i8 *) array, shift, which);
      return;

#ifdef HAVE_GFC_INTEGER_16
    case GFC_DTYPE_LOGICAL_16:
    case GFC_DTYPE_INTEGER_16:
      cshift0_i16 ((gfc_array_i16 *)ret, (gfc_array_i16 *) array, shift,
                   which);
      return;
#endif

    case GFC_DTYPE_REAL_4:
      cshift0_r4 ((gfc_array_r4 *)ret, (gfc_array_r4 *) array, shift, which);
      return;

    case GFC_DTYPE_REAL_8:
      cshift0_r8 ((gfc_array_r8 *)ret, (gfc_array_r8 *) array, shift, which);
      return;

#ifdef HAVE_GFC_REAL_10
    case GFC_DTYPE_REAL_10:
      cshift0_r10 ((gfc_array_r10 *)ret, (gfc_array_r10 *) array, shift,
                   which);
      return;
#endif

#ifdef HAVE_GFC_REAL_16
    case GFC_DTYPE_REAL_16:
      cshift0_r16 ((gfc_array_r16 *)ret, (gfc_array_r16 *) array, shift,
                   which);
      return;
#endif

    case GFC_DTYPE_COMPLEX_4:
      cshift0_c4 ((gfc_array_c4 *)ret, (gfc_array_c4 *) array, shift, which);
      return;

    case GFC_DTYPE_COMPLEX_8:
      cshift0_c8 ((gfc_array_c8 *)ret, (gfc_array_c8 *) array, shift, which);
      return;

#ifdef HAVE_GFC_COMPLEX_10
    case GFC_DTYPE_COMPLEX_10:
      cshift0_c10 ((gfc_array_c10 *)ret, (gfc_array_c10 *) array, shift,
                   which);
      return;
#endif

#ifdef HAVE_GFC_COMPLEX_16
    case GFC_DTYPE_COMPLEX_16:
      cshift0_c16 ((gfc_array_c16 *)ret, (gfc_array_c16 *) array, shift,
                   which);
      return;
#endif

    default:
      break;
    }

  switch (size)
    {
      /* Let's check the actual alignment of the data pointers.  If they
         are suitably aligned, we can safely call the unpack functions.  */
    case sizeof (GFC_INTEGER_1):
      cshift0_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) array, shift, which);
      return;

    case sizeof (GFC_INTEGER_2):
      if (GFC_UNALIGNED_2(ret->data) || GFC_UNALIGNED_2(array->data))
        break;
      else
        {
          cshift0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) array, shift,
                      which);
          return;
        }

    case sizeof (GFC_INTEGER_4):
      if (GFC_UNALIGNED_4(ret->data) || GFC_UNALIGNED_4(array->data))
        break;
      else
        {
          cshift0_i4 ((gfc_array_i4 *)ret, (gfc_array_i4 *) array, shift,
                      which);
          return;
        }

    case sizeof (GFC_INTEGER_8):
      if (GFC_UNALIGNED_8(ret->data) || GFC_UNALIGNED_8(array->data))
        {
          /* Let's try to use the complex routines.  First, a sanity
             check that the sizes match; this should be optimized to
             a no-op.  */
          if (sizeof(GFC_INTEGER_8) != sizeof(GFC_COMPLEX_4))
            break;

          if (GFC_UNALIGNED_C4(ret->data) || GFC_UNALIGNED_C4(array->data))
            break;

          cshift0_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) array, shift,
                      which);
          return;
        }
      else
        {
          cshift0_i8 ((gfc_array_i8 *)ret, (gfc_array_i8 *) array, shift,
                      which);
          return;
        }

#ifdef HAVE_GFC_INTEGER_16
    case sizeof (GFC_INTEGER_16):
      if (GFC_UNALIGNED_16(ret->data) || GFC_UNALIGNED_16(array->data))
        {
          /* Let's try to use the complex routines.  First, a sanity
             check that the sizes match; this should be optimized to
             a no-op.  */
          if (sizeof(GFC_INTEGER_16) != sizeof(GFC_COMPLEX_8))
            break;

          if (GFC_UNALIGNED_C8(ret->data) || GFC_UNALIGNED_C8(array->data))
            break;

          cshift0_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) array, shift,
                      which);
          return;
        }
      else
        {
          cshift0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) array, shift,
                       which);
          return;
        }
#else
    case sizeof (GFC_COMPLEX_8):
      if (GFC_UNALIGNED_C8(ret->data) || GFC_UNALIGNED_C8(array->data))
        break;
      else
        {
          cshift0_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) array, shift,
                      which);
          return;
        }
#endif

    default:
      break;
    }

  which = which - 1;
  sstride[0] = 0;
  rstride[0] = 0;
  extent[0] = 1;
  count[0] = 0;
  n = 0;

  /* Initialized for avoiding compiler warnings.  */
  roffset = size;
  soffset = size;
  len = 0;

  for (dim = 0; dim < GFC_DESCRIPTOR_RANK (array); dim++)
    {
      if (dim == which)
        {
          roffset = GFC_DESCRIPTOR_STRIDE_BYTES(ret,dim);
          if (roffset == 0)
            roffset = size;
          soffset = GFC_DESCRIPTOR_STRIDE_BYTES(array,dim);
          if (soffset == 0)
            soffset = size;
          len = GFC_DESCRIPTOR_EXTENT(array,dim);
        }
      else
        {
          count[n] = 0;
          extent[n] = GFC_DESCRIPTOR_EXTENT(array,dim);
          rstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(ret,dim);
          sstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(array,dim);
          n++;
        }
    }
  if (sstride[0] == 0)
    sstride[0] = size;
  if (rstride[0] == 0)
    rstride[0] = size;

  dim = GFC_DESCRIPTOR_RANK (array);
  rstride0 = rstride[0];
  sstride0 = sstride[0];
  rptr = ret->data;
  sptr = array->data;

  shift = len == 0 ? 0 : shift % (ssize_t)len;
  if (shift < 0)
    shift += len;

  while (rptr)
    {
      /* Do the shift for this dimension.  */

      /* If elements are contiguous, perform the operation
         in two block moves.  */
      if (soffset == size && roffset == size)
        {
          size_t len1 = shift * size;
          size_t len2 = (len - shift) * size;
          memcpy (rptr, sptr + len1, len2);
          memcpy (rptr + len2, sptr, len1);
        }
      else
        {
          /* Otherwise, we'll have to perform the copy one element at
             a time.  */
          char *dest = rptr;
          const char *src = &sptr[shift * soffset];

          for (n = 0; n < len - shift; n++)
            {
              memcpy (dest, src, size);
              dest += roffset;
              src += soffset;
            }
          for (src = sptr, n = 0; n < shift; n++)
            {
              memcpy (dest, src, size);
              dest += roffset;
              src += soffset;
            }
        }

      /* Advance to the next section.  */
      rptr += rstride0;
      sptr += sstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          sptr -= sstride[n] * extent[n];
          n++;
          if (n >= dim - 1)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              sptr += sstride[n];
            }
        }
    }
}
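/* Illustrative sketch (not from libgfortran): the contiguous fast path used
   above, where a circular shift of a row of LEN elements of SIZE bytes each
   is done with two block moves.  SHIFT is assumed to be already reduced to
   the range [0, len), as the modulo adjustment above guarantees.  Names and
   parameters are hypothetical.  */
#include <stddef.h>
#include <string.h>

static void
cshift_row_sketch (char *dst, const char *src, size_t len, size_t size,
                   size_t shift)
{
  size_t len1 = shift * size;          /* Bytes that wrap to the end.  */
  size_t len2 = (len - shift) * size;  /* Bytes copied to the front.  */

  memcpy (dst, src + len1, len2);      /* Elements shift..len-1 come first.  */
  memcpy (dst + len2, src, len1);      /* Then elements 0..shift-1.  */
}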
void
cshift0_r4 (gfc_array_r4 *ret, const gfc_array_r4 *array, ssize_t shift,
            int which)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type roffset;
  GFC_REAL_4 *rptr;

  /* s.* indicates the source array.  */
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type sstride0;
  index_type soffset;
  const GFC_REAL_4 *sptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type dim;
  index_type len;
  index_type n;

  which = which - 1;
  sstride[0] = 0;
  rstride[0] = 0;
  extent[0] = 1;
  count[0] = 0;
  n = 0;

  /* Initialized for avoiding compiler warnings.  */
  roffset = 1;
  soffset = 1;
  len = 0;

  for (dim = 0; dim < GFC_DESCRIPTOR_RANK (array); dim++)
    {
      if (dim == which)
        {
          roffset = GFC_DESCRIPTOR_STRIDE(ret,dim);
          if (roffset == 0)
            roffset = 1;
          soffset = GFC_DESCRIPTOR_STRIDE(array,dim);
          if (soffset == 0)
            soffset = 1;
          len = GFC_DESCRIPTOR_EXTENT(array,dim);
        }
      else
        {
          count[n] = 0;
          extent[n] = GFC_DESCRIPTOR_EXTENT(array,dim);
          rstride[n] = GFC_DESCRIPTOR_STRIDE(ret,dim);
          sstride[n] = GFC_DESCRIPTOR_STRIDE(array,dim);
          n++;
        }
    }
  if (sstride[0] == 0)
    sstride[0] = 1;
  if (rstride[0] == 0)
    rstride[0] = 1;

  dim = GFC_DESCRIPTOR_RANK (array);
  rstride0 = rstride[0];
  sstride0 = sstride[0];
  rptr = ret->data;
  sptr = array->data;

  shift = len == 0 ? 0 : shift % (ssize_t)len;
  if (shift < 0)
    shift += len;

  while (rptr)
    {
      /* Do the shift for this dimension.  */

      /* If elements are contiguous, perform the operation
         in two block moves.  */
      if (soffset == 1 && roffset == 1)
        {
          size_t len1 = shift * sizeof (GFC_REAL_4);
          size_t len2 = (len - shift) * sizeof (GFC_REAL_4);
          memcpy (rptr, sptr + shift, len2);
          memcpy (rptr + (len - shift), sptr, len1);
        }
      else
        {
          /* Otherwise, we will have to perform the copy one element at
             a time.  */
          GFC_REAL_4 *dest = rptr;
          const GFC_REAL_4 *src = &sptr[shift * soffset];

          for (n = 0; n < len - shift; n++)
            {
              *dest = *src;
              dest += roffset;
              src += soffset;
            }
          for (src = sptr, n = 0; n < shift; n++)
            {
              *dest = *src;
              dest += roffset;
              src += soffset;
            }
        }

      /* Advance to the next section.  */
      rptr += rstride0;
      sptr += sstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          sptr -= sstride[n] * extent[n];
          n++;
          if (n >= dim - 1)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              sptr += sstride[n];
            }
        }
    }

  return;
}
  const GFC_REAL_4 *sptr;
  /* p.* indicates the pad array.  */
  index_type pcount[GFC_MAX_DIMENSIONS];
  index_type pextent[GFC_MAX_DIMENSIONS];
  index_type pstride[GFC_MAX_DIMENSIONS];
  index_type pdim;
  index_type psize;
  const GFC_REAL_4 *pptr;

  const GFC_REAL_4 *src;
  int n;
  int dim;
  int sempty, pempty, shape_empty;
  index_type shape_data[GFC_MAX_DIMENSIONS];

  rdim = GFC_DESCRIPTOR_EXTENT(shape,0);
  if (rdim != GFC_DESCRIPTOR_RANK(ret))
    runtime_error("rank of return array incorrect in RESHAPE intrinsic");

  shape_empty = 0;

  for (n = 0; n < rdim; n++)
    {
      shape_data[n] = shape->base_addr[n * GFC_DESCRIPTOR_STRIDE(shape,0)];
      if (shape_data[n] <= 0)
        {
          shape_data[n] = 0;
          shape_empty = 1;
        }
    }
static void
transpose_internal (gfc_array_char *ret, gfc_array_char *source)
{
  /* r.* indicates the return array.  */
  index_type rxstride, rystride;
  char *rptr;
  /* s.* indicates the source array.  */
  index_type sxstride, systride;
  const char *sptr;

  index_type xcount, ycount;
  index_type x, y;
  index_type size;

  assert (GFC_DESCRIPTOR_RANK (source) == 2
          && GFC_DESCRIPTOR_RANK (ret) == 2);

  size = GFC_DESCRIPTOR_SIZE(ret);

  if (ret->base_addr == NULL)
    {
      assert (ret->dtype == source->dtype);

      GFC_DIMENSION_SET(ret->dim[0], 0, GFC_DESCRIPTOR_EXTENT(source,1) - 1,
                        1);

      GFC_DIMENSION_SET(ret->dim[1], 0, GFC_DESCRIPTOR_EXTENT(source,0) - 1,
                        GFC_DESCRIPTOR_EXTENT(source, 1));

      ret->base_addr = xmallocarray (size0 ((array_t*)ret), size);
      ret->offset = 0;
    }
  else if (unlikely (compile_options.bounds_check))
    {
      index_type ret_extent, src_extent;

      ret_extent = GFC_DESCRIPTOR_EXTENT(ret,0);
      src_extent = GFC_DESCRIPTOR_EXTENT(source,1);

      if (src_extent != ret_extent)
        runtime_error ("Incorrect extent in return value of TRANSPOSE"
                       " intrinsic in dimension 1: is %ld,"
                       " should be %ld", (long int) src_extent,
                       (long int) ret_extent);

      ret_extent = GFC_DESCRIPTOR_EXTENT(ret,1);
      src_extent = GFC_DESCRIPTOR_EXTENT(source,0);

      if (src_extent != ret_extent)
        runtime_error ("Incorrect extent in return value of TRANSPOSE"
                       " intrinsic in dimension 2: is %ld,"
                       " should be %ld", (long int) src_extent,
                       (long int) ret_extent);
    }

  sxstride = GFC_DESCRIPTOR_STRIDE_BYTES(source,0);
  systride = GFC_DESCRIPTOR_STRIDE_BYTES(source,1);
  xcount = GFC_DESCRIPTOR_EXTENT(source,0);
  ycount = GFC_DESCRIPTOR_EXTENT(source,1);

  rxstride = GFC_DESCRIPTOR_STRIDE_BYTES(ret,0);
  rystride = GFC_DESCRIPTOR_STRIDE_BYTES(ret,1);

  rptr = ret->base_addr;
  sptr = source->base_addr;

  for (y = 0; y < ycount; y++)
    {
      for (x = 0; x < xcount; x++)
        {
          memcpy (rptr, sptr, size);

          sptr += sxstride;
          rptr += rystride;
        }
      sptr += systride - (sxstride * xcount);
      rptr += rxstride - (rystride * xcount);
    }
}
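/* Illustrative sketch (not from libgfortran): the byte-strided transpose loop
   used by transpose_internal, for a general element SIZE.  Strides are in
   bytes, as in the routine above; for the default descriptors this copies
   source(x,y) to ret(y,x).  Function and parameter names are hypothetical.  */
#include <stddef.h>
#include <string.h>

static void
transpose_bytes_sketch (char *rptr, const char *sptr, size_t size,
                        ptrdiff_t xcount, ptrdiff_t ycount,
                        ptrdiff_t sxstride, ptrdiff_t systride,
                        ptrdiff_t rxstride, ptrdiff_t rystride)
{
  for (ptrdiff_t y = 0; y < ycount; y++)
    {
      for (ptrdiff_t x = 0; x < xcount; x++)
        {
          memcpy (rptr, sptr, size);   /* Copy one element.  */
          sptr += sxstride;            /* Next entry in the source row.  */
          rptr += rystride;            /* Next entry in the result column.  */
        }
      /* Undo the inner advances, then step to the next source column and
         the next result row.  */
      sptr += systride - sxstride * xcount;
      rptr += rxstride - rystride * xcount;
    }
}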