/* Exercise array_sum() on two fixed 5-element inputs and verify each
   result via check(); prints "All Correct" when both pass. */
int main(void) {
    int first[]  = {1, 2, 3, 0, 0};
    int second[] = {1, 2, 3, 4, 5};
    int result;

    /* trailing zeros must not disturb the sum */
    result = array_sum(first);
    printf("Input submitted to the function: {1, 2, 3, 0, 0}");
    check(6, result);

    /* full 1..5 series */
    result = array_sum(second);
    printf("Input submitted to the function: {1, 2, 3, 4, 5}");
    check(15, result);

    printf("All Correct\n");
    return 0;
}
/* Driver for three pointer exercises: 2.1 swaps two ints through
   pointers, 2.2 sums an array through a (pointer, length) pair,
   2.3 reads integers into a caller-supplied buffer. */
int main() {
    /* 2.1: Pointer Sum with two input parameters */
    int left = 4;
    int right = 5;
    number_swap(&left, &right);
    if (left != 5) {
        printf("Invalid result for 2.1\n");
    } else {
        printf("Great, 2.1 worked!\n");
    }

    /* 2.2: Extending Pointer sum */
    int samples[] = { 10, 100, 1000 };
    if (array_sum(samples, 3) == 1110) {
        printf("2.2 worked\n");
    } else {
        printf("Invalid result for 2.2");
    }

    /* 2.3: Array Reader -- report how many integers were read */
    int buffer[10];
    printf("Read %d integers!\n", array_reader(buffer, 10));
    return 0;
}
/* Moving-average filter over a sliding window `vec` of `size` samples:
   shift the window left by one, append the newest sample, then average.
   The average is written back over the newest slot before returning it.
   NOTE(review): array_sum is called as (size, vec) while the sibling
   helpers here take (vec, size) -- confirm its signature; also confirm
   that overwriting vec[size-1] with the mean (feedback) is intended. */
float movingaveragefilter(float *vec, int size, float newelement) {
    const int newest = size - 1;
    array_shiftleft(vec, size, 1);
    vec[newest] = newelement;
    const float mean = array_sum(size, vec) / (float)size;
    vec[newest] = mean;
    return mean;
}
/* Print the result of the zero-argument array_sum(), declared here
   with a block-scope prototype. */
int main(void) {
    int array_sum(void);  /* local prototype; definition lives elsewhere */

    const int total = array_sum();
    printf("Array sum is %i\n", total);
    return 0;
}
/* Let foo() populate a 50-element buffer, then abort unless
   array_sum(a) comes out as exactly 49. */
int main(void) {
    int a[50];

    foo(a, 50);
    if (array_sum(a) != 49)
        abort();
    return 0;
}
/* Variant of the 50-element check: the buffer holds one extra slot
   because foo's first iteration may legally write to a[50]. */
int main(void) {
    int a[51];

    foo(a, 50);
    if (array_sum(a) != 49)
        abort();
    return 0;
}
END_TEST

/* Array-array operations */
/* array_sum(dst, src, n) must add src into dst element-wise, validate
   its arguments with distinct error codes, and return 0 on success. */
START_TEST(test_array_sum)
{
    /* Argument validation: each bad argument yields its own code. */
    fail_unless(array_sum(NULL, data2, length) == -1,
                "The 1st arg in array_sum is not valid");
    fail_unless(array_sum(data1, NULL, length) == -2,
                "The 2nd arg in array_sum is not valid");
    fail_unless(array_sum(data1, data2, 0) == -3,
                "The 3rd arg in array_sum is not valid");

    /* Work on a copy so data1 stays intact for later tests. */
    double *aux = (double*) malloc (length * sizeof(double));
    fail_unless(aux != NULL, "malloc failed in test_array_sum");
    memcpy(aux, data1, length * sizeof(double));

    int ret = array_sum(aux, data2, length);
    fail_unless(ret == 0, "An error in array_sum has occurred");

    /* BUG FIX: the old condition `aux[i] - expected > 0` only caught
       results that were too LARGE; a too-small result passed silently.
       Compare for inequality instead -- both sides are the identical
       single addition, so correct doubles match exactly. */
    for (int i = 0; i < length; i++) {
        fail_if(aux[i] != data1[i] + data2[i],
                "The input array elements must have been added");
    }
    free(aux);
}
/* Fill an array with 0..C_LENGTH-1 and verify that array_sum returns
   the arithmetic-series total C_LENGTH*(C_LENGTH-1)/2.
   Exit status: 0 on success, 1 on failure. */
int main(){
    /* BUG FIX: the buffer was a fixed malloc(64*sizeof(int)) while the
       loop writes C_LENGTH entries -- a heap overflow whenever
       C_LENGTH > 64.  Allocate exactly what is used. */
    int *array = malloc(C_LENGTH * sizeof(int));
    if (array == NULL) {
        return 1;  /* allocation failure */
    }

    for (int i = 0; i < C_LENGTH; i++) {
        array[i] = i;
    }

    int sum = array_sum(array, C_LENGTH);
    printf("Sum returned: %d\n", sum);

    /* expected value: sum of 0..C_LENGTH-1 */
    int ok = (sum == (C_LENGTH - 1) * (C_LENGTH) / 2);
    free(array);  /* was leaked before */
    return ok ? 0 : 1;
}
/* Time `func`: run it p_runner_times times per round, for p_times
   rounds, and return the mean round duration (in millitime() units).
   Returns 0 when p_times <= 0 -- previously that declared a
   zero/negative-length VLA and divided by zero. */
int benchmark(int p_times, int p_runner_times, void (*func)()) {
    if (p_times <= 0) {
        return 0;  /* nothing to measure */
    }

    int results[p_times];

    /* Fill results from the back, matching the original order. */
    for (int round = p_times - 1; round >= 0; round--) {
        const int start = millitime();
        for (int n = p_runner_times; n != 0; n--) {
            func();
        }
        results[round] = millitime() - start;
    }

    /* mean of all round durations */
    return array_sum(results, p_times) / p_times;
}
/* Project Euler #1 style: collect all multiples of 3 or 5 below 1000
   and total them with array_sum. */
int main(int argc, char** argv) {
    /* BUG FIX: the array was declared `int multiple[sizeof(size)]`,
       i.e. sizeof(int) == 4 elements, while the loop below writes
       indices 0..999 -- a large stack buffer overflow.  Size the
       array with a compile-time constant instead. */
    enum { SIZE = 1000 };
    int multiple[SIZE] = {0};

    /* Record every multiple of 3 or 5; all other slots stay 0, so
       they contribute nothing to the sum. */
    for (int i = 0; i < SIZE; i++) {
        if ((i % 3 == 0) || (i % 5 == 0))
            multiple[i] = i;
    }

    /* Since C does not have a builtin reduction, the project supplies
       array_sum().  The result was never printed in the original, so
       just silence the unused-variable warning. */
    int total = array_sum(multiple);
    (void) total;
    (void) argc;
    (void) argv;
    return 0;
}
/* Map global grid indices (k, i, j) to a flat offset into the
   distributed density buffer, accounting for the per-rank block
   decomposition.  From the field names, i presumably indexes the
   radial direction, k the z direction and j the spectral dimension --
   TODO confirm against the poisson struct.
   Returns -1 (after printing a message) when any index is out of
   range; otherwise the flat index. */
int in_lookup(struct poisson *thePoisson,int k,int i,int j){
    if(i>=thePoisson->N_r_glob||k>=thePoisson->N_z_glob||j>=thePoisson->N_k){
        printf("in_lookup index out of bound\n");
        return -1;
    }
    double *data=thePoisson->density;  /* NOTE(review): unused in this function */
    // int p=i/thePoisson->N_r;        /* old uniform r-decomposition, kept for reference */
    int q=k/thePoisson->N_z;           /* z-blocks are uniform: owning block by division */
    // int ip=i-p*thePoisson->N_r;
    int kp=k-q*thePoisson->N_z;        /* local z index within block q */
    /* r-blocks are non-uniform: scan the N0_r block-start table for the
       block containing i.  Assumes N0_r[p+1] is valid for every p
       reached, i.e. the table has a terminating entry -- TODO confirm. */
    int p=0;
    while(thePoisson->N0_r[p+1]<=i)
        p++;
    int ip=i-thePoisson->N0_r[p];      /* local r index within block p */
    int rank=p*thePoisson->z_size+q;   /* owning rank in the p-by-z_size grid */
    /* Offset = total elements held by lower ranks (prefix sum over
       recv_elements) + row-major position (kp, ip) within this rank's
       block, times N_k entries per grid point, plus j. */
    int index=array_sum(thePoisson->recv_elements,rank)+(kp*thePoisson->N_r_array[rank]+ip)*thePoisson->N_k+j;
    return index;
}
/* Demo driver: element-wise array_sum into a result array, a
   heap-allocated array, element deletion by shifting and by realloc,
   then reset/inverse/square transforms -- printing after each step. */
int main(void) {
    const int size = 5;
    int fst_arr[] = {1, 2, 3, 4, 5};
    int snd_arr[] = {5, 4, 3, 2, 1};
    int res[size];

    /* element-wise sum, then show inputs and output */
    array_sum(fst_arr, snd_arr, res, size);
    print_array(fst_arr, size);
    print_array(snd_arr, size);
    print_array(res, size);

    /* heap-allocated array holding 0..size-1 */
    int* dynamic = (int*) malloc (sizeof(int) * size);
    int i;
    for (i = 0; i < size; ++i) {
        dynamic[i] = i;
    }
    print_array(dynamic, size);

    /* delete a value from the statically declared array by shifting */
    delete_shift(fst_arr, 2, size);
    print_array(fst_arr, size - 1);

    /* delete a value from the heap array via realloc.
       NOTE(review): delete_realloc receives `dynamic` by value, so it
       cannot update this pointer if realloc moves the block -- confirm
       the free() below still sees a valid pointer. */
    delete_realloc(dynamic, 1, size);
    print_array(dynamic, size - 1);

    free(dynamic);

    /* in-place transforms */
    reset_array(fst_arr, size);
    print_array(fst_arr, size);
    inverse(snd_arr, size);
    print_array(snd_arr, size);
    square(res, size);
    print_array(res, size);
    return 0;
}
/* Feed seven test values to array_sum(), then print the global `sum`
   it presumably updates -- TODO confirm that array_sum writes the
   file-scope `sum` rather than returning the total. */
int main(void) {
    int samples[] = {2, 3, 4, 5, 6, 7, 6};

    array_sum(samples, 7);
    printf("%i", sum);
}
/**
 * @brief erase job related stuff only for server
 *
 * @see s_job_t struct
 * @note removes the job from g_m_jobs under g_grand_lock, cancels any
 *       unfinished hash threads, waits out the send-thread write
 *       timeout, then frees the job's buffers under the job lock and
 *       recycles the lock slot
 * @param uri_string key of the job in the g_m_jobs map
 * @return 0 on success, -1 when no job with that uri exists
 * @author auxten <*****@*****.**> <*****@*****.**>
 * @date 2011-8-1
 **/
int erase_job(std::string &uri_string)
{
    int ret;
    s_job_t *jo;
    GKO_INT64 progress;
    GKO_INT64 percent;
    /// jobs map
    std::map<std::string, s_job_t *>::iterator it;
    {
        /* look up and unlink the job from the global map atomically */
        pthread_mutex_lock(&g_grand_lock);
        if ((it = g_m_jobs.find(uri_string)) == g_m_jobs.end())
        {
            ret = -1;
            gko_log(FATAL, "erase %s fail", uri_string.c_str());
            pthread_mutex_unlock(&g_grand_lock);
            goto ERASE_RET;
        }
        jo = it->second;
        jo->job_state = JOB_TO_BE_ERASED;
        /** then erase job for the job map **/
        g_m_jobs.erase(uri_string);
        gko_log(NOTICE, "g_m_jobs.size: %lld", (GKO_UINT64) g_m_jobs.size());
        pthread_mutex_unlock(&g_grand_lock);
    }
    /** cancel the hash threads if any progress < 99% **/
    progress = array_sum(jo->hash_progress, XOR_HASH_TNUM);
    /* guard against division by zero for zero-size jobs */
    percent = jo->total_size ? progress * 100 / jo->total_size : 100;
    if (percent < 100)
    {
        for (int i = 0; i < XOR_HASH_TNUM; i++)
        {
            /* pthread_cancel returns 0 on success */
            if (! pthread_cancel(jo->hash_worker[i]))
            {
                gko_log(NOTICE, "hash thread %d canceling", i);
            }
            else
            {
                gko_log(FATAL, "hash thread %d cancel failed", i);
            }
        }
    }
    /**
     * sleep for about write timeout time + 2
     * we must wait for the send thread call write
     * timeout and stop sending blocks. then we
     * can erase the job successfully
     **/
    sleep(ERASE_JOB_MEM_WAIT);
    /** clean the job struct **/
    pthread_mutex_lock(&g_job_lock[jo->lock_id].lock);
    if (jo->blocks && jo->block_count)
    {
        delete [](jo->blocks);
        jo->blocks = NULL;
    }
    if (jo->files && jo->file_count)
    {
        delete [](jo->files);
        jo->files = NULL;
    }
    if (jo->host_set)
    {
        delete jo->host_set;
        jo->host_set = NULL;
    }
    for (int i = 0; i < XOR_HASH_TNUM; i++)
    {
        if (jo->hash_buf[i])
        {
            delete [](jo->hash_buf[i]);
            jo->hash_buf[i] = NULL;
        }
    }
    pthread_mutex_unlock(&g_job_lock[jo->lock_id].lock);
    /** for safety re-init the rwlock **/
    pthread_mutex_destroy(&(g_job_lock[jo->lock_id].lock));
    pthread_mutex_init(&(g_job_lock[jo->lock_id].lock), NULL);
    g_job_lock[jo->lock_id].state = LK_FREE;
    delete jo;
    gko_log(NOTICE, "job '%s' erased", uri_string.c_str());
    ret = 0;
ERASE_RET:
    return ret;
}
/* Arithmetic mean of the array's elements.
   NOTE(review): assumes thisarray->nr_of_elements != 0 -- a zero-length
   array divides by zero here. */
GLOBAL DATATYPE array_mean(array *thisarray) {
    const DATATYPE total = array_sum(thisarray);
    return total / thisarray->nr_of_elements;
}
/* Outlined SMP task body: forwards row l_i_0 of the array-of-arrays
   and the matching partial-sums slot to array_sum.
   NOTE(review): the third argument is the loop index itself --
   presumably the row length grows with i; confirm against the
   outlining transformation that generated this. */
void _smp__ol_main_0(_nx_data_env_0_t * _args)
{
    const int row = _args->l_i_0;
    array_sum((_args->l_array_of_arrays_0)[row],
              &((_args->l_partial_sums_0)[row]),
              row);
}
/*{{{ scale_by(transform_info_ptr tinfo)*/
/* Scale the data in tinfo by a factor selected by local_arg->type.
   Three families of operation:
     - map operations (per time point, after transposing so that
       vectors are maps): normalization, inverse sums/maxima/quantiles,
       or scaling by the x axis value;
     - channel operations (per channel vector);
     - constant factors (pi, sfreq, nr of points/channels/averages and
       their inverses), precomputed into local_arg->factor.
   Returns tinfo->tsdata (data modified in place). */
METHODDEF DATATYPE *
scale_by(transform_info_ptr tinfo) {
 struct scale_by_storage *local_arg=(struct scale_by_storage *)tinfo->methods->local_storage;
 DATATYPE factor;
 int itempart;
 array indata;

 tinfo_array(tinfo, &indata);
 switch (local_arg->type) {
  /* Operations which are done on maps */
  case SCALE_BY_XDATA:
  case SCALE_BY_INVXDATA:
   if (tinfo->xdata==NULL) create_xaxis(tinfo, NULL);
   /* FALL THROUGH: xdata scaling is a map operation as well */
  case SCALE_BY_NORMALIZE:
  case SCALE_BY_INVNORM:
  case SCALE_BY_INVSQUARENORM:
  case SCALE_BY_INVSUM:
  case SCALE_BY_INVMAX:
  case SCALE_BY_INVMAXABS:
  case SCALE_BY_INVQUANTILE:
   array_transpose(&indata); /* Vectors are maps */
   if (local_arg->have_channel_list) {
    ERREXIT(tinfo->emethods, "scale_by: Channel subsets are not supported for map operations.\n");
   }
   break;
  /* Operations which are done on channels */
  case SCALE_BY_INVPOINTNORM:
  case SCALE_BY_INVPOINTSQUARENORM:
  case SCALE_BY_INVPOINTSUM:
  case SCALE_BY_INVPOINTMAX:
  case SCALE_BY_INVPOINTMAXABS:
  case SCALE_BY_INVPOINTQUANTILE:
  case SCALE_BY_FACTOR:
   break;
  /* Operations which involve a special but constant factor */
  case SCALE_BY_PI:
   local_arg->factor= M_PI;
   break;
  case SCALE_BY_INVPI:
   local_arg->factor= 1.0/M_PI;
   break;
  case SCALE_BY_SFREQ:
   local_arg->factor= tinfo->sfreq;
   break;
  case SCALE_BY_INVSFREQ:
   local_arg->factor= 1.0/tinfo->sfreq;
   break;
  case SCALE_BY_NR_OF_POINTS:
   /* frequency-domain data counts frequencies instead of points */
   local_arg->factor= (tinfo->data_type==FREQ_DATA ? tinfo->nroffreq : tinfo->nr_of_points);
   break;
  case SCALE_BY_INVNR_OF_POINTS:
   local_arg->factor= 1.0/(tinfo->data_type==FREQ_DATA ? tinfo->nroffreq : tinfo->nr_of_points);
   break;
  case SCALE_BY_NR_OF_CHANNELS:
   local_arg->factor= tinfo->nr_of_channels;
   break;
  case SCALE_BY_INVNR_OF_CHANNELS:
   local_arg->factor= 1.0/tinfo->nr_of_channels;
   break;
  case SCALE_BY_NROFAVERAGES:
   local_arg->factor= tinfo->nrofaverages;
   break;
  case SCALE_BY_INVNROFAVERAGES:
   local_arg->factor= 1.0/tinfo->nrofaverages;
   break;
  case SCALE_BY_SQRTNROFAVERAGES:
   local_arg->factor= sqrt(tinfo->nrofaverages);
   break;
  case SCALE_BY_INVSQRTNROFAVERAGES:
   local_arg->factor= 1.0/sqrt(tinfo->nrofaverages);
   break;
 }

 /* Apply the factor to each requested item, vector by vector. */
 for (itempart=local_arg->fromitem; itempart<=local_arg->toitem; itempart++) {
  array_use_item(&indata, itempart);
  do {
   /* honor the channel subset for channel operations */
   if (local_arg->have_channel_list && !is_in_channellist(indata.current_vector+1, local_arg->channel_list)) {
    array_nextvector(&indata);
    continue;
   }
   switch (local_arg->type) {
    /* For the data-dependent factors below, the aggregate call
       advances the array cursor past the current vector, so
       array_previousvector rewinds before scaling. */
    case SCALE_BY_NORMALIZE:
    case SCALE_BY_INVNORM:
    case SCALE_BY_INVPOINTNORM:
     factor=array_abs(&indata);
     if (factor==0.0) factor=1.0;  /* avoid dividing by zero */
     factor=1.0/factor;
     array_previousvector(&indata);
     break;
    case SCALE_BY_INVSQUARENORM:
    case SCALE_BY_INVPOINTSQUARENORM:
     factor=array_square(&indata);
     if (factor==0.0) factor=1.0;
     factor=1.0/factor;
     array_previousvector(&indata);
     break;
    case SCALE_BY_INVSUM:
    case SCALE_BY_INVPOINTSUM:
     factor=array_sum(&indata);
     if (factor==0.0) factor=1.0;
     factor=1.0/factor;
     array_previousvector(&indata);
     break;
    case SCALE_BY_INVMAX:
    case SCALE_BY_INVPOINTMAX:
     factor=array_max(&indata);
     if (factor==0.0) factor=1.0;
     factor=1.0/factor;
     array_previousvector(&indata);
     break;
    case SCALE_BY_INVMAXABS:
    case SCALE_BY_INVPOINTMAXABS:
     {
     /* scan the vector manually to find the largest absolute value */
     DATATYPE amax=fabs(array_scan(&indata)), hold;
     while (indata.message==ARRAY_CONTINUE) {
      hold=fabs(array_scan(&indata));
      if (hold>amax) amax=hold;
     }
     factor=amax;
     }
     if (factor==0.0) factor=1.0;
     factor=1.0/factor;
     array_previousvector(&indata);
     break;
    case SCALE_BY_INVQUANTILE:
    case SCALE_BY_INVPOINTQUANTILE:
     /* NOTE(review): unlike the cases above there is no
        array_previousvector here -- presumably array_quantile does
        not advance the cursor; confirm against its implementation. */
     factor=array_quantile(&indata,local_arg->factor);
     if (factor==0.0) factor=1.0;
     factor=1.0/factor;
     break;
    case SCALE_BY_XDATA:
     factor=tinfo->xdata[indata.current_vector];
     break;
    case SCALE_BY_INVXDATA:
     factor=1.0/tinfo->xdata[indata.current_vector];
     break;
    /* all constant-factor types share the precomputed value */
    case SCALE_BY_PI:
    case SCALE_BY_INVPI:
    case SCALE_BY_SFREQ:
    case SCALE_BY_INVSFREQ:
    case SCALE_BY_NR_OF_POINTS:
    case SCALE_BY_INVNR_OF_POINTS:
    case SCALE_BY_NR_OF_CHANNELS:
    case SCALE_BY_INVNR_OF_CHANNELS:
    case SCALE_BY_NROFAVERAGES:
    case SCALE_BY_INVNROFAVERAGES:
    case SCALE_BY_SQRTNROFAVERAGES:
    case SCALE_BY_INVSQRTNROFAVERAGES:
    case SCALE_BY_FACTOR:
     factor=local_arg->factor;
     break;
    default:
     continue;  /* unknown type: skip to the loop condition */
   }
   array_scale(&indata, factor);
  } while (indata.message!=ARRAY_ENDOFSCAN);
 }
 return tinfo->tsdata;
}