static CYTHON_INLINE long ZZX_degree(struct ZZX* x) { return deg(*x); }
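/* Usage note (added; this is standard NTL behaviour, not something stated in the
   wrapper above): NTL's deg() returns -1 for the zero polynomial, so a negative
   result from ZZX_degree means "no terms" rather than an error. A minimal sketch,
   assuming NTL is available; ZZX_degree_demo is a hypothetical name: */
#include <NTL/ZZX.h>
static long ZZX_degree_demo()
{
    NTL::ZZX p;                 // p == 0, so deg(p) == -1
    NTL::SetCoeff(p, 3, 1);     // p = x^3
    return deg(p);              // returns 3 (deg found via ADL as NTL::deg)
}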
void PAlgebraModDerived<type>::mapToSlots(MappingData<type>& mappingData, const RX& G) const { assert(deg(G) > 0 && zMStar.getOrdP() % deg(G) == 0); assert(LeadCoeff(G) == 1); mappingData.G = G; mappingData.degG = deg(G); long nSlots = zMStar.getNSlots(); long m = zMStar.getM(); mappingData.maps.resize(nSlots); mapToF1(mappingData.maps[0],G); // mapping from base-G to base-F1 for (long i=1; i<nSlots; i++) mapToFt(mappingData.maps[i], G, zMStar.ith_rep(i), &(mappingData.maps[0])); if (deg(G)==1) return; REBak bak; bak.save(); RE::init(G); mappingData.contextForG.save(); mappingData.rmaps.resize(nSlots); if (G == factors[0]) { // an important special case for (long i = 0; i < nSlots; i++) { long t = zMStar.ith_rep(i); long tInv = InvMod(t, m); RX ct_rep; PowerXMod(ct_rep, tInv, G); RE ct; conv(ct, ct_rep); REX Qi; SetCoeff(Qi, 1, 1); SetCoeff(Qi, 0, -ct); mappingData.rmaps[i] = Qi; } } else { // the general case: currently only works when r == 1 assert(r == 1); vec_REX FRts; for (long i=0; i<nSlots; i++) { // We need to lift Fi from R[Y] to (R[X]/G(X))[Y] REX Qi; long t, tInv=0; if (i == 0) { conv(Qi,factors[i]); FRts=EDF(Qi, FrobeniusMap(Qi), deg(Qi)/deg(G)); // factor Fi over GF(p)[X]/G(X) } else { t = zMStar.ith_rep(i); tInv = InvMod(t, m); } // need to choose the right factor, the one that gives us back X long j; for (j=0; j<FRts.length(); j++) { // lift maps[i] to (R[X]/G(X))[Y] and reduce mod j'th factor of Fi REX FRtsj; if (i == 0) FRtsj = FRts[j]; else { REX X2tInv = PowerXMod(tInv, FRts[j]); IrredPolyMod(FRtsj, X2tInv, FRts[j]); } // FRtsj is the jth factor of factors[i] over the extension field. // For j > 0, we save some time by computing it from the jth factor // of factors[0] via a minimal polynomial computation. REX GRti; conv(GRti, mappingData.maps[i]); GRti %= FRtsj; if (IsX(rep(ConstTerm(GRti)))) { // is GRti == X? Qi = FRtsj; // If so, we found the right factor break; } // If this does not happen then move to the next factor of Fi } assert(j < FRts.length()); mappingData.rmaps[i] = Qi; } } }
void save_screen_image_and_viewpoint_data (void) { char filename[100], large_image_filename[100], small_image_filename[100], viewpoint_data_filename[100]; FILE *fp; int x_sec, z_sec; // // find first screen shot index // if (!found_first_screen_shot_index) { while (TRUE) { sprintf (large_image_filename, "%sIMAGE%03d.TGA", LARGE_IMAGE_PATH, screen_shot_index); if (file_exist (large_image_filename)) { screen_shot_index++; if (screen_shot_index == 1000) { break; } } else { found_first_screen_shot_index = TRUE; break; } } } // // write screen files and viewpoint data file // if (screen_shot_index <= MAX_SCREEN_SHOT_INDEX) { sprintf (filename, "IMAGE%03d", screen_shot_index); debug_log ("Saving screen image (%s)", filename); sprintf (large_image_filename, "%s%s.TGA", LARGE_IMAGE_PATH, filename); sprintf (small_image_filename, "%s%s.TGA", SMALL_IMAGE_PATH, filename); sprintf (viewpoint_data_filename, "%s%s.TXT", VIEWPOINT_DATA_PATH, filename); //////////////////////////////////////// if (lock_screen (video_screen)) { save_tga_screen_with_thumbnail (large_image_filename, small_image_filename); unlock_screen (video_screen); } //////////////////////////////////////// fp = safe_fopen (viewpoint_data_filename, "w"); fprintf (fp, "Image viewpoint data:\n\n"); fprintf (fp, "Map : unknown\n"); fprintf (fp, "X : %.2f\n", main_vp.x); fprintf (fp, "Y : %.2f\n", main_vp.y); fprintf (fp, "Z : %.2f\n", main_vp.z); get_terrain_3d_sector (main_vp.x, main_vp.z, &x_sec, &z_sec); fprintf (fp, "X sector (3D) : %d\n", x_sec); fprintf (fp, "Z sector (3D) : %d\n", z_sec); get_x_sector (x_sec, main_vp.x); get_z_sector (z_sec, main_vp.z); fprintf (fp, "X sector (AI) : %d\n", x_sec); fprintf (fp, "Z sector (AI) : %d\n", z_sec); fprintf (fp, "Heading (degs): %.2f\n", deg (get_heading_from_attitude_matrix (main_vp.attitude))); fprintf (fp, "Pitch (degs) : %.2f\n", deg (get_pitch_from_attitude_matrix (main_vp.attitude))); fprintf (fp, "Roll (degs) : %.2f\n", deg (get_roll_from_attitude_matrix (main_vp.attitude))); safe_fclose (fp); //////////////////////////////////////// screen_shot_index++; } else { debug_colour_log (DEBUG_COLOUR_RED, "Exceeded screen image limit"); } }
#include <iostream>
using namespace std;

int main()
{
    for (int i = 1; i < 23; i++)
        if (deg(i, 23) > 20)
            cout << i << " has deg: " << deg(i, 23) << endl;
    return 0;
}
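/* The snippet above uses deg(i, 23) without defining it; from the usage (searching
   1..22 for elements of degree greater than 20 modulo the prime 23) it reads as the
   multiplicative order of i mod 23, i.e. the loop prints the primitive roots mod 23.
   A minimal sketch of such a helper, which would have to be declared before main()
   (an assumption, not the original definition): */
static int deg(int a, int n)        // hypothetical: multiplicative order of a mod n
{
    long long x = 1;
    for (int k = 1; k <= n; ++k) {
        x = (x * a) % n;
        if (x == 1) return k;       // smallest k with a^k == 1 (mod n)
    }
    return 0;                       // a is not coprime to n, so it has no finite order
}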
size_t BronKerbosch::computeDegeneracy(NodeList& order) { // Requires O(|V| + |E|) time order.clear(); typedef typename Graph::template NodeMap<size_t> DegNodeMap; typedef typename Graph::template NodeMap<typename NodeList::iterator> NodeListItMap; BoolNodeMap present(_g, true); DegNodeMap deg(_g, 0); size_t maxDeg = 0; NodeListItMap it(_g); // compute node degrees, O(|E|) time for (NodeIt v(_g); v != lemon::INVALID; ++v) { size_t d = 0; for (IncEdgeIt e(_g, v); e != lemon::INVALID; ++e, ++d); deg[v] = d; if (d > maxDeg) maxDeg = d; } // fill T, O(d) time NodeListVector T(maxDeg + 1, NodeList()); for (NodeIt v(_g); v != lemon::INVALID; ++v) { size_t d = deg[v]; T[d].push_front(v); it[v] = T[d].begin(); } size_t degeneracy = 0; // O(|V|) time, Eppstein et al. (2010) const size_t n = T.size(); size_t i = 0; while (i < n) { NodeList& l = T[i]; if (T[i].size() > 0) { Node v = l.front(); l.pop_front(); order.push_back(v); present[v] = false; if (deg[v] > degeneracy) { degeneracy = deg[v]; } //std::cout << "Removed " << _g.id(v) << std::endl; for (IncEdgeIt e(_g, v); e != lemon::INVALID; ++e) { Node w = _g.oppositeNode(v, e); if (present[w]) { size_t deg_w = deg[w]; typename NodeList::iterator it_w = it[w]; T[deg_w - 1].splice(T[deg_w - 1].begin(), T[deg_w], it_w); deg[w]--; } } i = 0; } else { ++i; } } //std::cerr << "Degeneracy: " << degeneracy << std::endl; return degeneracy; }
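/* Added standalone sketch of the same bucket-queue idea used above, free of the
   LEMON template machinery (an illustration, not the class's code): repeatedly
   remove a vertex of minimum remaining degree; the largest removal-time degree is
   the degeneracy, and the removal order is a degeneracy ordering. */
#include <vector>
#include <list>
#include <cstddef>

static std::size_t degeneracy_order(const std::vector<std::vector<int> >& adj,
                                    std::vector<int>& order)
{
    const std::size_t n = adj.size();
    std::vector<std::size_t> deg(n);
    std::size_t maxDeg = 0;
    for (std::size_t v = 0; v < n; ++v) {
        deg[v] = adj[v].size();
        if (deg[v] > maxDeg) maxDeg = deg[v];
    }
    std::vector<std::list<int> > buckets(maxDeg + 1);        // buckets[d] holds vertices of degree d
    std::vector<std::list<int>::iterator> pos(n);
    std::vector<bool> present(n, true);
    for (std::size_t v = 0; v < n; ++v) {
        buckets[deg[v]].push_front(static_cast<int>(v));
        pos[v] = buckets[deg[v]].begin();
    }
    std::size_t k = 0;                                       // degeneracy found so far
    order.clear();
    for (std::size_t removed = 0; removed < n; ++removed) {
        std::size_t i = 0;
        while (buckets[i].empty()) ++i;                      // smallest non-empty bucket
        int v = buckets[i].front();
        buckets[i].pop_front();
        present[v] = false;
        order.push_back(v);
        if (i > k) k = i;
        for (std::size_t j = 0; j < adj[v].size(); ++j) {
            int w = adj[v][j];
            if (!present[w]) continue;
            buckets[deg[w]].erase(pos[w]);                   // move w one bucket down
            --deg[w];
            buckets[deg[w]].push_front(w);
            pos[w] = buckets[deg[w]].begin();
        }
    }
    return k;
}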
polynom& minpoly(matrix const& A, basis const& V, gaussinfo const& U, polynom& m, vektor& v, vector<K>& f)
{
    unsigned int dim_V = V.size(), i;   // Number of vectors spanning V. V may also be a subspace.
    unsigned int dim_U = U.A.size();    // Number of vectors spanning U ( <= V ).
    vector<polynom> ordpol(dim_V);      // Order polynomials of the vectors of the basis V.
    polynom c, d, C, D, D2, t, T;       // These polynomials are needed by the algorithm below.
    vektor ret, ret2;                   // These are merely dummy variables used for the calls to mult below.

    // A few sanity checks
    if (A[0].size() != A.size()) {      // Is the matrix square?
        cout << "Error in minpoly(...):\nThe matrix A is not square.\n";
        exit(1);
    }
    if (V[0].size() != A.size()) {      // Do the matrix and the basis vectors have the same dimension?
        cout << "Error in minpoly(...):\nThe matrix A and the vectors of the basis V do not have the same dimension.\n";
        exit(1);
    }

    // Preparation: compute the order polynomial of every vector of the basis V.
    for (i = 0; i < dim_V; i++) {
        ordpoly(A, V[i], U, ordpol[i], f);
    }

    // Initialisation for the algorithm
    m = ordpol[0];
    v = V[0];

    // This is the actual algorithm that combines the order polynomials into the
    // minimal polynomial. It could be much simpler if one did not also want a
    // vector v whose order polynomial is the minimal polynomial.
    for (i = 1; i < dim_V; i++) {
        if (deg(ordpol[i]) == 0) continue;
        c = m;
        d = ordpol[i];
        t = ggT(c, d);
        algo_r(c, d / t, C);
        algo_r(d, c / t, D);
        T = ggT(C, D);
        D2 = D / T;
        m = C * D2;
        v = mult(c / C, A, v, ret) + mult(d / D2, A, V[i], ret2);
        if (deg(m) == dim_V - dim_U) break;  // The minimal polynomial cannot get any larger.
    }
    normiere_poly(m);
    if (U.A.size() > 0) ordpoly(A, v, U, ordpol[0], f);
    // This call is made only to compute f; it makes sense only when U.A is
    // non-empty. ordpol[0] is merely a dummy here.
    return m;
}
Travel_Time_Function_Output uniform_time_table_interpolate(Ray_Endpoints x, char *phase, int mode) { double delta; /* epicentral distance in radians */ double azimuth; /* source to receiver azimuth angle (radians) */ Travel_Time_Function_Output o; XZ_table_uniform *ttable, *utable; double f_ll,f_hl,f_lh,f_hh; int ix_low, iz_low, ix_high, iz_high; double x_low, z_low, x_high, z_high; double u; /* Slowness in s/km interpolated from tables */ double vsource; /* Source depth velocity interpolated from velocity depth vector */ double vl, vh; /* temporaries */ double theta; /* emergence angle used for elevation correction calculation */ int discontinuity; /* First we compute the epicentral distance and azimuth */ dist(rad(x.slat), rad(x.slon), rad(x.rlat), rad(x.rlon), &delta, &azimuth); /* Look up the correct table for this phase */ ttable = (XZ_table_uniform *) getarr(time_tables_uniform,phase); utable = (XZ_table_uniform *) getarr(slow_tables_uniform,phase); if( (ttable == NULL) || (utable == NULL) ) { char e[80]; sprintf(e,"No travel time tables for phase %s",phase); o = set_time_table_error(e); return(o); } /* compute the indices assuming table is tabulated in degrees */ ix_low = (int) ((deg(delta)-ttable->x0)/ttable->dx); iz_low = (int) ((x.sz-ttable->z0)/ttable->dz); ix_high = ix_low + 1; iz_high = iz_low + 1; if( (ix_high >= ttable->nx) || (iz_high >= ttable->nz) || (ix_low <0) || (iz_low<0) ) { o = set_time_table_error("Requested point is outside table"); return(o); } x_low = ((double)ix_low)*((ttable->dx))+ttable->x0; x_high = ((double)ix_high)*((ttable->dx))+ttable->x0; z_low = ((double)iz_low)*((ttable->dz))+ttable->z0; z_high = ((double)iz_high)*((ttable->dz))+ttable->z0; /* Now we have to handle the general problem of of how to handle discontinuities in the table. These occur in earth models in three forms: (1) crossovers -- marked as points with continuous travel times, but discontinuous slopes, (2) termination of named branch, and (3) discontinuities in a generic branch like P (e.g. Pdiff passing to PkiKP in the core shadow.) 
The handling of these is described above in the code for check_discontinuity */ f_ll = ttable->values[ix_low][iz_low]; f_hl = ttable->values[ix_high][iz_low]; f_lh = ttable->values[ix_low][iz_high]; f_hh = ttable->values[ix_high][iz_high]; discontinuity = check_discontinuity(ttable->branch,ix_low, iz_low); switch (discontinuity) { case NOWAY: o.time = TIME_INVALID; o.dtdx = 0.0; o.dtdy = 0.0; o.dtdz = 0.0; return(o); case NO_PROBLEM: case NO_PROBLEM_VALUE: o.time = serendipity(x_low, z_low, x_high, z_high, f_ll, f_hl, f_lh, f_hh, deg(delta), x.sz); break; case TWO_LOW: o.time = interpolate_discontinuity_twopoint(x_low, z_low, x_high, z_high, f_ll, f_lh, ttable->slopes[ix_low][iz_low], ttable->slopes[ix_low][iz_high], deg(delta), x.sz); break; case ONE_LL: o.time = interpolate_discontinuity_onepoint(x_low,f_ll, ttable->slopes[ix_low][iz_low],deg(delta)); break; case ONE_LH: o.time = interpolate_discontinuity_onepoint(x_low,f_lh, ttable->slopes[ix_low][iz_high],deg(delta)); } if(mode == RESIDUALS_ONLY) { o.dtdx = 0.0; o.dtdy = 0.0; o.dtdz = 0.0; return(o); } /* Now we turn to the travel time drivatives which we calculate from the slopes table */ f_ll = utable->values[ix_low][iz_low]; f_hl = utable->values[ix_high][iz_low]; f_lh = utable->values[ix_low][iz_high]; f_hh = utable->values[ix_high][iz_high]; discontinuity = check_discontinuity(utable->branch,ix_low, iz_low); switch (discontinuity) { case NOWAY: u = SLOWNESS_INVALID; break ; case NO_PROBLEM: case NO_PROBLEM_VALUE: u = serendipity(x_low, z_low, x_high, z_high, f_ll, f_hl, f_lh, f_hh, deg(delta), x.sz); break; case TWO_LOW: u = interpolate_discontinuity_twopoint(x_low, z_low, x_high, z_high, f_ll, f_lh, utable->slopes[ix_low][iz_low], utable->slopes[ix_low][iz_high], deg(delta), x.sz); break; case ONE_LL: u = interpolate_discontinuity_onepoint(x_low,f_ll, utable->slopes[ix_low][iz_low],deg(delta)); break; case ONE_LH: u = interpolate_discontinuity_onepoint(x_low,f_lh, utable->slopes[ix_low][iz_high],deg(delta)); } /* We store the u grid in units of s/km so we do not have to convert to travel time derivatives. Init procedure must handle this consistently. First get the velocity at the source depth using a simple linear interpolation. */ vl = ttable->velocity[iz_low]; vh = ttable->velocity[iz_high]; vsource = vl + (x.sz - ((double)iz_low*((ttable->dz)))) *(vh - vl)/(ttable->dz); /* The x and y derivatives are now simple */ o.dtdx = - u*sin(azimuth); o.dtdy = - u*cos(azimuth); o.dtdz = cos(asin(u*vsource))/vsource; /* We have to flip the sign of dtdz for upward traveling rays */ if(ttable->branch[ix_high][iz_high] == UPWARD) o.dtdz = - o.dtdz; /* The following is seriously restrictive, but the alternative is to add near surface velocity as a seperate parameter for each station. This is rarely known, and probably better handled with a better travel time calculator function when it makes a difference. That is, we will assume two things: (1) ttable->velocity[0] is an appropriate velocity to use for elevation corrections. (2) We are far enough away that we can use a simple correction based on dt/ddelta and some simple trigonometry. This approximation will be incorrect when we are very close to the source and p varies rapidly with distance. This is not the situations where tables like this should be used anyway. The first term is a correction along the horizontal plane for the projection of the elevation correction ray path to datum. The second term is adding back the time along the slant path elevation correction. 
Note the -x.rz is used because rz is a "depth" not an elevation. */ theta = asin(u*ttable->velocity[0]); o.time -= u*(-x.rz)*tan(theta); o.time += (-x.rz)/((ttable->velocity[0])*cos(theta)); return(o); }
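/* Added note on the elevation correction above (pure algebra, no new inputs): with
   sin(theta) = u * v0, where v0 = ttable->velocity[0], the two terms combine as

       (-x.rz)/(v0*cos(theta)) - u*(-x.rz)*tan(theta)
         = (-x.rz)*(1 - u*v0*sin(theta)) / (v0*cos(theta))
         = (-x.rz)*cos(theta)/v0,

   i.e. the station elevation (-x.rz) times the vertical slowness cos(theta)/v0,
   which is the usual one-way elevation correction for a plane wave arriving with
   horizontal slowness u. */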
static int bbs_main(char *argv) { #define BBS_MAIN_EXIT(time) do{local_Net_Sleep(time);shutdown(0,2);close(0);return -1;}while(0) FILE *fp; struct stat st; struct rlimit rl; char buf[256]; #ifndef DEBUG if (strcmp(getSession()->fromhost,"0.0.0.0")&&strcmp(getSession()->fromhost,"127.0.0.1") &&((fp=fopen("NOLOGIN","r")))) { while (fgets(buf,256,fp)) local_prints("%s",buf); fclose(fp); BBS_MAIN_EXIT(20); } #endif /* ! DEBUG */ #ifdef LOAD_LIMIT if (!stat("NO_LOAD_LIMIT",&st)&&S_ISREG(st.st_mode)) { double load,cpu_load[3]; get_load(cpu_load); load=cpu_load[0]; local_prints("CPU 最近 (1,10,15) 分钟的平均负荷分别为 %.2f, %.2f, %.2f (目前上限 = %d)\r\n", cpu_load[0],cpu_load[1],cpu_load[2],max_load); if ((load<0)||(load>max_load)) { local_prints("%s\r\n\r\n%s\r\n%s\r\n", "很抱歉, 目前 CPU 负荷过重, 请稍候再来", "因为重复连接对本站冲击太大, 请您配合, 不要重复多次连接", "请您先休息 10 分钟, 然后再连接本站, 非常感谢!"); BBS_MAIN_EXIT(((time_t)load)); } #ifdef AIX { int free=psdanger(-1); int safe=psdanger(SIGDANGER); int danger=125000; local_prints("RAM 当前空闲页数高出警戒阈值 %d (警戒阈值 = %d)\r\n\r\n",safe,(free-safe)); if (safe<danger) { if ((server_pid!=-1)&&(!heavy_load)) kill(server_pid,SIGUSR1); local_prints("%s\r\n\r\n%s\r\n%s\r\n", "很抱歉, 目前 RAM 被过度使用, 请稍候再来", "因为重复连接对本站冲击太大, 请您配合, 不要重复多次连接", "请您先休息 10 分钟, 然后再连接本站, 非常感谢!"); BBS_MAIN_EXIT(60); } if ((server_pid!=-1)&&heavy_load) kill(server_pid,SIGUSR2); } #endif /* AIX */ } #endif /* LOAD_LIMIT */ #ifdef BBSRF_CHROOT if (chroot(BBSHOME)==-1) { local_prints("Error while chroot to %s, exiting ...\r\n",BBSHOME); return -1; } #endif /* BBSRF_CHROOT */ #ifdef SECONDSITE #define deg(x...) if (!strncmp(getSession()->fromhost, "10.", 3)) { char ipbuf[16]; int len=0; while ((ipbuf[len] = igetkey()) != '\n') { deg("%d:%d\n",getpid(), ipbuf[len]); len++; if (len >= 15) break; } ipbuf[len]='\0'; strcpy(getSession()->fromhost, ipbuf); frommain=1; } #endif /* SECONDSITE */ getSession()->fromhost[IPLEN-1]=0; *buf = 0; if (check_ban_IP(getSession()->fromhost,buf)>0) { local_prints("本站目前不欢迎来自 %s 访问!\r\n原因: %s\r\n\r\n",getSession()->fromhost,buf); BBS_MAIN_EXIT(60); } #ifdef HAVE_REVERSE_DNS getremotehost(getSession()->fromhost,IPLEN); #endif /* HAVE_REVERSE_DNS */ if (stat("core",&st)==-1) { rl.rlim_cur=125829120; /* 120M */ rl.rlim_max=209715200; /* 200M */ setrlimit(RLIMIT_CORE,&rl); } main_bbs(0,argv); return -1; #undef BBS_MAIN_EXIT }
/* This function creates the mwslow table output of mwap. This table hold slowness vector estimates. arguments: phase - name of seismic phase u - estimated slowness vector t0 - start time at reference station twin - length of analysis time window relative to t0 array - array name to use in table for station key field evid - css3.0 evid of parent data bankid - defines unique multiwavelet bank (could be extracted from traces of gather, but it is so deep in indirection it gets ridiculous) fc - center frequency (in Hz) of this wavelet bank. fwin - bandwidth (in Hz) of this wavelet C - 3x3 covariance matrix estimate for this slowness vector (ux,uy, t order assumed ) cohtype - type of coherence measure used (mapped in multiwavelet.h) peakcm - value of coherence measure for this fc and evid. db - output database Returns 0 if dbaddv was successful, -1 if dbaddv fails. Author: G Pavlis Written: March 2000 */ int MWdb_save_slowness_vector(char *phase, MWSlowness_vector *u, double t0, double twin, char *array, int evid, int bankid, double fc, double fwin, double *C, int nsta, int ncomp, int cohtype, double peakcm, Dbptr db) { double slo, azimuth, cxx, cyy, cxy; char cmeasure[2]; slo = hypot(u->ux,u->uy); azimuth = atan2(u->ux,u->uy); azimuth = deg(azimuth); db = dblookup(db,0,"mwslow",0,0); switch(cohtype) { case(USE_COHERENCE): strcpy(cmeasure,"c"); break; case(USE_SEMBLANCE): default: strcpy(cmeasure,"s"); } if( dbaddv(db,0,"sta",array, "evid",evid, "bankid",bankid, "phase", phase, "fc",fc, "fwin",fwin, "time",t0, "twin",twin, "slo",slo, "azimuth",azimuth, "cxx",C[0], "cyy",C[4], "cxy",C[1], "nsta",nsta, "ncomp",ncomp, "cohtype",cmeasure, "cohmeas",peakcm, "algorithm","mwap",0) < 0) { elog_notify(0, "dbaddv error for mwslow table on evid %d fc=%lf\n", evid,fc); return(-1); } else return(0); }
void compute_location(Location_options o, RTlocate_Options rtopts, Arr *stations, Arr *arrays, Arr *phases, Pf *pf, Dbptr master_db, Dbptr dbtmp, ORB_Hypocenter hyp, int orbout) { Tbl *ta,*tu; /* Arrival and slowness tables respectively */ Hypocenter h0; int ret_code; Tbl *converge_history,*reason_converged,*residual; Hypocenter *hypo; int niterations; char *vmodel; int i; char *s; int orid; Point origin; double delta, seaz; double **C; float *emodel; int nass; initialize_hypocenter(&h0); /* It is inefficient to reread these from the parameter space on each entry, but preferable to a burdensome argument list */ origin.lat = pfget_double(pf,"center_latitude"); origin.lon = pfget_double(pf,"center_longitude"); origin.z = 0.0; /* This routine translates hyp structure to return tbl of arrival object pointers */ ta = orbhypo_to_genloc(&hyp,phases,stations); /* this is a pure place holder */ tu = newtbl(0); vmodel = pfget_string(pf,"velocity_model_name"); /* By default we use the location transmitted by orbassoc. This can be overriden in the parameter file by using the other options allowed in genloc*/ s=pfget_string(pf,"initial_location_method"); h0.lat = hyp.lat; h0.lon = hyp.lon; h0.z = hyp.depth; h0.time = hyp.time; /* this strange logic is to allow this parameter to be defaulted. If the "initial_location_method" is not defined, or set to "manual", we use the location given by orbassoc. Otherwise we utilize genlocs suite of initial locate options. */ if(s != NULL) if(strcmp(s,"manual")) h0 = initial_locate(ta, tu, o, pf); /* Now compute distance from origin, and process only if the event falls in the specified range */ dist(rad(origin.lat),rad(origin.lon),rad(h0.lat),rad(h0.lon), &delta,&seaz); delta = deg(delta); /* this is the distance sifting test to ignore things outside specified distance range */ if( ((delta>=rtopts.minimum_distance) && (delta <= rtopts.maximum_distance)) ) { /* Location with Generic Gauss_Newton code */ orid = -1; nass = maxtbl(ta); ret_code = ggnloc(h0,ta,tu,o, &converge_history,&reason_converged,&residual); if(ret_code < 0) { elog_notify (0,"ggnloc failed to produce a solution for evid %d\n",hyp.evid); } else { if(ret_code > 0) elog_notify(0,"Warning: %d travel time calculator failures in ggnloc\nSolution ok for evid %d\n", ret_code,hyp.evid); C = dmatrix(0,3,0,3); emodel = (float *) calloc(4,sizeof(float)); if((emodel == NULL) || (*C == NULL) ) die(0,"Malloc error for error arrays\n"); niterations = maxtbl(converge_history); hypo = (Hypocenter *)gettbl(converge_history, niterations-1); predicted_errors(*hypo, ta, tu, o, C, emodel); orid = save_origin(nass,hyp.evid,master_db, dbtmp,*hypo,o,orbout); save_origerr(orid,*hypo,C,dbtmp,orbout); save_assoc(ta, tu, orid, vmodel, *hypo, dbtmp,orbout); elog_notify(0,"orid %d converged in %d iterations\n", orid,niterations); elog_notify(0,"Reason(s) for convergence: \n"); for(i=0;i<maxtbl(reason_converged);++i) elog_notify(0,"%s",gettbl(reason_converged,i)); elog_notify(0,"\n"); s=format_hypo(hypo); elog_notify(0,"%s\n",s); free(emodel); free_matrix((char **)C,0,3,0); free(s); } write_to_logfile(rtopts, orid, hyp.evid, pf, converge_history, reason_converged,residual); if(maxtbl(converge_history)>0)freetbl(converge_history,free); if(maxtbl(reason_converged)>0)freetbl(reason_converged,free); if(maxtbl(residual)>0)freetbl(residual,free); } destroy_data_tables(ta, tu); return; }
int main()
{
  // Define a random number generator and initialize it with a reproducible
  // seed.
  base_generator_type generator(42);

  std::cout << "10 samples of a uniform distribution in [0..1):\n";

  // Define a uniform random number distribution which produces "double"
  // values between 0 and 1 (0 inclusive, 1 exclusive).
  boost::uniform_real<> uni_dist(0,1);
  boost::variate_generator<base_generator_type&, boost::uniform_real<> > uni(generator, uni_dist);

  std::cout.setf(std::ios::fixed);
  // You can now retrieve random numbers from that distribution by means
  // of a STL Generator interface, i.e. calling the generator as a zero-
  // argument function.
  for(int i = 0; i < 10; i++)
    std::cout << uni() << '\n';

  /*
   * Change seed to something else.
   *
   * Caveat: std::time(0) is not a very good truly-random seed. When
   * called in rapid succession, it could return the same values, and
   * thus the same random number sequences could ensue. If not the same
   * values are returned, the values differ only slightly in the
   * lowest bits. A linear congruential generator with a small factor
   * wrapped in a uniform_smallint (see experiment) will produce the same
   * values for the first few iterations. This is because uniform_smallint
   * takes only the highest bits of the generator, and the generator itself
   * needs a few iterations to spread the initial entropy from the lowest bits
   * to the whole state.
   */
  generator.seed(static_cast<unsigned int>(std::time(0)));

  std::cout << "\nexperiment: roll a die 10 times:\n";

  // You can save a generator's state by copy construction.
  base_generator_type saved_generator = generator;

  // When calling other functions which take a generator or distribution
  // as a parameter, make sure to always call by reference (or pointer).
  // Calling by value invokes the copy constructor, which means that the
  // sequence of random numbers at the caller is disconnected from the
  // sequence at the callee.
  experiment(generator);

  std::cout << "redo the experiment to verify it:\n";
  experiment(saved_generator);

  // After that, both generators are equivalent
  assert(generator == saved_generator);

  // as a degenerate case, you can set min = max for uniform_int
  boost::uniform_int<> degen_dist(4,4);
  boost::variate_generator<base_generator_type&, boost::uniform_int<> > deg(generator, degen_dist);
  std::cout << deg() << " " << deg() << " " << deg() << std::endl;

  {
    // You can save the generator state for future use. You can read the
    // state back in at any later time using operator>>.
    std::ofstream file("rng.saved", std::ofstream::trunc);
    file << generator;
  }

  return 0;
}
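// Added sketch (not part of the original example): the state written to "rng.saved"
// above can be restored later with operator>>, after which the generator continues
// the exact same sequence. base_generator_type is assumed to be the typedef used by
// this demo program; restore_generator_state is a hypothetical name.
#include <fstream>
static void restore_generator_state(base_generator_type& gen)
{
  std::ifstream file("rng.saved");
  if (file)
    file >> gen;   // reads back the state previously saved with operator<<
}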
void save_assoc(Tbl *ta, Tbl *tu, int orid, char *vmodel, Hypocenter hypo, Dbptr db, int orb) { double delta; double seaz; double esaz; double azres; double slores; Arr *u_arr; char key_arid[20]; Tbl *udregs; int i,n; double ux, uy, azimuth; double duphi; Arrival *a; Slowness_vector *u; /* We build an associative array keyed to arid for all the slowness vector measurements. Then in the loop below we can efficiently find any slowness vectors associated with the same arid as an Arrival. The overhead in this is significant, but it makes it completely general and open ended. */ n = maxtbl(tu); u_arr = newarr(0); for(i=0;i<n;i++) { Slowness_vector *utmp; utmp = (Slowness_vector *)gettbl(tu,i); sprintf(key_arid,"%d",utmp->arid); setarr(u_arr,key_arid,utmp); } db = dblookup(db,0,"assoc",0,0); db.record = dbSCRATCH; n=maxtbl(ta); for(i=0;i<n;i++) { a=(Arrival*)gettbl(ta,i); dist(rad(hypo.lat),rad(hypo.lon), rad(a->sta->lat),rad(a->sta->lon),&delta,&esaz); dist(rad(a->sta->lat),rad(a->sta->lon), rad(hypo.lat),rad(hypo.lon),&delta,&seaz); sprintf(key_arid,"%d",a->arid); u = (Slowness_vector *) getarr(u_arr,key_arid); if(u == NULL) { if(dbputv(db,0, "orid",orid, "arid",a->arid, "sta",a->sta->name, "phase",a->phase->name, "delta",deg(delta), "seaz",deg(seaz), "esaz",deg(esaz), "timeres",(double)a->res.raw_residual, "timedef","d", "vmodel",vmodel, "wgt",(double)a->res.residual_weight, 0)<0) { complain(0, "Can't add assoc record for station %s arid = %d orid = %d to working db scratch record\nRecord skipped and not saved anywhere\n", a->sta->name,a->arid,orid); continue; } } else { slores = deg2km(sqrt(sqr(u->xres.raw_residual) + sqr(u->yres.raw_residual))); azimuth = atan2 ( u->uy, u->ux ) ; duphi = (u->ux*cos(azimuth) - u->uy*sin(azimuth)) / sqrt(sqr(u->ux)+ sqr(u->uy)) ; azres = deg(duphi); if(dbputv(db,"assoc", "orid",orid, "arid",a->arid, "sta",a->sta->name, "phase",a->phase->name, "delta",deg(delta), "seaz",deg(seaz), "esaz",deg(esaz), "timeres",(double)a->res.raw_residual, "timedef","d", "vmodel",vmodel, "slores",slores, "slodef","d", "azres",azres, "azdef","d", "wgt",(double)a->res.residual_weight, 0)<0) { complain(0, "Can't add assoc record for station %s arid = %d orid = %d to working db scratch record\nRecord skipped and not saved anywhere\n", a->sta->name,a->arid,orid); delarr(u_arr,key_arid); continue; } /* We delete this entry from u_arr, then we can scan below for the dregs easily */ delarr(u_arr,key_arid); } if(save_dbrecord(db,orb)) complain(0,"Error saving assoc record for arid %d\n", a->arid); } /* Since it is possible that slowness vectors can be measured with no arrival time, we need to take care of that possibility. We do that by checking for dregs in u_arr not removed with delarr calls above */ udregs = keysarr(u_arr); n = maxtbl(udregs); for(i=0;i<n;i++) { char *key; key = gettbl(udregs,i); u = (Slowness_vector *) getarr(u_arr,key); dist(rad(hypo.lat),rad(hypo.lon), rad(u->array->lat),rad(u->array->lon),&delta,&esaz); dist(rad(u->array->lat),rad(u->array->lon), rad(hypo.lat),rad(hypo.lon),&delta,&seaz); slores = deg2km(sqrt(sqr(u->xres.raw_residual) + sqr(u->yres.raw_residual))); azimuth = atan2 ( u->uy, u->ux ) ; duphi = (u->ux*cos(azimuth) - u->uy*sin(azimuth)) / sqrt(sqr(u->ux)+ sqr(u->uy)) ; azres = deg(duphi); /* The residual weight extraction from the ux component is not ideal here because it could be wrong. 
It is unavoidable due to polar-cartesian conversion */ if(dbputv(db,"assoc", "orid",orid, "arid",u->arid, "sta",u->array->name, "phase",u->phase->name, "delta",deg(delta), "seaz",deg(seaz), "esaz",deg(esaz), "timedef","n", "vmodel",vmodel, "slores",slores, "slodef","d", "azres",azres, "azdef","d", "wgt",(double)u->xres.residual_weight, 0)<0) { complain(0,"Can't add assoc record for array slowness vector with %s arid = %d and orid = %d to working db scratch record\nNothing saved\n", u->array->name,u->arid,orid); continue; } if(save_dbrecord(db,orb)) complain(0,"Error saving assoc record for arid %d\n", u->arid); } /* We must not use regular free here, or later we could try to free the same area twice. That is, u_tmp contains keyed version of the pointers stored in tu. This releases only the Arr structures, but leaves the pointers to be freed later. I've never seen a better example of the need for a decent garbage collection system. */ freetbl(udregs,free_nothing); freearr(u_arr,free_nothing); }
float gsl_to_degrees (float angle) { return deg(angle); }
void inv(zz_pE& d, Mat<zz_pE>& X, const Mat<zz_pE>& A) { long n = A.NumRows(); if (A.NumCols() != n) LogicError("inv: nonsquare matrix"); if (n == 0) { set(d); X.SetDims(0, 0); return; } const zz_pXModulus& G = zz_pE::modulus(); zz_pX t1, t2; zz_pX pivot; zz_pX pivot_inv; Vec< Vec<zz_pX> > M; // scratch space M.SetLength(n); for (long i = 0; i < n; i++) { M[i].SetLength(n); for (long j = 0; j < n; j++) { M[i][j].SetMaxLength(2*deg(G)-1); M[i][j] = rep(A[i][j]); } } zz_pX det; det = 1; Vec<long> P; P.SetLength(n); for (long k = 0; k < n; k++) P[k] = k; // records swap operations zz_pContext zz_p_context; zz_p_context.save(); double sz = zz_pE_SizeInWords(); bool seq = double(n)*double(n)*sz*sz < PAR_THRESH; bool pivoting = false; for (long k = 0; k < n; k++) { long pos = -1; for (long i = k; i < n; i++) { rem(pivot, M[i][k], G); if (pivot != 0) { InvMod(pivot_inv, pivot, G); pos = i; break; } } if (pos != -1) { if (k != pos) { swap(M[pos], M[k]); negate(det, det); P[k] = pos; pivoting = true; } MulMod(det, det, pivot, G); { // multiply row k by pivot_inv zz_pX *y = &M[k][0]; for (long j = 0; j < n; j++) { rem(t2, y[j], G); MulMod(y[j], t2, pivot_inv, G); } y[k] = pivot_inv; } NTL_GEXEC_RANGE(seq, n, first, last) NTL_IMPORT(n) NTL_IMPORT(k) zz_p_context.restore(); zz_pX *y = &M[k][0]; zz_pX t1, t2; for (long i = first; i < last; i++) { if (i == k) continue; // skip row k zz_pX *x = &M[i][0]; rem(t1, x[k], G); negate(t1, t1); x[k] = 0; if (t1 == 0) continue; // add t1 * row k to row i for (long j = 0; j < n; j++) { mul(t2, y[j], t1); add(x[j], x[j], t2); } } NTL_GEXEC_RANGE_END } else { clear(d); return; } }
void SFCanZass(vec_ZZ_pX& factors, const ZZ_pX& ff, long verbose) { ZZ_pX f = ff; if (!IsOne(LeadCoeff(f))) LogicError("SFCanZass: bad args"); if (deg(f) == 0) { factors.SetLength(0); return; } if (deg(f) == 1) { factors.SetLength(1); factors[0] = f; return; } factors.SetLength(0); double t; const ZZ& p = ZZ_p::modulus(); ZZ_pXModulus F; build(F, f); ZZ_pX h; if (verbose) { cerr << "computing X^p..."; t = GetTime(); } PowerXMod(h, p, F); if (verbose) { cerr << (GetTime()-t) << "\n"; } vec_pair_ZZ_pX_long u; if (verbose) { cerr << "computing DDF..."; t = GetTime(); } NewDDF(u, f, h, verbose); if (verbose) { t = GetTime()-t; cerr << "DDF time: " << t << "\n"; } ZZ_pX hh; vec_ZZ_pX v; long i; for (i = 0; i < u.length(); i++) { const ZZ_pX& g = u[i].a; long d = u[i].b; long r = deg(g)/d; if (r == 1) { // g is already irreducible append(factors, g); } else { // must perform EDF if (d == 1) { // root finding RootEDF(v, g, verbose); append(factors, v); } else { // general case rem(hh, h, g); EDF(v, g, hh, d, verbose); append(factors, v); } } } }
/* This function saves particle motion parameter estimates to a extension table called mwpm. Individual station estimates and an array average estimate are all saved in the same table. they can be sorted out through the key field pmtype set to "ss" for single station and "aa" for array average. Arguments: array - array name used as tag on the array average row evid - css3.0 event id bankid - multiwavelet bank id tag phase - seismic phase name as in css3.0 fc - center frequency of band in hz. t0 - start time at reference station for particle motion analysis twin - length of analysis time window relative to t0 g - MWgather structure for this band. The routine loops through the list defined by this complicated structure. moveout - moveout vector. Elements of moveout are a parallel array to g->sta and related quanties in the gather structure. pmarr - particle motion structures array indexed by station name pmerrarr - particle motion error structure array indexed by station name pmavg - particle motion ellipse parameters for array average pmaerr - error parameters associated with pmavg db - output database Author: G Pavlis Written: march 2000 */ int MWdb_save_pm( char *array, int evid, int bankid, char *phase, double fc, double t0, double twin, MWgather *g, double *moveout, Arr *pmarr, Arr *pmerrarr, Particle_Motion_Ellipse *pmavg, Particle_Motion_Error *pmaerr, Dbptr db) { char *sta; int i; Particle_Motion_Ellipse *pm; Particle_Motion_Error *pmerr; int errcount=0; double time; int nsta; Spherical_Coordinate scoor; double majaz, majema, minaz, minema; db = dblookup(db,0,"mwpm",0,0); nsta = g->nsta; /* We look through the whole gather quietly skipping entries flagged bad with a null pointer */ for(i=0;i<nsta;++i) { pm = (Particle_Motion_Ellipse *)getarr(pmarr,g->sta[i]->sta); pmerr = (Particle_Motion_Error *)getarr(pmerrarr,g->sta[i]->sta); /* Silently skip null entries because autoediting makes this happen often. We could trap the condition where one of these pointers is null and the other is not, but this should not happen so I skip it.*/ if( (pm != NULL) && (pmerr != NULL) ) { /* We have to correct the start time for moveout. This asssumes the moveout vector has the current best estimate */ time = t0 + moveout[i]; /* The pm structure stores the major and minor axes as unit vectors. It is more compact and more intuitive to store these quantities in spherical coord form (az and ema) in the database so we have to convert them. NOte also all angles are stored internally in radians and need to be converted to degrees with the deg for external consumption. */ scoor = unit_vector_to_spherical(pm->major); /* Note azimuth in geographical coordinates is not the same as the phi angle in spherical coordinates used here. 
It is 90 - phi */ majaz = 90.0 - deg(scoor.phi); majema = deg(scoor.theta); scoor = unit_vector_to_spherical(pm->minor); minaz = 90.0 - deg(scoor.phi); minema = deg(scoor.theta); if( dbaddv(db,0, "sta",g->sta[i]->sta, "bankid",bankid, "fc",fc, "phase",phase, "evid",evid, "time",time, "twin",twin, "pmtype","ss", "majoraz",majaz, "majorema",majema, "minoraz",minaz, "minorema",minema, "rect",pm->rectilinearity, "errmajaz",deg(pmerr->dphi_major), "errmajema",deg(pmerr->dtheta_major), "errminaz",deg(pmerr->dphi_minor), "errminema",deg(pmerr->dtheta_minor), "errrect",pmerr->delta_rect, "majndgf",pmerr->ndgf_major, "minndgf",pmerr->ndgf_minor, "rectndgf",pmerr->ndgf_rect, "algorithm","mwap",0) < 0) { elog_notify(0,"dbaddv error in mwpm table for station %s\n",sta); ++errcount; } } } /* now we add a row for the array average. This is flagged only by the pmtype field. */ scoor = unit_vector_to_spherical(pmavg->major); majaz = 90.0 - deg(scoor.phi); majema = deg(scoor.theta); scoor = unit_vector_to_spherical(pmavg->minor); minaz = 90.0 - deg(scoor.phi); minema = deg(scoor.theta); if( dbaddv(db,0, "sta",array, "bankid",bankid, "fc",fc, "phase",phase, "evid",evid, "time",time, "twin",twin, "pmtype","aa", "majoraz",majaz, "majorema",majema, "minoraz",minaz, "minorema",minema, "rect",pmavg->rectilinearity, "errmajaz",deg(pmaerr->dphi_major), "errmajema",deg(pmaerr->dtheta_major), "errminaz",deg(pmaerr->dphi_minor), "errminema",deg(pmaerr->dtheta_minor), "errrect",pmaerr->delta_rect, "majndgf",pmaerr->ndgf_major, "minndgf",pmaerr->ndgf_minor, "rectndgf",pmaerr->ndgf_rect, "algorithm","mwap",0) < 0) { elog_notify(0,"dbaddv error saving array average particle motion parameters in mwpm table for evid %d\n", evid); ++errcount; } return(errcount); }
ZZ sumOfCoeffs(const ZZX& f)  // = f(1)
{
  ZZ sum = ZZ::zero();
  for (long i = 0; i <= deg(f); i++) sum += coeff(f, i);
  return sum;
}
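/* Added usage sketch: for f = 3x^2 + 2x + 1 the sum of the coefficients is 6, which
   is exactly f(1). Assumes NTL's ZZX interface (SetCoeff/coeff/deg) as used above;
   sumOfCoeffs_demo is a hypothetical name. */
#include <NTL/ZZX.h>
static NTL::ZZ sumOfCoeffs_demo()
{
    NTL::ZZX f;
    NTL::SetCoeff(f, 0, 1);
    NTL::SetCoeff(f, 1, 2);
    NTL::SetCoeff(f, 2, 3);
    return sumOfCoeffs(f);   // == 6 == f(1)
}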
void BallMovement::bounce(bool collision) { if (stop_speed != 0) return; #ifdef CHOWDREN_IS_AVGN fix_position(); int direction = instance->direction; float angle = rad(direction * 11.25f); float found_a = -1.0f; for (float a = 0.0f; a < (CHOW_PI*2.0f); a += (CHOW_PI*2.0f) / 16.0f) { float x_move = 10.0f * cos(angle + a); float y_move = -10.0f * sin(angle + a); int x = instance->x + x_move; int y = instance->y + y_move; if (!test_position(x, y)) { found_a = a; break; } } if (found_a == -1.0f) { instance->set_direction((instance->direction + 16) % 32, false); return; } angle += found_a * 2.0f; if (angle > 2.0 * CHOW_PI) angle -= 2.0 * CHOW_PI; instance->set_direction(deg(angle) / 11.25f, false); if (back_col) instance->flags &= ~REPEAT_BACK_COLLISION; else instance->collision_flags = 0; #else add_x = add_y = 0; if (collision) { if (back_col) has_back_col = true; push_out(); } int x = instance->x; int y = instance->y; x -= 8; y -= 8; int rebond = 0; if (test_position(x, y)) rebond |= 0x01; x += 16; if (test_position(x, y)) rebond |= 0x02; y += 16; if (test_position(x, y)) rebond |= 0x04; x -= 16; if (test_position(x, y)) rebond |= 0x08; int value = rebond_list[rebond * 32 + instance->direction]; if (test_direction(value, 8)) { int angles = 4; int angles2 = angles; bool is_free = false; while (true) { value -= angles; value &= 31; if (!test_direction(value, 8)) { is_free = true; break; } value += 2 * angles; value &= 31; if (!test_direction(value, 8)) { is_free = true; break; } value -= angles; value &= 31; angles += angles2; if (angles <= 16) break; } if (!is_free) value = randrange(32); } int rnd = randrange(100); if (rnd < randomizer) { rnd >>= 2; if (rnd < 25) { rnd -= 12; rnd &= 31; if (!test_direction(rnd, 8)) value = rnd; } }
void debug_log_entity_args (entity_debug_modes mode, entity_debug_args arg, entity *en, ...)
{
	va_list pargs;

	////////////////////////////////////////
	// trap remote args in single player game
	////////////////////////////////////////

	if (!debug_log_entity_args_enabled)
	{
		return;
	}

	if (mode == ENTITY_DEBUG_REMOTE)
	{
		if (direct_play_get_comms_mode () == DIRECT_PLAY_COMMS_MODE_NONE)
		{
			return;
		}
	}

	////////////////////////////////////////
	// sort debug log text
	////////////////////////////////////////

	va_start (pargs, en);

	switch (arg)
	{
		////////////////////////////////////////
		case ENTITY_DEBUG_ATTITUDE_ANGLES:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, float heading, float pitch, float roll)
			float heading, pitch, roll;
			heading = va_arg (pargs, double);
			pitch = va_arg (pargs, double);
			roll = va_arg (pargs, double);
			debug_log_text (mode, en, "attitude angles (h = %.3f, p = %.3f, r = %.3f)", deg (heading), deg (pitch), deg (roll));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_ATTITUDE_MATRIX:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, matrix3x3 attitude)
			matrix3x3 *attitude;
			float heading, pitch, roll;
			attitude = va_arg (pargs, matrix3x3 *);
			ASSERT (attitude);
			heading = get_heading_from_attitude_matrix (*attitude);
			pitch = get_pitch_from_attitude_matrix (*attitude);
			roll = get_roll_from_attitude_matrix (*attitude);
			debug_log_text (mode, en, "attitude matrix (h = %.3f, p = %.3f, r = %.3f)", deg (heading), deg (pitch), deg (roll));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_CHAR_TYPE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, char_types type)
			char_types type;
			type = va_arg (pargs, char_types);
			debug_log_text (mode, NULL, "char type = %s", get_char_type_name (type));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_CHAR_VALUE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, char_types type, char value)
			char_types type;
			char value;
			type = va_arg (pargs, char_types);
			value = va_arg (pargs, int);
			debug_log_text (mode, en, "%s = %c", get_char_type_name (type), value);
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_CREATE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, entity_types type, int index)
			entity_types type;
			int index;
			type = va_arg (pargs, entity_types);
			index = va_arg (pargs, int);
			debug_log_text (mode, NULL, "create %s (index = %d): ", get_entity_type_name (type), index);
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_DESTROY:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en)
			debug_log_text (mode, en, "destroy");
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_DESTROY_FAMILY:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en)
			debug_log_text (mode, en, "destroy family");
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_ENTITY_ATTRIBUTE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, entity_attributes attr)
			entity_attributes attr;
			attr = va_arg (pargs, entity_attributes);
			debug_log_text (mode, NULL, "entity attribute = %s", get_entity_attribute_name (attr));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_ENTITY_COMMS_MESSAGE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, entity_comms_messages message)
			entity_comms_messages message;
			message = va_arg (pargs, entity_comms_messages);
			debug_log_text (mode, NULL, "entity comms message = %s", get_entity_comms_message_name (message));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_ENTITY_INDEX:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, int index)
			int index;
			index = va_arg (pargs, int);
			debug_log_text (mode, en, "entity index = %d", index);
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_ENTITY_TYPE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, entity_types type)
			entity_types type;
			type = va_arg (pargs, entity_types);
			debug_log_text (mode, en, "entity type = %s", get_entity_type_name (type));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_FLOAT_TYPE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, float_types type)
			float_types type;
			type = va_arg (pargs, float_types);
			debug_log_text (mode, NULL, "float type = %s", get_float_type_name (type));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_FLOAT_VALUE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, float_types type, float value)
			float_types type;
			float value;
			type = va_arg (pargs, float_types);
			value = va_arg (pargs, double);
			debug_log_text (mode, en, "%s = %.3f", get_float_type_name (type), value);
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_INT_TYPE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, int_types type)
			int_types type;
			type = va_arg (pargs, int_types);
			debug_log_text (mode, NULL, "int type = %s", get_int_type_name (type));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_INT_VALUE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, int_types type, int value)
			int_types type;
			int value;
			type = va_arg (pargs, int_types);
			value = va_arg (pargs, int);
			debug_log_text (mode, en, "%s = %d", get_int_type_name (type), value);
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_KILL:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en)
			debug_log_text (mode, en, "kill");
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_LIST_TYPE:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, list_types type)
			list_types type;
			type = va_arg (pargs, list_types);
			debug_log_text (mode, NULL, "list type = %s", get_list_type_name (type));
			break;
		}
		////////////////////////////////////////
		case ENTITY_DEBUG_LIST_TYPE_CHILD_PRED:
		{
			// (entity_debug_modes mode, entity_debug_args arg, entity *en, list_types type, entity *child_pred)
			list_types type;
			entity *child_pred;
			int index;
			type = va_arg (pargs, list_types);
			child_pred = va_arg (pargs, entity *);
			index = get_local_entity_safe_index (child_pred);
			debug_log_text (mode, en,
"%s child pred index = %d", get_list_type_name (type), index); break; } //////////////////////////////////////// case ENTITY_DEBUG_LIST_TYPE_CHILD_SUCC: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, list_types type, entity *child_succ) // list_types type; entity *child_succ; int index; type = va_arg (pargs, list_types); child_succ = va_arg (pargs, entity *); index = get_local_entity_safe_index (child_succ); debug_log_text (mode, en, "%s child succ index = %d", get_list_type_name (type), index); break; } //////////////////////////////////////// case ENTITY_DEBUG_LIST_TYPE_FIRST_CHILD: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, list_types type, entity *first_child) // list_types type; entity *first_child; int index; type = va_arg (pargs, list_types); first_child = va_arg (pargs, entity *); index = get_local_entity_safe_index (first_child); debug_log_text (mode, en, "%s first child index = %d", get_list_type_name (type), index); break; } //////////////////////////////////////// case ENTITY_DEBUG_LIST_TYPE_PARENT: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, list_types type, entity *parent) // list_types type; entity *parent; int index; type = va_arg (pargs, list_types); parent = va_arg (pargs, entity *); index = get_local_entity_safe_index (parent); debug_log_text (mode, en, "%s parent index = %d", get_list_type_name (type), index); break; } //////////////////////////////////////// case ENTITY_DEBUG_PTR_TYPE: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, ptr_types type) // ptr_types type; type = va_arg (pargs, ptr_types); debug_log_text (mode, NULL, "ptr type = %s", get_ptr_type_name (type)); break; } //////////////////////////////////////// case ENTITY_DEBUG_PTR_VALUE: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, ptr_types type, void *ptr) // ptr_types type; void *ptr; type = va_arg (pargs, ptr_types); ptr = va_arg (pargs, void *); debug_log_text (mode, en, "%s = 0x%x", get_ptr_type_name (type), ptr); break; } //////////////////////////////////////// case ENTITY_DEBUG_STRING_TYPE: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, string_types type) // string_types type; type = va_arg (pargs, string_types); debug_log_text (mode, NULL, "string type = %s", get_string_type_name (type)); break; } //////////////////////////////////////// case ENTITY_DEBUG_STRING: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, string_types type, char *s) // string_types type; char *s; type = va_arg (pargs, string_types); s = va_arg (pargs, char *); ASSERT (s); debug_log_text (mode, en, "%s = %s", get_string_type_name (type), s); break; } //////////////////////////////////////// case ENTITY_DEBUG_VEC3D_TYPE: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, vec3d_types type) // vec3d_types type; type = va_arg (pargs, vec3d_types); debug_log_text (mode, NULL, "vec3d type = %s", get_vec3d_type_name (type)); break; } //////////////////////////////////////// case ENTITY_DEBUG_VEC3D: //////////////////////////////////////// { // // (entity_debug_modes mode, entity_debug_args arg, entity *en, vec3d_types type, vec3d *v) // 
vec3d_types type; vec3d *v; type = va_arg (pargs, vec3d_types); v = va_arg (pargs, vec3d *); ASSERT (v); if (type == VEC3D_TYPE_POSITION) { int x_sec, z_sec; get_x_sector (x_sec, v->x); get_z_sector (z_sec, v->z); debug_log_text (mode, en, "%s = (x = %.3f, y = %.3f, z = %.3f, x sec = %d, z sec = %d)", get_vec3d_type_name (type), v->x, v->y, v->z, x_sec, z_sec); } else { debug_log_text (mode, en, "%s = (x = %.3f, y = %.3f, z = %.3f)", get_vec3d_type_name (type), v->x, v->y, v->z); } break; } //////////////////////////////////////// default: //////////////////////////////////////// { debug_fatal ("Invalid entity debug arg = %d", arg); break; } } va_end (pargs); }
void CMonsterCoverManager::less_cover_direction(Fvector &dir) { float angle = ai().level_graph().vertex_cover_angle(m_object->ai_location().level_vertex_id(),deg(10), std::greater<float>()); collide::rq_result l_rq; float angle_from = angle_normalize(angle - ANGLE_DISP); float angle_to = angle_normalize(angle + ANGLE_DISP); Fvector trace_from; m_object->Center (trace_from); Fvector direction; // trace discretely left for (float ang = angle; angle_difference(ang, angle) < ANGLE_DISP; ang = angle_normalize(ang - ANGLE_DISP_STEP)) { direction.setHP (ang, 0.f); if (Level().ObjectSpace.RayPick(trace_from, direction, TRACE_STATIC_DIST, collide::rqtStatic, l_rq,m_object)) { if ((l_rq.range < TRACE_STATIC_DIST)) { angle_from = ang; break; } } } // trace discretely right for (float ang = angle; angle_difference(ang, angle) < ANGLE_DISP; ang = angle_normalize(ang + ANGLE_DISP_STEP)) { direction.setHP (ang, 0.f); if (Level().ObjectSpace.RayPick(trace_from, direction, TRACE_STATIC_DIST, collide::rqtStatic, l_rq,m_object)) { if ((l_rq.range < TRACE_STATIC_DIST)) { angle_to = ang; break; } } } angle = angle_normalize(angle_from + angle_difference(angle_from,angle_to) / 2); dir.setHP (angle,0.f); }
Slowness_Function_Output uniform_slowness_table_interpolate(Ray_Endpoints x, char *phase, int mode) { double delta, d_km; /* epicentral distance in radians and km respectively*/ double s2raz; /* source to receiver s2raz angle (radians) */ double uaz; /* slowness vector s2raz */ Slowness_Function_Output o; XZ_table_uniform *utable; double f_ll,f_hl,f_lh,f_hh; int ix_low, iz_low, ix_high, iz_high; int ix, iz; double x_low, z_low, x_high, z_high; double u; /* Slowness in s/km interpolated from tables */ int discontinuity; /* temporaries used to calculate slowness derivatives */ double sin_a, sin_a0, cos_a, cos_a0, dudz, dudr, du1, du2; /* First we compute the epicentral distance and source to receiver azimuth */ dist(rad(x.slat), rad(x.slon), rad(x.rlat), rad(x.rlon), &delta, &s2raz); d_km = delta*RADIUS_EARTH; /* Now compute the slowness vector azimuth */ dist(rad(x.rlat),rad(x.rlon),rad(x.slat),rad(x.slon),&delta,&uaz); uaz += M_PI; if(uaz >= 2.0*M_PI) uaz -= (2.0*M_PI); /* Look up the correct table for this phase */ utable = (XZ_table_uniform *) getarr(slow_tables_uniform,phase); if( utable == NULL ) { char e[80]; sprintf(e,"No travel time tables for phase %s",phase); o = set_slowness_table_error(e); return(o); } /* compute the indices assuming table is tabulated in degrees */ ix_low = (int) ((deg(delta)-utable->x0)/utable->dx); iz_low = (int) ((x.sz-utable->z0)/utable->dz); ix_high = ix_low + 1; iz_high = iz_low + 1; if( (ix_high >= utable->nx) || (iz_high >= utable->nz) || (ix_low <0) || (iz_low<0) ) { o = set_slowness_table_error("Requested point is outside table"); return(o); } x_low = ((double)ix_low)*((utable->dx))+utable->x0; x_high = ((double)ix_high)*((utable->dx))+utable->x0; z_low = ((double)iz_low)*((utable->dz))+utable->z0; z_high = ((double)iz_high)*((utable->dz))+utable->z0; f_ll = utable->values[ix_low][iz_low]; f_hl = utable->values[ix_high][iz_low]; f_lh = utable->values[ix_low][iz_high]; f_hh = utable->values[ix_high][iz_high]; discontinuity = check_discontinuity(utable->branch,ix_low, iz_low); switch (discontinuity) { case (NOWAY): o.ux = SLOWNESS_INVALID; o.uy = SLOWNESS_INVALID; o.duxdx = 0.0; o.duxdy = 0.0; o.duxdz = 0.0; o.duydx = 0.0; o.duydy = 0.0; o.duydz = 0.0; return(o); case NO_PROBLEM: case NO_PROBLEM_VALUE: u = serendipity(x_low, z_low, x_high, z_high, f_ll, f_hl, f_lh, f_hh, deg(delta), x.sz); break; case TWO_LOW: u = interpolate_discontinuity_twopoint(x_low, z_low, x_high, z_high, f_ll, f_lh, utable->slopes[ix_low][iz_low], utable->slopes[ix_low][iz_high], deg(delta), x.sz); break; case ONE_LL: u = interpolate_discontinuity_onepoint(x_low,f_ll, utable->slopes[ix_low][iz_low],deg(delta)); break; case ONE_LH: u = interpolate_discontinuity_onepoint(x_low,f_lh, utable->slopes[ix_low][iz_high],deg(delta)); } o.ux = u*sin(uaz); o.uy = u*cos(uaz); if(mode == RESIDUALS_ONLY) { o.duxdx = 0.0; o.duxdy = 0.0; o.duxdz = 0.0; o.duydx = 0.0; o.duydy = 0.0; o.duydz = 0.0; return(o); } /* Now we turn to the slowness derivatives. These require knowledge of both du/dr and angle terms. 
First, we need to determine dudr using the slopes field of the utable */ f_ll = utable->slopes[ix_low][iz_low]; f_hl = utable->slopes[ix_high][iz_low]; f_lh = utable->slopes[ix_low][iz_high]; f_hh = utable->slopes[ix_high][iz_high]; switch (discontinuity) { case (NOWAY): o.ux = SLOWNESS_INVALID; o.uy = SLOWNESS_INVALID; o.duxdx = 0.0; o.duxdy = 0.0; o.duxdz = 0.0; o.duydx = 0.0; o.duydy = 0.0; o.duydz = 0.0; return(o); case NO_PROBLEM: dudr = serendipity(x_low, z_low, x_high, z_high, f_ll, f_hl, f_lh, f_hh, deg(delta), x.sz); break; default: /* When one of the points contains a discontinuity, we simply hunt through the four nearest neighbor points in a fixed order and return the nearest valid point. This is fine for dudr since this quantity is usually rather small anyway. */ ix = rint((deg(delta)-utable->x0)/utable->dx); iz = rint((x.sz-utable->z0)/utable->dz); if(ix==ix_low) { if(slopes_valid(utable->branch[ix_low][iz])) dudr = utable->slopes[ix_low][iz]; else { if(slopes_valid(utable->branch[ix_high][iz])) dudr = utable->slopes[ix_high][iz]; else { if(iz==iz_low) if(slopes_valid(utable->branch[ix_low][iz_high])) dudr = utable->slopes[ix_low][iz_high]; else dudr = utable->slopes[ix_high][iz_high]; else if(slopes_valid(utable->branch[ix_low][iz_low])) dudr = utable->slopes[ix_low][iz_low]; else dudr = utable->slopes[ix_high][iz_low]; } } } else { if(slopes_valid(utable->branch[ix_high][iz])) dudr = utable->slopes[ix_high][iz]; else { if(slopes_valid(utable->branch[ix_low][iz])) dudr = utable->slopes[ix_low][iz]; else { if(iz==iz_low) if(slopes_valid(utable->branch[ix_high][iz_high]) ) dudr = utable->slopes[ix_high][iz_high]; else dudr = utable->slopes[ix_low][iz_high]; else if(slopes_valid(utable->branch[ix_high][iz_low]) ) dudr = utable->slopes[ix_high][iz_low]; else dudr = utable->slopes[ix_low][iz_low]; } } } } sin_a0 = sin(s2raz); cos_a0 = cos(s2raz); sin_a = sin(uaz); cos_a = cos(uaz); o.duxdx = -sin_a*sin_a0*dudr - u*cos_a*cos_a0/d_km; o.duxdy = -sin_a*cos_a0*dudr + u*cos_a*sin_a0/d_km; o.duydx = -cos_a*sin_a0*dudr + u*sin_a*cos_a0/d_km; o.duydy = -cos_a*cos_a0*dudr - u*sin_a*sin_a0/d_km; /*All that is left is the calculation of du/dz. du/dz is important only when the epicentral distance is close, but nonzero. (oddly du/dz is 0 at zero offset). We compute du/dz here by a simple finite difference from three depth points in the grid centered on the closest grid point in epicentral distance. If these points are not on the same travel time branch we just set dudz to zero. This may not always be appropriate, but the philosphy is why throw out the baby with the bathwater especially if the bathwater isn't that dirty. 
*/ ix = rint((deg(delta)-utable->x0)/utable->dx); iz = rint((x.sz-utable->z0)/utable->dz); if(iz == 0) { /* We can only compute a forward difference in this case */ if( slopes_valid(utable->branch[ix][iz]) && slopes_valid(utable->branch[ix][iz+1]) ) dudz = (utable->values[ix][iz+1] - utable->values[ix][iz])/utable->dz; else dudz = 0.0; } else if(iz >= ((utable->nz)-1) ) /* Can only compute a backward difference in this case */ if( slopes_valid(utable->branch[ix][iz]) && slopes_valid(utable->branch[ix][iz-1]) ) dudz = (utable->values[ix][iz] - utable->values[ix][iz-1])/utable->dz; else dudz = 0.0; else { du1 = (utable->values[ix][iz] - utable->values[ix][iz-1]); du2 = (utable->values[ix][iz+1] - utable->values[ix][iz]); if( slopes_valid(utable->branch[ix][iz]) && slopes_valid(utable->branch[ix][iz-1]) && slopes_valid(utable->branch[ix][iz+1]) ) dudz = (du1/utable->dz + du2/utable->dz)/2.0; else if( ! slopes_valid(utable->branch[ix][iz])) dudz = 0.0; else if( slopes_valid(utable->branch[ix][iz+1]) ) dudz = du2/utable->dz; else dudz = du1/utable->dz; } o.duxdz = dudz*sin_a; o.duydz = dudz*cos_a; return(o); }
int main(int argc, char *argv[])
{
    if (argc < 5) {
        PrintHelp();
        exit(0);
    }
    string mode = argv[1];      /// Execution mode
    string fN = argv[2];        /// Name of the .yuv file
    int sx = atoi(argv[3]);     /// Number of columns
    int sy = atoi(argv[4]);     /// Number of rows
    int yuv = atoi(argv[5]);    /// Type of the .yuv file - see the definitions in global.h

    Loader loadedFile(fN, sx, sy, yuv);   /// load original

    string deg = "block";       /// type of degradation

    if (mode == "train") {
        float DMOS = atof(argv[6]);   /// DMOS of the .yuv file
        int fw = atoi(argv[7]);       /// Frames in a codeword
        int xw = atoi(argv[8]);       /// Number of columns of a codeword
        int yw = atoi(argv[9]);       /// Number of rows of a codeword
        loadedFile.writeCodebook("codebook.txt", DMOS, fw, xw, yw);
    } else if (mode == "predict") {
        int K = atoi(argv[6]);        /// K parameter of the K-NN
        int fw = atoi(argv[7]);       /// Frames in a codeword
        int xw = atoi(argv[8]);       /// Number of columns of a codeword
        int yw = atoi(argv[9]);       /// Number of rows of a codeword
        float predicted_MOS;
        predicted_MOS = loadedFile.predictMOS("codebook.txt", K, fw, xw, yw);
        loadedFile.compareLIVE(predicted_MOS);
    } else if (mode == "features") {
        int fw = atoi(argv[6]);       /// Frames in a codeword
        int xw = atoi(argv[7]);       /// Number of columns of a codeword
        int yw = atoi(argv[8]);       /// Number of rows of a codeword
        string s(argv[9]);
        loadedFile.printFeatures(s, fw, xw, yw);
    } else if (mode == "metrics") {
        float DMOS = atof(argv[6]);
        //loadedFile.callMetrics();
        loadedFile.callMetrics2(DMOS);
    } else if (mode == "video") {
        int frame_atual = 0;
        int total_frames = loadedFile.getTotalFrameNr();
        while (true) {
            loadedFile.callDebug(frame_atual);
            int c = cvWaitKey(20);
            if ((char)c == 27)                                            /// ESC key
                break;
            if (((char)c == 104) && frame_atual > 1)                      /// 'h' key
                frame_atual--;
            if (((char)c == 108) && (frame_atual < (total_frames - 1)))   /// 'l' key
                frame_atual++;
            if (((char)c == 106))                                         /// 'j' key
                loadedFile.degradeFrame(frame_atual, deg);
        }
    } else if (mode == "degrade") {
        std::string output1(argv[6]);    /// 1st argument -- name of the output video
        std::string output;              /// helper string used to build the output video name
        ///printf(argv[6]);
        string deg(argv[7]);             /// 2nd argument -- type of degradation
        int videonumber = atoi(argv[8]); /// number of this video
        printf("Generating the %d-th video with degradation %s and scaling factors", videonumber, argv[7]);
        double scale[4]; /// scaling values (3 for blockyblurry --> update later!!!); sized 4 because the loop below writes indices 1..3
        for (int i = 1; i <= 3; i++) {
            scale[i] = atof(argv[8 + i]); /// read all the scaling factors
            printf("%f ", scale[i]);      /// printing on the screen
        }
        printf("\n");
        char numstr[21];
        sprintf(numstr, "%d.yuv", videonumber); /// builds the name of the output video using the number variable
        output = output1 + numstr;
        loadedFile.degradecombineVideo(output, deg, scale); /// calls the algorithm from loader.cpp that degrades the original
    } else {
        printf("Invalid Option \n");
        PrintHelp();
        exit(0);
    }
    return 0;
}
inline void Polynomial<T, Structure>::pad_zeros(size_type size) { for (size_type i = deg(); i < size; ++i) { m_coefficients.push_back(m_structure->zero()); } }
long IterIrredTest(const ZZ_pX& f) { if (deg(f) <= 0) return 0; if (deg(f) == 1) return 1; ZZ_pXModulus F; build(F, f); ZZ_pX h; PowerXMod(h, ZZ_p::modulus(), F); long CompTableSize = 2*SqrRoot(deg(f)); ZZ_pXArgument H; build(H, h, F, CompTableSize); long i, d, limit, limit_sqr; ZZ_pX g, X, t, prod; SetX(X); i = 0; g = h; d = 1; limit = 2; limit_sqr = limit*limit; set(prod); while (2*d <= deg(f)) { sub(t, g, X); MulMod(prod, prod, t, F); i++; if (i == limit_sqr) { GCD(t, f, prod); if (!IsOne(t)) return 0; set(prod); limit++; limit_sqr = limit*limit; i = 0; } d = d + 1; if (2*d <= deg(f)) { CompMod(g, g, H, F); } } if (i > 0) { GCD(t, f, prod); if (!IsOne(t)) return 0; } return 1; }
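IterIrredTest is the iterated irreducibility test: g walks through X^{p^d} mod f, the (g - X) terms are accumulated into a product, and a gcd with f is taken only every limit^2 steps; any non-trivial gcd means f has an irreducible factor of degree at most deg(f)/2, so the routine returns 0. A hedged usage sketch against NTL, with the modulus and polynomial chosen only for illustration:

#include <NTL/ZZ_pXFactoring.h>
using namespace NTL;

int main()
{
    ZZ_p::init(to_ZZ(17));        // work modulo the prime 17
    ZZ_pX f;
    SetCoeff(f, 2, 1);            // f = X^2 + X + 3
    SetCoeff(f, 1, 1);
    SetCoeff(f, 0, 3);
    // The discriminant 1 - 12 = -11 = 6 is a quadratic non-residue mod 17,
    // so f has no roots and IterIrredTest should return 1.
    long irred = IterIrredTest(f);
    return irred ? 0 : 1;
}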
void KeySwitch::verify(FHESecKey& sk) { long fromSPower = fromKey.getPowerOfS(); long fromXPower = fromKey.getPowerOfX(); long fromIdx = fromKey.getSecretKeyID(); long toIdx = toKeyID; long p = ptxtSpace; long n = b.size(); cout << "KeySwitch::verify\n"; cout << "fromS = " << fromSPower << " fromX = " << fromXPower << " fromIdx = " << fromIdx << " toIdx = " << toIdx << " p = " << p << " n = " << n << "\n"; if (fromSPower != 1 || fromXPower != 1 || (fromIdx == toIdx) || n == 0) { cout << "KeySwitch::verify: these parameters not checkable\n"; return; } const FHEcontext& context = b[0].getContext(); // we don't store the context in the ks matrix, so let's // check that they are consistent for (long i = 0; i < n; i++) { if (&context != &(b[i].getContext())) cout << "KeySwitch::verify: bad context " << i << "\n"; } cout << "context.ctxtPrimes = " << context.ctxtPrimes << "\n"; cout << "context.specialPrimes = " << context.specialPrimes << "\n"; IndexSet allPrimes = context.ctxtPrimes | context.specialPrimes; cout << "digits: "; for (long i = 0; i < n; i++) cout << context.digits[i] << " "; cout << "\n"; cout << "IndexSets of b: "; for (long i = 0; i < n; i++) cout << b[i].getMap().getIndexSet() << " "; cout << "\n"; // VJS: suspicious shadowing of fromKey, toKey const DoubleCRT& _fromKey = sk.sKeys.at(fromIdx); const DoubleCRT& _toKey = sk.sKeys.at(toIdx); cout << "IndexSet of fromKey: " << _fromKey.getMap().getIndexSet() << "\n"; cout << "IndexSet of toKey: " << _toKey.getMap().getIndexSet() << "\n"; vector<DoubleCRT> a; a.resize(n, DoubleCRT(context, allPrimes)); // defined modulo all primes { RandomState state; SetSeed(prgSeed); for (long i = 0; i < n; i++) a[i].randomize(); } // the RandomState destructor "restores the state" (see NumbTh.h) vector<ZZX> A, B; A.resize(n); B.resize(n); for (long i = 0; i < n; i++) { a[i].toPoly(A[i]); b[i].toPoly(B[i]); } ZZX FromKey, ToKey; _fromKey.toPoly(FromKey, allPrimes); _toKey.toPoly(ToKey, allPrimes); ZZ Q = context.productOfPrimes(allPrimes); ZZ prod = context.productOfPrimes(context.specialPrimes); ZZX C, D; ZZX PhimX = context.zMStar.getPhimX(); long nb = 0; for (long i = 0; i < n; i++) { C = (B[i] - FromKey*prod + ToKey*A[i]) % PhimX; PolyRed(C, Q); if (!divide(D, C, p)) { cout << "*** not divisible by p at " << i << "\n"; } else { for (long j = 0; j <= deg(D); j++) if (NumBits(coeff(D, j)) > nb) nb = NumBits(coeff(D, j)); } prod *= context.productOfPrimes(context.digits[i]); } cout << "error ratio: " << ((double) nb)/((double) NumBits(Q)) << "\n"; }
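Stripped of the logging, what the final loop of verify checks for each row i of the key-switching matrix is that $(b_i - s_{from} \cdot P_i + s_{to} \cdot a_i) \bmod (\Phi_m(X),\, Q) = p \cdot e_i$ for some polynomial $e_i$, where the $a_i$ are re-derived from the stored PRG seed, $P_i$ is the product of the special primes times the first $i$ digit products, and $Q$ is the product of all primes. The printed "error ratio" is the largest coefficient bit-length of any $e_i$ divided by the bit-length of $Q$.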
void SFBerlekamp(vec_ZZ_pX& factors, const ZZ_pX& ff, long verbose) { ZZ_pX f = ff; if (!IsOne(LeadCoeff(f))) LogicError("SFBerlekamp: bad args"); if (deg(f) == 0) { factors.SetLength(0); return; } if (deg(f) == 1) { factors.SetLength(1); factors[0] = f; return; } double t; const ZZ& p = ZZ_p::modulus(); long n = deg(f); ZZ_pXModulus F; build(F, f); ZZ_pX g, h; if (verbose) { cerr << "computing X^p..."; t = GetTime(); } PowerXMod(g, p, F); if (verbose) { cerr << (GetTime()-t) << "\n"; } vec_long D; long r; vec_ZZVec M; if (verbose) { cerr << "building matrix..."; t = GetTime(); } BuildMatrix(M, n, g, F, verbose); if (verbose) { cerr << (GetTime()-t) << "\n"; } if (verbose) { cerr << "diagonalizing..."; t = GetTime(); } NullSpace(r, D, M, verbose); if (verbose) { cerr << (GetTime()-t) << "\n"; } if (verbose) cerr << "number of factors = " << r << "\n"; if (r == 1) { factors.SetLength(1); factors[0] = f; return; } if (verbose) { cerr << "factor extraction..."; t = GetTime(); } vec_ZZ_p roots; RandomBasisElt(g, D, M); MinPolyMod(h, g, F, r); if (deg(h) == r) M.kill(); FindRoots(roots, h); FindFactors(factors, f, g, roots); ZZ_pX g1; vec_ZZ_pX S, S1; long i; while (factors.length() < r) { if (verbose) cerr << "+"; RandomBasisElt(g, D, M); S.kill(); for (i = 0; i < factors.length(); i++) { const ZZ_pX& f = factors[i]; if (deg(f) == 1) { append(S, f); continue; } build(F, f); rem(g1, g, F); if (deg(g1) <= 0) { append(S, f); continue; } MinPolyMod(h, g1, F, min(deg(f), r-factors.length()+1)); FindRoots(roots, h); S1.kill(); FindFactors(S1, f, g1, roots); append(S, S1); } swap(factors, S); } if (verbose) { cerr << (GetTime()-t) << "\n"; } if (verbose) { cerr << "degrees:"; long i; for (i = 0; i < factors.length(); i++) cerr << " " << deg(factors[i]); cerr << "\n"; } }
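SFBerlekamp checks monicity explicitly but only assumes square-freeness (the "SF" prefix); a caller that cannot guarantee a square-free input should strip repeated factors first. A hedged usage sketch with an input that is square-free by construction:

#include <NTL/ZZ_pXFactoring.h>
using namespace NTL;

int main()
{
    ZZ_p::init(to_ZZ(17));
    // f = X^3 - X = X (X - 1) (X + 1): monic and square-free mod 17,
    // so it meets the SFBerlekamp preconditions directly.
    ZZ_pX f;
    SetCoeff(f, 3, 1);
    SetCoeff(f, 1, -1);
    vec_ZZ_pX factors;
    SFBerlekamp(factors, f, 0);   // verbose = 0
    // expect factors.length() == 3, one linear factor per root 0, 1, -1
    return factors.length() == 3 ? 0 : 1;
}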
void save_assoc(Dbptr dbi, long is, long ie, long orid, char *vmodel, Tbl *residual,Hypocenter h, Dbptr dbo) { /* These fields are copied from input assoc table */ long arid; char sta[8]; char phase[10]; double belief; /* These fields are set here */ double delta; double seaz; double esaz; double timeres; double azres; double slores; double lddate; double wgt; char timedef[2],slodef[2], azdef[2]; /* intentionally ignored: emares, commid */ /* passed through arg list; orid*/ /* We use this to produce a keyed arr list of the residual list passed into here as a Tbl */ Arr *residual_array; long i; char key[40]; double r, w, reswt,uxresid, uyresid; double stalat, stalon; double ux, uy, azimuth; double u,phi; /* polar form of measured slowness vector */ double duphi; dbo = dblookup(dbo,0,"assoc",0,0); lddate = now(); /* We build an associate array for the residual tbl keying each entry with a sta/phase/type key where type is set in ggnloc as time, ux, or uy. This complication is needed to sort out array residuals. */ residual_array = newarr(0); for(i=0;i<maxtbl(residual);++i) { char *s; char keysta[10], keyphase[10], keytype[5]; s = (char *)gettbl(residual,i); sscanf(s,"%s %s %s",keysta,keyphase,keytype); /* handle S-P case by having the same residual mapped to each half of - phase pair */ if(strchr(keyphase,'-')) { char *phase1,*phase2; /* algorithm to split phase names cloned from dbgenloc */ phase1 = strdup(keyphase); phase2= strchr(phase1,'-'); *phase2 = '\0'; ++phase2; sprintf(key,"%s %s %s",keysta,phase1,keytype); setarr(residual_array,key,s); sprintf(key,"%s %s %s",keysta,phase2,keytype); setarr(residual_array,key,s); free(phase1); } else { /* normal phases are one to one */ sprintf(key,"%s %s %s",keysta,keyphase,keytype); setarr(residual_array,key,s); } } for(dbi.record=is;dbi.record < ie;++dbi.record) { char *time_residual_record; char *ux_residual_record,*uy_residual_record; if( dbgetv(dbi,0, "assoc.arid",&arid, "assoc.sta",sta, "assoc.phase",phase, "assoc.belief",&belief, NULL) == dbINVALID) { die(1,"save_assoc: dbgetv error reading assoc fields of input view at record %ld\n", dbi.record); } if( dbgetv(dbi,0, "site.lat",&stalat, "site.lon",&stalon, NULL) == dbINVALID) { die(1,"save_assoc: dbgetv error reading site fields of input view at record %ld\n", dbi.record); } /* Find the time residual record for this arrival */ sprintf(key,"%s %s time",sta,phase); time_residual_record = (char *)getarr(residual_array,key); if(time_residual_record == NULL) { complain(1,"save_assoc: getarr mismatch for key %s\nCannot set residual\n",key); timeres = TIMENULL; wgt = 0.0; strcpy(timedef,"n"); } else { /* Changed by JN to avoid gcc warning */ /* sscanf(time_residual_record,"%*s%*s%*s%*lg%lg%lg%lg", */ sscanf(time_residual_record,"%*s%*s%*s%*g%lg%lg%lg", &r,&w,&reswt); timeres = r; wgt = w*reswt; strcpy(timedef,"d"); } sprintf(key,"%s %s ux",sta,phase); ux_residual_record = (char *)getarr(residual_array,key); sprintf(key,"%s %s uy",sta,phase); uy_residual_record = (char *)getarr(residual_array,key); /* Corrected by JN if( (ux_residual_record == NULL) || (ux_residual_record == NULL)) */ if( (ux_residual_record == NULL) || (uy_residual_record == NULL)) { /* This trick is not documented. 
By setting the record field to dbNULL, and then calling dbgetv, each field will be set to its NULL value */ dbo.record = dbNULL; dbgetv(dbo,0,"azres",&azres,"slores",&slores,NULL ); strcpy(azdef,"n"); strcpy(slodef,"n"); } else { /* This gets nasty because we have to convert to polar coordinates from ux, uy components */ sscanf(ux_residual_record,"%*s%*s%*s%*g%lg",&uxresid); sscanf(uy_residual_record,"%*s%*s%*s%*g%lg",&uyresid); /* We fetch the measured slowness vector to convert */ if( dbgetv(dbi,0, "arrival.slow",&u, "arrival.azimuth",&phi, NULL) == dbINVALID) { die(1,"save_assoc: dbgetv error reading arrival fields of input view at record %ld\n", dbi.record); } /* css stores slowness in s/deg, but we use s/km internally here so we have to convert */ slores = sqrt(uxresid*uxresid+uyresid*uyresid); slores *= KMPERDEG; /* this is the azimuth term: the transverse component of the slowness residual relative to the measured azimuth phi, divided by |u| and converted to degrees */ u /= KMPERDEG; duphi = uxresid*cos(rad(phi)) - uyresid*sin(rad(phi)); duphi /= u; azres = deg(duphi); strcpy(azdef,"d"); strcpy(slodef,"d"); } dist(rad(h.lat),rad(h.lon),rad(stalat),rad(stalon), &delta,&esaz); dist(rad(stalat),rad(stalon),rad(h.lat),rad(h.lon), &delta,&seaz); delta = deg(delta); seaz = deg(seaz); esaz = deg(esaz); if(dbaddv(dbo,0, "arid",arid, "orid",orid, "sta",sta, "phase",phase, "belief",belief, "delta",delta, "seaz",seaz, "esaz",esaz, "timeres",timeres, "timedef",timedef, "azres",azres, "azdef",azdef, "slores",slores, "slodef",slodef, "wgt",wgt, "vmodel",vmodel, "lddate",lddate, NULL ) == dbINVALID) { die(1,"save_assoc: dbaddv error writing assoc record for arid %ld\n", arid); } } freearr(residual_array,0); }
void DDF(vec_pair_ZZ_pX_long& factors, const ZZ_pX& ff, const ZZ_pX& hh, long verbose) { ZZ_pX f = ff; ZZ_pX h = hh; if (!IsOne(LeadCoeff(f))) LogicError("DDF: bad args"); factors.SetLength(0); if (deg(f) == 0) return; if (deg(f) == 1) { AddFactor(factors, f, 1, verbose); return; } long CompTableSize = 2*SqrRoot(deg(f)); long GCDTableSize = ZZ_pX_BlockingFactor; ZZ_pXModulus F; build(F, f); ZZ_pXArgument H; build(H, h, F, min(CompTableSize, deg(f))); long i, d, limit, old_n; ZZ_pX g, X; vec_ZZ_pX tbl(INIT_SIZE, GCDTableSize); SetX(X); i = 0; g = h; d = 1; limit = GCDTableSize; while (2*d <= deg(f)) { old_n = deg(f); sub(tbl[i], g, X); i++; if (i == limit) { ProcessTable(f, factors, F, i, tbl, d, verbose); i = 0; } d = d + 1; if (2*d <= deg(f)) { // we need to go further if (deg(f) < old_n) { // f has changed build(F, f); rem(h, h, f); rem(g, g, f); build(H, h, F, min(CompTableSize, deg(f))); } CompMod(g, g, H, F); } } ProcessTable(f, factors, F, i, tbl, d-1, verbose); if (!IsOne(f)) AddFactor(factors, f, deg(f), verbose); }
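In both DDF variants the polynomial g walks through X^{p^d} mod f as d increases (by modular composition here, by squaring in the GF2X version below), and the buffered differences g - X are periodically handed to ProcessTable, which gcds them against f and peels off the product of all irreducible factors of each degree. A hedged driver for the ZZ_pX version, assuming f is monic and square-free and remembering that h must be X^p mod f:

#include <NTL/ZZ_pXFactoring.h>
using namespace NTL;

int main()
{
    ZZ_p::init(to_ZZ(17));
    // f = X^4 - X = X (X - 1) (X^2 + X + 1) mod 17: monic, square-free,
    // with irreducible factors of degrees 1, 1 and 2.
    ZZ_pX f;
    SetCoeff(f, 4, 1);
    SetCoeff(f, 1, -1);
    ZZ_pXModulus F;
    build(F, f);
    ZZ_pX h;
    PowerXMod(h, ZZ_p::modulus(), F);   // h = X^p mod f, as DDF expects
    vec_pair_ZZ_pX_long factors;        // (product-of-factors, degree) pairs
    DDF(factors, f, h, 0);
    // expect one degree-1 entry (the product X^2 - X) and one degree-2 entry
    return 0;
}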
void DDF(vec_pair_GF2X_long& factors, const GF2X& ff, long verbose) { GF2X f = ff; if (IsZero(f)) LogicError("DDF: bad args"); factors.SetLength(0); if (deg(f) == 0) return; if (deg(f) == 1) { AddFactor(factors, f, 1, verbose); return; } long GCDTableSize = GF2X_BlockingFactor; GF2XModulus F; build(F, f); long i, d, limit, old_n; GF2X g, X; vec_GF2X tbl(INIT_SIZE, GCDTableSize); SetX(X); i = 0; SqrMod(g, X, F); d = 1; limit = GCDTableSize; while (2*d <= deg(f)) { old_n = deg(f); add(tbl[i], g, X); i++; if (i == limit) { ProcessTable(f, factors, F, i, tbl, d, verbose); i = 0; } d = d + 1; if (2*d <= deg(f)) { // we need to go further if (deg(f) < old_n) { // f has changed build(F, f); rem(g, g, F); } SqrMod(g, g, F); } } ProcessTable(f, factors, F, i, tbl, d-1, verbose); if (!IsOne(f)) AddFactor(factors, f, deg(f), verbose); }
// Note: poly is passed by value, not by reference, so the calling routine // keeps its original polynomial long evalPolyTopLevel(ZZX poly, long x, long p, long k=0) { if (verbose) cerr << "\n* evalPolyTopLevel: p="<<p<<", x="<<x<<", poly="<<poly; if (deg(poly)<=2) { // nothing to optimize here if (deg(poly)<1) return to_long(coeff(poly, 0)); DynamicPtxtPowers babyStep(x, p, deg(poly)); long ret = simplePolyEval(poly, babyStep, p); totalDepth = babyStep.getDepth(deg(poly)); return ret; } // How many baby steps: set k~sqrt(n/2), rounded up/down to a power of two // FIXME: There may be some room for optimization here: it may be possible // to choose k as something other than a power of two and still maintain // optimal depth, in principle we can try all possible values of k between // the two powers of two and choose the one that gives the least number // of multiplies, conditioned on minimum depth. if (k<=0) { long kk = (long) sqrt(deg(poly)/2.0); k = 1L << NextPowerOfTwo(kk); // heuristic: if k>>kk then use a smaller power of two if ((k==16 && deg(poly)>167) || (k>16 && k>(1.44*kk))) k /= 2; } cerr << ", k="<<k; long n = divc(deg(poly),k); // deg(p) = k*n +delta if (verbose) cerr << ", n="<<n<<endl; DynamicPtxtPowers babyStep(x, p, k); long x2k = babyStep.getPower(k); // Special case when deg(p)>k*(2^e -1) if (n==(1L << NextPowerOfTwo(n))) { // n is a power of two DynamicPtxtPowers giantStep(x2k, p, n/2, babyStep.getDepth(k)); if (verbose) cerr << "babyStep="<<babyStep<<", giantStep="<<giantStep<<endl; long ret = degPowerOfTwo(poly, k, babyStep, giantStep, p, totalDepth); if (verbose) { cerr << " degPowerOfTwo("<<poly<<") returns "<<ret<<", depth="<<totalDepth<<endl; if (ret != polyEvalMod(poly,babyStep[0], p)) { cerr << " ## recursive call failed, ret="<<ret<<"!=" << polyEvalMod(poly,babyStep[0], p)<<endl; exit(0); } // cerr << " babyStep depth=["; // for (long i=0; i<babyStep.size(); i++) // cerr << babyStep.getDepth(i+1)<<" "; // cerr << "]\n"; // cerr << " giantStep depth=["; // for (long i=0; i<giantStep.size(); i++) // cerr<<giantStep.getDepth(i+1)<<" "; // cerr << "]\n"; } return ret; } // If n is not a power of two, ensure that poly is monic and that // its degree is divisible by k, then call the recursive procedure ZZ topInv; // the inverse mod p of the top coefficient of poly (if any) bool divisible = (n*k == deg(poly)); // is the degree divisible by k? long nonInvertible = InvModStatus(topInv, LeadCoeff(poly), to_ZZ(p)); // 0 if invertible, 1 if not // FIXME: There may be some room for optimization below: instead of // adding a term X^{n*k} we can add X^{n'*k} for some n'>n, so long // as n' is smaller than the next power of two. We could save a few // multiplications since giantStep[n'] may be easier to compute than // giantStep[n] when n' has fewer 1's than n in its binary expansion. long extra = 0; // extra!=0 denotes an added term extra*X^{n*k} if (!divisible || nonInvertible) { // need to add a term // set extra = 1 - current-coeff-of-X^{n*k} extra = SubMod(1, to_long(coeff(poly,n*k)), p); SetCoeff(poly, n*k); // set the top coefficient of X^{n*k} to one topInv = to_ZZ(1); // inverse of new top coefficient is one } long t = (extra==0)?
divc(n,2) : n; DynamicPtxtPowers giantStep(x2k, p, t, babyStep.getDepth(k)); if (verbose) cerr << "babyStep="<<babyStep<<", giantStep="<<giantStep<<endl; long y; // the value to return long subDepth1 =0; if (!IsOne(topInv)) { long top = to_long(poly[n*k]); // record the current top coefficient // cerr << ", top-coeff="<<top; // Multiply by topInv modulo p to make into a monic polynomial poly *= topInv; for (long i=0; i<=n*k; i++) rem(poly[i], poly[i], to_ZZ(p)); poly.normalize(); y = recursivePolyEval(poly, k, babyStep, giantStep, p, subDepth1); if (verbose) { cerr << " recursivePolyEval("<<poly<<") returns "<<y<<", depth="<<subDepth1<<endl; if (y != polyEvalMod(poly,babyStep[0], p)) { cerr << "## recursive call failed, ret="<<y<<"!=" << polyEvalMod(poly,babyStep[0], p)<<endl; exit(0); } } y = MulMod(y, top, p); // multiply by the original top coefficient } else { y = recursivePolyEval(poly, k, babyStep, giantStep, p, subDepth1); if (verbose) { cerr << " recursivePolyEval("<<poly<<") returns "<<y<<", depth="<<subDepth1<<endl; if (y != polyEvalMod(poly,babyStep[0], p)) { cerr << "## recursive call failed, ret="<<y<<"!=" << polyEvalMod(poly,babyStep[0], p)<<endl; exit(0); } } } if (extra != 0) { // if we added a term, now is the time to subtract back if (verbose) cerr << ", subtracting "<<extra<<"*X^"<<k*n; extra = MulMod(extra, giantStep.getPower(n), p); totalDepth = max(subDepth1, giantStep.getDepth(n)); y = SubMod(y, extra, p); } else totalDepth = subDepth1; if (verbose) cerr << endl; return y; }
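evalPolyTopLevel is the plaintext reference for HElib's baby-step/giant-step (Paterson-Stockmeyer style) evaluation: for a degree-d polynomial it picks k near sqrt(d/2) rounded to a power of two (for d = 100, kk = 7, k = 8, n = divc(100, 8) = 13 chunks), precomputes the baby steps x, x^2, ..., x^k, and then combines the chunks using powers of x^k while tracking multiplicative depth. A minimal clear-text sketch of the same decomposition over Z_p, with hypothetical names, no depth tracking, and plain Horner folding of the chunks in place of the recursive giant-step logic:

#include <vector>

// Evaluate P(x) = sum_i c[i] x^i (mod p) by splitting the coefficients into
// chunks of size k:  P(x) = sum_j ( sum_{i<k} c[j*k+i] x^i ) * (x^k)^j.
// Assumes k >= 1, 0 <= x < p, 0 <= c[i] < p, and p*p fits in unsigned long long.
long evalBSGS(const std::vector<long>& c, long x, long p, long k)
{
    auto mulmod = [p](long a, long b) {
        return (long)(((unsigned long long)a * (unsigned long long)b) % (unsigned long long)p);
    };
    std::vector<long> baby(k);                    // baby[i] = x^i mod p, i < k
    baby[0] = 1 % p;
    for (long i = 1; i < k; i++) baby[i] = mulmod(baby[i - 1], x);
    long xk = mulmod(baby[k - 1], x);             // the giant step x^k mod p
    long nChunks = ((long)c.size() + k - 1) / k;  // ceil(#coefficients / k)
    long y = 0;                                   // Horner over chunks, high to low
    for (long j = nChunks - 1; j >= 0; j--) {
        long chunk = 0;                           // evaluate chunk j with baby steps
        for (long i = 0; i < k && j * k + i < (long)c.size(); i++)
            chunk = (chunk + mulmod(c[j * k + i], baby[i])) % p;
        y = (mulmod(y, xk) + chunk) % p;
    }
    return y;
}

Folding the chunks with Horner in x^k gives the same value in fewer lines, but its multiplicative depth grows linearly in the number of chunks rather than logarithmically, which is why the homomorphic code routes through degPowerOfTwo and recursivePolyEval instead.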