bool PreconditionedDownhillType::improve_energy(bool verbose) {
  iter++;
  const double old_energy = energy();
  const VectorXd g = pgrad();
  // Let's immediately free the cached gradient stored internally!
  invalidate_cache();
  // We waste some memory storing newx (besides *x itself), but this
  // avoids roundoff weirdness of trying to add nu*g back to *x, which
  // won't always get us back to the same value.
  Grid newx(gd, *x - nu*g);
  double newE = f.integral(kT, newx);
  int num_tries = 0;
  while (better(old_energy, newE)) {
    nu *= 0.5;
    newx = *x - nu*g;
    newE = f.integral(kT, newx);
    if (num_tries++ > 40) {
      printf("PreconditionedDownhill giving up after %d tries...\n", num_tries);
      return false; // It looks like we can't do any better with this algorithm.
    }
  }
  *x = newx;
  invalidate_cache();
  nu *= 1.1;
  if (verbose) {
    //lm->print_info();
    print_info();
  }
  return true;
}

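The step-size logic above is a plain backtracking scheme: halve nu until the trial point lowers the energy, then grow it slightly for the next iteration. Below is a minimal self-contained sketch of that scheme on a toy one-dimensional objective; the names (objective, gradient, downhill_step) are illustrative only and not part of this code base.

// Minimal sketch of the adaptive step halving/growth used above, on a toy
// 1-D objective. All names here are hypothetical, not project API.
#include <cstdio>
#include <cmath>

static double objective(double x) { return (x - 3.0)*(x - 3.0); } // toy "energy"
static double gradient(double x)  { return 2.0*(x - 3.0); }

static bool downhill_step(double *x, double *nu) {
  const double old_e = objective(*x);
  const double g = gradient(*x);
  double trial = *x - (*nu)*g;
  int tries = 0;
  while (objective(trial) >= old_e) { // no improvement: back off the step
    *nu *= 0.5;
    trial = *x - (*nu)*g;
    if (tries++ > 40) return false;   // give up, as the minimizer above does
  }
  *x = trial;
  *nu *= 1.1;                         // cautiously grow the step for next time
  return true;
}

int main() {
  double x = 0.0, nu = 1.0;
  while (std::fabs(gradient(x)) > 1e-8 && downhill_step(&x, &nu)) {}
  std::printf("minimum near x = %g\n", x);
}
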
void Medial_explore_3::application_settings_changed(const QString& name) {
  if (name == "medial-explore-front-steps" || name == "debug-medial-explore-front") {
    if (Application_settings::get_bool_setting("debug-medial-explore-front")) {
      invalidate_cache();
      do_widget_repaint();
    }
  }
  if (name == "medial-surface-boundary-smoothing-steps") {
    invalidate_cache();
    has_mat = false;
    do_widget_repaint();
  }
}

/*FUNCTION*-------------------------------------------------------------------
*
* Function Name    : ftfl_ram_function
* Returned Value   : void
* Comments         :
*    Code required to run in SRAM to perform flash commands.
*    All else can be run in flash.
*    Parameters are the address of the flash status register and a
*    function to invalidate the cache.
*
*END*-----------------------------------------------------------------------*/
static void ftfl_ram_function
(
    /* [IN] Pointer to the flash status register */
    volatile uint8_t *ftfl_fstat_ptr,
    /* [IN] Pointer to function to invalidate the cache */
    void (* invalidate_cache)(volatile uint32_t)
)
{
    /* start flash write */
    *ftfl_fstat_ptr |= FTFL_FSTAT_CCIF_MASK;

    /* wait until execution complete */
    while (0 == ((*ftfl_fstat_ptr) & FTFL_FSTAT_CCIF_MASK)) {
        /* void */
    }

    if (invalidate_cache != NULL) {
        invalidate_cache((uint32_t)FLASHX_INVALIDATE_CACHE_ALL);
    }

    /* Flush the pipeline and ensure that all previous instructions are
     * completed before executing new instructions in flash */
#ifdef ISB
    ISB();
#endif
#ifdef DSB
    DSB();
#endif
}

static void on_objects_added (ECalClientView *view, GSList *objects, gpointer user_data)
{
    App *app = user_data;
    GSList *l;

    print_debug ("%s for calendar", G_STRFUNC);

    for (l = objects; l != NULL; l = l->next) {
        icalcomponent *ical = l->data;
        const char *uid;

        uid = icalcomponent_get_uid (ical);
        if (g_hash_table_lookup (app->appointments, uid) == NULL) {
            /* new appointment we don't know about => changed signal */
            invalidate_cache (app);
            app_schedule_changed (app);
        }
    }
}

/*
 * handler for button to switch to day view.
 */
extern void day_button (Widget widget, XtPointer data, XtPointer cbs)
{
    Calendar *c = calendar;

    if (c->view->glance == dayGlance)
        return;

    XtUnmapWidget(c->canvas);
    invalidate_cache(c);
    switch (c->view->glance) {
    case weekGlance:
        c->view->glance = dayGlance;
        cleanup_after_weekview(c);
        break;
    case yearGlance:
        c->view->glance = dayGlance;
        cleanup_after_yearview(c);
        break;
    case monthGlance:
        c->view->glance = dayGlance;
        cleanup_after_monthview(c);
        break;
    default:
        break;
    }
    init_mo(c);
    (void)init_dayview(c);
    XtMapWidget(c->canvas);
}

/*
 * Let timestamp.c know that we are suspending. It needs to take
 * snapshots of the current time, and do any pre-suspend work.
 */
void tsc_suspend(void)
{
    /*
     * What we need to do here is to get the time we suspended, so that we
     * know how much we should add to the resume.
     * This routine is called by each CPU, so we need to handle reentry.
     */
    if (tsc_gethrtime_enable) {
        /*
         * We put the tsc_read() inside the lock as it has no locking
         * constraints, and it puts the acquired value closer to the
         * time stamp (in case we delay getting the lock).
         */
        mutex_enter(&tod_lock);
        tsc_saved_tsc = tsc_read();
        tsc_saved_ts = TODOP_GET(tod_ops);
        mutex_exit(&tod_lock);
        /* We only want to do this once. */
        if (tsc_needs_resume == 0) {
            if (tsc_delta_onsuspend) {
                tsc_adjust_delta(tsc_saved_tsc);
            } else {
                tsc_adjust_delta(nsec_scale);
            }
            tsc_suspend_count++;
        }
    }

    invalidate_cache();
    tsc_needs_resume = 1;
}

void Medial_explore_3::receive_structure_changed(const std::string& name) {
  if (name == SHARED_MOFF_STRING) {
    std::cout << LOG_GREEN << "New moff string" << std::endl;
    invalidate_cache();
    has_mat = false;
  }
}

void Mesh_view_3::load_generic_file(const std::string& filename) {
  if (QString(filename.c_str()).endsWith(".off")) {
    std::ifstream f(filename.c_str());
    p.clear();
    f >> p;
    f.close();

    Polyhedron::Vertex_iterator v_it, v_end = p.vertices_end();
    double minx, miny, minz, maxx, maxy, maxz;
    for (v_it = p.vertices_begin(); v_it != v_end; ++v_it) {
      double x = v_it->point().x();
      double y = v_it->point().y();
      double z = v_it->point().z();
      if (v_it == p.vertices_begin()) {
        minx = maxx = x;
        miny = maxy = y;
        minz = maxz = z;
      } else {
        if (x < minx) minx = x;
        if (x > maxx) maxx = x;
        if (y < miny) miny = y;
        if (y > maxy) maxy = y;
        if (z < minz) minz = z;
        if (z > maxz) maxz = z;
      }
    }
    std::cout << "Bounding box: (" << minx << "," << miny << "," << minz << ") - ("
              << maxx << "," << maxy << "," << maxz << ")" << std::endl;
    double dx = maxx-minx, dy = maxy-miny, dz = maxz-minz;
    bounding_radius = sqrt(dx*dx + dy*dy + dz*dz) / 2.0;
    add_variable("Input bounding box radius", bounding_radius);
    std::cout << "Mesh_view_3: Polyhedron loaded from " << filename << " and contains "
              << p.size_of_facets() << " faces and " << p.size_of_vertices()
              << " vertices." << std::endl;
    invalidate_cache();
  }
}

void anm_link_node(struct anm_node *p, struct anm_node *c)
{
    c->next = p->child;
    p->child = c;

    c->parent = p;
    invalidate_cache(c);
}

/*
 * Free the maps for this task
 */
void pagemap_free(ptptr p)
{
    uint8_t *ptr = (uint8_t *) &p->p_page;

    pfree[pfptr--] = *ptr;
    if (*ptr != ptr[1]) {
        pfree[pfptr--] = ptr[1];
        invalidate_cache((uint16_t)ptr[1]);
    }
}

unsigned long Floating_set_precision (Floating* self, unsigned long precision)
{
    mpfr_set_prec(*self->value, precision);
    invalidate_cache(self);
    return precision;
}

Floating* Floating_set_positive_zero (Floating* self)
{
    mpfr_set_zero(*self->value, 1);
    invalidate_cache(self);
    return self;
}

int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
    struct sh_eth_dev *eth = dev->priv;
    int port = eth->port, ret = 0, timeout;
    struct sh_eth_info *port_info = &eth->port_info[port];

    if (!packet || len > 0xffff) {
        printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
        ret = -EINVAL;
        goto err;
    }

    /* packet must be a 4 byte boundary */
    if ((int)packet & 3) {
        printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n", __func__);
        ret = -EFAULT;
        goto err;
    }

    /* Update tx descriptor */
    flush_cache_wback(packet, len);
    port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
    port_info->tx_desc_cur->td1 = len << 16;
    /* Must preserve the end of descriptor list indication */
    if (port_info->tx_desc_cur->td0 & TD_TDLE)
        port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
    else
        port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

    flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

    /* Restart the transmitter if disabled */
    if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
        sh_eth_write(eth, EDTRR_TRNS, EDTRR);

    /* Wait until packet is transmitted */
    timeout = TIMEOUT_CNT;
    do {
        invalidate_cache(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
        udelay(100);
    } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

    if (timeout < 0) {
        printf(SHETHER_NAME ": transmit timeout\n");
        ret = -ETIMEDOUT;
        goto err;
    }

    port_info->tx_desc_cur++;
    if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
        port_info->tx_desc_cur = port_info->tx_desc_base;

err:
    return ret;
}

Floating* Floating_set_nan (Floating* self)
{
    mpfr_set_nan(*self->value);
    invalidate_cache(self);
    return self;
}

Floating* Floating_set_negative_infinity (Floating* self)
{
    mpfr_set_inf(*self->value, -1);
    invalidate_cache(self);
    return self;
}

static void on_objects_removed (ECalClientView *view, GSList *uids, gpointer user_data)
{
    App *app = user_data;

    print_debug ("%s for calendar", G_STRFUNC);

    invalidate_cache (app);
    app_schedule_changed (app);
}

void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
{
    struct anm_animation *anim = anm_get_active_animation(node, 0);
    if(!anim) return;

    anm_set_value(anim->tracks + ANM_TRACK_SCL_X, tm, scl.x);
    anm_set_value(anim->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
    anm_set_value(anim->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
    invalidate_cache(node);
}

void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
{
    struct anm_animation *anim = anm_get_active_animation(node, 0);
    if(!anim) return;

    anm_set_value(anim->tracks + ANM_TRACK_POS_X, tm, pos.x);
    anm_set_value(anim->tracks + ANM_TRACK_POS_Y, tm, pos.y);
    anm_set_value(anim->tracks + ANM_TRACK_POS_Z, tm, pos.z);
    invalidate_cache(node);
}

Floating* Floating_new (Runtime* rt)
{
    Floating* self = (Floating*) GC_ALLOCATE(rt, FLOATING);

    self->value = GC_NEW_FLOATING(rt);
    invalidate_cache(self);

    return self;
}

Floating* Floating_set_string_with_base (Floating* self, const char* string, int base)
{
    assert(self);
    mpfr_set_str(*self->value, string, base, MPFR_RNDN);
    invalidate_cache(self);
    return self;
}

Floating* Floating_set_string (Floating* self, const char* string)
{
    assert(self);
    mpfr_set_str(*self->value, string, 0, MPFR_RNDN);
    invalidate_cache(self);
    return self;
}

Floating* Floating_set_double (Floating* self, double number)
{
    assert(self);
    mpfr_set_d(*self->value, number, MPFR_RNDN);
    invalidate_cache(self);
    return self;
}

void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
{
    int i;
    struct anm_animation *anim = anm_get_active_animation(node, 0);
    if(!anim) return;

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        anm_set_track_extrapolator(anim->tracks + i, ex);
    }
    invalidate_cache(node);
}

void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
{
    struct anm_animation *anim = anm_get_active_animation(node, 0);
    if(!anim) return;

    anm_set_value(anim->tracks + ANM_TRACK_ROT_X, tm, rot.x);
    anm_set_value(anim->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
    anm_set_value(anim->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
    anm_set_value(anim->tracks + ANM_TRACK_ROT_W, tm, rot.w);
    invalidate_cache(node);
}

int sh_eth_recv(struct eth_device *dev)
{
    struct sh_eth_dev *eth = dev->priv;
    int port = eth->port, len = 0;
    struct sh_eth_info *port_info = &eth->port_info[port];
    uchar *packet;

    /* Check if the rx descriptor is ready */
    invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
    if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
        /* Check for errors */
        if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
            len = port_info->rx_desc_cur->rd1 & 0xffff;
            packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
            invalidate_cache(packet, len);
            net_process_received_packet(packet, len);
        }

        /* Make current descriptor available again */
        if (port_info->rx_desc_cur->rd0 & RD_RDLE)
            port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
        else
            port_info->rx_desc_cur->rd0 = RD_RACT;

        flush_cache_wback(port_info->rx_desc_cur, sizeof(struct rx_desc_s));

        /* Point to the next descriptor */
        port_info->rx_desc_cur++;
        if (port_info->rx_desc_cur >= port_info->rx_desc_base + NUM_RX_DESC)
            port_info->rx_desc_cur = port_info->rx_desc_base;
    }

    /* Restart the receiver if disabled */
    if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
        sh_eth_write(eth, EDRRR_R, EDRRR);

    return len;
}

/* Change position within the cache file, taking into account read caching. */
static off_t fcc_lseek(fcc_data *data, off_t offset, int whence)
{
    /* If we read some extra data in advance, and then want to know or use our
     * "current" position, we need to back up a little. */
    if (whence == SEEK_CUR && data->valid_bytes) {
        assert(data->cur_offset > 0);
        assert(data->cur_offset <= data->valid_bytes);
        offset -= (data->valid_bytes - data->cur_offset);
    }
    invalidate_cache(data);
    return lseek(data->fd, offset, whence);
}

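The offset adjustment above can be illustrated with concrete numbers: if 512 bytes were read ahead and the caller has consumed only 100 of them, the file descriptor sits 412 bytes past the caller's logical position, so a SEEK_CUR offset must be shifted back by that amount. Here is a small self-contained sketch of just that arithmetic; the struct and function names are hypothetical, merely mirroring the fields used in fcc_lseek.

// Hypothetical read-ahead bookkeeping, mirroring the fields used above.
#include <cassert>
#include <cstdio>
#include <sys/types.h>

struct readahead {
  off_t valid_bytes; // bytes sitting in the read-ahead buffer
  off_t cur_offset;  // bytes of that buffer already consumed by the caller
};

// Convert a caller-relative SEEK_CUR offset into the offset that must be
// passed to lseek() on the underlying fd, which is ahead by the unconsumed
// read-ahead bytes.
static off_t adjust_seek_cur(const readahead &ra, off_t offset) {
  assert(ra.cur_offset <= ra.valid_bytes);
  return offset - (ra.valid_bytes - ra.cur_offset);
}

int main() {
  readahead ra = {512, 100};
  // "Seek 0 from here" becomes an lseek of -412 on the fd.
  std::printf("%ld\n", (long)adjust_seek_cur(ra, 0));
}
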
int anm_unlink_node(struct anm_node *p, struct anm_node *c)
{
    struct anm_node *iter;

    if(p->child == c) {
        p->child = c->next;
        c->next = 0;
        invalidate_cache(c);
        return 0;
    }

    iter = p->child;
    while(iter->next) {
        if(iter->next == c) {
            iter->next = c->next;
            c->next = 0;
            invalidate_cache(c);
            return 0;
        }
        iter = iter->next;  /* advance, otherwise this loop never terminates */
    }
    return -1;
}

bool ConjugateGradientType::improve_energy(bool verbose) {
  iter++;
  //printf("I am running ConjugateGradient::improve_energy\n");
  const double E0 = energy();
  if (E0 != E0) {
    // There is no point continuing, since we're starting with a NaN!
    // So we may as well quit here.
    if (verbose) {
      printf("The initial energy is a NaN, so I'm quitting early from ConjugateGradientType::improve_energy.\n");
      f.print_summary("has nan:", E0);
      fflush(stdout);
    }
    return false;
  }
  double gdotd;
  {
    const VectorXd g = -grad();
    // Let's immediately free the cached gradient stored internally!
    invalidate_cache();
    // Note: my notation vaguely follows that of
    // [wikipedia](http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method).
    // I use the Polak-Ribiere method, with automatic direction reset.
    // Note that we could save some memory by using Fletcher-Reeves, and
    // it seems worth implementing that as an option for
    // memory-constrained problems (then we wouldn't need to store oldgrad).
    double beta = g.dot(g - oldgrad)/oldgradsqr;
    oldgrad = g;
    if (beta < 0 || beta != beta || oldgradsqr == 0) beta = 0;
    oldgradsqr = oldgrad.dot(oldgrad);
    direction = g + beta*direction;
    gdotd = oldgrad.dot(direction);
    if (gdotd < 0) {
      direction = oldgrad; // If our direction is uphill, reset to gradient.
      if (verbose) printf("reset to gradient...\n");
      gdotd = oldgrad.dot(direction);
    }
  }

  Minimizer lm = linmin(f, gd, kT, x, direction, -gdotd, &step);
  for (int i=0; i<100 && lm.improve_energy(verbose); i++) {
    if (verbose) lm.print_info("\t");
  }
  if (verbose) {
    //lm->print_info();
    print_info();
    printf("grad*dir/oldgrad*dir = %g\n", grad().dot(direction)/gdotd);
  }
  return (energy() < E0);
}

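The Polak-Ribiere update buried in the block above is worth isolating. The sketch below is a hypothetical, Eigen-free restatement using std::vector (not the project's Minimizer API): beta = g.(g - oldgrad)/|oldgrad|^2 is clamped to zero when it is negative, NaN, or the previous gradient norm is zero, which degrades gracefully to steepest descent, and an uphill search direction is likewise reset to the gradient.

// Hypothetical sketch of the Polak-Ribiere direction update with automatic
// reset, using plain std::vector instead of Eigen. Here g is the descent
// direction (negative gradient); oldgrad and direction must have the same
// size as g and may start as all zeros (with oldgradsqr == 0).
#include <vector>
#include <cmath>
#include <cstddef>
#include <cassert>

static double dot(const std::vector<double> &a, const std::vector<double> &b) {
  assert(a.size() == b.size());
  double s = 0;
  for (std::size_t i = 0; i < a.size(); ++i) s += a[i]*b[i];
  return s;
}

// Updates direction in place and returns g.dot(direction).
double polak_ribiere_direction(const std::vector<double> &g,
                               std::vector<double> &oldgrad,
                               double &oldgradsqr,
                               std::vector<double> &direction) {
  // beta = g.(g - oldgrad) / |oldgrad|^2, with automatic reset to zero.
  double beta = (dot(g, g) - dot(g, oldgrad)) / oldgradsqr;
  if (beta < 0 || std::isnan(beta) || oldgradsqr == 0) beta = 0;
  oldgrad = g;
  oldgradsqr = dot(oldgrad, oldgrad);
  for (std::size_t i = 0; i < direction.size(); ++i)
    direction[i] = g[i] + beta*direction[i];
  double gdotd = dot(oldgrad, direction);
  if (gdotd < 0) {           // uphill direction: fall back to the gradient
    direction = oldgrad;
    gdotd = dot(oldgrad, direction);
  }
  return gdotd;
}
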
static void invalidate_cache(struct anm_node *node)
{
    struct anm_node *c;
    struct mat_cache *cache = pthread_getspecific(node->cache_key);
    if(cache) {
        cache->time = cache->inv_time = ANM_TIME_INVAL;
    }

    c = node->child;
    while(c) {
        invalidate_cache(c);
        c = c->next;
    }
}

void Medial_explore_3::load_generic_file(const std::string& file_name) {
  if (QString(file_name.c_str()).endsWith(".off")) {
    mat.read_from_off(file_name.c_str());
    mat.compute_face_normals();
    mat.smooth_medial_surface_boundary(
        Application_settings::get_int_setting("medial-surface-boundary-smoothing-steps"));
    Face_iterator f_it, f_end = mat.faces_end();
    for (f_it = mat.faces_begin(); f_it != f_end; ++f_it) {
      f_it->angle = ((double)rand())/RAND_MAX * 90.0;
      // std::cout << "Angle: " << f_it->angle << std::endl;
    }
    invalidate_cache();
  }
  if (QString(file_name.c_str()).endsWith(".moff")) {
    mat.read_from_moff(file_name.c_str());
    mat.compute_face_normals();
    mat.smooth_medial_surface_boundary(
        Application_settings::get_int_setting("medial-surface-boundary-smoothing-steps"));
    add_variable("Faces - medial explore", mat.faces.size());
    add_variable("Vertices - medial explore", mat.vertices.size());
    add_variable("Edges - medial explore", mat.edges.size());
    has_mat = true;
    invalidate_cache();
    // widget->repaint();
  }
}