void TB_SaveSelect::SetVisible(bool enable, bool saving)
{
    fVisible = enable;
    if (!enable) return;

    game.showmapnametime = 0;

    fCoords.w = 244;
    fCoords.h = 152;
#ifdef _480X272     // 480x272 widescreen fix
    fCoords.x = (SCREEN_WIDTH / 2) - (fCoords.w / 2);
    fCoords.y = 30;
#else
    fCoords.x = 38;
    fCoords.y = 8;
#endif

    fNumFiles = MAX_SAVE_SLOTS;
    fSaving = saving;

    fCurSel = settings->last_save_slot;
    fPicXOffset = -24;

    // load all profiles; profile_load() returns 0/false on success,
    // so a false result here means the slot exists and was read
    memset(fHaveProfile, 0, sizeof(fHaveProfile));
    for (int i = 0; i < fNumFiles; i++)
    {
        if (!profile_load(GetProfileName(i), &fProfiles[i]))
            fHaveProfile[i] = true;
    }

    textbox.ClearText();
}
static inline void profile_load_scheduled(struct queue *q,
                                          bladerf_module module)
{
    struct queue_entry *e;
    uint32_t i;
    uint8_t used = 0;

    if ((q == NULL) || (q->count == 0)) {
        return;
    }

    /* Check the contents of the retune queue and load all the profiles we can
     * without causing them to step on each other. This should reduce retune
     * times in most scenarios because the profile will have already been
     * loaded into the RFFE when it becomes time to retune. */
    for (i = 0; i < q->count; i++) {
        e = peek_next_retune_offset(q, i);
        if (e != NULL) {
            if (e->state == ENTRY_STATE_NEW) {
                if (!(used & (1 << e->profile->profile_num))) {
                    /* Profile slot is available in RFFE, fill it */
                    profile_load(module, e->profile);

                    /* Mark profile slot used */
                    used |= 1 << e->profile->profile_num;
                }
            }
        }
    }
}
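/*
 * Standalone illustration (not from the firmware sources) of the bitmask
 * bookkeeping used in profile_load_scheduled() above: each RFFE profile
 * slot is filled at most once per pass over the queue, so later entries
 * that reuse an already-loaded slot are skipped. The slot numbers below
 * are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t slots[] = { 2, 5, 2, 7, 5 }; /* hypothetical profile_num values */
    uint8_t used = 0;

    for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
        if (!(used & (1 << slots[i]))) {
            printf("loading profile into RFFE slot %u\n", (unsigned)slots[i]);
            used |= 1 << slots[i];  /* mark this slot as taken */
        } else {
            printf("slot %u already loaded, skipping\n", (unsigned)slots[i]);
        }
    }
    return 0;
}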
profile *profile_search(profile_db *pdb, const char *name)
{
    DBT key, data;

    if (!pdb || !name)
        return NULL;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));

    /* Key the lookup on the profile name; the original left the key
     * zeroed, so every search missed. This assumes records were stored
     * keyed by the NUL-terminated name. */
    key.data = (void *)name;
    key.size = strlen(name) + 1;

    if (pdb->db->get(pdb->db, &key, &data, 0) == 0) {
        return profile_load(data.data, data.size);
    } else {
        return NULL;
    }
}
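/*
 * Hedged usage sketch for profile_search(). profile_free() is hypothetical
 * here; substitute whatever destructor pairs with profile_load() in the
 * real codebase.
 */
static void example_lookup(profile_db *pdb)
{
    profile *p = profile_search(pdb, "default");
    if (p != NULL) {
        /* ... use the profile ... */
        profile_free(p); /* hypothetical cleanup helper */
    }
}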
static inline void perform_work(struct queue *q, bladerf_module module)
{
    struct queue_entry *e = peek_next_retune(q);

    if (e == NULL) {
        return;
    }

    switch (e->state) {
        case ENTRY_STATE_NEW:
            /* Load the fast lock profile into the RFFE */
            profile_load(module, e->profile);

            /* Schedule the retune */
            e->state = ENTRY_STATE_SCHEDULED;
            tamer_schedule(module, e->timestamp);
            break;

        case ENTRY_STATE_SCHEDULED:
            /* Nothing to do.
             * Waiting for this entry to become ready */
            break;

        case ENTRY_STATE_READY:
            /* Activate the fast lock profile for this retune */
            profile_activate(module, e->profile);

            /* Drop the item from the queue */
            dequeue_retune(q, NULL);
            break;

        default:
            INCREMENT_ERROR_COUNT();
            break;
    }
}
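/*
 * Hedged sketch of the call pattern that drives perform_work(); the actual
 * firmware main loop is not part of this excerpt, so the loop below is
 * illustrative only. It assumes the rx_queue/tx_queue globals declared
 * elsewhere in the firmware (see pkt_retune2() below).
 */
static void service_retune_queues(void)
{
    for (;;) {
        perform_work(&rx_queue, BLADERF_MODULE_RX);
        perform_work(&tx_queue, BLADERF_MODULE_TX);
        /* ... service host packets, timer events, etc. ... */
    }
}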
profile_db *profile_db_open(const char *name, int flags, int mode)
{
    profile_db *pdb = (profile_db *)malloc(sizeof(profile_db));
    off_t size = 0;

    if (!pdb)
        return NULL;
    memset(pdb, 0, sizeof(profile_db));

    pdb->fd = open(name, flags, mode);
    if (pdb->fd < 0)
        goto error;

    /* Determine the file size by seeking to the end */
    size = lseek(pdb->fd, 0, SEEK_END);

    if (size > 0) {
        /*
         * Read profile data from the database: a profile count followed by
         * (size, payload) records, integers stored in network byte order.
         */
        u_long profiles_count, profile_size;
        char *buf = NULL;
        size_t buf_alloc = 0;

        lseek(pdb->fd, 0, SEEK_SET);
        if (read(pdb->fd, &profiles_count, sizeof(u_long)) != sizeof(u_long))
            goto error;

        pdb->profiles.alloc = ntohl(profiles_count);
        pdb->profiles.array =
            (profile **)malloc(sizeof(profile *) * pdb->profiles.alloc);
        if (!pdb->profiles.array)
            goto error;
        memset(pdb->profiles.array, 0,
               sizeof(profile *) * pdb->profiles.alloc);

        for (pdb->profiles.used = 0;
             pdb->profiles.used < pdb->profiles.alloc;
             pdb->profiles.used++) {
            if (read(pdb->fd, &profile_size, sizeof(u_long)) != sizeof(u_long))
                break;
            profile_size = ntohl(profile_size);

            /* Grow the scratch buffer only when the next record won't fit */
            if (buf_alloc < profile_size) {
                if (buf)
                    free(buf);
                buf = (char *)malloc(buf_alloc = profile_size);
                if (!buf)
                    goto error;
            }

            if (profile_size) {
                read(pdb->fd, buf, profile_size);
                pdb->profiles.array[pdb->profiles.used] =
                    profile_load(buf, profile_size);
            }
        }

        if (buf)
            free(buf);
    }

    return pdb;

error:
    if (pdb)
        profile_db_close(pdb);
    return NULL;
}
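/*
 * Hedged sketch of a writer for the on-disk layout that profile_db_open()
 * parses: a record count, then (size, payload) pairs, with integers in
 * network byte order. write_profile_blobs() and its arguments are
 * illustrative, not part of the real codebase. Note that the reader's use
 * of sizeof(u_long) is only portable where u_long is 32 bits; the same
 * caveat applies here.
 */
#include <sys/types.h>
#include <arpa/inet.h>
#include <unistd.h>

static int write_profile_blobs(int fd, char *const *blobs,
                               const u_long *sizes, u_long count)
{
    u_long be = htonl(count);

    if (write(fd, &be, sizeof(u_long)) != (ssize_t)sizeof(u_long))
        return -1;

    for (u_long i = 0; i < count; i++) {
        be = htonl(sizes[i]);
        if (write(fd, &be, sizeof(u_long)) != (ssize_t)sizeof(u_long))
            return -1;
        if (sizes[i] > 0 &&
            write(fd, blobs[i], sizes[i]) != (ssize_t)sizes[i])
            return -1;
    }
    return 0;
}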
void pkt_retune2(struct pkt_buf *b)
{
    int status = -1;
    bladerf_module module;
    uint8_t flags;
    uint64_t timestamp;
    uint64_t start_time;
    uint64_t end_time;
    uint64_t duration = 0;
    uint16_t nios_profile;
    uint8_t rffe_profile;
    uint8_t port;
    uint8_t spdt;
    fastlock_profile *profile;

    flags = NIOS_PKT_RETUNE2_RESP_FLAG_SUCCESS;

    nios_pkt_retune2_unpack(b->req, &module, &timestamp,
                            &nios_profile, &rffe_profile,
                            &port, &spdt);

    switch (module) {
        case BLADERF_MODULE_RX:
            profile = &fastlocks_rx[nios_profile];
            break;
        case BLADERF_MODULE_TX:
            profile = &fastlocks_tx[nios_profile];
            break;
        default:
            profile = NULL;
    }

    if (profile == NULL) {
        INCREMENT_ERROR_COUNT();
        status = -1;
    } else {
        /* Update the fastlock profile data */
        profile->profile_num = rffe_profile;
        profile->port = port;
        profile->spdt = spdt;
    }

    start_time = time_tamer_read(module);

    if (timestamp == NIOS_PKT_RETUNE2_NOW) {
        /* Fire off this retune operation now */
        switch (module) {
            case BLADERF_MODULE_RX:
            case BLADERF_MODULE_TX:
                /* Load the profile data into RFFE memory */
                profile_load(module, profile);

                /* Activate the fast lock profile for this retune */
                profile_activate(module, profile);

                flags |= NIOS_PKT_RETUNE2_RESP_FLAG_TSVTUNE_VALID;
                status = 0;
                break;

            default:
                INCREMENT_ERROR_COUNT();
                status = -1;
        }
    } else if (timestamp == NIOS_PKT_RETUNE2_CLEAR_QUEUE) {
        switch (module) {
            case BLADERF_MODULE_RX:
                reset_queue(&rx_queue);
                status = 0;
                break;
            case BLADERF_MODULE_TX:
                reset_queue(&tx_queue);
                status = 0;
                break;
            default:
                INCREMENT_ERROR_COUNT();
                status = -1;
        }
    } else {
        uint8_t queue_size;

        switch (module) {
            case BLADERF_MODULE_RX:
                queue_size = enqueue_retune(&rx_queue, profile, timestamp);
                profile_load_scheduled(&rx_queue, module);
                break;
            case BLADERF_MODULE_TX:
                queue_size = enqueue_retune(&tx_queue, profile, timestamp);
                profile_load_scheduled(&tx_queue, module);
                break;
            default:
                INCREMENT_ERROR_COUNT();
                queue_size = QUEUE_FULL;
        }

        if (queue_size == QUEUE_FULL) {
            status = -1;
        } else {
            status = 0;
        }
    }

    end_time = time_tamer_read(module);
    duration = end_time - start_time;

    if (status != 0) {
        flags &= ~(NIOS_PKT_RETUNE2_RESP_FLAG_SUCCESS);
    }

    nios_pkt_retune2_resp_pack(b->resp, duration, flags);
}
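/*
 * Hedged host-side sketch: requests like the ones handled above originate
 * from libbladeRF's scheduled-retune API, assuming the usual mapping of
 * BLADERF_RETUNE_NOW onto an immediate (NIOS_PKT_RETUNE2_NOW) retune. The
 * channel and frequencies are arbitrary example values; quick-tune setup
 * and most error handling are elided.
 */
#include <libbladeRF.h>

int example_retunes(struct bladerf *dev, bladerf_timestamp ts)
{
    /* Immediate retune */
    int status = bladerf_schedule_retune(dev, BLADERF_CHANNEL_RX(0),
                                         BLADERF_RETUNE_NOW, 915000000, NULL);
    if (status != 0)
        return status;

    /* Scheduled retune; queued on the FPGA side until timestamp ts */
    return bladerf_schedule_retune(dev, BLADERF_CHANNEL_RX(0), ts,
                                   2400000000ULL, NULL);
}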