// Deep-copy assignment.
//
// Copies b's cells and regions by value, then re-wires all internal
// pointers (region -> board, region -> cells, cell -> region) so they
// refer into *this* board instead of into "b".
//
// Fix: guard against self-assignment — without it, init(rows_, cols_)
// would reinitialize (and potentially destroy) our own cells/regions
// before we copy from them, corrupting the board on `a = a`.
board_t& board_t::operator = (const board_t& b) {
    if (this == &b)
        return *this;

    // Resize our board to match the source.
    rows_ = b.rows_;
    cols_ = b.cols_;
    init(rows_, cols_);

    // Element-wise deep copy of cell and region contents.
    for (auto i = 0; i < rows_ * cols_; ++i) {
        *(cells_[i])   = *(b.cells_[i]);    // Deep copy
        *(regions_[i]) = *(b.regions_[i]);
    }

    // After the copy our regions' internal pointers still refer into
    // "b"'s cells & regions; redirect them into our own structures.
    auto ref_regions = b.regions();
    assert(ref_regions.size() == rows_ * cols_);
    for (auto i = 0; i < rows_ * cols_; ++i) {
        regions_[i]->set_board(this);
        if (ref_regions[i]->region() != region_t::INVALID_REGION) {
            auto ref_cells = ref_regions[i]->cells();
            assert(regions_[i]->size() == ref_cells.size());
            for (auto j = 0; j < ref_regions[i]->size(); ++j) {
                // Point my region at the cell with the same (row, col)
                // in *my* board; the indices come from "b"'s cells.
                auto r = ref_cells[j]->row();
                auto c = ref_cells[j]->col();
                auto cell = this->cell(r, c);
                regions_[i]->set_cell_ptr(cell, j);
                // Point the cell back to its newly assigned region.
                cell->set_region(regions_[i].get());
            }
        } else {
            assert(ref_regions[i]->region() == region_t::INVALID_REGION);
        }
    }
    return *this;
}
/*
 * Map a region with small pages: walk [region->va, region->va + size)
 * and program the page tables through pg_info, pulling a fresh
 * pre-allocated translation table from the *pgt list each time the VA
 * crosses a CORE_MMU_PGDIR_SIZE boundary.
 *
 * NOTE(review): assumes the pgt list holds enough tables for the whole
 * region and that successive calls process regions in increasing VA
 * order (both enforced only by the asserts below) — confirm at callers.
 */
static void set_pg_region(struct core_mmu_table_info *dir_info,
		struct tee_mmap_region *region, struct pgt **pgt,
		struct core_mmu_table_info *pg_info)
{
	/* Work on a local copy so the caller's region stays untouched */
	struct tee_mmap_region r = *region;
	vaddr_t end = r.va + r.size;
	/* Table descriptor attributes: propagate only the secure bit */
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have alloced enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			/* Install the new table into the directory entry */
			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		/* Clamp this chunk to the current pgdir and the region end */
		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		/* Paged entries are populated on demand, skip them here */
		if (!(r.attr & TEE_MATTR_PAGED))
			set_region(pg_info, &r);
		r.va += r.size;
		r.pa += r.size;
	}
}
/*
 * Firmware entry point for the VFD alarm clock.
 *
 * Boot: disable the boost converter, record/clear the reset cause,
 * re-arm the watchdog (2 s timeout), then read the analog comparator
 * to choose between low-power mode (clock only) and full init
 * (display, buttons, speaker, alarm).  The main loop feeds the
 * watchdog, goes to sleep whenever the comparator trips, and steps
 * through the "set ..." menu screens on button presses.
 *
 * NOTE(review): TCCR0B/ACSR/EICRA/etc. are AVR hardware registers;
 * their bit semantics here are taken from the inline comments and
 * should be confirmed against the MCU datasheet.
 */
int main(void) {
  // uint8_t i;
  uint8_t mcustate;

  // turn boost off
  TCCR0B = 0;
  BOOST_DDR |= _BV(BOOST);
  BOOST_PORT &= ~_BV(BOOST); // pull boost fet low

  // check if we were reset (save, then clear, the MCU status register)
  mcustate = MCUSR;
  MCUSR = 0;

  wdt_disable();
  // now turn it back on... 2 second time out
  //WDTCSR |= _BV(WDP0) | _BV(WDP1) | _BV(WDP2);
  //WDTCSR = _BV(WDE);
  wdt_enable(WDTO_2S);
  kickthedog();

  // we lost power at some point so lets alert the user
  // that the time may be wrong (the clock still works)
  timeunknown = 1;

  // have we read the time & date from eeprom?
  restored = 0;

  // setup uart
  uart_init(BRRL_192);
  //DEBUGP("VFD Clock");
  DEBUGP("!");

  //DEBUGP("turning on anacomp");
  // set up analog comparator
  ACSR = _BV(ACBG) | _BV(ACIE); // use bandgap, intr. on toggle!
  // settle!
  if (ACSR & _BV(ACO)) {
    // hmm we should not interrupt here
    ACSR |= _BV(ACI);

    // even in low power mode, we run the clock
    DEBUGP("clock init");
    clock_init();
  } else {
    // we aren't in low power mode so init stuff

    // init io's
    initbuttons();

    VFDSWITCH_PORT &= ~_BV(VFDSWITCH);

    DEBUGP("turning on buttons");
    // set up button interrupts
    DEBUGP("turning on alarmsw");
    // set off an interrupt if alarm is set or unset
    EICRA = _BV(ISC00);
    EIMSK = _BV(INT0);

    displaymode = SHOW_TIME;
    DEBUGP("vfd init");
    vfd_init();

    dimmer_init();

    DEBUGP("boost init");
    // restore saved brightness from EEPROM before starting the boost supply
    brightness_level = eeprom_read_byte((uint8_t *)EE_BRIGHT);
    boost_init(brightness_level);
    sei();

    region = eeprom_read_byte((uint8_t *)EE_REGION);

    DEBUGP("speaker init");
    speaker_init();

    beep(4000, 1);

    DEBUGP("clock init");
    clock_init();

    DEBUGP("alarm init");
    setalarmstate();
  }
  DEBUGP("done");

  // Main loop: feed the watchdog, sleep on low power, handle buttons.
  while (1) {
    //_delay_ms(100);
    kickthedog();
    //uart_putc_hex(ACSR);
    if (ACSR & _BV(ACO)) {
      // comparator tripped -> low power; sleep until it clears
      // DEBUGP("SLEEPYTIME");
      gotosleep();
      continue;
    }
    //DEBUGP(".");
    if (just_pressed & 0x1) {
      // menu button: advance through the settings screens
      just_pressed = 0;
      switch(displaymode) {
      case (SHOW_TIME):
        displaymode = SET_ALARM;
        display_str("set alarm");
        set_alarm();
        break;
      case (SET_ALARM):
        displaymode = SET_TIME;
        display_str("set time");
        set_time();
        // user just set the time, so it's no longer unknown
        timeunknown = 0;
        break;
      case (SET_TIME):
        displaymode = SET_DATE;
        display_str("set date");
        set_date();
        break;
      case (SET_DATE):
        displaymode = SET_BRIGHTNESS;
        display_str("set brit");
        set_brightness();
        break;
      case (SET_BRIGHTNESS):
        displaymode = SET_DIMMER;
        display_str("set dimr");
        set_dimmer();
        break;
      case (SET_DIMMER):
        displaymode = SET_VOLUME;
        display_str("set vol ");
        set_volume();
        break;
      case (SET_VOLUME):
        displaymode = SET_REGION;
        display_str("set regn");
        set_region();
        break;
      /*
      case (SET_REGION):
        displaymode = SET_SNOOZE;
        display_str("set snoz");
        set_snooze();
        break;
      */
      default:
        displaymode = SHOW_TIME;
      }
    } else if ((just_pressed & 0x2) || (just_pressed & 0x4)) {
      // other buttons: briefly show the date, then return to the time
      just_pressed = 0;
      displaymode = NONE;
      display_date(DAY);
      kickthedog();
      delayms(1500);
      kickthedog();
      displaymode = SHOW_TIME;
    }
  }
}
/*
 * Bring a configured-but-not-yet-realized region onto the hardware:
 * allocate the driver's per-region data, hand the region to the driver
 * (AddRegion), record it in the layer's 'added' list, and program the
 * hardware with its current configuration via set_region().  Rolls
 * back on every failure path.
 */
static DFBResult
realize_region( CoreLayerRegion *region )
{
     DFBResult          ret;
     CoreLayer         *layer;
     CoreLayerShared   *shared;
     DisplayLayerFuncs *funcs;

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );
     D_ASSERT( D_FLAGS_IS_SET( region->state, CLRSF_CONFIGURED ) );
     D_ASSERT( ! D_FLAGS_IS_SET( region->state, CLRSF_REALIZED ) );

     layer = dfb_layer_at( region->context->layer_id );

     D_ASSERT( layer != NULL );
     D_ASSERT( layer->shared != NULL );
     D_ASSERT( layer->funcs != NULL );

     shared = layer->shared;
     funcs  = layer->funcs;

     /* Must not be realized twice */
     D_ASSERT( ! fusion_vector_contains( &shared->added_regions, region ) );

     /* Allocate the driver's region data. */
     if (funcs->RegionDataSize) {
          int size = funcs->RegionDataSize();

          if (size > 0) {
               region->region_data = SHCALLOC( 1, size );
               if (!region->region_data)
                    return D_OOSHM();
          }
     }

     D_DEBUG_AT( Core_Layers, "Adding region (%d, %d - %dx%d) to '%s'.\n",
                 DFB_RECTANGLE_VALS( &region->config.dest ),
                 shared->description.name );

     /* Add the region to the driver. */
     if (funcs->AddRegion) {
          ret = funcs->AddRegion( layer, layer->driver_data,
                                  layer->layer_data, region->region_data,
                                  &region->config );
          if (ret) {
               D_DERROR( ret, "Core/Layers: Could not add region!\n" );

               /* Roll back the driver-data allocation. */
               if (region->region_data) {
                    SHFREE( region->region_data );
                    region->region_data = NULL;
               }

               return ret;
          }
     }

     /* Add the region to the 'added' list. */
     fusion_vector_add( &shared->added_regions, region );

     /* Update the region's state. */
     D_FLAGS_SET( region->state, CLRSF_REALIZED );

     /* Initially setup hardware. */
     ret = set_region( region, &region->config, CLRCF_ALL, region->surface );
     if (ret) {
          /* Hardware setup failed: undo everything done above. */
          unrealize_region( region );
          return ret;
     }

     return DFB_OK;
}
/*
 * Apply a (possibly partial) configuration to a layer region.
 *
 * 'flags' selects which fields of 'config' are taken; all other fields
 * keep their current values.  The merged configuration is validated by
 * the driver first (TestRegion); only if the region is already realized
 * is it also programmed into the hardware (set_region).  The region's
 * stored configuration is updated only after both steps succeed.
 *
 * Returns DFB_FUSION if the region lock cannot be taken, otherwise the
 * driver's result code or DFB_OK.
 */
DFBResult
dfb_layer_region_set_configuration( CoreLayerRegion            *region,
                                    CoreLayerRegionConfig      *config,
                                    CoreLayerRegionConfigFlags  flags )
{
     DFBResult              ret;
     CoreLayer             *layer;
     DisplayLayerFuncs     *funcs;
     CoreLayerRegionConfig  new_config;

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );
     D_ASSERT( config != NULL );
     D_ASSERT( config->buffermode != DLBM_WINDOWS );
     D_ASSERT( (flags == CLRCF_ALL) || (region->state & CLRSF_CONFIGURED) );

     D_ASSUME( flags != CLRCF_NONE );
     D_ASSUME( ! (flags & ~CLRCF_ALL) );

     layer = dfb_layer_at( region->context->layer_id );

     D_ASSERT( layer != NULL );
     D_ASSERT( layer->funcs != NULL );
     D_ASSERT( layer->funcs->TestRegion != NULL );

     funcs = layer->funcs;

     /* Lock the region. */
     if (dfb_layer_region_lock( region ))
          return DFB_FUSION;

     /* Full configuration supplied? */
     if (flags == CLRCF_ALL) {
          new_config = *config;
     }
     else {
          /* Use the current configuration. */
          new_config = region->config;

          /* Update each modified entry. */
          if (flags & CLRCF_WIDTH)
               new_config.width = config->width;

          if (flags & CLRCF_HEIGHT)
               new_config.height = config->height;

          if (flags & CLRCF_FORMAT)
               new_config.format = config->format;

          if (flags & CLRCF_SURFACE_CAPS)
               new_config.surface_caps = config->surface_caps;

          if (flags & CLRCF_BUFFERMODE)
               new_config.buffermode = config->buffermode;

          if (flags & CLRCF_OPTIONS)
               new_config.options = config->options;

          if (flags & CLRCF_SOURCE_ID)
               new_config.source_id = config->source_id;

          if (flags & CLRCF_SOURCE)
               new_config.source = config->source;

          if (flags & CLRCF_DEST)
               new_config.dest = config->dest;

          if (flags & CLRCF_OPACITY)
               new_config.opacity = config->opacity;

          if (flags & CLRCF_ALPHA_RAMP) {
               new_config.alpha_ramp[0] = config->alpha_ramp[0];
               new_config.alpha_ramp[1] = config->alpha_ramp[1];
               new_config.alpha_ramp[2] = config->alpha_ramp[2];
               new_config.alpha_ramp[3] = config->alpha_ramp[3];
          }

          if (flags & CLRCF_SRCKEY)
               new_config.src_key = config->src_key;

          if (flags & CLRCF_DSTKEY)
               new_config.dst_key = config->dst_key;

          if (flags & CLRCF_PARITY)
               new_config.parity = config->parity;
     }

     /* Check if the new configuration is supported. */
     ret = funcs->TestRegion( layer, layer->driver_data, layer->layer_data,
                              &new_config, NULL );
     if (ret) {
          dfb_layer_region_unlock( region );
          return ret;
     }

     /* Propagate new configuration to the driver if the region is realized. */
     if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
          ret = set_region( region, &new_config, flags, region->surface );
          if (ret) {
               dfb_layer_region_unlock( region );
               return ret;
          }
     }

     /* Update the region's current configuration. */
     region->config = new_config;

     /* Update the region's state. */
     D_FLAGS_SET( region->state, CLRSF_CONFIGURED );

     /* Unlock the region. */
     dfb_layer_region_unlock( region );

     return DFB_OK;
}
/**
 * evl_kbufread() - Copy records from evlog circular buffer into user space.
 * If successful, returns the number of bytes copied; else returns a
 * negative error code.
 *
 * @retbuf: pointer to the buffer to be filled with the event records
 * @bufsize: length, in bytes, of retbuf
 */
int
evl_kbufread(char *retbuf, size_t bufsize)
{
	char *rec;
	size_t rec_size;
	int error = 0;
	int retbuflen = 0;
	char *tail, *buf = retbuf;

	/* Caller must provide room for at least one record header. */
	if (bufsize < REC_HDR_SIZE) {
		return -EINVAL;
	}
	/*
	 * We expect that only the logging daemon will be running here,
	 * but serialize access just in case.
	 */
	error = down_interruptible(&evl_read_sem);
	if (error == -EINTR) {
		return -EINTR;
	}

	/* Go to sleep if the buffer is empty. */
	error = wait_event_interruptible(readq,
		(evl_ebuf.bf_head != evl_ebuf.bf_tail));
	if (error) {
		up(&evl_read_sem);
		return error;
	}

	/*
	 * Assemble message(s) into the user buffer, as many as will
	 * fit. On running out of space in the buffer, try to copy
	 * the header for the overflowing message. This means that
	 * there will always be at least a header returned. The caller
	 * must compare the numbers of bytes returned (remaining) with
	 * the length of the message to see if the entire message is
	 * present. A subsequent read will get the entire message,
	 * including the header (again).
	 *
	 * For simplicity, take a snapshot of bf_tail, and don't read
	 * past that even if evl_kwrite_buf() pours in more records while
	 * we're draining. We'll get those new records next time around.
	 */
	tail = evl_ebuf.bf_tail;
	rec = evl_ebuf.bf_head;

	if (rec == tail) {
		/* Should not happen. Buffer must have at least one record. */
		error = -EFAULT;
		goto out;
	}
	do {
		struct cbregion rg;
		__u16 vardata_size; /* type must match rec.log_size */

		/* No room left for even a header: stop here. */
		if (bufsize < REC_HDR_SIZE) {
			break;
		}
		/*
		 * Extract log_size from header, which could be split due to
		 * wraparound, or misaligned.
		 */
		set_region(&rg, rec + size_offset, sizeof(vardata_size));
		copy_from_cbuf(&rg, (char*) &vardata_size);
		rec_size = REC_HDR_SIZE + vardata_size;

		if (bufsize < rec_size) {
			/*
			 * Copyout only the header 'cause user buffer can't
			 * hold full record.
			 */
			set_region(&rg, rec, REC_HDR_SIZE);
			error = copy_cbuf_to_user(&rg, buf);
			if (error) {
				error = -EFAULT;
				break;
			}
			bufsize -= REC_HDR_SIZE;
			retbuflen += REC_HDR_SIZE;
			break;
		}
		/* Whole record fits: copy header plus variable data. */
		set_region(&rg, rec, rec_size);
		error = copy_cbuf_to_user(&rg, buf);
		if (error) {
			error = -EFAULT;
			break;
		}
		/* rg_tail is the (wrap-adjusted) position past this record. */
		rec = rg.rg_tail;
		buf += rec_size;
		bufsize -= rec_size;
		retbuflen += rec_size;
	} while (rec != tail);

	if (error == 0) {
		/* Consume what we copied out of the circular buffer. */
		evl_ebuf.bf_head = rec;
		error = retbuflen;
	}
out:
	up(&evl_read_sem);
	return(error);
}
/*
 * Flip or update a region's surface on screen.
 *
 * For back-video/triple buffer modes the buffers are swapped (through
 * the driver if the region is realized) when the whole unrotated
 * surface is affected; otherwise execution deliberately falls through
 * to a back-to-front copy, and finally the driver is notified about
 * the updated area (UpdateRegion).  The switch's fall-throughs are
 * intentional.
 */
DFBResult
dfb_layer_region_flip_update( CoreLayerRegion     *region,
                              const DFBRegion     *update,
                              DFBSurfaceFlipFlags  flags )
{
     DFBResult                ret = DFB_OK;
     DFBRegion                rotated;
     CoreLayer               *layer;
     CoreLayerContext        *context;
     CoreSurface             *surface;
     const DisplayLayerFuncs *funcs;

     if (update)
          D_DEBUG_AT( Core_Layers,
                      "dfb_layer_region_flip_update( %p, %p, 0x%08x ) <- [%d, %d - %dx%d]\n",
                      region, update, flags, DFB_RECTANGLE_VALS_FROM_REGION( update ) );
     else
          D_DEBUG_AT( Core_Layers,
                      "dfb_layer_region_flip_update( %p, %p, 0x%08x )\n",
                      region, update, flags );

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );

     /* Lock the region. */
     if (dfb_layer_region_lock( region ))
          return DFB_FUSION;

     D_ASSUME( region->surface != NULL );

     /* Check for NULL surface. */
     if (!region->surface) {
          D_DEBUG_AT( Core_Layers, " -> No surface => no update!\n" );
          dfb_layer_region_unlock( region );
          return DFB_UNSUPPORTED;
     }

     context = region->context;
     surface = region->surface;
     layer   = dfb_layer_at( context->layer_id );

     D_ASSERT( layer->funcs != NULL );

     funcs = layer->funcs;

     /* Unfreeze region? (Re-program or realize it on first flip.) */
     if (D_FLAGS_IS_SET( region->state, CLRSF_FROZEN )) {
          D_FLAGS_CLEAR( region->state, CLRSF_FROZEN );

          if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
               ret = set_region( region, &region->config, CLRCF_ALL, surface );
               if (ret)
                    D_DERROR( ret, "Core/LayerRegion: set_region() in dfb_layer_region_flip_update() failed!\n" );
          }
          else if (D_FLAGS_ARE_SET( region->state, CLRSF_ENABLED | CLRSF_ACTIVE )) {
               ret = realize_region( region );
               if (ret)
                    D_DERROR( ret, "Core/LayerRegion: realize_region() in dfb_layer_region_flip_update() failed!\n" );
          }

          if (ret) {
               dfb_layer_region_unlock( region );
               return ret;
          }
     }

     /* Depending on the buffer mode... */
     switch (region->config.buffermode) {
          case DLBM_TRIPLE:
          case DLBM_BACKVIDEO:
               /* Check if simply swapping the buffers is possible... */
               if (!(flags & DSFLIP_BLIT) && !surface->rotation &&
                   (!update || (update->x1 == 0 &&
                                update->y1 == 0 &&
                                update->x2 == surface->config.size.w - 1 &&
                                update->y2 == surface->config.size.h - 1)))
               {
                    D_DEBUG_AT( Core_Layers, " -> Going to swap buffers...\n" );

                    /* Use the driver's routine if the region is realized. */
                    if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                         D_ASSUME( funcs->FlipRegion != NULL );

                         ret = region_buffer_lock( region, surface, CSBR_BACK );
                         if (ret) {
                              dfb_layer_region_unlock( region );
                              return ret;
                         }

                         D_DEBUG_AT( Core_Layers, " -> Flipping region using driver...\n" );

                         if (funcs->FlipRegion)
                              ret = funcs->FlipRegion( layer,
                                                       layer->driver_data, layer->layer_data,
                                                       region->region_data, surface,
                                                       flags, &region->surface_lock );

                         dfb_surface_unlock( surface );
                    }
                    else {
                         D_DEBUG_AT( Core_Layers, " -> Flipping region not using driver...\n" );

                         /* Just do the hardware independent work. */
                         dfb_surface_lock( surface );
                         dfb_surface_flip( surface, false );
                         dfb_surface_unlock( surface );
                    }
                    break;
               }

               /* fall through */

          case DLBM_BACKSYSTEM:
               D_DEBUG_AT( Core_Layers, " -> Going to copy portion...\n" );

               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAITFORSYNC) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );
                    dfb_layer_wait_vsync( layer );
               }

               D_DEBUG_AT( Core_Layers, " -> Copying content from back to front buffer...\n" );

               /* ...or copy updated contents from back to front buffer. */
               dfb_back_to_front_copy_rotation( surface, update, surface->rotation );

               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAIT) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );
                    dfb_layer_wait_vsync( layer );
               }

               /* fall through */

          case DLBM_FRONTONLY:
               /* Tell the driver about the update if the region is realized. */
               if (funcs->UpdateRegion && D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                    if (surface) {
                         CoreSurfaceAllocation *allocation;

                         allocation = region->surface_lock.allocation;
                         D_ASSERT( allocation != NULL );

                         /* If hardware has written or is writing... */
                         if (allocation->accessed[CSAID_GPU] & CSAF_WRITE) {
                              D_DEBUG_AT( Core_Layers, " -> Waiting for pending writes...\n" );

                              /* ...wait for the operation to finish. */
                              if (!(flags & DSFLIP_PIPELINE))
                                   dfb_gfxcard_sync(); /* TODO: wait for serial instead */

                              allocation->accessed[CSAID_GPU] &= ~CSAF_WRITE;
                         }

                         dfb_surface_lock( surface );
                         dfb_surface_allocation_update( allocation, CSAF_READ );
                         dfb_surface_unlock( surface );
                    }

                    D_DEBUG_AT( Core_Layers, " -> Notifying driver about updated content...\n" );

                    /* Translate the update into rotated surface coordinates. */
                    dfb_region_from_rotated( &rotated, update,
                                             &surface->config.size, surface->rotation );

                    ret = funcs->UpdateRegion( layer,
                                               layer->driver_data, layer->layer_data,
                                               region->region_data, surface,
                                               &rotated, &region->surface_lock );
               }
               break;

          default:
               D_BUG("unknown buffer mode");
               ret = DFB_BUG;
     }

     D_DEBUG_AT( Core_Layers, " -> done.\n" );

     /* Unlock the region. */
     dfb_layer_region_unlock( region );

     return ret;
}
/*
 * Map physical range [addr, addr + len) of the given memory type into
 * the reserved late-mapping va space (MEM_AREA_RES_VASPACE), rounding
 * the range out to the translation table's granule.
 *
 * Returns true if the range is already mapped (or was mapped here
 * successfully), false if no reserved va space / table room exists.
 */
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	/* Round the requested range out to the table's granule */
	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly missaligned with
	 * granule.
	 *
	 * NOTE(review): the bound is checked with the unrounded 'len'
	 * rather than the rounded length 'l' (which can be up to a
	 * granule larger) — confirm this is intentional.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		/* Shrink the reserved va space by the carved-out chunk */
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;
	set_region(&tbl_info, map);
	return true;
}