/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;
	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count);
				dump_stack();

				lc_put(mdev->resync, &ext->lce);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			lc_changed(mdev->resync, &ext->lce);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				drbd_queue_work_front(&mdev->data.work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
		}
	} else {
DirectResult
voodoo_link_init_local( VoodooLink *link,
                        const char *path,
                        bool        raw )
{
     DirectResult        ret;
     int                 err;
     struct sockaddr_un  addr;
     Link               *l;

     D_ASSERT( link != NULL );
     D_ASSERT( path != NULL );

     l = D_CALLOC( 1, sizeof(Link) );
     if (!l)
          return D_OOM();

     /* Create the client socket. */
     l->fd[0] = socket( AF_LOCAL, SOCK_STREAM, 0 );
     if (l->fd[0] < 0) {
          ret = errno2result( errno );
          D_PERROR( "Voodoo/Link: Socket creation failed!\n" );
          D_FREE( l );
          return ret;
     }

     l->fd[1] = l->fd[0];

#if !VOODOO_BUILD_NO_SETSOCKOPT
     /* 'tos' and 'one' are presumably file-scope constants defined elsewhere
      * in this file; note these options target IP/TCP, not AF_LOCAL sockets. */
     if (setsockopt( l->fd[0], SOL_IP, IP_TOS, &tos, sizeof(tos) ) < 0)
          D_PERROR( "Voodoo/Manager: Could not set IP_TOS!\n" );

     if (setsockopt( l->fd[0], SOL_TCP, TCP_NODELAY, &one, sizeof(one) ) < 0)
          D_PERROR( "Voodoo/Manager: Could not set TCP_NODELAY!\n" );
#endif

     D_INFO( "Voodoo/Link: Connecting to '%s'...\n", path );

     memset( &addr, 0, sizeof(addr) );

     /* Set up the socket address (abstract namespace, hence sun_path + 1). */
     addr.sun_family = AF_UNIX;
     snprintf( addr.sun_path + 1, UNIX_PATH_MAX - 1, "%s", path );

     /* Connect to the server. */
     err = connect( l->fd[0], (struct sockaddr*) &addr,
                    strlen(addr.sun_path+1)+1 + sizeof(addr.sun_family) );
     if (err) {
          ret = errno2result( errno );
          D_PERROR( "Voodoo/Link: Socket connect failed!\n" );
          close( l->fd[0] );
          D_FREE( l );
          return ret;
     }

     D_INFO( "Voodoo/Link: Connected.\n" );

     DUMP_SOCKET_OPTION( l->fd[0], SO_SNDLOWAT );
     DUMP_SOCKET_OPTION( l->fd[0], SO_RCVLOWAT );
     DUMP_SOCKET_OPTION( l->fd[0], SO_SNDBUF );
     DUMP_SOCKET_OPTION( l->fd[0], SO_RCVBUF );

     if (!raw) {
          link->code = 0x80008676;

          if (write( l->fd[1], &link->code, sizeof(link->code) ) != sizeof(link->code)) {
               D_ERROR( "Voodoo/Link: Could not write initial four bytes!\n" );
               close( l->fd[0] );
               D_FREE( l );
               return DR_IO;
          }
     }

     D_INFO( "Voodoo/Link: Sent link code (%s).\n", raw ? "raw" : "packet" );

     if (pipe( l->wakeup_fds )) {
          ret = errno2result( errno );
          close( l->fd[0] );
          D_FREE( l );
          return ret;
     }

     link->priv        = l;
     link->Close       = Close;
     link->Read        = Read;
     link->Write       = Write;
     link->SendReceive = SendReceive;
     link->WakeUp      = WakeUp;
     link->WaitForData = WaitForData;

     return DR_OK;
}
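/* Why voodoo_link_init_local() writes at addr.sun_path + 1: on Linux, a
 * sun_path whose first byte is '\0' selects the abstract socket namespace,
 * so no filesystem entry is created.  A minimal, self-contained sketch of
 * that address setup (helper name and usage are illustrative, not part of
 * the Voodoo API):
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static socklen_t
example_make_abstract_addr( struct sockaddr_un *addr, const char *name )
{
     memset( addr, 0, sizeof(*addr) );

     addr->sun_family = AF_UNIX;

     /* Leading NUL byte marks the abstract namespace. */
     snprintf( addr->sun_path + 1, sizeof(addr->sun_path) - 1, "%s", name );

     /* Address length covers the family, the NUL byte and the name,
      * mirroring the strlen(sun_path+1)+1 + sizeof(sun_family) above. */
     return sizeof(addr->sun_family) + 1 + strlen( addr->sun_path + 1 );
}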
int w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct lc_element *updated = aw->al_ext;
	const unsigned int new_enr = aw->enr;
	const unsigned int evicted = aw->old_enr;
	struct al_transaction *buffer;
	sector_t sector;
	int i, n, mx;
	unsigned int extent_nr;
	u32 xor_sum = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
		complete(&aw->event);
		return 1;
	}
	/* do we have to do a bitmap write, first?
	 * TODO reduce maximum latency:
	 * submit both bios, then wait for both,
	 * instead of doing two synchronous sector writes.
	 */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
	buffer = (struct al_transaction *)page_address(mdev->md_io_page);

	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	n = lc_index_of(mdev->act_log, updated);

	buffer->updates[0].pos = cpu_to_be32(n);
	buffer->updates[0].extent = cpu_to_be32(new_enr);

	xor_sum ^= new_enr;

	mx = min_t(int, AL_EXTENTS_PT,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->updates[i+1].pos = cpu_to_be32(idx);
		buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
		xor_sum ^= extent_nr;
	}
	for (; i < AL_EXTENTS_PT; i++) {
		buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
		buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
		xor_sum ^= LC_FREE;
	}
	mdev->al_tr_cycle += AL_EXTENTS_PT;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	buffer->xor_sum = cpu_to_be32(xor_sum);

	sector = mdev->ldev->md.md_offset
	       + mdev->ldev->md.al_offset + mdev->al_tr_pos;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, TRUE);

	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
	mdev->al_tr_number++;

	mutex_unlock(&mdev->md_io_mutex);

	complete(&aw->event);
	put_ldev(mdev);

	return 1;
}
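/* The al_transaction integrity check above is a plain XOR accumulator over
 * every extent number written into the buffer, including the LC_FREE
 * fillers for unused slots.  The scheme in isolation (names and the free
 * marker value are illustrative, not the DRBD definitions):
 */
static unsigned int example_al_xor_sum(const unsigned int *extents, int n,
				       int slots, unsigned int free_marker)
{
	unsigned int xor_sum = 0;
	int i;

	for (i = 0; i < n; i++)
		xor_sum ^= extents[i];	/* real updates */

	for (; i < slots; i++)
		xor_sum ^= free_marker;	/* padding slots, like LC_FREE above */

	return xor_sum;
}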
int
main( int argc, char *argv[] )
{
     DirectResult         ret;
     FusionWorld         *world;
     StretRegion         *root;
     StretRegion         *child[16];
     int                  child_num = 0;
     StretIteration       iteration;
     DFBRegion            clip;
     FusionSHMPoolShared *pool;

     ret = fusion_enter( -1, 0, FER_ANY, &world );
     if (ret)
          return -1;

     ret = fusion_shm_pool_create( world, "StReT Test Pool", 0x10000,
                                   direct_config->debug, &pool );
     if (ret) {
          fusion_exit( world, false );
          return -1;
     }

     D_INFO( "StReT/Test: Starting...\n" );

     ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 2, 0, 0, 1000, 1000,
                                NULL, 0, pool, &root );
     if (ret) {
          D_DERROR( ret, "StReT/Test: Could not create root region!\n" );
          goto error_root;
     }
     else {
          ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 10, 10, 100, 100,
                                     root, 1, pool, &child[child_num++] );
          if (ret) {
               D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
               goto error_child;
          }
          else {
               ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 50, 50, 30, 30,
                                          child[0], 0, pool, &child[child_num++] );
               if (ret) {
                    D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
                    goto error_child;
               }

               ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 20, 20, 30, 30,
                                          child[0], 0, pool, &child[child_num++] );
               if (ret) {
                    D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
                    goto error_child;
               }
               else {
                    ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 10, 10, 10, 10,
                                               child[2], 0, pool, &child[child_num++] );
                    if (ret) {
                         D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
                         goto error_child;
                    }

                    ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 20, 20, 10, 10,
                                               child[2], 0, pool, &child[child_num++] );
                    if (ret) {
                         D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
                         goto error_child;
                    }
               }
          }

          ret = stret_region_create( 0, NULL, 0, SRF_ACTIVE, 1, 200, 10, 200, 200,
                                     root, 0, pool, &child[child_num++] );
          if (ret) {
               D_DERROR( ret, "StReT/Test: Could not create child region!\n" );
               goto error_child;
          }
     }

     stret_iteration_init( &iteration, root, NULL );

     clip = (DFBRegion) { 50, 50, 200, 59 };

     D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[4] );
     //D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[3] );
     D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[2] );
     //D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[1] );
     D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[0] );
     D_ASSERT( stret_iteration_next( &iteration, &clip ) == child[5] );
     D_ASSERT( stret_iteration_next( &iteration, &clip ) == root );
     D_ASSERT( stret_iteration_next( &iteration, &clip ) == NULL );

     stret_region_destroy( child[child_num-1] );

error_child:
     while (--child_num)
          stret_region_destroy( child[child_num-1] );

     stret_region_destroy( root );

error_root:
     fusion_shm_pool_destroy( world, pool );

     fusion_exit( world, false );

     return 0;
}
DFBResult
dfb_surface_pool_bridge_join( CoreDFB                      *core,
                              CoreSurfacePoolBridge        *bridge,
                              const SurfacePoolBridgeFuncs *funcs,
                              void                         *context )
{
     DFBResult ret;

     D_MAGIC_ASSERT( bridge, CoreSurfacePoolBridge );

     D_DEBUG_AT( Core_SurfPoolBridge, "%s( %p [%d], %p, %p )\n", __FUNCTION__,
                 bridge, bridge->bridge_id, funcs, context );

     D_ASSERT( core != NULL );
     D_ASSERT( funcs != NULL );

     D_ASSERT( bridge->bridge_id < MAX_SURFACE_POOL_BRIDGES );
     D_ASSERT( bridge->bridge_id == bridge_count );
     D_ASSERT( bridge_funcs[bridge->bridge_id] == NULL );

     /* Enforce same order as initialization to be used during join. */
     if (bridge->bridge_id != bridge_count) {
          D_ERROR( "Core/SurfacePoolBridge: Wrong order of joining bridges, got %d, should be %d!\n",
                   bridge->bridge_id, bridge_count );
          return DFB_BUG;
     }

     /* Allocate local bridge data. */
     if (bridge->bridge_local_data_size &&
         !(bridge_locals[bridge->bridge_id] = D_CALLOC( 1, bridge->bridge_local_data_size )))
          return D_OOM();

     /* Set function table of the bridge. */
     bridge_funcs[bridge->bridge_id] = funcs;

     /* Add to global bridge list. */
     bridge_array[bridge->bridge_id] = bridge;

     /* Adjust bridge count. */
     if (bridge_count < bridge->bridge_id + 1)
          bridge_count = bridge->bridge_id + 1;

     funcs = get_funcs( bridge );

     if (funcs->JoinPoolBridge) {
          ret = funcs->JoinPoolBridge( core, bridge, bridge->data, get_local(bridge), context );
          if (ret) {
               D_DERROR( ret, "Core/SurfacePoolBridge: Joining '%s' failed!\n", bridge->desc.name );

               if (bridge_locals[bridge->bridge_id]) {
                    D_FREE( bridge_locals[bridge->bridge_id] );
                    bridge_locals[bridge->bridge_id] = NULL;
               }

               bridge_array[bridge->bridge_id] = NULL;
               bridge_funcs[bridge->bridge_id] = NULL;

               bridge_count--;

               return ret;
          }
     }

     /* Insert new bridge into priority order */
     insert_bridge_local( bridge );

     return DFB_OK;
}
static DFBResult
wm_init_stack( CoreWindowStack *stack,
               void            *wm_data,
               void            *stack_data )
{
     DFBResult         ret;
     StackData        *data   = stack_data;
     WMData           *wmdata = wm_data;
     CoreLayerContext *context;
     CoreLayerRegion  *region;

     D_ASSERT( stack != NULL );
     D_ASSERT( stack->context != NULL );
     D_ASSERT( wm_data != NULL );
     D_ASSERT( stack_data != NULL );

     context = stack->context;
     D_ASSERT( context != NULL );

     ret = dfb_layer_context_get_primary_region( context, true, &region );
     if (ret) {
          D_DERROR( ret, "WM/UniQuE: Could not get the primary region!\n" );
          return ret;
     }

     /* Create the unique context. */
     ret = unique_context_create( wmdata->core, stack, region, context->layer_id,
                                  wmdata->shared, &data->context );

     dfb_layer_region_unref( region );

     if (ret) {
          D_DERROR( ret, "WM/UniQuE: Could not create the context!\n" );
          return ret;
     }

     /* Attach the global context listener. */
     ret = unique_context_attach_global( data->context,
                                         UNIQUE_WM_MODULE_CONTEXT_LISTENER,
                                         data, &data->context_reaction );
     if (ret) {
          unique_context_unref( data->context );
          D_DERROR( ret, "WM/UniQuE: Could not attach global context listener!\n" );
          return ret;
     }

     /* Inherit all local references from the layer context. */
     ret = unique_context_inherit( data->context, context );

     unique_context_unref( data->context );

     if (ret) {
          unique_context_detach_global( data->context, &data->context_reaction );
          D_DERROR( ret, "WM/UniQuE: Could not inherit from layer context!\n" );
          return ret;
     }

     data->stack = stack;

     D_MAGIC_SET( data, StackData );

     return DFB_OK;
}
static int
cont_prop_read(struct rdb_tx *tx, struct cont *cont, uint64_t bits,
	       daos_prop_t **prop_out)
{
	daos_prop_t	*prop = NULL;
	daos_iov_t	 value;
	uint64_t	 val, bitmap;
	uint32_t	 idx = 0, nr = 0;
	int		 rc = 0;

	bitmap = bits & DAOS_CO_QUERY_PROP_ALL;
	while (idx < DAOS_CO_QUERY_PROP_BITS_NR) {
		if (bitmap & 0x1)
			nr++;
		idx++;
		bitmap = bitmap >> 1;
	}
	if (nr == 0)
		return 0;
	D_ASSERT(nr <= DAOS_CO_QUERY_PROP_BITS_NR);
	prop = daos_prop_alloc(nr);
	if (prop == NULL)
		return -DER_NOMEM;

	idx = 0;
	if (bits & DAOS_CO_QUERY_PROP_LABEL) {
		daos_iov_set(&value, NULL, 0);
		rc = rdb_tx_lookup(tx, &cont->c_prop, &ds_cont_prop_label,
				   &value);
		if (rc != 0)
			D_GOTO(out, rc);
		if (value.iov_len > DAOS_PROP_LABEL_MAX_LEN) {
			D_ERROR("bad label length %zu (> %d).\n",
				value.iov_len, DAOS_PROP_LABEL_MAX_LEN);
			D_GOTO(out, rc = -DER_IO);
		}
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_LABEL;
		prop->dpp_entries[idx].dpe_str = strndup(value.iov_buf,
							 value.iov_len);
		if (prop->dpp_entries[idx].dpe_str == NULL)
			D_GOTO(out, rc = -DER_NOMEM);
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_LAYOUT_TYPE) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop,
				   &ds_cont_prop_layout_type, &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_LAYOUT_TYPE;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_LAYOUT_VER) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop,
				   &ds_cont_prop_layout_ver, &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_LAYOUT_VER;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_CSUM) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop, &ds_cont_prop_csum,
				   &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_CSUM;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_REDUN_FAC) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop,
				   &ds_cont_prop_redun_fac, &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_REDUN_FAC;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_REDUN_LVL) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop,
				   &ds_cont_prop_redun_lvl, &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_REDUN_LVL;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_SNAPSHOT_MAX) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop,
				   &ds_cont_prop_snapshot_max, &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_SNAPSHOT_MAX;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_COMPRESS) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop, &ds_cont_prop_compress,
				   &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_COMPRESS;
		prop->dpp_entries[idx].dpe_val = val;
		idx++;
	}
	if (bits & DAOS_CO_QUERY_PROP_ENCRYPT) {
		daos_iov_set(&value, &val, sizeof(val));
		rc = rdb_tx_lookup(tx, &cont->c_prop, &ds_cont_prop_encrypt,
				   &value);
		if (rc != 0)
			D_GOTO(out, rc);
		D_ASSERT(idx < nr);
		prop->dpp_entries[idx].dpe_type = DAOS_PROP_CO_ENCRYPT;
		prop->dpp_entries[idx].dpe_val = val;
	}

out:
	if (rc)
		daos_prop_free(prop);
	else
		*prop_out = prop;
	return rc;
}
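/* The counting loop at the top of cont_prop_read() is a plain population
 * count over the queried property bits.  Standalone sketch of the same
 * logic (the mask parameter stands in for DAOS_CO_QUERY_PROP_ALL):
 */
#include <stdint.h>

static unsigned int
example_count_prop_bits(uint64_t bits, uint64_t mask)
{
	uint64_t	bitmap = bits & mask;
	unsigned int	nr = 0;

	while (bitmap) {
		nr += bitmap & 0x1;
		bitmap >>= 1;
	}
	return nr;	/* number of property entries to allocate */
}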
static void *
processor_thread( DirectThread *thread, void *ctx )
{
     DirectResult                ret;
     bool                        started   = false;
     DirectProcessor            *processor = ctx;
     const DirectProcessorFuncs *funcs;

     D_DEBUG_AT( Direct_Processor, "%s( %p, %p )...\n", __FUNCTION__, thread, ctx );

     D_MAGIC_ASSERT( processor, DirectProcessor );

     funcs = processor->funcs;
     D_ASSERT( funcs != NULL );
     D_ASSERT( funcs->Process != NULL );

     while (!processor->stop) {
          ProcessorCommand *command = direct_fifo_pull( &processor->commands );

          if (command) {
               D_DEBUG_AT( Direct_Processor, "=---### %p - %p (%s)\n",
                           command, command + 1, processor->name );

               D_MAGIC_ASSERT( command, ProcessorCommand );

               if (!started) {
                    if (funcs->Start)
                         funcs->Start( processor, processor->context );

                    started = true;
               }

               ret = funcs->Process( processor, command + 1, processor->context );
               if (ret)
                    D_DERROR( ret, "Direct/Processor: Processing failed! (%s)\n",
                              processor->name );
          }
          else {
               if (started) {
                    if (funcs->Stop)
                         funcs->Stop( processor, processor->context );

                    started = false;
               }

#if 0
               while (processor->lock) {
                    direct_mutex_lock( &processor->lock_mutex );

                    processor->locked = true;

                    direct_waitqueue_signal( &processor->lock_cond );

                    while (processor->lock)
                         direct_waitqueue_wait( &processor->lock_cond, &processor->lock_mutex );

                    processor->locked = false;

                    direct_waitqueue_signal( &processor->lock_cond );
                    direct_mutex_unlock( &processor->lock_mutex );
               }
#endif

               if (processor->idle_ms) {
                    while (direct_fifo_wait_timed( &processor->commands, processor->idle_ms ) == DR_TIMEOUT) {
                         if (funcs->Idle)
                              funcs->Idle( processor, processor->context );
                    }
               }
               else {
                    if (funcs->Idle)
                         funcs->Idle( processor, processor->context );

                    direct_fifo_wait( &processor->commands );
               }
          }
     }

     return NULL;
}
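/* processor_thread() only reaches the callbacks through the funcs table:
 * Process is mandatory (asserted above), while Start, Stop and Idle are
 * checked before each call.  A hypothetical wiring, assuming the callback
 * signature DirectResult (*)( DirectProcessor*, void*, void* ) implied by
 * the calls above (names here are illustrative only):
 */
static DirectResult
example_process( DirectProcessor *processor, void *data, void *context )
{
     (void) processor; (void) data; (void) context;

     return DR_OK;     /* handle one dequeued command payload */
}

static const DirectProcessorFuncs example_funcs = {
     .Process = example_process,
     /* .Start, .Stop and .Idle may stay NULL; they are optional above. */
};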
/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->mdev;
	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

	/* we must not complete the master bio, while it is
	 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 * not yet acknowledged by the peer
	 * not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 * the worker,
	 * the receiver,
	 * the bio_endio completion callbacks.
	 */
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;
	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
		return;

	if (req->master_bio) {
		/* this is data_received (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "handed_over_to_network" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put it the other way,
		 * only report failure, when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_endio_pri.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!hlist_unhashed(&req->collision))
			hlist_del(&req->collision);
		else
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* for writes we need to do some extra housekeeping */
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}
DFBResult
dfb_screen_get_layer_dimension( CoreScreen *screen,
                                CoreLayer  *layer,
                                int        *ret_width,
                                int        *ret_height )
{
     int                i;
     DFBResult          ret = DFB_UNSUPPORTED;
     CoreScreenShared  *shared;
     ScreenFuncs       *funcs;

     D_ASSERT( screen != NULL );
     D_ASSERT( screen->shared != NULL );
     D_ASSERT( screen->funcs != NULL );
     D_ASSERT( layer != NULL );
     D_ASSERT( ret_width != NULL );
     D_ASSERT( ret_height != NULL );

     shared = screen->shared;
     funcs  = screen->funcs;

     if (funcs->GetMixerState) {
          for (i=0; i<shared->description.mixers; i++) {
               const DFBScreenMixerConfig *config = &shared->mixers[i].configuration;

               if ((config->flags & DSMCONF_LAYERS) &&
                   DFB_DISPLAYLAYER_IDS_HAVE( config->layers, dfb_layer_id(layer) ))
               {
                    CoreMixerState state;

                    ret = funcs->GetMixerState( screen, screen->driver_data,
                                                screen->screen_data, i, &state );
                    if (ret == DFB_OK) {
                         if (state.flags & CMSF_DIMENSION) {
                              *ret_width  = state.dimension.w;
                              *ret_height = state.dimension.h;

                              return DFB_OK;
                         }

                         ret = DFB_UNSUPPORTED;
                    }
               }
          }

          for (i=0; i<shared->description.mixers; i++) {
               const DFBScreenMixerDescription *desc = &shared->mixers[i].description;

               if ((desc->caps & DSMCAPS_SUB_LAYERS) &&
                   DFB_DISPLAYLAYER_IDS_HAVE( desc->sub_layers, dfb_layer_id(layer) ))
               {
                    CoreMixerState state;

                    ret = funcs->GetMixerState( screen, screen->driver_data,
                                                screen->screen_data, i, &state );
                    if (ret == DFB_OK) {
                         if (state.flags & CMSF_DIMENSION) {
                              *ret_width  = state.dimension.w;
                              *ret_height = state.dimension.h;

                              return DFB_OK;
                         }

                         ret = DFB_UNSUPPORTED;
                    }
               }
          }
     }

     if (funcs->GetScreenSize)
          ret = funcs->GetScreenSize( screen, screen->driver_data,
                                      screen->screen_data, ret_width, ret_height );

     return ret;
}
static void
root_update( StretRegion     *region,
             void            *region_data,
             void            *update_data,
             unsigned long    arg,
             int              x,
             int              y,
             const DFBRegion *updates,
             int              num )
{
     int              i;
     CoreWindowStack *stack;
     UniqueContext   *context = region_data;
     CardState       *state   = update_data;

     D_ASSERT( region != NULL );
     D_ASSERT( region_data != NULL );
     D_ASSERT( update_data != NULL );
     D_ASSERT( updates != NULL );

     D_ASSERT( x == 0 );
     D_ASSERT( y == 0 );

     D_MAGIC_ASSERT( context, UniqueContext );
     D_MAGIC_ASSERT( state, CardState );

     stack = context->stack;
     D_ASSERT( stack != NULL );

     D_ASSERT( stack->bg.image != NULL ||
               (stack->bg.mode != DLBM_IMAGE && stack->bg.mode != DLBM_TILE) );

     D_DEBUG_AT( UniQuE_Root, "root_update( region %p, num %d )\n", region, num );

#if D_DEBUG_ENABLED
     for (i=0; i<num; i++) {
          D_DEBUG_AT( UniQuE_Root, " (%d) %4d,%4d - %4dx%4d\n",
                      i, DFB_RECTANGLE_VALS_FROM_REGION( &updates[i] ) );
     }
#endif

     switch (stack->bg.mode) {
          case DLBM_COLOR: {
               CoreSurface  *dest  = state->destination;
               DFBColor     *color = &stack->bg.color;
               DFBRectangle  rects[num];

               /* Set the background color. */
               if (DFB_PIXELFORMAT_IS_INDEXED( dest->config.format ))
                    dfb_state_set_color_index( state,
                                               dfb_palette_search( dest->palette, color->r,
                                                                   color->g, color->b, color->a ) );
               else
                    dfb_state_set_color( state, color );

               for (i=0; i<num; i++)
                    dfb_rectangle_from_region( &rects[i], &updates[i] );

               /* Simply fill the background. */
               dfb_gfxcard_fillrectangles( rects, num, state );

               break;
          }

          case DLBM_IMAGE: {
               CoreSurface *bg = stack->bg.image;

               /* Set blitting source. */
               state->source    = bg;
               state->modified |= SMF_SOURCE;

               /* Set blitting flags. */
               dfb_state_set_blitting_flags( state, DSBLIT_NOFX );

               /* Check the size of the background image. */
               if (bg->config.size.w == stack->width && bg->config.size.h == stack->height) {
                    for (i=0; i<num; i++) {
                         DFBRectangle dst = DFB_RECTANGLE_INIT_FROM_REGION( &updates[i] );

                         /* Simple blit for 100% fitting background image. */
                         dfb_gfxcard_blit( &dst, dst.x, dst.y, state );
                    }
               }
               else {
                    DFBRegion clip = state->clip;

                    for (i=0; i<num; i++) {
                         DFBRectangle src = { 0, 0, bg->config.size.w, bg->config.size.h };
                         DFBRectangle dst = { 0, 0, stack->width, stack->height };

                         /* Change clipping region. */
                         dfb_state_set_clip( state, &updates[i] );

                         /* Stretch blit for non fitting background images. */
                         dfb_gfxcard_stretchblit( &src, &dst, state );
                    }

                    /* Restore clipping region. */
                    dfb_state_set_clip( state, &clip );
               }

               /* Reset blitting source. */
               state->source    = NULL;
               state->modified |= SMF_SOURCE;

               break;
          }

          case DLBM_TILE: {
               CoreSurface *bg   = stack->bg.image;
               DFBRegion    clip = state->clip;

               /* Set blitting source. */
               state->source    = bg;
               state->modified |= SMF_SOURCE;

               /* Set blitting flags. */
               dfb_state_set_blitting_flags( state, DSBLIT_NOFX );

               for (i=0; i<num; i++) {
                    DFBRectangle src = { 0, 0, bg->config.size.w, bg->config.size.h };

                    /* Change clipping region. */
                    dfb_state_set_clip( state, &updates[i] );

                    /* Tiled blit (aligned). */
                    dfb_gfxcard_tileblit( &src, 0, 0, stack->width, stack->height, state );
               }

               /* Restore clipping region. */
               dfb_state_set_clip( state, &clip );

               /* Reset blitting source. */
               state->source    = NULL;
               state->modified |= SMF_SOURCE;

               break;
          }

          case DLBM_DONTCARE:
               break;

          default:
               D_BUG( "unknown background mode" );
               break;
     }
}
static DFBResult
x11Read( CoreSurfacePool       *pool,
         void                  *pool_data,
         void                  *pool_local,
         CoreSurfaceAllocation *allocation,
         void                  *alloc_data,
         void                  *destination,
         int                    pitch,
         const DFBRectangle    *rect )
{
     XImage            *image;
     XImage            *sub;
     x11PoolLocalData  *local = pool_local;
     x11AllocationData *alloc = alloc_data;
     DFBX11            *x11   = local->x11;

     D_DEBUG_AT( X11_Surfaces, "%s( %p )\n", __FUNCTION__, allocation );
     D_DEBUG_AT( X11_Surfaces, " -> allocation: %s\n", ToString_CoreSurfaceAllocation( allocation ) );

     D_MAGIC_ASSERT( pool, CoreSurfacePool );
     D_MAGIC_ASSERT( allocation, CoreSurfaceAllocation );
     D_ASSERT( destination != NULL );
     D_ASSERT( pitch >= 0 );
     DFB_RECTANGLE_ASSERT( rect );

     D_DEBUG_AT( X11_Surfaces, " => %p 0x%08lx [%4d,%4d-%4dx%4d]\n",
                 alloc, alloc->xid, DFB_RECTANGLE_VALS(rect) );

     XLockDisplay( x11->display );

#if 1
     image = XCreateImage( x11->display, alloc->visual, alloc->depth, ZPixmap, 0,
                           destination, rect->w, rect->h, 32, pitch );
     if (!image) {
          D_ERROR( "X11/Surfaces: XCreateImage( %dx%d, depth %d ) failed!\n",
                   rect->w, rect->h, alloc->depth );
          XUnlockDisplay( x11->display );
          return DFB_FAILURE;
     }

     sub = XGetSubImage( x11->display, alloc->xid,
                         rect->x, rect->y, rect->w, rect->h,
                         ~0, ZPixmap, image, 0, 0 );
#else
     image = XGetImage( x11->display, alloc->window ? alloc->window : alloc->xid,
                        rect->x, rect->y, rect->w, rect->h, ~0, ZPixmap );
#endif

     if (image) {
//          dfb_surface_buffer_dump_type_locked2( allocation->buffer, ".", "x11Read", false, image->data, image->bytes_per_line );

          /* FIXME: Why the X-hell is XDestroyImage() freeing *MY* data? */
          image->data = NULL;
          XDestroyImage( image );
     }

     XUnlockDisplay( x11->display );

#if 1
     if (!sub) {
          D_ERROR( "X11/Surfaces: XGetSubImage( %d,%d-%dx%d ) failed!\n",
                   DFB_RECTANGLE_VALS(rect) );
          return DFB_FAILURE;
     }
#endif

     return DFB_OK;
}
ColEdge(std::pair<int, int> p) : tar(p.first), col(p.second)
{ D_ASSERT(tar > 0); }

ColEdge(int _target, int _colour) : tar(_target), col(_colour)
{ D_ASSERT(tar > 0); }
/*
 * listen to the layer's surface
 */
ReactionResult
_dfb_layer_region_surface_listener( const void *msg_data, void *ctx )
{
     CoreSurfaceNotificationFlags   flags;
     CoreSurface                   *surface;
     CoreLayer                     *layer;
     CoreLayerShared               *shared;
     DisplayLayerFuncs             *funcs;

     const CoreSurfaceNotification *notification = msg_data;
     CoreLayerRegion               *region       = ctx;

     D_ASSERT( notification != NULL );
     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );

     D_DEBUG_AT( Core_Layers, "_dfb_layer_region_surface_listener( %p, %p ) <- 0x%08x\n",
                 notification, region, notification->flags );

     D_ASSERT( notification->surface != NULL );

     D_ASSUME( notification->surface == region->surface );

     if (notification->surface != region->surface)
          return RS_OK;

     layer = dfb_layer_at( region->context->layer_id );

     D_ASSERT( layer != NULL );
     D_ASSERT( layer->funcs != NULL );
     D_ASSERT( layer->funcs->SetRegion != NULL );
     D_ASSERT( layer->shared != NULL );

     funcs  = layer->funcs;
     shared = layer->shared;

     flags   = notification->flags;
     surface = notification->surface;

     if (flags & CSNF_DESTROY) {
          D_WARN( "layer region surface destroyed" );
          region->surface = NULL;
          return RS_REMOVE;
     }

     if (dfb_layer_region_lock( region ))
          return RS_OK;

     if (D_FLAGS_ARE_SET( region->state, CLRSF_REALIZED | CLRSF_CONFIGURED )) {
          if (D_FLAGS_IS_SET( flags, CSNF_PALETTE_CHANGE | CSNF_PALETTE_UPDATE )) {
               if (surface->palette)
                    funcs->SetRegion( layer, layer->driver_data, layer->layer_data,
                                      region->region_data, &region->config,
                                      CLRCF_PALETTE, surface, surface->palette );
          }

          if ((flags & CSNF_FIELD) && funcs->SetInputField)
               funcs->SetInputField( layer, layer->driver_data, layer->layer_data,
                                     region->region_data, surface->field );

          if ((flags & CSNF_ALPHA_RAMP) && (shared->description.caps & DLCAPS_ALPHA_RAMP)) {
               region->config.alpha_ramp[0] = surface->alpha_ramp[0];
               region->config.alpha_ramp[1] = surface->alpha_ramp[1];
               region->config.alpha_ramp[2] = surface->alpha_ramp[2];
               region->config.alpha_ramp[3] = surface->alpha_ramp[3];

               funcs->SetRegion( layer, layer->driver_data, layer->layer_data,
                                 region->region_data, &region->config,
                                 CLRCF_ALPHA_RAMP, surface, surface->palette );
          }
     }

     dfb_layer_region_unlock( region );

     return RS_OK;
}
unsigned int
dfb_colorhash_lookup( DFBColorHashCore *core,
                      CorePalette      *palette,
                      u8                r,
                      u8                g,
                      u8                b,
                      u8                a )
{
     unsigned int            pixel = PIXEL_ARGB(a, r, g, b);
     unsigned int            index = (pixel ^ (unsigned long) palette) % HASH_SIZE;
     DFBColorHashCoreShared *shared;

//     D_ASSUME( core != NULL );

     if (core) {
          D_MAGIC_ASSERT( core, DFBColorHashCore );
          D_MAGIC_ASSERT( core->shared, DFBColorHashCoreShared );
     }
     else
          core = core_colorhash;

     shared = core->shared;

     D_ASSERT( shared->hash != NULL );

     fusion_skirmish_prevail( &shared->hash_lock );

     /* try a lookup in the hash table */
     if (shared->hash[index].palette == palette &&
         shared->hash[index].pixel   == pixel)
     {
          /* set the return value */
          index = shared->hash[index].index;
     }
     else {
          /* look for the closest match */
          DFBColor     *entries  = palette->entries;
          int           min_diff = 0;
          unsigned int  i, min_index = 0;

          for (i = 0; i < palette->num_entries; i++) {
               int diff;

               int r_diff = (int) entries[i].r - (int) r;
               int g_diff = (int) entries[i].g - (int) g;
               int b_diff = (int) entries[i].b - (int) b;
               int a_diff = (int) entries[i].a - (int) a;

               if (a)
                    diff = (r_diff * r_diff + g_diff * g_diff +
                            b_diff * b_diff + ((a_diff * a_diff) >> 6));
               else
                    diff = (r_diff + g_diff + b_diff + (a_diff * a_diff));

               if (i == 0 || diff < min_diff) {
                    min_diff  = diff;
                    min_index = i;
               }

               if (!diff)
                    break;
          }

          /* store the matching entry in the hash table */
          shared->hash[index].pixel   = pixel;
          shared->hash[index].index   = min_index;
          shared->hash[index].palette = palette;

          /* set the return value */
          index = min_index;
     }

     /* release the lock taken above and hand back the palette index */
     fusion_skirmish_dismiss( &shared->hash_lock );

     return index;
}
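/* The fallback search above scores each palette entry with a squared RGB
 * distance, down-weighting alpha by 64 when the lookup colour has alpha;
 * fully transparent lookups use the cheaper linear mix.  The metric in
 * isolation (a plain restatement of the expression above):
 */
static int example_color_distance( int r_diff, int g_diff, int b_diff,
                                   int a_diff, int has_alpha )
{
     if (has_alpha)
          return r_diff * r_diff + g_diff * g_diff +
                 b_diff * b_diff + ((a_diff * a_diff) >> 6);

     return r_diff + g_diff + b_diff + a_diff * a_diff;
}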
static DFBResult
realize_region( CoreLayerRegion *region )
{
     DFBResult          ret;
     CoreLayer         *layer;
     CoreLayerShared   *shared;
     DisplayLayerFuncs *funcs;

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );
     D_ASSERT( D_FLAGS_IS_SET( region->state, CLRSF_CONFIGURED ) );
     D_ASSERT( ! D_FLAGS_IS_SET( region->state, CLRSF_REALIZED ) );

     layer = dfb_layer_at( region->context->layer_id );

     D_ASSERT( layer != NULL );
     D_ASSERT( layer->shared != NULL );
     D_ASSERT( layer->funcs != NULL );

     shared = layer->shared;
     funcs  = layer->funcs;

     D_ASSERT( ! fusion_vector_contains( &shared->added_regions, region ) );

     /* Allocate the driver's region data. */
     if (funcs->RegionDataSize) {
          int size = funcs->RegionDataSize();

          if (size > 0) {
               region->region_data = SHCALLOC( 1, size );
               if (!region->region_data)
                    return D_OOSHM();
          }
     }

     D_DEBUG_AT( Core_Layers, "Adding region (%d, %d - %dx%d) to '%s'.\n",
                 DFB_RECTANGLE_VALS( &region->config.dest ), shared->description.name );

     /* Add the region to the driver. */
     if (funcs->AddRegion) {
          ret = funcs->AddRegion( layer, layer->driver_data, layer->layer_data,
                                  region->region_data, &region->config );
          if (ret) {
               D_DERROR( ret, "Core/Layers: Could not add region!\n" );

               if (region->region_data) {
                    SHFREE( region->region_data );
                    region->region_data = NULL;
               }

               return ret;
          }
     }

     /* Add the region to the 'added' list. */
     fusion_vector_add( &shared->added_regions, region );

     /* Update the region's state. */
     D_FLAGS_SET( region->state, CLRSF_REALIZED );

     /* Initially setup hardware. */
     ret = set_region( region, &region->config, CLRCF_ALL, region->surface );
     if (ret) {
          unrealize_region( region );
          return ret;
     }

     return DFB_OK;
}
static DFBResult
system_initialize( CoreDFB *core, void **data )
{
     int         i, n;
     CoreScreen *screen;

     D_ASSERT( dfb_x11 == NULL );

     dfb_x11 = (DFBX11*) SHCALLOC( dfb_core_shmpool(core), 1, sizeof(DFBX11) );
     if (!dfb_x11) {
          D_ERROR( "DirectFB/X11: Couldn't allocate shared memory!\n" );
          return D_OOSHM();
     }

     dfb_x11_core = core;

     fusion_skirmish_init( &dfb_x11->lock, "X11 System", dfb_core_world(core) );

     fusion_call_init( &dfb_x11->call, call_handler, NULL, dfb_core_world(core) );

     dfb_surface_pool_initialize( core, &x11SurfacePoolFuncs, &dfb_x11->surface_pool );

     screen = dfb_screens_register( NULL, NULL, &x11PrimaryScreenFuncs );

     dfb_layers_register( screen, NULL, &x11PrimaryLayerFuncs );

     fusion_arena_add_shared_field( dfb_core_arena( core ), "x11", dfb_x11 );

     *data = dfb_x11;

     XInitThreads();

     dfb_x11->data_shmpool = dfb_core_shmpool_data( core );

     dfb_x11->display = XOpenDisplay(getenv("DISPLAY"));
     if (!dfb_x11->display) {
          D_ERROR("X11: Error in XOpenDisplay for '%s'\n", getenv("DISPLAY"));
          return DFB_INIT;
     }

     dfb_x11->screenptr = DefaultScreenOfDisplay(dfb_x11->display);
     dfb_x11->screennum = DefaultScreen(dfb_x11->display);

     for (i=0; i<dfb_x11->screenptr->ndepths; i++) {
          const Depth *depth = &dfb_x11->screenptr->depths[i];

          for (n=0; n<depth->nvisuals; n++) {
               Visual *visual = &depth->visuals[n];

               D_DEBUG( "X11/Visual: ID %02lu, depth %d, red 0x%06lx, green 0x%06lx, blue 0x%06lx, %d bits/rgb, %d entries\n",
                        visual->visualid, depth->depth,
                        visual->red_mask, visual->green_mask, visual->blue_mask,
                        visual->bits_per_rgb, visual->map_entries );

               switch (depth->depth) {
                    case 24:
                         if (visual->red_mask   == 0xff0000 &&
                             visual->green_mask == 0x00ff00 &&
                             visual->blue_mask  == 0x0000ff) {
                              dfb_x11->visuals[DFB_PIXELFORMAT_INDEX(DSPF_RGB32)] = visual;
                              dfb_x11->visuals[DFB_PIXELFORMAT_INDEX(DSPF_ARGB)]  = visual;
                         }
                         break;

                    case 16:
                         if (visual->red_mask   == 0xf800 &&
                             visual->green_mask == 0x07e0 &&
                             visual->blue_mask  == 0x001f)
                              dfb_x11->visuals[DFB_PIXELFORMAT_INDEX(DSPF_RGB16)] = visual;
                         break;

                    case 15:
                         if (visual->red_mask   == 0x7c00 &&
                             visual->green_mask == 0x03e0 &&
                             visual->blue_mask  == 0x001f)
                              dfb_x11->visuals[DFB_PIXELFORMAT_INDEX(DSPF_RGB555)] = visual;
                         break;
               }
          }
     }

     return DFB_OK;
}
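/* The depth/mask switch above recognizes a handful of packed-RGB visuals.
 * The same test in isolation, returning the matching DirectFB format name
 * (helper is illustrative; the real code stores Visual pointers instead):
 */
static const char *
example_format_for_masks( int depth, unsigned long red, unsigned long green, unsigned long blue )
{
     if (depth == 24 && red == 0xff0000 && green == 0x00ff00 && blue == 0x0000ff)
          return "DSPF_RGB32/DSPF_ARGB";

     if (depth == 16 && red == 0xf800 && green == 0x07e0 && blue == 0x001f)
          return "DSPF_RGB16";

     if (depth == 15 && red == 0x7c00 && green == 0x03e0 && blue == 0x001f)
          return "DSPF_RGB555";

     return NULL;     /* visual not usable for any of the formats above */
}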
/* HACK: implementation dumped in here for now, will move into context */
static DFBResult
wm_update_cursor( CoreWindowStack       *stack,
                  void                  *wm_data,
                  void                  *stack_data,
                  CoreCursorUpdateFlags  flags )
{
     DFBResult         ret;
     DFBRegion         old_region;
     WMData           *wmdata   = wm_data;
     StackData        *data     = stack_data;
     bool              restored = false;
     CoreLayer        *layer;
     CoreLayerRegion  *region;
     CardState        *state;
     CoreSurface      *surface;
     UniqueContext    *context;

     D_ASSERT( stack != NULL );
     D_ASSERT( stack->context != NULL );
     D_ASSERT( wm_data != NULL );
     D_ASSERT( stack_data != NULL );

     D_MAGIC_ASSERT( data, StackData );

     context = data->context;

     D_MAGIC_ASSERT( context, UniqueContext );

     /* Optimize case of invisible cursor moving. */
     if (!(flags & ~(CCUF_POSITION | CCUF_SHAPE)) && (!stack->cursor.opacity || !stack->cursor.enabled)) {
          context->cursor_bs_valid = false;
          return DFB_OK;
     }

     layer   = dfb_layer_at( context->layer_id );
     state   = &layer->state;
     region  = context->region;
     surface = context->surface;

     D_ASSERT( region != NULL );
     D_ASSERT( surface != NULL );

     if (flags & CCUF_ENABLE) {
          CoreSurface            *cursor_bs;
          DFBSurfaceCapabilities  caps = DSCAPS_NONE;

          dfb_surface_caps_apply_policy( stack->cursor.policy, &caps );

          D_ASSERT( context->cursor_bs == NULL );

          /* Create the cursor backing store surface. */
          ret = dfb_surface_create_simple( wmdata->core, stack->cursor.size.w, stack->cursor.size.h,
                                           region->config.format, region->config.colorspace,
                                           caps, CSTF_SHARED | CSTF_CURSOR,
                                           0, /* FIXME: no shared cursor objects, no cursor id */
                                           NULL, &cursor_bs );
          if (ret) {
               D_ERROR( "WM/Default: Failed creating backing store for cursor!\n" );
               return ret;
          }

          ret = dfb_surface_globalize( cursor_bs );
          D_ASSERT( ret == DFB_OK );

          /* Ensure valid back buffer for now.
           * FIXME: Keep a flag to know when back/front have been swapped and need a sync.
           */
          switch (region->config.buffermode) {
               case DLBM_BACKVIDEO:
               case DLBM_TRIPLE:
                    dfb_gfx_copy( surface, surface, NULL );
                    break;

               default:
                    break;
          }

          context->cursor_bs = cursor_bs;
     }
     else {
          D_ASSERT( context->cursor_bs != NULL );

          /* restore region under cursor */
          if (context->cursor_drawn) {
               DFBRectangle rect = { 0, 0,
                                     context->cursor_region.x2 - context->cursor_region.x1 + 1,
                                     context->cursor_region.y2 - context->cursor_region.y1 + 1 };

               D_ASSERT( stack->cursor.opacity || (flags & CCUF_OPACITY) );
               D_ASSERT( context->cursor_bs_valid );

               dfb_gfx_copy_to( context->cursor_bs, surface, &rect,
                                context->cursor_region.x1, context->cursor_region.y1, false );

               context->cursor_drawn = false;

               old_region = context->cursor_region;
               restored   = true;
          }

          if (flags & CCUF_SIZE) {
               ret = dfb_surface_reformat( context->cursor_bs,
                                           stack->cursor.size.w, stack->cursor.size.h,
                                           context->cursor_bs->config.format );
               if (ret) {
                    D_ERROR( "WM/Default: Failed resizing backing store for cursor!\n" );
                    return ret;
               }
          }
     }

     if (flags & (CCUF_ENABLE | CCUF_POSITION | CCUF_SIZE | CCUF_OPACITY)) {
          context->cursor_bs_valid = false;

          context->cursor_region.x1 = stack->cursor.x - stack->cursor.hot.x;
          context->cursor_region.y1 = stack->cursor.y - stack->cursor.hot.y;
          context->cursor_region.x2 = context->cursor_region.x1 + stack->cursor.size.w - 1;
          context->cursor_region.y2 = context->cursor_region.y1 + stack->cursor.size.h - 1;

          if (!dfb_region_intersect( &context->cursor_region, 0, 0, stack->width - 1, stack->height - 1 )) {
               D_BUG( "invalid cursor region" );
               return DFB_BUG;
          }
     }

     D_ASSERT( context->cursor_bs != NULL );

     if (flags & CCUF_DISABLE) {
          dfb_surface_unlink( &context->cursor_bs );
     }
     else if (stack->cursor.opacity) {
          /* backup region under cursor */
          if (!context->cursor_bs_valid) {
               DFBRectangle rect = DFB_RECTANGLE_INIT_FROM_REGION( &context->cursor_region );

               D_ASSERT( !context->cursor_drawn );

               /* FIXME: this requires using blitted flipping all the time,
                  but fixing it seems impossible, for now DSFLIP_BLIT is forced
                  in repaint_stack() when the cursor is enabled. */
               dfb_gfx_copy_to( surface, context->cursor_bs, &rect, 0, 0, true );

               context->cursor_bs_valid = true;
          }

          /* Set destination. */
          state->destination  = surface;
          state->modified    |= SMF_DESTINATION;

          /* Set clipping region. */
          dfb_state_set_clip( state, &context->cursor_region );

          /* draw cursor */
          unique_draw_cursor( stack, context, state, &context->cursor_region );

          /* Reset destination. */
          state->destination  = NULL;
          state->modified    |= SMF_DESTINATION;

          context->cursor_drawn = true;

          if (restored) {
               if (dfb_region_region_intersects( &old_region, &context->cursor_region ))
                    dfb_region_region_union( &old_region, &context->cursor_region );
               else
                    dfb_layer_region_flip_update( region, &context->cursor_region, DSFLIP_BLIT );

               dfb_layer_region_flip_update( region, &old_region, DSFLIP_BLIT );
          }
          else
               dfb_layer_region_flip_update( region, &context->cursor_region, DSFLIP_BLIT );
     }
     else if (restored)
          dfb_layer_region_flip_update( region, &old_region, DSFLIP_BLIT );

     return DFB_OK;
}
static DirectResult
init_pool( FusionSHM           *shm,
           FusionSHMPool       *pool,
           FusionSHMPoolShared *shared,
           const char          *name,
           unsigned int         max_size,
           bool                 debug )
{
     DirectResult         ret;
     int                  size;
     FusionWorld         *world;
     FusionSHMPoolNew     pool_new    = { .pool_id = 0 };
     FusionSHMPoolAttach  pool_attach = { .pool_id = 0 };
     FusionEntryInfo      info;
     char                 buf[FUSION_SHM_TMPFS_PATH_NAME_LEN + 32];

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p, %p, %p, '%s', %d, %sdebug )\n",
                 __FUNCTION__, shm, pool, shared, name, max_size, debug ? "" : "non-" );

     D_MAGIC_ASSERT( shm, FusionSHM );
     D_MAGIC_ASSERT( shm->shared, FusionSHMShared );
     D_ASSERT( name != NULL );
     D_ASSERT( max_size > sizeof(shmalloc_heap) );

     world = shm->world;

     D_MAGIC_ASSERT( world, FusionWorld );
     D_ASSERT( pool != NULL );
     D_ASSERT( shared != NULL );

     /* Fill out information for new pool. */
     pool_new.max_size = max_size;

     pool_new.max_size += BLOCKALIGN(sizeof(shmalloc_heap)) +
                          BLOCKALIGN( (max_size + BLOCKSIZE-1) / BLOCKSIZE * sizeof(shmalloc_info) );

     /* Create the new pool. */
     while (ioctl( world->fusion_fd, FUSION_SHMPOOL_NEW, &pool_new )) {
          if (errno == EINTR)
               continue;

          D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_NEW failed!\n" );
          return DR_FUSION;
     }

     /* Set the pool info. */
     info.type = FT_SHMPOOL;
     info.id   = pool_new.pool_id;

     snprintf( info.name, sizeof(info.name), "%s", name );

     ioctl( world->fusion_fd, FUSION_ENTRY_SET_INFO, &info );

     fusion_entry_add_permissions( world, FT_SHMPOOL, pool_new.pool_id, 0,
                                   FUSION_SHMPOOL_ATTACH,
                                   FUSION_SHMPOOL_DETACH,
                                   0 );

     /* Set pool to attach to. */
     pool_attach.pool_id = pool_new.pool_id;

     /* Attach to the pool. */
     while (ioctl( world->fusion_fd, FUSION_SHMPOOL_ATTACH, &pool_attach )) {
          if (errno == EINTR)
               continue;

          D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_ATTACH failed!\n" );

          /* Note: shared->pool_id is not initialized yet at this point,
           * so the freshly created pool id is used for destruction. */
          while (ioctl( world->fusion_fd, FUSION_SHMPOOL_DESTROY, &pool_new.pool_id )) {
               if (errno != EINTR) {
                    D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_DESTROY failed!\n" );
                    break;
               }
          }

          return DR_FUSION;
     }

     /* Generate filename. */
     snprintf( buf, sizeof(buf), "%s/fusion.%d.%d", shm->shared->tmpfs,
               fusion_world_index( shm->world ), pool_new.pool_id );

     /* Initialize the heap. */
     ret = __shmalloc_init_heap( shm, buf, pool_new.addr_base, max_size, &size );
     if (ret) {
          while (ioctl( world->fusion_fd, FUSION_SHMPOOL_DESTROY, &pool_new.pool_id )) {
               if (errno != EINTR) {
                    D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_DESTROY failed!\n" );
                    break;
               }
          }

          return ret;
     }

     /* Initialize local data. */
     pool->attached = true;
     pool->shm      = shm;
     pool->shared   = shared;
     pool->pool_id  = pool_new.pool_id;
     pool->filename = D_STRDUP( buf );

     /* Initialize shared data. */
     shared->active     = true;
     shared->debug      = debug;
     shared->shm        = shm->shared;
     shared->max_size   = pool_new.max_size;
     shared->pool_id    = pool_new.pool_id;
     shared->addr_base  = pool_new.addr_base;
     shared->heap       = pool_new.addr_base;
     shared->heap->pool = shared;

     fusion_skirmish_init2( &shared->lock, name, world, fusion_config->secure_fusion );

     D_MAGIC_SET( pool, FusionSHMPool );
     D_MAGIC_SET( shared, FusionSHMPoolShared );

     shared->name = SHSTRDUP( shared, name );

     return DR_OK;
}

static DirectResult
join_pool( FusionSHM           *shm,
           FusionSHMPool       *pool,
           FusionSHMPoolShared *shared )
{
     DirectResult         ret;
     FusionWorld         *world;
     FusionSHMPoolAttach  pool_attach = { .pool_id = 0 };
     char                 buf[FUSION_SHM_TMPFS_PATH_NAME_LEN + 32];

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p, %p, %p )\n", __FUNCTION__, shm, pool, shared );

     D_MAGIC_ASSERT( shm, FusionSHM );
     D_MAGIC_ASSERT( shm->shared, FusionSHMShared );
     D_MAGIC_ASSERT( shared, FusionSHMPoolShared );

#if !DIRECT_BUILD_DEBUGS
     if (shared->debug) {
          D_ERROR( "Fusion/SHM: Can't join debug enabled pool with pure-release library!\n" );
          return DR_UNSUPPORTED;
     }
#endif

     world = shm->world;

     D_MAGIC_ASSERT( world, FusionWorld );

     /* Set pool to attach to. */
     pool_attach.pool_id = shared->pool_id;

     /* Attach to the pool. */
     while (ioctl( world->fusion_fd, FUSION_SHMPOOL_ATTACH, &pool_attach )) {
          if (errno == EINTR)
               continue;

          D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_ATTACH failed!\n" );
          return DR_FUSION;
     }

     /* Generate filename. */
     snprintf( buf, sizeof(buf), "%s/fusion.%d.%d", shm->shared->tmpfs,
               fusion_world_index( shm->world ), shared->pool_id );

     /* Join the heap. */
     ret = __shmalloc_join_heap( shm, buf, pool_attach.addr_base,
                                 shared->max_size, !fusion_config->secure_fusion );
     if (ret) {
          while (ioctl( world->fusion_fd, FUSION_SHMPOOL_DETACH, &shared->pool_id )) {
               if (errno != EINTR) {
                    D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_DETACH failed!\n" );
                    break;
               }
          }

          return ret;
     }

     /* Initialize local data. */
     pool->attached = true;
     pool->shm      = shm;
     pool->shared   = shared;
     pool->pool_id  = shared->pool_id;
     pool->filename = D_STRDUP( buf );

     D_MAGIC_SET( pool, FusionSHMPool );

     return DR_OK;
}

static void
leave_pool( FusionSHM           *shm,
            FusionSHMPool       *pool,
            FusionSHMPoolShared *shared )
{
     FusionWorld *world;

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p, %p, %p )\n", __FUNCTION__, shm, pool, shared );

     D_MAGIC_ASSERT( shm, FusionSHM );
     D_MAGIC_ASSERT( pool, FusionSHMPool );
     D_MAGIC_ASSERT( shared, FusionSHMPoolShared );

     world = shm->world;

     D_MAGIC_ASSERT( world, FusionWorld );

     while (ioctl( world->fusion_fd, FUSION_SHMPOOL_DETACH, &shared->pool_id )) {
          if (errno != EINTR) {
               D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_DETACH failed!\n" );
               break;
          }
     }

     if (munmap( shared->addr_base, shared->max_size ))
          D_PERROR( "Fusion/SHM: Could not munmap shared memory file '%s'!\n", pool->filename );

     pool->attached = false;

     D_FREE( pool->filename );

     D_MAGIC_CLEAR( pool );
}

static void
shutdown_pool( FusionSHM           *shm,
               FusionSHMPool       *pool,
               FusionSHMPoolShared *shared )
{
     FusionWorld *world;

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p, %p, %p )\n", __FUNCTION__, shm, pool, shared );

     D_MAGIC_ASSERT( shm, FusionSHM );
     D_MAGIC_ASSERT( pool, FusionSHMPool );
     D_MAGIC_ASSERT( shared, FusionSHMPoolShared );

     world = shm->world;

     D_MAGIC_ASSERT( world, FusionWorld );

     SHFREE( shared, shared->name );

     fusion_dbg_print_memleaks( shared );

     while (ioctl( world->fusion_fd, FUSION_SHMPOOL_DESTROY, &shared->pool_id )) {
          if (errno != EINTR) {
               D_PERROR( "Fusion/SHM: FUSION_SHMPOOL_DESTROY failed!\n" );
               break;
          }
     }

     if (munmap( shared->addr_base, shared->max_size ))
          D_PERROR( "Fusion/SHM: Could not munmap shared memory file '%s'!\n", pool->filename );

     if (unlink( pool->filename ))
          D_PERROR( "Fusion/SHM: Could not unlink shared memory file '%s'!\n", pool->filename );

     shared->active = false;

     pool->attached = false;

     D_FREE( pool->filename );

     D_MAGIC_CLEAR( pool );

     fusion_skirmish_destroy( &shared->lock );

     D_MAGIC_CLEAR( shared );
}
static void *
direct_thread_main( void *arg )
{
     void         *ret;
     DirectThread *thread = arg;

     prctl( PR_SET_NAME, thread->name, 0, 0, 0 );

     pthread_setspecific( thread_key, thread );

     D_DEBUG_AT( Direct_ThreadInit, "%s( %p )\n", __FUNCTION__, arg );

     D_DEBUG_AT( Direct_ThreadInit, " -> starting...\n" );

     D_MAGIC_ASSERT( thread, DirectThread );

     pthread_cleanup_push( direct_thread_cleanup, thread );

     thread->tid = direct_gettid();

     D_DEBUG_AT( Direct_ThreadInit, " -> tid %d\n", thread->tid );

     __D_direct_thread_call_init_handlers( thread );

     /* Have all signals handled by the main thread. */
     if (direct_config->thread_block_signals)
          direct_signals_block_all();

     /* Lock the thread mutex. */
     D_DEBUG_AT( Direct_ThreadInit, " -> locking...\n" );
     direct_mutex_lock( &thread->lock );

     /* Indicate that our initialization has completed. */
     D_ASSERT( !thread->init );
     thread->init = true;

     D_DEBUG_AT( Direct_ThreadInit, " -> signalling...\n" );
     direct_waitqueue_signal( &thread->cond );

     /* Unlock the thread mutex. */
     D_DEBUG_AT( Direct_ThreadInit, " -> unlocking...\n" );
     direct_mutex_unlock( &thread->lock );

     if (thread->joining) {
          D_DEBUG_AT( Direct_Thread, " -> Being joined before entering main routine!\n" );
          return NULL;
     }

     D_MAGIC_ASSERT( thread, DirectThread );

     /* Call main routine. */
     D_DEBUG_AT( Direct_ThreadInit, " -> running...\n" );
     ret = thread->main( thread, thread->arg );

     D_DEBUG_AT( Direct_Thread, " -> Returning %p from '%s' (%s, %d)...\n",
                 ret, thread->name, direct_thread_type_name(thread->type), thread->tid );

     D_MAGIC_ASSERT( thread, DirectThread );

     pthread_cleanup_pop( 1 );

     return ret;
}
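/* direct_thread_main() brackets the main routine with pthread_cleanup_push()
 * and pthread_cleanup_pop(1) so the cleanup handler runs on cancellation as
 * well as on the normal path.  The bare pattern, independent of DirectFB:
 */
#include <pthread.h>

static void example_cleanup( void *arg )
{
     (void) arg;     /* release locks, free per-thread state, etc. */
}

static void *example_thread_main( void *arg )
{
     pthread_cleanup_push( example_cleanup, arg );

     /* ... thread body; cancellation points here also trigger the handler ... */

     pthread_cleanup_pop( 1 );     /* non-zero: run the handler on normal exit too */

     return NULL;
}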
static DirectResult
init_pool( FusionSHM           *shm,
           FusionSHMPool       *pool,
           FusionSHMPoolShared *shared,
           const char          *name,
           unsigned int         max_size,
           bool                 debug )
{
     DirectResult  ret;
     int           size;
     long          page_size;
     int           pool_id;
     unsigned int  pool_max_size;
     void         *pool_addr_base = NULL;
     FusionWorld  *world;
     char          buf[FUSION_SHM_TMPFS_PATH_NAME_LEN + 32];

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p, %p, %p, '%s', %d, %sdebug )\n",
                 __FUNCTION__, shm, pool, shared, name, max_size, debug ? "" : "non-" );

     D_MAGIC_ASSERT( shm, FusionSHM );
     D_MAGIC_ASSERT( shm->shared, FusionSHMShared );
     D_ASSERT( name != NULL );
     D_ASSERT( max_size > sizeof(shmalloc_heap) );

     world = shm->world;

     D_MAGIC_ASSERT( world, FusionWorld );
     D_ASSERT( pool != NULL );
     D_ASSERT( shared != NULL );

     page_size = direct_pagesize();

     pool_id = ++world->shared->pool_ids;

     pool_max_size = max_size + BLOCKALIGN(sizeof(shmalloc_heap)) +
                     BLOCKALIGN( (max_size + BLOCKSIZE-1) / BLOCKSIZE * sizeof(shmalloc_info) );

     pool_addr_base = world->shared->pool_base;
     world->shared->pool_base += ((pool_max_size + page_size - 1) & ~(page_size - 1)) + page_size;

     /* Exceeded limit? */
     if (world->shared->pool_base > world->shared->pool_max)
          return DR_NOSHAREDMEMORY;

     /* Generate filename. */
     snprintf( buf, sizeof(buf), "%s/fusion.%d.%d", shm->shared->tmpfs,
               fusion_world_index( world ), pool_id );

     /* Initialize the heap. */
     ret = __shmalloc_init_heap( shm, buf, pool_addr_base, max_size, &size );
     if (ret)
          return ret;

     /* Initialize local data. */
     pool->attached = true;
     pool->shm      = shm;
     pool->shared   = shared;
     pool->pool_id  = pool_id;
     pool->filename = D_STRDUP( buf );

     /* Initialize shared data. */
     shared->active     = true;
     shared->debug      = debug;
     shared->shm        = shm->shared;
     shared->max_size   = pool_max_size;
     shared->pool_id    = pool_id;
     shared->addr_base  = pool_addr_base;
     shared->heap       = pool_addr_base;
     shared->heap->pool = shared;

     fusion_skirmish_init( &shared->lock, name, world );

     D_MAGIC_SET( pool, FusionSHMPool );
     D_MAGIC_SET( shared, FusionSHMPoolShared );

     shared->name = SHSTRDUP( shared, name );

     return DR_OK;
}
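/* The address reservation above rounds the pool size up to a whole page and
 * then skips one further page before the next pool (page_size must be a
 * power of two for the mask trick to work).  The arithmetic in isolation:
 */
static unsigned long
example_pool_span( unsigned long pool_max_size, unsigned long page_size )
{
     unsigned long rounded = (pool_max_size + page_size - 1) & ~(page_size - 1);

     return rounded + page_size;     /* one extra page between pools, as above */
}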
DFBResult
dfb_surface_pool_bridge_initialize( CoreDFB                       *core,
                                    const SurfacePoolBridgeFuncs  *funcs,
                                    void                          *context,
                                    CoreSurfacePoolBridge        **ret_bridge )
{
     DFBResult              ret;
     CoreSurfacePoolBridge *bridge;
     FusionSHMPoolShared   *shmpool;

     D_DEBUG_AT( Core_SurfPoolBridge, "%s( %p, %p )\n", __FUNCTION__, funcs, context );

     D_ASSERT( core != NULL );
     D_ASSERT( funcs != NULL );
     D_ASSERT( ret_bridge != NULL );

     /* Check against bridge limit. */
     if (bridge_count == MAX_SURFACE_POOL_BRIDGES) {
          D_ERROR( "Core/SurfacePoolBridge: Maximum number of bridges (%d) reached!\n",
                   MAX_SURFACE_POOL_BRIDGES );
          return DFB_LIMITEXCEEDED;
     }

     D_ASSERT( bridge_funcs[bridge_count] == NULL );

     shmpool = dfb_core_shmpool( core );

     /* Allocate bridge structure. */
     bridge = SHCALLOC( shmpool, 1, sizeof(CoreSurfacePoolBridge) );
     if (!bridge)
          return D_OOSHM();

     /* Assign a bridge ID. */
     bridge->bridge_id = bridge_count++;

     /* Remember shared memory pool. */
     bridge->shmpool = shmpool;

     /* Set function table of the bridge. */
     bridge_funcs[bridge->bridge_id] = funcs;

     /* Add to global bridge list. */
     bridge_array[bridge->bridge_id] = bridge;

     D_MAGIC_SET( bridge, CoreSurfacePoolBridge );

     ret = init_bridge( core, bridge, funcs, context );
     if (ret) {
          bridge_funcs[bridge->bridge_id] = NULL;
          bridge_array[bridge->bridge_id] = NULL;
          bridge_count--;
          D_MAGIC_CLEAR( bridge );
          SHFREE( shmpool, bridge );
          return ret;
     }

     /* Insert new bridge into priority order */
     insert_bridge_local( bridge );

     /* Return the new bridge. */
     *ret_bridge = bridge;

     return DFB_OK;
}
DirectResult
fusion_shm_pool_create( FusionWorld          *world,
                        const char           *name,
                        unsigned int          max_size,
                        bool                  debug,
                        FusionSHMPoolShared **ret_pool )
{
     int              i;
     DirectResult     ret;
     FusionSHM       *shm;
     FusionSHMShared *shared;

     D_MAGIC_ASSERT( world, FusionWorld );
     D_MAGIC_ASSERT( world->shared, FusionWorldShared );
     D_ASSERT( name != NULL );
     D_ASSERT( max_size > 0 );
     D_ASSERT( ret_pool != NULL );

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p [%d], '%s', %d, %p, %sdebug )\n", __FUNCTION__,
                 world, world->shared->world_index, name, max_size, ret_pool, debug ? "" : "non-" );

#if !DIRECT_BUILD_DEBUGS
     debug = false;
#endif

     shm = &world->shm;

     D_MAGIC_ASSERT( shm, FusionSHM );

     shared = shm->shared;

     D_MAGIC_ASSERT( shared, FusionSHMShared );

     if (max_size < 8192) {
          D_ERROR( "Fusion/SHMPool: Maximum size (%d) should be 8192 at least!\n", max_size );
          return DR_INVARG;
     }

     ret = fusion_skirmish_prevail( &shared->lock );
     if (ret)
          return ret;

     if (shared->num_pools == FUSION_SHM_MAX_POOLS) {
          D_ERROR( "Fusion/SHMPool: Maximum number of pools (%d) already reached!\n",
                   FUSION_SHM_MAX_POOLS );
          ret = DR_LIMITEXCEEDED;
          goto error;
     }

     for (i=0; i<FUSION_SHM_MAX_POOLS; i++) {
          if (!shared->pools[i].active)
               break;

          D_MAGIC_ASSERT( &shared->pools[i], FusionSHMPoolShared );
          D_MAGIC_ASSUME( &shm->pools[i], FusionSHMPool );
     }

     D_ASSERT( i < FUSION_SHM_MAX_POOLS );

     D_DEBUG_AT( Fusion_SHMPool, " -> index %d\n", i );

     memset( &shm->pools[i], 0, sizeof(FusionSHMPool) );
     memset( &shared->pools[i], 0, sizeof(FusionSHMPoolShared) );

     shared->pools[i].index = i;

     ret = init_pool( shm, &shm->pools[i], &shared->pools[i], name, max_size, debug );
     if (ret)
          goto error;

     shared->num_pools++;

     fusion_skirmish_dismiss( &shared->lock );

     *ret_pool = &shared->pools[i];

     D_DEBUG_AT( Fusion_SHMPool, " -> %p\n", *ret_pool );

     return DR_OK;

error:
     fusion_skirmish_dismiss( &shared->lock );
     return ret;
}
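/* Hedged usage sketch for fusion_shm_pool_create(), mirroring the call in
 * the StReT test above: sizes below 8192 are rejected with DR_INVARG, and
 * the returned shared pool is later released via fusion_shm_pool_destroy().
 * Pool name and size here are arbitrary examples.
 */
static DirectResult
example_create_pool( FusionWorld *world, FusionSHMPoolShared **ret_pool )
{
     return fusion_shm_pool_create( world, "Example Pool", 0x10000,
                                    false /* debug */, ret_pool );
}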
DFBResult
dfb_surface_pool_bridges_transfer( CoreSurfaceBuffer     *buffer,
                                   CoreSurfaceAllocation *from,
                                   CoreSurfaceAllocation *to,
                                   const DFBRectangle    *rects,
                                   unsigned int           num_rects )
{
     DFBResult                     ret;
     int                           i;
     DFBRectangle                  rect;
     CoreSurfacePoolBridge        *bridge = NULL;
     const SurfacePoolBridgeFuncs *funcs;
     CoreSurfacePoolTransfer      *transfer;

     D_MAGIC_ASSERT( buffer, CoreSurfaceBuffer );
     CORE_SURFACE_ALLOCATION_ASSERT( from );
     CORE_SURFACE_ALLOCATION_ASSERT( to );
     D_ASSERT( rects != NULL || num_rects == 0 );
     D_ASSERT( num_rects > 0 || rects == NULL );

     D_DEBUG_AT( Core_SurfPoolBridge, "%s( %p [%dx%d %s], %p -> %p, %d rects )\n",
                 __FUNCTION__, buffer, buffer->config.size.w, buffer->config.size.h,
                 dfb_pixelformat_name( buffer->format ), from, to, num_rects );

     if (!rects) {
          rect.x = rect.y = 0;
          rect.w = buffer->config.size.w;
          rect.h = buffer->config.size.h;

          rects     = &rect;
          num_rects = 1;
     }

     for (i=0; i<bridge_count; i++) {
          D_ASSERT( bridge_order[i] >= 0 );
          D_ASSERT( bridge_order[i] < bridge_count );

          bridge = bridge_array[bridge_order[i]];
          D_MAGIC_ASSERT( bridge, CoreSurfacePoolBridge );

          funcs = get_funcs( bridge );
          D_ASSERT( funcs->CheckTransfer != NULL );

          ret = funcs->CheckTransfer( bridge, bridge->data, get_local(bridge),
                                      buffer, from, to );
          if (ret)
               bridge = NULL;
          else
               break;
     }

     if (!bridge)
          return DFB_UNSUPPORTED;

     D_DEBUG_AT( Core_SurfPoolBridge, " -> using '%s'\n", bridge->desc.name );

     ret = allocate_transfer( bridge, buffer, from, to, rects, num_rects, &transfer );
     if (ret)
          return ret;

     D_ASSERT( funcs->StartTransfer != NULL );

     D_DEBUG_AT( Core_SurfPoolBridge, " -> start...\n" );

     ret = funcs->StartTransfer( bridge, bridge->data, get_local(bridge),
                                 transfer, transfer->data );
     if (ret)
          D_DERROR( ret, "Core/SurfacePoolBridge: Starting transfer via '%s' failed!\n",
                    bridge->desc.name );
     else if (funcs->FinishTransfer) {
          D_DEBUG_AT( Core_SurfPoolBridge, " -> finish...\n" );

          ret = funcs->FinishTransfer( bridge, bridge->data, get_local(bridge),
                                       transfer, transfer->data );
          if (ret)
               D_DERROR( ret, "Core/SurfacePoolBridge: Finishing transfer via '%s' failed!\n",
                         bridge->desc.name );
     }

     D_DEBUG_AT( Core_SurfPoolBridge, " => %s\n", DirectResultString(ret) );

     deallocate_transfer( transfer );

     return ret;
}
DFBResult
dfb_layer_region_flip_update( CoreLayerRegion     *region,
                              const DFBRegion     *update,
                              DFBSurfaceFlipFlags  flags )
{
     DFBResult          ret = DFB_OK;
     CoreLayer         *layer;
     CoreLayerContext  *context;
     CoreSurface       *surface;
     SurfaceBuffer     *buffer;
     DisplayLayerFuncs *funcs;

     if (update)
          D_DEBUG_AT( Core_Layers,
                      "dfb_layer_region_flip_update( %p, %p, 0x%08x ) <- [%d, %d - %dx%d]\n",
                      region, update, flags, DFB_RECTANGLE_VALS_FROM_REGION( update ) );
     else
          D_DEBUG_AT( Core_Layers,
                      "dfb_layer_region_flip_update( %p, %p, 0x%08x )\n", region, update, flags );

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );

     /* Lock the region. */
     if (dfb_layer_region_lock( region ))
          return DFB_FUSION;

     D_ASSUME( region->surface != NULL );

     /* Check for NULL surface. */
     if (!region->surface) {
          D_DEBUG_AT( Core_Layers, " -> No surface => no update!\n" );
          dfb_layer_region_unlock( region );
          return DFB_UNSUPPORTED;
     }

     context = region->context;
     surface = region->surface;
     buffer  = surface->back_buffer;

     layer = dfb_layer_at( context->layer_id );

     D_ASSERT( layer->funcs != NULL );

     funcs = layer->funcs;

     /* Depending on the buffer mode... */
     switch (region->config.buffermode) {
          case DLBM_TRIPLE:
          case DLBM_BACKVIDEO:
               /* Check if simply swapping the buffers is possible... */
               if (!(flags & DSFLIP_BLIT) &&
                   (!update || (update->x1 == 0 &&
                                update->y1 == 0 &&
                                update->x2 == surface->width - 1 &&
                                update->y2 == surface->height - 1)))
               {
                    D_DEBUG_AT( Core_Layers, " -> Going to swap buffers...\n" );

                    /* Use the driver's routine if the region is realized. */
                    if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                         D_ASSUME( funcs->FlipRegion != NULL );

                         D_DEBUG_AT( Core_Layers, " -> Waiting for pending writes...\n" );

                         if (buffer->video.access & VAF_HARDWARE_WRITE) {
                              dfb_gfxcard_wait_serial( &buffer->video.serial );
                              buffer->video.access &= ~VAF_HARDWARE_WRITE;
                         }

                         D_DEBUG_AT( Core_Layers, " -> Flipping region using driver...\n" );

                         if (funcs->FlipRegion)
                              ret = funcs->FlipRegion( layer,
                                                       layer->driver_data,
                                                       layer->layer_data,
                                                       region->region_data,
                                                       surface, flags );
                    }
                    else {
                         D_DEBUG_AT( Core_Layers, " -> Flipping region not using driver...\n" );

                         /* Just do the hardware independent work. */
                         dfb_surface_flip_buffers( surface, false );
                    }

                    break;
               }

               /* fall through */

          case DLBM_BACKSYSTEM:
               D_DEBUG_AT( Core_Layers, " -> Going to copy portion...\n" );

               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAITFORSYNC) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );

                    dfb_layer_wait_vsync( layer );
               }

               D_DEBUG_AT( Core_Layers, " -> Copying content from back to front buffer...\n" );

               /* ...or copy updated contents from back to front buffer. */
               dfb_back_to_front_copy( surface, update );

               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAIT) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );

                    dfb_layer_wait_vsync( layer );
               }

               /* fall through */

          case DLBM_FRONTONLY:
               /* Tell the driver about the update if the region is realized. */
               if (funcs->UpdateRegion && D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                    D_DEBUG_AT( Core_Layers, " -> Notifying driver about updated content...\n" );

                    ret = funcs->UpdateRegion( layer,
                                               layer->driver_data,
                                               layer->layer_data,
                                               region->region_data,
                                               surface, update );
               }
               break;

          default:
               D_BUG("unknown buffer mode");
               ret = DFB_BUG;
     }

     D_DEBUG_AT( Core_Layers, " -> done.\n" );

     /* Unlock the region. */
     dfb_layer_region_unlock( region );

     return ret;
}
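/* The two vsync checks above rely on DSFLIP_WAITFORSYNC being a superset of
 * DSFLIP_WAIT (in DirectFB it combines DSFLIP_WAIT with DSFLIP_ONSYNC), so
 * masking with the superset distinguishes wait-before-copy from wait-only.
 * The bare flag pattern with illustrative values:
 */
enum {
     EXAMPLE_WAIT        = 0x1,
     EXAMPLE_ONSYNC      = 0x2,
     EXAMPLE_WAITFORSYNC = EXAMPLE_WAIT | EXAMPLE_ONSYNC
};

static int example_wait_before_copy( int flags )
{
     return (flags & EXAMPLE_WAITFORSYNC) == EXAMPLE_WAITFORSYNC;
}

static int example_wait_after_copy( int flags )
{
     return (flags & EXAMPLE_WAITFORSYNC) == EXAMPLE_WAIT;
}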
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int logical_block_size, mask, ok;
	int offset = 0;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

	BUG_ON(!bdev->md_bdev);

	logical_block_size = bdev_logical_block_size(bdev->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	/* in case logical_block_size != 512 [ s390 only? ] */
	if (logical_block_size != MD_SECTOR_SIZE) {
		mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
		D_ASSERT(mask == 1 || mask == 3 || mask == 7);
		D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
		offset = sector & mask;
		sector = sector & ~mask;
		iop = mdev->md_io_tmpp;

		if (rw & WRITE) {
			/* these are GFP_KERNEL pages, pre-allocated
			 * on device initialization */
			void *p = page_address(mdev->md_io_page);
			void *hp = page_address(mdev->md_io_tmpp);

			/* read-modify-write: fetch the full logical block
			 * before patching the 512-byte payload into it */
			ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
						   READ, logical_block_size);

			if (unlikely(!ok)) {
				dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
				    "READ [logical_block_size!=512]) failed!\n",
				    (unsigned long long)sector);
				return 0;
			}

			memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
		}
	}

	if (sector < drbd_md_first_sector(bdev) ||
	    sector > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
	if (unlikely(!ok)) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
		return 0;
	}

	if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
		void *p = page_address(mdev->md_io_page);
		void *hp = page_address(mdev->md_io_tmpp);

		memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
	}

	return ok;
}
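/*
 * Worked example of the alignment math above (illustrative only, not part
 * of the original source): with a 4096-byte logical block and
 * MD_SECTOR_SIZE == 512, mask == 7, so a request for sector 21 becomes a
 * full-block I/O at sector 16 with the 512-byte payload at sub-block
 * offset 5.
 */
static void md_align_example(void)
{
	sector_t sector = 21;
	int mask   = (4096 / 512) - 1;     /* == 7 */
	int offset = sector & mask;        /* == 5: 512-byte slot within the block */

	sector &= ~mask;                   /* == 16: aligned block start */
	(void)offset;
	(void)sector;
}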
DFBResult
dfb_layer_region_set_configuration( CoreLayerRegion            *region,
                                    CoreLayerRegionConfig      *config,
                                    CoreLayerRegionConfigFlags  flags )
{
     DFBResult              ret;
     CoreLayer             *layer;
     DisplayLayerFuncs     *funcs;
     CoreLayerRegionConfig  new_config;

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );
     D_ASSERT( config != NULL );
     D_ASSERT( config->buffermode != DLBM_WINDOWS );

     D_ASSERT( (flags == CLRCF_ALL) || (region->state & CLRSF_CONFIGURED) );

     D_ASSUME( flags != CLRCF_NONE );
     D_ASSUME( ! (flags & ~CLRCF_ALL) );

     layer = dfb_layer_at( region->context->layer_id );

     D_ASSERT( layer != NULL );
     D_ASSERT( layer->funcs != NULL );
     D_ASSERT( layer->funcs->TestRegion != NULL );

     funcs = layer->funcs;

     /* Lock the region. */
     if (dfb_layer_region_lock( region ))
          return DFB_FUSION;

     /* Full configuration supplied? */
     if (flags == CLRCF_ALL) {
          new_config = *config;
     }
     else {
          /* Use the current configuration. */
          new_config = region->config;

          /* Update each modified entry. */
          if (flags & CLRCF_WIDTH)
               new_config.width = config->width;

          if (flags & CLRCF_HEIGHT)
               new_config.height = config->height;

          if (flags & CLRCF_FORMAT)
               new_config.format = config->format;

          if (flags & CLRCF_SURFACE_CAPS)
               new_config.surface_caps = config->surface_caps;

          if (flags & CLRCF_BUFFERMODE)
               new_config.buffermode = config->buffermode;

          if (flags & CLRCF_OPTIONS)
               new_config.options = config->options;

          if (flags & CLRCF_SOURCE_ID)
               new_config.source_id = config->source_id;

          if (flags & CLRCF_SOURCE)
               new_config.source = config->source;

          if (flags & CLRCF_DEST)
               new_config.dest = config->dest;

          if (flags & CLRCF_OPACITY)
               new_config.opacity = config->opacity;

          if (flags & CLRCF_ALPHA_RAMP) {
               new_config.alpha_ramp[0] = config->alpha_ramp[0];
               new_config.alpha_ramp[1] = config->alpha_ramp[1];
               new_config.alpha_ramp[2] = config->alpha_ramp[2];
               new_config.alpha_ramp[3] = config->alpha_ramp[3];
          }

          if (flags & CLRCF_SRCKEY)
               new_config.src_key = config->src_key;

          if (flags & CLRCF_DSTKEY)
               new_config.dst_key = config->dst_key;

          if (flags & CLRCF_PARITY)
               new_config.parity = config->parity;
     }

     /* Check if the new configuration is supported. */
     ret = funcs->TestRegion( layer, layer->driver_data, layer->layer_data, &new_config, NULL );
     if (ret) {
          dfb_layer_region_unlock( region );
          return ret;
     }

     /* Propagate new configuration to the driver if the region is realized. */
     if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
          ret = set_region( region, &new_config, flags, region->surface );
          if (ret) {
               dfb_layer_region_unlock( region );
               return ret;
          }
     }

     /* Update the region's current configuration. */
     region->config = new_config;

     /* Update the region's state. */
     D_FLAGS_SET( region->state, CLRSF_CONFIGURED );

     /* Unlock the region. */
     dfb_layer_region_unlock( region );

     return DFB_OK;
}
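/*
 * Hypothetical caller sketch (not from the original source): resize a
 * region without touching the rest of its configuration by passing only
 * the corresponding flags. The function merges the flagged fields into
 * the current config before testing and applying it, so the remaining
 * fields of 'config' are never read.
 */
static DFBResult
resize_region( CoreLayerRegion *region, int width, int height )
{
     CoreLayerRegionConfig config;

     config.width  = width;
     config.height = height;

     return dfb_layer_region_set_configuration( region, &config,
                                                CLRCF_WIDTH | CLRCF_HEIGHT );
}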
/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction *buffer;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped. */
	mutex_lock(&mdev->md_io_mutex);
	buffer = page_address(mdev->md_io_page);

	/* Find the valid transaction in the log */
	for (i = 0; i <= mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		if (rv == 0)
			continue;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}
		cnr = be32_to_cpu(buffer->tr_number);

		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx+1);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
	i = from;
	while (1) {
		int j, pos;
		unsigned int extent_nr;
		unsigned int trn;

		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		ERR_IF(rv == 0) goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		trn = be32_to_cpu(buffer->tr_number);

		spin_lock_irq(&mdev->al_lock);

		/* This loop runs backwards because in the cyclic
		   elements there might be an old version of the
		   updated element (in slot 0). So the element in
		   slot 0 can overwrite old versions. */
		for (j = AL_EXTENTS_PT; j >= 0; j--) {
			pos = be32_to_cpu(buffer->updates[j].pos);
			extent_nr = be32_to_cpu(buffer->updates[j].extent);

			if (extent_nr == LC_FREE)
				continue;

			lc_set(mdev->act_log, extent_nr, pos);
			active_extents++;
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

cancel:
		if (i == to)
			break;
		i++;
		if (i > mx)
			i = 0;
	}

	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = to;
	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
	     transactions, active_extents);

	return 1;
}
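/*
 * Illustration of the wrap-around comparison used above (not from the
 * original source): transaction numbers are u32 counters compared via a
 * signed difference, so their ordering survives overflow as long as the
 * live window spans fewer than 2^31 transactions. The overflow-safe
 * spelling of the same comparison:
 */
static int tr_number_before(u32 a, u32 b)
{
	/* e.g. a == 0xffffffff, b == 0x00000002:
	 * (int)(a - b) == -3, so 'a' is correctly seen as older
	 * even though a > b as an unsigned value. */
	return (int)(a - b) < 0;
}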
/* The key is a plain int, so its bytes are used directly as the hashed key. */
static void
stab_df_hkey_gen(struct btr_instance *tins, daos_iov_t *key_iov, void *hkey)
{
	D_ASSERT(key_iov->iov_len == sizeof(int));
	memcpy(hkey, key_iov->iov_buf, key_iov->iov_len);
}
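/*
 * Hypothetical illustration (not from the original source) of the iov
 * contract the assertion above enforces: the key iov must describe
 * exactly sizeof(int) bytes. Must live in the same compilation unit,
 * since the callback is static.
 */
static void stab_df_hkey_example(void)
{
	int        key  = 42;
	int        hkey = 0;
	daos_iov_t iov;

	daos_iov_set(&iov, &key, sizeof(key));

	/* tins is unused by this callback, so NULL suffices for the sketch. */
	stab_df_hkey_gen(NULL, &iov, &hkey);   /* hkey now == 42 */
}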