/**
 * PWM configuration.
 *
 * Brings the channel's module into a known default state (heartbeat off,
 * normal polarity, default clock input) and programs its operating
 * frequency.  The module is disabled first so registers can be changed
 * safely, and is left disabled; callers enable it separately.
 *
 * @param ch operation channel
 * @return none
 */
void pwm_config(enum pwm_channel ch)
{
	int mdl = pwm_channels[ch].channel;

	/* Disable PWM for module configuration */
	pwm_enable(mdl, 0);

	/* Set PWM heartbeat mode is no heartbeat */
	SET_FIELD(NPCX_PWMCTL(mdl), NPCX_PWMCTL_HB_DC_CTL_FIELD,
		  NPCX_PWM_HBM_NORMAL);

	/* Select default CLK or LFCLK clock input to PWM module */
	SET_FIELD(NPCX_PWMCTLEX(mdl), NPCX_PWMCTLEX_FCK_SEL_FIELD,
		  NPCX_PWM_CLOCK_APB2_LFCLK);

	/* Set PWM polarity normal first */
	CLEAR_BIT(NPCX_PWMCTL(mdl), NPCX_PWMCTL_INVP);

	/* Select PWM clock source: LFCLK when the channel must keep running
	 * in deep sleep, APB2 otherwise. */
	UPDATE_BIT(NPCX_PWMCTL(mdl), NPCX_PWMCTL_CKSEL,
		   (pwm_channels[ch].flags & PWM_CONFIG_DSLEEP_CLK));

	/* Set PWM operation frequency */
	pwm_set_freq(ch, pwm_channels[ch].freq, DUTY_CYCLE_RESOLUTION);
}
/* Emit 3DSTATE_DS (domain shader / tessellation evaluation stage) for Gen8.
 * When no TES program is bound, a zeroed 9-dword packet disables the stage.
 */
static void
gen8_upload_ds_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_stage_state *stage_state = &brw->tes.base;

   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->tess_eval_program;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data = brw->tes.prog_data;
   const struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
   const struct brw_stage_prog_data *prog_data = &vue_prog_data->base;

   if (active) {
      BEGIN_BATCH(9);
      OUT_BATCH(_3DSTATE_DS << 16 | (9 - 2));
      OUT_BATCH(stage_state->prog_offset);
      OUT_BATCH(0);
      OUT_BATCH(SET_FIELD(DIV_ROUND_UP(stage_state->sampler_count, 4),
                          GEN7_DS_SAMPLER_COUNT) |
                SET_FIELD(prog_data->binding_table.size_bytes / 4,
                          GEN7_DS_BINDING_TABLE_ENTRY_COUNT));
      if (prog_data->total_scratch) {
         /* Scratch size is encoded as a power-of-two exponent; the -11
          * bias presumably reflects the hardware's 1KB granularity —
          * see the PRM for 3DSTATE_DS.
          */
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(prog_data->total_scratch) - 11);
      } else {
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      OUT_BATCH(SET_FIELD(prog_data->dispatch_grf_start_reg,
                          GEN7_DS_DISPATCH_START_GRF) |
                SET_FIELD(vue_prog_data->urb_read_length,
                          GEN7_DS_URB_READ_LENGTH));
      /* W coordinate is only needed for triangular domains (barycentrics). */
      OUT_BATCH(GEN7_DS_ENABLE |
                GEN7_DS_STATISTICS_ENABLE |
                (brw->max_ds_threads - 1) << HSW_DS_MAX_THREADS_SHIFT |
                (vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ?
                 GEN7_DS_SIMD8_DISPATCH_ENABLE : 0) |
                (tes_prog_data->domain == BRW_TESS_DOMAIN_TRI ?
                 GEN7_DS_COMPUTE_W_COORDINATE_ENABLE : 0));
      OUT_BATCH(SET_FIELD(ctx->Transform.ClipPlanesEnabled,
                          GEN8_DS_USER_CLIP_DISTANCE));
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(9);
      OUT_BATCH(_3DSTATE_DS << 16 | (9 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
   brw->tes.enabled = active;
}
/* Read a 32-bit value from the DPLL sideband bus.
 *
 * The sideband unit processes one packet at a time: wait for it to go
 * idle, program the address, issue the read packet, wait for completion,
 * then fetch the latched data.
 *
 * Returns 0 on success and stores the result in *val, or the nonzero
 * wait_for() timeout result on failure.
 */
int psb_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
	int ret;

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before read\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	/* All four byte lanes enabled for a full-width read. */
	REG_WRITE(SB_PCKT,
		  SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
		  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		  SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after read\n");
		return ret;
	}

	*val = REG_READ(SB_DATA);
	return 0;
}
/* JSON string handler: stores the current token into the protobuf field on
 * top of the state stack.  Enum fields are resolved by value name; string
 * fields are stored verbatim.  Any other field type — or an unknown enum
 * value name — records an error and returns false, aborting the parse.
 */
bool Deserializer::String(const Ch* str, rapidjson::SizeType /*length*/, bool /*copy*/)
{
    google::protobuf::FieldDescriptor const* field = _state.top();
    google::protobuf::Message* message = _objectState.top();
    switch (field->cpp_type())
    {
        case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
        {
            google::protobuf::EnumValueDescriptor const* enumValue = field->enum_type()->FindValueByName(str);
            if (!enumValue)
            {
                _errors.push_back(Trinity::StringFormat("Field %s enum %s does not have a value named %s.",
                    field->full_name().c_str(), field->enum_type()->full_name().c_str(), str));
                return false;
            }

            SET_FIELD(message, field, Enum, enumValue);
            break;
        }
        case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
            SET_FIELD(message, field, String, str);
            break;
        default:
            _errors.push_back(Trinity::StringFormat("Expected field type to be string or enum but got %s instead.", _state.top()->cpp_type_name()));
            return false;
    }

    return true;
}
/*
 * Unpin and close a window so no new requests are accepted and the
 * hardware can evict this window from cache if necessary.
 */
static void unpin_close_window(struct vas_window *window)
{
	u64 winctl;

	/* Read-modify-write WINCTL: drop both the PIN and OPEN bits. */
	winctl = read_hvwc_reg(window, VREG(WINCTL));
	winctl = SET_FIELD(VAS_WINCTL_OPEN, winctl, 0);
	winctl = SET_FIELD(VAS_WINCTL_PIN, winctl, 0);
	write_hvwc_reg(window, VREG(WINCTL), winctl);
}
/** * Used to initialize the alpha value of an ARGB8888 miptree after copying * into it from an XRGB8888 source. * * This is very common with glCopyTexImage2D(). Note that the coordinates are * relative to the start of the miptree, not relative to a slice within the * miptree. */ static void intel_miptree_set_alpha_to_one(struct brw_context *brw, struct intel_mipmap_tree *mt, int x, int y, int width, int height) { uint32_t BR13, CMD; int pitch, cpp; drm_intel_bo *aper_array[2]; pitch = mt->pitch; cpp = mt->cpp; DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n", __FUNCTION__, mt->bo, pitch, x, y, width, height); BR13 = br13_for_cpp(cpp) | 0xf0 << 16; CMD = XY_COLOR_BLT_CMD; CMD |= XY_BLT_WRITE_ALPHA; if (mt->tiling != I915_TILING_NONE) { CMD |= XY_DST_TILED; pitch /= 4; } BR13 |= pitch; /* do space check before going any further */ aper_array[0] = brw->batch.bo; aper_array[1] = mt->bo; if (drm_intel_bufmgr_check_aperture_space(aper_array, ARRAY_SIZE(aper_array)) != 0) { intel_batchbuffer_flush(brw); } unsigned length = brw->gen >= 8 ? 7 : 6; bool dst_y_tiled = mt->tiling == I915_TILING_Y; BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, false); OUT_BATCH(CMD | (length - 2)); OUT_BATCH(BR13); OUT_BATCH(SET_FIELD(y, BLT_Y) | SET_FIELD(x, BLT_X)); OUT_BATCH(SET_FIELD(y + height, BLT_Y) | SET_FIELD(x + width, BLT_X)); if (brw->gen >= 8) { OUT_RELOC64(mt->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0); } else { OUT_RELOC(mt->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0); } OUT_BATCH(0xffffffff); /* white, but only alpha gets written */ ADVANCE_BATCH_TILED(dst_y_tiled, false); intel_batchbuffer_emit_mi_flush(brw); }
/* Emit 3DSTATE_DS (domain shader / tessellation evaluation stage) for Gen7.
 * When no TES program is bound, a zeroed 6-dword packet disables the stage.
 */
static void
gen7_upload_ds_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tes.base;

   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->tess_eval_program;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_state->prog_data);
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(stage_state->prog_data);

   /* Haswell moved the max-threads field within the dword, so pick the
    * right shift up front. */
   const unsigned thread_count = (devinfo->max_tes_threads - 1) <<
      (brw->is_haswell ? HSW_DS_MAX_THREADS_SHIFT : GEN7_DS_MAX_THREADS_SHIFT);

   if (active) {
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_DS << 16 | (6 - 2));
      OUT_BATCH(stage_state->prog_offset);
      OUT_BATCH(SET_FIELD(DIV_ROUND_UP(stage_state->sampler_count, 4),
                          GEN7_DS_SAMPLER_COUNT) |
                SET_FIELD(prog_data->binding_table.size_bytes / 4,
                          GEN7_DS_BINDING_TABLE_ENTRY_COUNT));
      if (prog_data->total_scratch) {
         /* Per-thread scratch is encoded as a power-of-two exponent; the
          * -11 bias presumably reflects the 1KB hardware granularity —
          * see the PRM for 3DSTATE_DS.
          */
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(stage_state->per_thread_scratch) - 11);
      } else {
         OUT_BATCH(0);
      }
      OUT_BATCH(SET_FIELD(prog_data->dispatch_grf_start_reg,
                          GEN7_DS_DISPATCH_START_GRF) |
                SET_FIELD(vue_prog_data->urb_read_length,
                          GEN7_DS_URB_READ_LENGTH));
      /* W coordinate is only needed for triangular domains (barycentrics). */
      OUT_BATCH(GEN7_DS_ENABLE |
                GEN7_DS_STATISTICS_ENABLE |
                thread_count |
                (tes_prog_data->domain == BRW_TESS_DOMAIN_TRI ?
                 GEN7_DS_COMPUTE_W_COORDINATE_ENABLE : 0));
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_DS << 16 | (6 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
   brw->tes.enabled = active;
}
/* Emit 3DSTATE_HS (hull shader / tessellation control stage) for Gen8.
 * When tessellation is inactive, a zeroed 9-dword packet disables the stage.
 */
static void
gen8_upload_hs_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tcs.base;

   /* BRW_NEW_TESS_PROGRAMS */
   /* NOTE(review): keyed off the TES program — presumably the TCS can only
    * be active when a TES is bound; confirm against the program-upload
    * logic elsewhere in the driver.
    */
   bool active = brw->tess_eval_program;

   /* BRW_NEW_TCS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   const struct brw_tcs_prog_data *tcs_prog_data =
      brw_tcs_prog_data(stage_state->prog_data);

   if (active) {
      BEGIN_BATCH(9);
      OUT_BATCH(_3DSTATE_HS << 16 | (9 - 2));
      OUT_BATCH(SET_FIELD(DIV_ROUND_UP(stage_state->sampler_count, 4),
                          GEN7_HS_SAMPLER_COUNT) |
                SET_FIELD(prog_data->binding_table.size_bytes / 4,
                          GEN7_HS_BINDING_TABLE_ENTRY_COUNT));
      OUT_BATCH(GEN7_HS_ENABLE |
                GEN7_HS_STATISTICS_ENABLE |
                (devinfo->max_tcs_threads - 1) << GEN8_HS_MAX_THREADS_SHIFT |
                SET_FIELD(tcs_prog_data->instances - 1,
                          GEN7_HS_INSTANCE_COUNT));
      OUT_BATCH(stage_state->prog_offset);
      OUT_BATCH(0);
      if (prog_data->total_scratch) {
         /* Power-of-two scratch encoding; -11 bias per the PRM. */
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(stage_state->per_thread_scratch) - 11);
      } else {
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      OUT_BATCH(GEN7_HS_INCLUDE_VERTEX_HANDLES |
                SET_FIELD(prog_data->dispatch_grf_start_reg,
                          GEN7_HS_DISPATCH_START_GRF));
      OUT_BATCH(0); /* MBZ */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(9);
      OUT_BATCH(_3DSTATE_HS << 16 | (9 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
   brw->tcs.enabled = active;
}
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) { u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; /* CDUC - connection configuration */ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; cxt_size = CONN_CXT_SIZE(p_hwfn); elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); }
/* Emit 3DSTATE_HS (hull shader / tessellation control stage) for Gen7.
 * When tessellation is inactive, a zeroed 7-dword packet disables the stage.
 */
static void
gen7_upload_hs_state(struct brw_context *brw)
{
   const struct brw_stage_state *stage_state = &brw->tcs.base;

   /* BRW_NEW_TESS_PROGRAMS */
   /* NOTE(review): keyed off the TES program — presumably the TCS can only
    * be active when a TES is bound; confirm against the program-upload
    * logic elsewhere in the driver.
    */
   bool active = brw->tess_eval_program;

   /* BRW_NEW_TCS_PROG_DATA */
   const struct brw_vue_prog_data *prog_data = &brw->tcs.prog_data->base;

   if (active) {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_HS << 16 | (7 - 2));
      OUT_BATCH(SET_FIELD(DIV_ROUND_UP(stage_state->sampler_count, 4),
                          GEN7_HS_SAMPLER_COUNT) |
                SET_FIELD(prog_data->base.binding_table.size_bytes / 4,
                          GEN7_HS_BINDING_TABLE_ENTRY_COUNT) |
                (brw->max_hs_threads - 1));
      OUT_BATCH(GEN7_HS_ENABLE |
                GEN7_HS_STATISTICS_ENABLE |
                SET_FIELD(brw->tcs.prog_data->instances - 1,
                          GEN7_HS_INSTANCE_COUNT));
      OUT_BATCH(stage_state->prog_offset);
      if (prog_data->base.total_scratch) {
         /* Power-of-two scratch encoding; -11 bias per the PRM. */
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(prog_data->base.total_scratch) - 11);
      } else {
         OUT_BATCH(0);
      }
      OUT_BATCH(GEN7_HS_INCLUDE_VERTEX_HANDLES |
                SET_FIELD(prog_data->base.dispatch_grf_start_reg,
                          GEN7_HS_DISPATCH_START_GRF));
      /* Ignore URB semaphores */
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_HS << 16 | (7 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
   brw->tcs.enabled = active;
}
/* JSON unsigned-integer handler.  Stores the value into a uint32 field, or
 * appends it as one raw byte to a "bytes" field (bytes fields arrive from
 * JSON as arrays of numbers, handled one element at a time).  Any other
 * field type records an error and returns false, aborting the parse.
 */
bool Deserializer::Uint(uint32 i)
{
    google::protobuf::FieldDescriptor const* field = _state.top();
    google::protobuf::Message* message = _objectState.top();
    switch (field->cpp_type())
    {
        case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
            SET_FIELD(message, field, UInt32, i);
            break;
        case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
        {
            // CPPTYPE_STRING covers both string and bytes fields; only
            // bytes may be fed numerically.
            if (field->type() != google::protobuf::FieldDescriptor::TYPE_BYTES)
            {
                _errors.emplace_back("Expected field type to be bytes but got string instead.");
                return false;
            }

            // Read-modify-write: append this value as a single byte to the
            // accumulated buffer via reflection.
            std::string currentValue = message->GetReflection()->GetString(*message, field);
            currentValue.append(1, (char)i);
            message->GetReflection()->SetString(message, field, currentValue);
            break;
        }
        default:
            _errors.push_back(Trinity::StringFormat("Expected field type to be uint32 or string but got %s instead.", _state.top()->cpp_type_name()));
            return false;
    }

    return true;
}
/* JSON signed-integer handler: store the value into the int32 field on top
 * of the state stack.  Returns false if the current field is not int32.
 */
bool Deserializer::Int(int32 i)
{
    if (CheckType(google::protobuf::FieldDescriptor::CPPTYPE_INT32))
    {
        SET_FIELD(_objectState.top(), _state.top(), Int32, i);
        return true;
    }

    return false;
}
/* JSON boolean handler: store the value into the bool field on top of the
 * state stack.  Returns false if the current field is not bool.
 */
bool Deserializer::Bool(bool b)
{
    if (CheckType(google::protobuf::FieldDescriptor::CPPTYPE_BOOL))
    {
        SET_FIELD(_objectState.top(), _state.top(), Bool, b);
        return true;
    }

    return false;
}
/* JSON 64-bit unsigned handler: store the value into the uint64 field on
 * top of the state stack.  Returns false if the current field is not uint64.
 */
bool Deserializer::Uint64(uint64 i)
{
    if (CheckType(google::protobuf::FieldDescriptor::CPPTYPE_UINT64))
    {
        SET_FIELD(_objectState.top(), _state.top(), UInt64, i);
        return true;
    }

    return false;
}
/* Initialize the common part of an FCoE send-queue element: zero the WQE
 * (so unset fields default to 0), then stamp in the request type and the
 * task id.  The memset must come first.
 */
static inline void init_common_sqe(struct fcoe_task_params *task_params,
				   enum fcoe_sqe_request_type request_type)
{
	memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
	SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, request_type);
	task_params->sqe->task_id = task_params->itid;
}
/* Program the memory-controller chip-select configuration (CSC) register
 * for every SDRAM channel enabled in MC_SDRAM_CSMASK, taking the per-channel
 * settings from the mc_sdram_cs[] table, then enable the channel.
 *
 * Always returns 0; return type kept for existing callers.
 */
unsigned long set_config()
{
	/* NOTE(review): REGISTER appears to be a pointer-to-register typedef
	 * (assigned an (unsigned long *) below) — confirm its definition.
	 */
	REGISTER mc_csc;
	unsigned char ch;

	for (ch=0; ch<8; ch++) {
		if (MC_SDRAM_CSMASK & (0x01 << ch) ) {
			mc_csc = (unsigned long*)(MC_BASE + MC_CSC(ch));

			SET_FIELD(*mc_csc, MC_CSC, MS, mc_sdram_cs[ch].MS);
			SET_FIELD(*mc_csc, MC_CSC, BW, mc_sdram_cs[ch].BW);
			/* NOTE(review): SEL is loaded from member .M while the
			 * other fields use same-named members — confirm this
			 * mapping is intentional.
			 */
			SET_FIELD(*mc_csc, MC_CSC, SEL, mc_sdram_cs[ch].M);
			SET_FLAG(*mc_csc, MC_CSC, EN);

			printf ("Channel Config %d - CSC = 0x%08lX\n", ch, *mc_csc);
		}
	}

	return 0;
}
/* Set the suspend-hypercall return value (vCPU0's EAX/RAX) to 1 so that the
 * guest, on resume, sees its suspend request as cancelled.  The domain must
 * already be shut down with reason SHUTDOWN_suspend.
 *
 * Returns 0 on success (including the HVM-without-PV-drivers no-op case),
 * -1 with errno set on validation failure, or the error code from the vcpu
 * context calls.
 */
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    /* getinfo can return an adjacent domain; verify we got the right one. */
    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
         info.domid != domid )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
    {
        ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
              info.shutdown, info.shutdown_reason);
        errno = EINVAL;
        return -1;
    }

    if ( info.hvm )
    {
        /* HVM guests without PV drivers have no return code to modify. */
        uint64_t irq = 0;
        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
            return -1;
    }

    if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    /* Width-aware store into the 32- or 64-bit register image. */
    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);

    if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}
/* Install a new error handler on the debug context and return the one that
 * was previously installed.
 */
Dwarf_Handler
dwarf_seterrhand(Dwarf_Debug dbg, Dwarf_Handler errhand)
{
	Dwarf_Handler prev;

	SET_FIELD(dbg, prev, errhand);

	return prev;
}
/* Install a new error-handler argument on the debug context and return the
 * one that was previously installed.
 */
Dwarf_Ptr
dwarf_seterrarg(Dwarf_Debug dbg, Dwarf_Ptr errarg)
{
	Dwarf_Ptr prev;

	SET_FIELD(dbg, prev, errarg);

	return prev;
}
/* Configure the interrupt type for a GPIO pin and register its handler.
 * With GPIO_INTTYPE_NONE the CPU-level GPIO interrupt is left untouched;
 * otherwise the shared GPIO ISR is attached and unmasked.
 */
void gpio_set_interrupt(const uint8_t gpio_num, const gpio_inttype_t int_type, gpio_interrupt_handler_t handler)
{
    /* Publish the callback before the pin can start firing. */
    gpio_interrupt_handlers[gpio_num] = handler;

    GPIO.CONF[gpio_num] = SET_FIELD(GPIO.CONF[gpio_num], GPIO_CONF_INTTYPE, int_type);

    if (int_type == GPIO_INTTYPE_NONE)
        return;

    _xt_isr_attach(INUM_GPIO, gpio_interrupt_handler, NULL);
    _xt_isr_unmask(1 << INUM_GPIO);
}
bool Deserializer::Double(double d) { google::protobuf::FieldDescriptor const* field = _state.top(); google::protobuf::Message* message = _objectState.top(); switch (field->cpp_type()) { case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT: SET_FIELD(message, field, Float, float(d)); break; case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE: SET_FIELD(message, field, Double, d); break; default: _errors.push_back(Trinity::StringFormat("Expected field type to be float or double but got %s instead.", _state.top()->cpp_type_name())); return false; } return true; }
/* Write a 32-bit value to the DPLL sideband bus.
 *
 * Mirrors psb_sb_read(): wait for the sideband unit to go idle, program
 * address and data, issue the write packet, then wait for completion.
 * When the (compile-time) dpio_debug flag is set, the register is read
 * back before and after for logging.
 *
 * Returns 0 on success, or the nonzero wait_for() timeout result.
 */
int psb_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
	int ret;
	static bool dpio_debug = false;
	u32 temp;

	if (dpio_debug) {
		if (psb_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
	}

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before write\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	REG_WRITE(SB_DATA, val);
	/* All four byte lanes enabled for a full-width write. */
	REG_WRITE(SB_PCKT,
		  SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
		  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		  SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after write\n");
		return ret;
	}

	if (dpio_debug) {
		if (psb_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
	}

	return 0;
}
inline void lpc176x_pin_select( const uint32_t pin, const lpc176x_pin_function function ) { assert( pin <= LPC176X_IO_INDEX_MAX && function < LPC176X_PIN_FUNCTION_COUNT ); const uint32_t pin_selected = LPC176X_PIN_SELECT( pin ); volatile uint32_t *const pinsel = &LPC176X_PINSEL[ pin_selected ]; const uint32_t shift = LPC176X_PIN_SELECT_SHIFT( pin ); *pinsel = SET_FIELD( *pinsel, function, LPC176X_PIN_SELECT_MASK << shift, shift ); }
/**
 * Creates a null surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output..
 */
static void
gen7_emit_null_surface_state(struct brw_context *brw,
                             unsigned width,
                             unsigned height,
                             unsigned samples,
                             uint32_t *out_offset)
{
   /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *     A null surface is used in instances where an actual surface is not
    *     bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specificially indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions: Width, Height,
    *     Depth, LOD, and Render Target View Extent fields must match the
    *     depth buffer’s corresponding state for all render target surfaces,
    *     including null.
    */
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    out_offset);
   memset(surf, 0, 8 * 4);

   /* From the Ivybridge PRM, Volume 4, Part 1, page 65,
    * Tiled Surface: Programming Notes:
    * "If Surface Type is SURFTYPE_NULL, this field must be TRUE."
    */
   surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
             GEN7_SURFACE_TILING_Y;

   /* Width/Height must match the bound depth buffer (see PRM quote above). */
   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}
void lpc176x_pin_set_mode( const uint32_t pin, const lpc176x_pin_mode mode ) { assert( pin <= LPC176X_IO_INDEX_MAX && mode < LPC176X_PIN_MODE_COUNT ); const uint32_t pin_selected = LPC176X_PIN_SELECT( pin ); volatile uint32_t *const pinmode = &LPC176X_PINMODE[ pin_selected ]; const uint32_t shift = LPC176X_PIN_SELECT_SHIFT( pin ); *pinmode = SET_FIELD( *pinmode, mode, LPC176X_PIN_SELECT_MASK << shift, shift ); }
/**
 * 3DSTATE_PS
 *
 * Pixel shader dispatch is disabled above in 3DSTATE_WM, dw1.29. Despite
 * that, thread dispatch info must still be specified.
 *  - Maximum Number of Threads (dw4.24:31) must be nonzero, as the
 *    valid range for this field is [0x3, 0x2f].
 *  - A dispatch mode must be given; that is, at least one of the
 *    "N Pixel Dispatch Enable" (N=8,16,32) fields must be set. This was
 *    discovered through simulator error messages.
 */
static void
gen7_blorp_emit_ps_config(struct brw_context *brw,
                          const brw_blorp_params *params,
                          uint32_t prog_offset,
                          brw_blorp_prog_data *prog_data)
{
   uint32_t dw2, dw4, dw5;
   /* Haswell moved the max-threads field within dw4. */
   const int max_threads_shift = brw->is_haswell ?
      HSW_PS_MAX_THREADS_SHIFT : IVB_PS_MAX_THREADS_SHIFT;

   dw2 = dw4 = dw5 = 0;
   dw4 |= (brw->max_wm_threads - 1) << max_threads_shift;

   /* If there's a WM program, we need to do 16-pixel dispatch since that's
    * what the program is compiled for. If there isn't, then it shouldn't
    * matter because no program is actually being run. However, the hardware
    * gets angry if we don't enable at least one dispatch mode, so just
    * enable 16-pixel dispatch unconditionally.
    */
   dw4 |= GEN7_PS_16_DISPATCH_ENABLE;

   if (brw->is_haswell)
      dw4 |= SET_FIELD(1, HSW_PS_SAMPLE_MASK); /* 1 sample for now */

   if (params->use_wm_prog) {
      dw2 |= 1 << GEN7_PS_SAMPLER_COUNT_SHIFT; /* Up to 4 samplers */
      dw4 |= GEN7_PS_PUSH_CONSTANT_ENABLE;
      dw5 |= prog_data->first_curbe_grf << GEN7_PS_DISPATCH_START_GRF_SHIFT_0;
   }

   /* Translate the requested fast-clear operation into its dw4 enable bit. */
   switch (params->fast_clear_op) {
   case GEN7_FAST_CLEAR_OP_FAST_CLEAR:
      dw4 |= GEN7_PS_RENDER_TARGET_FAST_CLEAR_ENABLE;
      break;
   case GEN7_FAST_CLEAR_OP_RESOLVE:
      dw4 |= GEN7_PS_RENDER_TARGET_RESOLVE_ENABLE;
      break;
   default:
      break;
   }

   BEGIN_BATCH(8);
   OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
   OUT_BATCH(params->use_wm_prog ? prog_offset : 0);
   OUT_BATCH(dw2);
   OUT_BATCH(0);
   OUT_BATCH(dw4);
   OUT_BATCH(dw5);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
/**
 * Enable hardware binding tables and set up the binding table pool.
 * No-op unless the context uses the resource streamer.  The pool buffer
 * is allocated lazily on first use and reused for the context's lifetime.
 */
void
gen7_enable_hw_binding_tables(struct brw_context *brw)
{
   if (!brw->use_resource_streamer)
      return;

   if (!brw->hw_bt_pool.bo) {
      /* We use a single re-usable buffer object for the lifetime of the
       * context and size it to maximum allowed binding tables that can be
       * programmed per batch:
       *
       * From the Haswell PRM, Volume 7: 3D Media GPGPU,
       * 3DSTATE_BINDING_TABLE_POOL_ALLOC > Programming Note:
       * "A maximum of 16,383 Binding tables are allowed in any batch buffer"
       */
      static const int max_size = 16383 * 4;
      brw->hw_bt_pool.bo = drm_intel_bo_alloc(brw->bufmgr, "hw_bt",
                                              max_size, 64);
      brw->hw_bt_pool.next_offset = 0;
   }

   /* From the Haswell PRM, Volume 7: 3D Media GPGPU,
    * 3DSTATE_BINDING_TABLE_POOL_ALLOC > Programming Note:
    *
    * "When switching between HW and SW binding table generation, SW must
    * issue a state cache invalidate."
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_STATE_CACHE_INVALIDATE);

   /* Gen8 relocates a 64-bit pool address (4-dword packet); Gen7 uses
    * two 32-bit relocations (3-dword packet).
    */
   int pkt_len = brw->gen >= 8 ? 4 : 3;
   uint32_t dw1 = BRW_HW_BINDING_TABLE_ENABLE;
   if (brw->is_haswell) {
      dw1 |= SET_FIELD(GEN7_MOCS_L3, GEN7_HW_BT_POOL_MOCS) |
             HSW_BT_POOL_ALLOC_MUST_BE_ONE;
   } else if (brw->gen >= 8) {
      dw1 |= BDW_MOCS_WB;
   }

   BEGIN_BATCH(pkt_len);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POOL_ALLOC << 16 | (pkt_len - 2));
   if (brw->gen >= 8) {
      OUT_RELOC64(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0, dw1);
      OUT_BATCH(brw->hw_bt_pool.bo->size);
   } else {
      OUT_RELOC(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0, dw1);
      OUT_RELOC(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                brw->hw_bt_pool.bo->size);
   }
   ADVANCE_BATCH();
}
/**
 * 3DSTATE_PS
 *
 * Pixel shader dispatch is disabled above in 3DSTATE_WM, dw1.29. Despite
 * that, thread dispatch info must still be specified.
 *  - Maximum Number of Threads (dw4.24:31) must be nonzero, as the
 *    valid range for this field is [0x3, 0x2f].
 *  - A dispatch mode must be given; that is, at least one of the
 *    "N Pixel Dispatch Enable" (N=8,16,32) fields must be set. This was
 *    discovered through simulator error messages.
 */
static void
gen7_blorp_emit_ps_config(struct brw_context *brw,
                          const struct brw_blorp_params *params)
{
   const struct brw_blorp_prog_data *prog_data = params->wm_prog_data;
   uint32_t dw2, dw4, dw5, ksp0, ksp2;
   /* Haswell moved the max-threads field within dw4. */
   const int max_threads_shift = brw->is_haswell ?
      HSW_PS_MAX_THREADS_SHIFT : IVB_PS_MAX_THREADS_SHIFT;

   dw2 = dw4 = dw5 = ksp0 = ksp2 = 0;
   dw4 |= (brw->max_wm_threads - 1) << max_threads_shift;

   if (brw->is_haswell)
      dw4 |= SET_FIELD(1, HSW_PS_SAMPLE_MASK); /* 1 sample for now */

   if (params->wm_prog_data) {
      dw4 |= GEN7_PS_PUSH_CONSTANT_ENABLE;

      /* Kernel start pointers for the SIMD8 (ksp0) and SIMD16 (ksp2)
       * variants of the program.
       */
      dw5 |= prog_data->first_curbe_grf_0 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0;
      dw5 |= prog_data->first_curbe_grf_2 << GEN7_PS_DISPATCH_START_GRF_SHIFT_2;
      ksp0 = params->wm_prog_kernel;
      ksp2 = params->wm_prog_kernel + params->wm_prog_data->ksp_offset_2;

      if (params->wm_prog_data->dispatch_8)
         dw4 |= GEN7_PS_8_DISPATCH_ENABLE;
      if (params->wm_prog_data->dispatch_16)
         dw4 |= GEN7_PS_16_DISPATCH_ENABLE;
   } else {
      /* The hardware gets angry if we don't enable at least one dispatch
       * mode, so just enable 16-pixel dispatch if we don't have a program.
       */
      dw4 |= GEN7_PS_16_DISPATCH_ENABLE;
   }

   if (params->src.mt)
      dw2 |= 1 << GEN7_PS_SAMPLER_COUNT_SHIFT; /* Up to 4 samplers */

   dw4 |= params->fast_clear_op;

   BEGIN_BATCH(8);
   OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
   OUT_BATCH(ksp0);
   OUT_BATCH(dw2);
   OUT_BATCH(0);
   OUT_BATCH(dw4);
   OUT_BATCH(dw5);
   OUT_BATCH(0); /* kernel 1 pointer */
   OUT_BATCH(ksp2);
   ADVANCE_BATCH();
}
/* Legacy variant of modify_returncode() taking a raw xc handle: set the
 * suspend-hypercall return value (vCPU0's EAX) to 1 so the resumed guest
 * sees its suspend request as cancelled.
 *
 * Returns 0 on success (including the HVM-without-PV-drivers no-op case),
 * -1 on validation failure, or the error code from the vcpu context calls.
 */
static int modify_returncode(int xc_handle, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    if ( info.hvm )
    {
        /* HVM guests without PV drivers have no return code to modify. */
        unsigned long irq = 0;
        xc_get_hvm_param(xc_handle, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xc_handle, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities\n");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        /* NOTE(review): the `< 0` error check only works if guest_width is
         * a signed type — confirm the declaration in domain_info_context.
         */
        dinfo->guest_width = pv_guest_width(xc_handle, domid);
        if ( dinfo->guest_width < 0 )
            return -1;
    }

    if ( (rc = xc_vcpu_getcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;

    SET_FIELD(&ctxt, user_regs.eax, 1);

    if ( (rc = xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}
/* ILT (PSWRQ2) PF */ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *clients; struct qed_cxt_mngr *p_mngr; struct qed_dma_mem *p_shdw; u32 line, rt_offst, i; qed_ilt_bounds_init(p_hwfn); qed_ilt_vf_bounds_init(p_hwfn); p_mngr = p_hwfn->p_cxt_mngr; p_shdw = p_mngr->ilt_shadow; clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, clients) { if (!clients[i].active) continue; /** Client's 1st val and RT array are absolute, ILT shadows' * lines are relative. */ line = clients[i].first.val - p_mngr->pf_start_line; rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + clients[i].first.val * ILT_ENTRY_IN_REGS; for (; line <= clients[i].last.val - p_mngr->pf_start_line; line++, rt_offst += ILT_ENTRY_IN_REGS) { u64 ilt_hw_entry = 0; /** p_virt could be NULL incase of dynamic * allocation */ if (p_shdw[line].p_virt) { SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (p_shdw[line].p_phys >> 12)); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", rt_offst, line, i, (u64)(p_shdw[line].p_phys >> 12)); } STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); } }