/**
 * Check if any seamless mode is enabled.
 * Seamless is only relevant for the newer Xorg modules.
 *
 * @returns the result of the query
 *          (true = seamless enabled, false = seamless not enabled)
 * @param   pScrn  Screen info pointer.
 */
Bool vboxGuestIsSeamless(ScrnInfoPtr pScrn)
{
    VBOXPtr pVBox = pScrn->driverPrivate;
    VMMDevSeamlessMode mode;

    TRACE_ENTRY();
    /* No device in use, or no last-event available -> report disabled. */
    if (!pVBox->useDevice || RT_FAILURE(VbglR3SeamlessGetLastEvent(&mode)))
        return FALSE;
    return mode != VMMDev_Seamless_Disabled;
}
/*
 * Stacked fasync: delegate asynchronous-notification setup to the lower
 * file's own fasync handler, if it provides one; otherwise succeed as a
 * no-op.
 */
static int tierfs_fasync(int fd, struct file *file, int flag)
{
	struct file *lower;
	int rc = 0;

	TRACE_ENTRY();
	lower = tierfs_file_to_lower(file);
	if (lower->f_op && lower->f_op->fasync)
		rc = lower->f_op->fasync(fd, lower, flag);
	TRACE_EXIT();
	return rc;
}
/*
 * Handle a work completion that finished with an error status.
 *
 * Recovers the posted work-request context from wc->wr_id and cleans up
 * according to which operation failed.  Flush errors (connection teardown)
 * are expected and not logged; anything else is reported.
 */
static void isert_handle_wc_error(struct ib_wc *wc)
{
	struct isert_wr *wr = _u64_to_ptr(wc->wr_id);
	struct isert_cmnd *isert_pdu = wr->pdu;
	struct isert_connection *isert_conn = wr->conn;
	struct isert_buf *isert_buf = wr->buf;
	struct isert_device *isert_dev = wr->isert_dev;
	struct ib_device *ib_dev = isert_dev->ib_dev;

	TRACE_ENTRY();

	/* IB_WC_WR_FLUSH_ERR is the normal drain on disconnect - stay quiet */
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("conn:%p wr_id:0x%p status:%s vendor_err:0x%0x\n",
		       isert_conn, wr, wr_status_str(wc->status),
		       wc->vendor_err);

	switch (wr->wr_op) {
	case ISER_WR_SEND:
		/* num_sge == 0 marks the zero-length "drain" marker WR */
		if (unlikely(wr->send_wr.num_sge == 0)) /* Drain WR */
			isert_sched_conn_drained(isert_conn);
		else
			isert_pdu_err(&isert_pdu->iscsi);
		break;
	case ISER_WR_RDMA_READ:
		/* Unmap DMA before failing the PDU; guard against double unmap */
		if (isert_buf->sg_cnt != 0) {
			ib_dma_unmap_sg(ib_dev, isert_buf->sg, isert_buf->sg_cnt,
					isert_buf->dma_dir);
			isert_buf->sg_cnt = 0;
		}
		isert_pdu_err(&isert_pdu->iscsi);
		break;
	case ISER_WR_RECV:
		/* this should be the Flush, no task has been created yet */
		break;
	case ISER_WR_RDMA_WRITE:
		if (isert_buf->sg_cnt != 0) {
			ib_dma_unmap_sg(ib_dev, isert_buf->sg, isert_buf->sg_cnt,
					isert_buf->dma_dir);
			isert_buf->sg_cnt = 0;
		}
		/*
		 * RDMA-WR and SEND response of a READ task are sent together,
		 * so when receiving RDMA-WR error, wait until SEND error
		 * arrives to complete the task.
		 */
		break;
	default:
		pr_err("unexpected opcode %d, wc:%p wr_id:%p conn:%p\n",
		       wr->wr_op, wc, wr, isert_conn);
		break;
	}

	TRACE_EXIT();
}
/*
 * Replace the interface's argument set.
 *
 * Clears all existing interface state first, then stores a copy of the
 * supplied arguments.  Thread-safe via the recursive interface lock.
 *
 * @param arguments  New argument set to adopt.
 */
void
_nasm_interface::set(
	__in const nasm_arguments &arguments
	)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_interface_lock);

	// drop any previously held state before taking the new arguments
	nasm_interface::clear();
	m_arguments = arguments;

	TRACE_EXIT("Return Value: 0x%x", NULL);
}
/*
 * Query whether any arguments are currently stored.
 *
 * @return true when the argument set is non-empty.
 */
bool
_nasm_interface::has_arguments(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_interface_lock);

	const bool populated = !m_arguments.empty();

	TRACE_EXIT("Return Value: 0x%x", populated);
	return populated;
}
/*
 * Complete a command locally: pre-release it, flush any pending writes on
 * its connection, then drop the command reference.
 *
 * NOTE(review): cmd_count is unused here; the signature is kept for the
 * caller's sake.
 */
static int isert_send_locally(struct iscsi_cmnd *req, unsigned int cmd_count)
{
	int res;

	TRACE_ENTRY();

	req_cmnd_pre_release(req);
	res = isert_process_all_writes(req->conn);
	cmnd_put(req);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Retrieve the number of registered tests.
 *
 * @return count of entries in the test map.
 */
size_t
_test_suite::get_test_count(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_test_suite_lock);

	const size_t count = m_test_map.size();

	TRACE_EXIT_MESSAGE("%lu", count);
	return count;
}
/*
 * Determine whether the suite has produced a meaningful result yet,
 * i.e. whether at least one run has been performed.
 *
 * @return true once the run count is positive.
 */
bool
_test_suite::is_result_valid(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_test_suite_lock);

	const bool has_run = (m_run_count > 0);

	TRACE_EXIT_MESSAGE("%i", has_run);
	return has_run;
}
/*
 * Report the length of the raw input held by the lexer.
 *
 * @return number of characters in the raw input buffer.
 */
size_t
_lexer_base::size(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_lexer_base_lock);

	const size_t length = m_raw_input.size();

	TRACE_EXIT_MESSAGE("%lu", length);
	return length;
}
/*
 * Query whether a source file path has been associated with this lexer.
 *
 * @return true when the stored source path is non-empty.
 */
bool
_lexer_base::has_source_path(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_lexer_base_lock);

	const bool present = !m_source_path.empty();

	TRACE_EXIT_MESSAGE("%i", present);
	return present;
}
/*
 * Query whether the cursor can step backwards, i.e. it is not at the
 * very first character of the input.
 *
 * @return true when the character position is past zero.
 */
bool
_lexer_base::has_previous_character(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_lexer_base_lock);

	const bool can_step_back = (m_character_position > 0);

	TRACE_EXIT_MESSAGE("%i", can_step_back);
	return can_step_back;
}
/*
 * Query whether more input remains, i.e. the current character is not
 * the end-of-stream sentinel.
 *
 * @return true while the current character differs from CHARACTER_EOS.
 */
bool
_lexer_base::has_next_character(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECURSIVE(m_lexer_base_lock);

	const bool more_input = (lexer_base::get_character() != CHARACTER_EOS);

	TRACE_EXIT_MESSAGE("%i", more_input);
	return more_input;
}
/*
 * Reset the interpreter to a pristine state: the underlying parser is
 * reset and all scopes are flushed.  Thread-safe via the recursive lock.
 */
void
_interpreter::clear(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_lock);

	parser::reset();
	flush_scopes();

	// TODO: setup global scope here

	TRACE_EXIT("Return Value: 0x%x", NULL);
}
/*
 * Retrieve the number of stored arguments.
 *
 * @return count of entries in the argument set.
 */
size_t
_nasm_interface::get_argument_count(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_interface_lock);

	const size_t count = m_arguments.size();

	TRACE_EXIT("Return Value: %lu", count);
	return count;
}
/*
 * Notify the iSCSI core that the Data-Out phase of a command is complete.
 *
 * Always returns 0; the result variable exists for the TRACE_EXIT_RES
 * convention used throughout this driver.
 */
int isert_data_out_ready(struct iscsi_cmnd *cmnd)
{
	int res = 0;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	/* record which task acts as the read thread for debug sanity checks */
	cmnd->conn->rd_task = current;
#endif
	cmnd_rx_end(cmnd);

	TRACE_EXIT_RES(res);
	return res;
}
/* Unload: extension teardown entry point. Unhooks the routing-out
 * callback and frees the module's data.  Unregister failure is logged by
 * CHECK_FCT_DO but deliberately non-fatal (empty "continue" action), so
 * rtd_fini() always runs. */
void fd_ext_fini(void)
{
	TRACE_ENTRY();

	/* Unregister the cb */
	CHECK_FCT_DO( fd_rt_out_unregister ( rtd_hdl, NULL ), /* continue */ );

	/* Destroy the data */
	rtd_fini();

	/* Done */
	return ;
}
/*
 * Stacked fsync: flush this layer's dirty pages, then ask the lower
 * filesystem to sync its copy of the file.
 *
 * Fix vs. original: the early error return used to bypass TRACE_EXIT()
 * (unbalanced entry/exit trace), and TRACE_EXIT() fired before vfs_fsync()
 * so the final result was produced after the exit trace.  Restructured so
 * every path traces exit; returned values are unchanged.
 *
 * NOTE(review): start/end are ignored and the whole mapping is written;
 * filemap_write_and_wait_range() would honor the requested range — confirm
 * against the supported kernel versions before switching.
 */
static int tierfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int rc;

	TRACE_ENTRY();
	rc = filemap_write_and_wait(file->f_mapping);
	if (!rc)
		rc = vfs_fsync(tierfs_file_to_lower(file), datasync);
	TRACE_EXIT();
	return rc;
}
/*
 * Tear down the factory.  A no-op when not initialized; otherwise all
 * tokens are cleared and the initialized flag is dropped.
 */
void
_json_token_factory::destroy(void)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_lock);

	if(!m_initialized) {
		// nothing to tear down
		TRACE_EXIT("Return Value: 0x%x", 0);
		return;
	}
	clear();
	m_initialized = false;

	TRACE_EXIT("Return Value: 0x%x", 0);
}
/*
 * Construct a token of the given type/subtype.
 *
 * Only m_line and m_value need explicit zeroing here; the remaining
 * members default-construct, and set_type() records the type pair.
 *
 * @param type     Primary token type.
 * @param subtype  Optional secondary type qualifier.
 */
_json_token::_json_token(
	__in type_t type,
	__in_opt type_t subtype
	) :
		m_line(0),
		m_value(0)
{
	TRACE_ENTRY();

	set_type(type, subtype);

	TRACE_EXIT("Return Value: 0x%x", 0);
}
/*
 * Module exit: unregister the target template (and, on kernels built
 * with the legacy initiator-driver interface, the SCSI HA module first).
 */
static void __exit exit_this_scst_driver(void)
{
	TRACE_ENTRY();

#ifdef SCST_REGISTER_INITIATOR_DRIVER
	scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
#endif
	scst_unregister_target_template(&driver_target_template);

	TRACE_EXIT();
	return;
}
/*
 * Allocate and initialize a TX PDU for a connection, placing it on the
 * connection's tx_free_list.
 *
 * Allocation proceeds in stages (pdu -> rdma resources -> data buffer ->
 * tx buffer init); on failure each stage unwinds only what earlier stages
 * acquired, via the labels at the bottom.
 *
 * @param isert_conn  Connection owning the PDU.
 * @param size        Size of the PDU data buffer to allocate.
 * @return the new PDU, or NULL on any allocation/init failure.
 */
struct isert_cmnd *isert_tx_pdu_alloc(struct isert_connection *isert_conn,
				      size_t size)
{
	struct isert_cmnd *pdu = NULL;
	int err;

	TRACE_ENTRY();

	pdu = isert_pdu_alloc();
	if (unlikely(!pdu)) {
		pr_err("Failed to alloc pdu\n");
		goto out;
	}

	err = isert_alloc_for_rdma(pdu, 4, isert_conn);
	if (unlikely(err)) {
		pr_err("Failed to alloc sge and wr for tx pdu\n");
		goto out;
	}

	err = isert_buf_alloc_data_buf(isert_conn->isert_dev->ib_dev,
				       &pdu->buf, size, DMA_TO_DEVICE);
	if (unlikely(err)) {
		pr_err("Failed to alloc tx pdu buf sz:%zd\n", size);
		goto buf_alloc_failed;
	}

	err = isert_pdu_tx_buf_init(pdu, isert_conn);
	if (unlikely(err < 0)) {
		pr_err("Failed to init tx pdu wr:%p size:%zd err:%d\n",
		       &pdu->wr, size, err);
		goto buf_init_failed;
	}

	isert_tx_pdu_init(pdu, isert_conn);
	isert_pdu_set_hdr_plain(pdu);

	list_add_tail(&pdu->pool_node, &isert_conn->tx_free_list);
	goto out;

	/* unwind in reverse order of acquisition */
buf_init_failed:
	isert_buf_release(&pdu->buf);
buf_alloc_failed:
	isert_pdu_kfree(pdu);
	pdu = NULL;
out:
	TRACE_EXIT();
	return pdu;
}
/*
 * Remove and return the hash-table entry matching 'key', or NULL when the
 * bucket is empty or no entry in the chain matches.
 *
 * Fixes vs. original:
 *  - the un-braced "if (pX->pNext == NULL) ... return(pX);" made the
 *    return unconditional, leaving the entire list walk below dead code;
 *  - the head of the bucket was cleared without verifying its key matched;
 *  - the scan loop ("while (pX->pNext)") never tested the LAST node's key,
 *    so a non-matching tail node could be unlinked as a false match;
 *  - TRACE_ENTRY named the wrong function (Add_Hash_Entry).
 */
void *HdmFcpTargetInit::Remove_Hash_Entry(U32 key)
{
	Xlt *pX, *pXlast;

	TRACE_ENTRY(HdmFcpTargetInit::Remove_Hash_Entry);

	// get the first entry for this key or NULL
	pX = pXlt[Hash(key)];
	if (pX == NULL)
		return(NULL);	// bad key

	// shuffle through the whole chain looking for our key
	pXlast = NULL;
	while (pX) {
		if (key == pX->key.l)
			break;		// found it
		pXlast = pX;
		pX = pX->pNext;
	}

	if (pX) {	// not NULL, we found it at last.
		// remove our match from the list
		if (pXlast) {	// not first
			pXlast->pNext = pX->pNext;
		} else {	// first in list
			pXlt[Hash(key)] = pX->pNext;
		}
	}

	// pX is NULL if we did not find our key
	return(pX);
} // Remove_Hash_Entry
/*
 * Receive a login request PDU from the transport and hand it to the
 * user-space login channel.
 *
 * Validates the connection device's state machine first: a new request
 * is only legal when no previous request is in flight (CS_INIT /
 * CS_RSP_FINISHED) or the BHS of the previous one is still being read
 * (CS_REQ_BHS).  On success the device is primed to read the request BHS
 * and any reader waiting on the device's waitqueue is woken.
 *
 * @return 0 on success, -EINVAL on bad connection or illegal state.
 */
int isert_login_req_rx(struct iscsi_cmnd *login_req)
{
	struct isert_conn_dev *dev = isert_get_priv(login_req->conn);
	int res = 0;

	TRACE_ENTRY();

	if (!dev) {
		PRINT_ERROR("Received PDU %p on invalid connection",
			    login_req);
		res = -EINVAL;
		goto out;
	}

	switch (dev->state) {
	case CS_INIT:
	case CS_RSP_FINISHED:
		/* idle states must not already hold a pending request */
		if (unlikely(dev->login_req != NULL))
			sBUG();
		break;
	case CS_REQ_BHS: /* Got login request before done handling old one */
		break;
	case CS_REQ_DATA:
	case CS_REQ_FINISHED:
	case CS_RSP_BHS:
	case CS_RSP_DATA:
		PRINT_WARNING("Received login PDU while handling previous one. State:%d",
			      dev->state);
		res = -EINVAL;
		goto out;
	default:
		sBUG();
	}

	/* publish the request under pdu_lock, then advance the state */
	spin_lock(&dev->pdu_lock);
	dev->login_req = login_req;
	dev->read_len = sizeof(login_req->pdu.bhs);
	dev->read_buf = (char *)&login_req->pdu.bhs;
	dev->state = CS_REQ_BHS;
	spin_unlock(&dev->pdu_lock);

	/* wake the user-space reader blocked on this device */
	wake_up(&dev->waitqueue);

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Copy-construct a token, duplicating every member (line, source text,
 * type/subtype, raw text and numeric value) from 'other' and forwarding
 * to the json_uuid_class base copy constructor.
 *
 * @param other  Token to duplicate.
 */
_json_token::_json_token(
	__in const _json_token &other
	) :
		json_uuid_class(other),
		m_line(other.m_line),
		m_source(other.m_source),
		m_subtype(other.m_subtype),
		m_text(other.m_text),
		m_type(other.m_type),
		m_value(other.m_value)
{
	TRACE_ENTRY();
	TRACE_EXIT("Return Value: 0x%x", 0);
}
/*
 * Table-update callback: modify an existing DeviceDescriptor row to match
 * the in-memory device state.
 *
 * If the prior lookup failed with EOF/KeyNotFound the row does not exist
 * yet and we fall back to adding it via DmTblUpdAddDevDesc().
 *
 * NOTE(review): for other non-OK statuses this code only logs and then
 * proceeds with the modify anyway — confirm that is intended.
 * NOTE(review): m_ModifyRow is allocated here but, if Initialize() fails,
 * it is neither Sent nor freed — looks like a leak on that path; verify
 * TSModifyRow ownership rules before changing.
 */
STATUS DriveMonitorIsm::DmTblUpdModifyDevDesc(void *pClientContext, STATUS status)
{
	DM_TBL_CONTEXT *pTC = (DM_TBL_CONTEXT *)pClientContext;
	DeviceDescriptor *pTD1, *pTD = (DeviceDescriptor *)pTC->pDMState->pDevice;

	TRACE_ENTRY(DmTblUpdModifyDevDesc);

	if (status != ercOK) {
		TRACE_HEX(TRACE_L8, "\n\rDmTblUpdModifyDevDesc: status = ", status);
		if ((status == ercEOF) || (status == ercKeyNotFound)) {
			// row missing: create it instead of modifying
			status = DmTblUpdAddDevDesc(pTC, ercOK);
			return status;
		}
	}

	// Modify fields that changed (maybe)
	pTD1 = pTC->pTDTableRow;
	pTD1->CurrentStatus = pTD->CurrentStatus;
	pTD1->InqData = pTD->InqData;
	strcpy(pTD1->SerialNumber, pTD->SerialNumber);

	// Allocate an Modify Row object for the DeviceDescriptor Table.
	m_ModifyRow = new(tUNCACHED) TSModifyRow;

	// Initialize the modify row operation.
	status = m_ModifyRow->Initialize(
		this,					// DdmServices pDdmServices,
		DEVICE_DESC_TABLE,			// String64 rgbTableName,
		"rid",					// String64 prgbKeyFieldName,
		(void*)&pTC->pDMState->rid,		// void* pKeyFieldValue,
		sizeof(rowID),				// U32 cbKeyFieldValue,
		pTC->pTDTableRow,			// void* prgbRowData,
		sizeof(DeviceDescriptor),		// U32 cbRowData,
		0,					// U32 cRowsToModify,
		NULL,					// U32 *pcRowsModifiedRet,
		NULL,					// rowID *pRowIDRet,
		0,					// U32 cbMaxRowID,
		(pTSCallback_t)&DmTableUpdateDevDEnd,	// pTSCallback_t pCallback,
		(void*)pTC				// void* pContext
	);

	// Initiate the Modify Row operation.
	if (status == ercOK)
		m_ModifyRow->Send();

	return status;
} // DmTblUpdModifyDevDesc
/* The callback called on new messages: score the candidate peers for an
 * outgoing message.  An empty candidate list is a no-op; otherwise the
 * work is delegated to rtd_process(). */
static int rtd_out(void * cbdata, struct msg ** pmsg, struct fd_list * candidates)
{
	struct msg * msg = *pmsg;

	TRACE_ENTRY("%p %p %p", cbdata, msg, candidates);
	CHECK_PARAMS(msg && candidates);

	/* nothing to rank when no candidates remain */
	if (FD_IS_LIST_EMPTY(candidates))
		return 0;

	return rtd_process( msg, candidates );
}
/*
 * Initialize the isert login character devices: reserve a char-dev region,
 * allocate the per-connection device array, create the sysfs class, set up
 * the listener and connection cdevs, then start the datamover.
 *
 * Fixes vs. original:
 *  - class_create() can fail; its return was never checked with IS_ERR(),
 *    which would lead to a bad-pointer use later.  Now handled via the
 *    common fail path (isert_cleanup_login_devs() is assumed to tolerate a
 *    partially-initialized state, as the existing kzalloc-failure path
 *    already relies on).
 *  - kcalloc() replaces kzalloc(n * size) for overflow-safe array alloc.
 *
 * @param ndevs  Number of connection devices to create (chosen at load time).
 * @return 0 on success, negative errno on failure.
 */
int __init isert_init_login_devs(unsigned int ndevs)
{
	int res;
	unsigned int i;

	TRACE_ENTRY();

	n_devs = ndevs;

	res = alloc_chrdev_region(&devno, 0, n_devs, "isert_scst");
	isert_major = MAJOR(devno);
	if (unlikely(res < 0)) {
		PRINT_ERROR("isert: can't get major %d\n", isert_major);
		goto out;
	}

	/*
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	isert_conn_devices = kcalloc(n_devs, sizeof(struct isert_conn_dev),
				     GFP_KERNEL);
	if (unlikely(!isert_conn_devices)) {
		res = -ENOMEM;
		goto fail; /* Make this more graceful */
	}

	isert_class = class_create(THIS_MODULE, "isert_scst");
	if (unlikely(IS_ERR(isert_class))) {
		res = PTR_ERR(isert_class);
		PRINT_ERROR("isert: can't create class: %d\n", res);
		goto fail;
	}

	isert_setup_listener_cdev(&isert_listen_dev);

	/* Initialize each device. */
	for (i = 0; i < n_devs; i++)
		isert_setup_cdev(&isert_conn_devices[i], i);

	res = isert_datamover_init();
	if (unlikely(res)) {
		PRINT_ERROR("Unable to initialize datamover: %d\n", res);
		goto fail;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

fail:
	isert_cleanup_login_devs();
	goto out;
}
/*
 * Module exit: unregister the MO-disk device handlers (both performance
 * and normal variants), tearing down their proc entries first on
 * CONFIG_SCST_PROC kernels.
 */
static void __exit exit_scst_modisk_driver(void)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_PROC
	scst_dev_handler_destroy_std_proc(&modisk_devtype_perf);
	scst_dev_handler_destroy_std_proc(&modisk_devtype);
#endif
	scst_unregister_dev_driver(&modisk_devtype_perf);
	scst_unregister_dev_driver(&modisk_devtype);

	TRACE_EXIT();
	return;
}
/*
 * Record where this token came from.
 *
 * @param source  Originating source text/identifier.
 * @param line    Optional line number within the source.
 */
void
_json_token::set_metadata(
	__in const std::string &source,
	__in_opt size_t line
	)
{
	TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_lock);

	m_line = line;
	m_source = source;

	TRACE_EXIT("Return Value: 0x%x", 0);
}
/*
 * DDM initialize entry point: dump the configuration data at trace level
 * L8, acknowledge the Initialize message, and report success.
 *
 * Fix vs. original: removed the unused local 'STATUS status;' which was
 * declared but never read or written.
 *
 * @param pMsg  Initialize message to reply to.
 * @return OS_DETAIL_STATUS_SUCCESS always.
 */
STATUS EchoScsiIsm::Initialize(Message *pMsg)
{
	TRACE_ENTRY(EchoScsiIsm::Initialize);

	TRACE_DUMP_HEX(TRACE_L8, "\n\rEchoScsiIsm::Initialize Config Data",
		       (U8 *)&config, sizeof(ES_CONFIG));

	Reply(pMsg);
	return OS_DETAIL_STATUS_SUCCESS;
} // Initialize