/*
 * Append a virtual-address SG descriptor that carries an address
 * translation context (xctx) to the SG table.
 *
 * sgdt            - table to append to
 * virtual_address - CPU virtual address of the buffer
 * translation_ctx - opaque translation context; NULL selects the
 *                   cheaper "virtual without xctx" entry format
 * size            - byte length described by this entry
 *
 * Returns 0 on success, -1 when the table has no free slot(s).
 *
 * With 64-bit pointers the descriptor is a wide entry and consumes two
 * table slots; with 32-bit pointers it fits in one.
 */
int sgdt_append_virtual( sgd_tbl_t* sgdt, MV_PVOID virtual_address, MV_PVOID translation_ctx, MV_U32 size )
{
    sgd_t* sg;
#ifdef USES_64B_POINTER
    sgd_v64_t* vsg;
#else
    sgd_v32_t* vsg;
#endif

    /* No translation context: delegate to the single-purpose helper. */
    if( translation_ctx == 0 )
        return sgdt_append_virtual_wo_xctx(sgdt,virtual_address,size);

    sg = &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];
#ifdef USES_64B_POINTER
    vsg = (sgd_v64_t*) sg;
    /* Wide (two-slot) entry: need room for 2 more descriptors. */
    MV_ASSERT( sgdt->Valid_Entry_Count+2<=sgdt->Max_Entry_Count );
    if( sgdt->Valid_Entry_Count + 2 > sgdt->Max_Entry_Count )
        return -1; // not enough space
    sgdt_clear_eot(sgdt);
    vsg->u1.vaddr = virtual_address;
    vsg->u2.xctx = translation_ctx;
    vsg->flags = SGD_WIDE | SGD_VIRTUAL | SGD_EOT;
    vsg->flagsEx = SGD_X64;
    /* Consume the first of the two slots here; the shared increment
     * after the #endif accounts for the second. */
    sgdt->Valid_Entry_Count++;
#else // USES_64B_POINTER
    vsg = (sgd_v32_t*) sg;
    /* Narrow (one-slot) entry. */
    MV_ASSERT( sgdt->Valid_Entry_Count+1<=sgdt->Max_Entry_Count );
    if( sgdt->Valid_Entry_Count + 1 > sgdt->Max_Entry_Count )
        return -1; // not enough space
    sgdt_clear_eot(sgdt);
    vsg->vaddr = virtual_address;
    vsg->xctx = translation_ctx;
    vsg->flags = SGD_VIRTUAL | SGD_EOT;
#endif // !USES_64B_POINTER
    vsg->size = size;
    sgdt->Valid_Entry_Count++;
    sgdt->Byte_Count += size;
    return 0;
}
/*
 * Dispatch one received buffer 'v' for the connection identified by
 * v->rank: account rpool usage, opportunistically upgrade the
 * connection to the RC fast path, process the piggybacked ACK, and
 * hand the buffer to the receive window (or release it if it is a
 * pure ACK).
 *
 * Always returns 0.
 */
int mvdev_process_recv(mv_rbuf * v)
{
    mvdev_connection_t *c = &(mvdev.connections[v->rank]);

    /* Buffer came from a receive pool: drop our reference and (debug)
     * validate the pool's consistency. */
    if(v->rpool) {
        DECR_RPOOL(((mv_rpool *) (v->rpool)));
        CHECK_RPOOL(((mv_rpool *) (v->rpool)));
    }

    if(v->has_header) {
        /* is this something that could have gone over rcfp ?
         * Small messages past the threshold trigger setup of an RC
         * fast-path channel, bounded by max_rcfp_connections. */
        if(v->byte_len < 2048 && !c->rcfp_recv_enabled &&
                ++(c->rcfp_messages) > mvparams.rcfp_threshold &&
                (c->rc_enabled || c->xrc_enabled) &&
                mvdev.rcfp_connections < mvparams.max_rcfp_connections) {
            ++(mvdev.rcfp_connections);
            MV_Setup_RC_FP(c);
        }

        mvdev_packet_header *p = (mvdev_packet_header *) v->header_ptr;

        /* Every packet carries the peer's last received seqnum. */
        mvdev_process_ack(c, p->last_recv);

        /* Locate the payload start for the packet types that carry data
         * after a typed header. */
        switch(p->type) {
            case MVDEV_PACKET_EAGER_START:
                MV_SBUF_SET_DATA(v, mvdev_packet_eager_start);
                break;
            case MVDEV_PACKET_EAGER_NEXT:
                MV_SBUF_SET_DATA(v, mvdev_packet_eager_next);
                break;
            case MVDEV_PACKET_R3_DATA:
                MV_SBUF_SET_DATA(v, mvdev_packet_r3_data);
                break;
        }

        switch(p->type) {
            case MVDEV_PACKET_ACK:
                /* Pure ACK: carries no sequence number and is not
                 * delivered to the receive window. */
                MV_ASSERT(v->seqnum == 0);
                release_mv_buf(v);
                break;
            default:
                MV_ASSERT(v->seqnum != 0);
                ACK_CREDIT_CHECK(c, v);
#ifdef MV_PROFILE
                c->msg_info.total_recv_bytes += v->byte_len_full;
                c->msg_info.total_recvb_bytes += v->max_data_size;
#endif
                mvdev_place_recvwin(c, v);
        }
    } else {
        /* Header-less buffer (e.g. fast-path data): straight into the
         * receive window after credit accounting. */
        ACK_CREDIT_CHECK(c, v);
        mvdev_place_recvwin(c, v);
    }

    return 0;
}
/*
 * Carve per-module extension areas out of one contiguous device
 * extension buffer.  Each module that reports a cached-memory size gets
 * a MODULE_HEADER_SIZE header followed by its (8-byte rounded)
 * extension, and the HBA module's manager records where each piece
 * lives.
 */
void Module_AssignModuleExtension(MV_PVOID device_extension, MV_U16 max_io)
{
    MV_PTR_INTEGER cursor = (MV_PTR_INTEGER)device_extension;
    PHBA_Extension hba;
    PModule_Manage manager;
    PModule_Header hdr;
    MV_U8 id;
    MV_U32 needed;

    /* The HBA module is assumed to be the first slice of the buffer. */
    MV_ASSERT(MODULE_HBA==0);

    hba = (PHBA_Extension)((MV_PTR_INTEGER)device_extension + MODULE_HEADER_SIZE);
    manager = &hba->Module_Manage;

    for (id = 0; id < MAX_MODULE_NUMBER; id++) {
        /* Modules without a size callback get no extension. */
        if (NULL == module_set[id].get_mem_size)
            continue;

        /* Two statements on purpose: ROUNDING is a macro and must not
         * re-evaluate the callback. */
        needed = module_set[id].get_mem_size(RESOURCE_CACHED_MEMORY, max_io);
        needed = ROUNDING(needed, 8);

        hdr = (PModule_Header)cursor;
        hdr->extension_size = needed;
        hdr->header_size = MODULE_HEADER_SIZE;
        hdr->module_id = id;
        hdr->hba_extension = hba;

        manager->resource[id].module_extension =
            (MV_PVOID)(cursor + MODULE_HEADER_SIZE);
        manager->resource[id].extension_size = needed;

        cursor += MODULE_HEADER_SIZE + needed;
    }
}
/*
 * Record the slice of uncached (DMA-able) memory given to one module.
 * The module's reported requirement is rounded up to 8 bytes, and a
 * debug assertion confirms the supplied region is large enough.
 */
void Module_AssignUncachedMemory(
    IN PModule_Manage module_manage,
    IN MV_PVOID virtual_addr,
    IN MV_PHYSICAL_ADDR physical_addr,
    IN MV_U32 memory_size,
    IN MV_U16 max_io,
    MV_U8 module_id
    )
{
    MV_PTR_INTEGER region_end = (MV_PTR_INTEGER)virtual_addr;
    MV_U32 needed;

    /* No size callback means the module takes no uncached memory. */
    if (NULL == module_set[module_id].get_mem_size)
        return;

    needed = module_set[module_id].get_mem_size(RESOURCE_UNCACHED_MEMORY, max_io);
    needed = ROUNDING(needed, 8);

    module_manage->resource[module_id].uncached_size = needed;
    module_manage->resource[module_id].uncached_address = (MV_PVOID)virtual_addr;
    module_manage->resource[module_id].uncached_physical_address = physical_addr;

    region_end += needed;

    /* Do we have enough uncached memory? */
    MV_ASSERT( (region_end-(MV_PTR_INTEGER)virtual_addr)<=memory_size );
}
/*
 * Append a "VP" descriptor (physical address plus its virtual alias)
 * to the SG table.  VP entries are wide: they occupy two slots.
 *
 * Returns 0 on success, -1 when fewer than two slots remain.
 */
int sgdt_append_vp( sgd_tbl_t* sgdt, MV_PVOID virtual_address, MV_U32 size, MV_U32 address, MV_U32 addressHigh )
{
    sgd_vp_t* entry;

    MV_ASSERT( sgdt->Valid_Entry_Count+2<=sgdt->Max_Entry_Count );
    if( sgdt->Valid_Entry_Count + 2 > sgdt->Max_Entry_Count )
        return -1; // not enough space

    entry = (sgd_vp_t*) &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];

    /* The new entry becomes the table's end-of-table marker. */
    sgdt_clear_eot(sgdt);

    entry->baseAddr.parts.low  = address;
    entry->baseAddr.parts.high = addressHigh;
    entry->flags   = SGD_VP | SGD_WIDE | SGD_EOT;
    entry->size    = size;
    entry->u.vaddr = virtual_address;
    entry->flagsEx = SGD_X64;

    sgdt->Valid_Entry_Count += 2;
    sgdt->Byte_Count += size;
    return 0;
}
/*
 * Append a virtual-address descriptor that carries no translation
 * context.  This is the single-slot variant used by
 * sgdt_append_virtual() when translation_ctx is NULL.
 *
 * Returns 0 on success, -1 when the table is full.
 */
static int sgdt_append_virtual_wo_xctx( sgd_tbl_t* sgdt, MV_PVOID virtual_address, MV_U32 size )
{
    sgd_v_t* entry;

    MV_ASSERT( sgdt->Valid_Entry_Count+1<=sgdt->Max_Entry_Count );
    if( sgdt->Valid_Entry_Count + 1 > sgdt->Max_Entry_Count )
        return -1; // not enough space

    entry = (sgd_v_t*) &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];

    /* The new entry takes over the end-of-table marker. */
    sgdt_clear_eot(sgdt);

    entry->flags   = SGD_EOT | SGD_VWOXCTX;
    entry->size    = size;
    entry->u.vaddr = virtual_address;

    sgdt->Valid_Entry_Count++;
    sgdt->Byte_Count += size;
    return 0;
}
/*
 * Append a physical-address descriptor with an attached context (xctx)
 * to the SG table.  PCTX entries are wide (two slots).  Unlike the
 * int-returning appenders, this one only asserts on overflow.
 */
void sgdt_append_pctx( sgd_tbl_t* sgdt, MV_U32 address, MV_U32 addressHigh, MV_U32 size, MV_PVOID xctx )
{
    sgd_pctx_t* entry =
        (sgd_pctx_t*) &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];

    MV_ASSERT( sgdt->Valid_Entry_Count+2<=sgdt->Max_Entry_Count );

    /* The new entry becomes the end-of-table marker. */
    sgdt_clear_eot(sgdt);

    entry->flags = SGD_PCTX | SGD_WIDE | SGD_EOT;
    entry->baseAddr.parts.low  = address;
    entry->baseAddr.parts.high = addressHigh;
    entry->size    = size;
    entry->u.xctx  = xctx;
    entry->flagsEx = SGD_X64;
    entry->rsvd    = 0;

    sgdt->Valid_Entry_Count += 2;
    sgdt->Byte_Count += size;
}
///// Public Methods std_map* std_map_ctor(std_map* self, std_map_compare_func compare, std_map_dtor_func key_dtor, std_map_dtor_func data_dtor, std_map_node_type eType) { std_map_node* temp; if (!self) { self = (std_map*) GaloisMalloc( sizeof(std_map) ); } if (self) { self->compare = compare; self->key_dtor = key_dtor; self->data_dtor = data_dtor; self->m_eType = eType; self->uiSize = 0; temp = self->m_pNil = (std_map_node*) GaloisMalloc(sizeof(std_map_node)); MV_ASSERT(temp); temp->pParent = temp->pLeft = temp->pRight = temp; temp->bRed = FALSE; temp->pKey = 0; temp->pData = 0; temp = self->m_pRoot = (std_map_node*) GaloisMalloc(sizeof(std_map_node)); MV_ASSERT(temp); temp->pParent = temp->pLeft = temp->pRight = self->m_pNil; temp->bRed = FALSE; temp->pKey = 0; temp->pData = 0; } return self; }
static void hba_proc_msg(struct mv_hba_msg *pmsg) { PHBA_Extension phba; struct scsi_device *psdev; /* we don't do things without pmsg->data */ if (NULL == pmsg->data) return; phba = (PHBA_Extension) pmsg->data; MV_DBG(DMSG_HBA, "__MV__ In hba_proc_msg.\n"); MV_ASSERT(pmsg); switch (pmsg->msg) { case EVENT_DEVICE_ARRIVAL: if (scsi_add_device(phba->host, 0, pmsg->param, 0)) MV_DBG(DMSG_SCSI, "__MV__ add scsi disk %d-%d-%d failed.\n", 0, pmsg->param, 0); else MV_DBG(DMSG_SCSI, "__MV__ add scsi disk %d-%d-%d.\n", 0, pmsg->param, 0); break; case EVENT_DEVICE_REMOVAL: psdev = scsi_device_lookup(phba->host, 0, pmsg->param, 0); if (NULL != psdev) { MV_DBG(DMSG_SCSI, "__MV__ remove scsi disk %d-%d-%d.\n", 0, pmsg->param, 0); scsi_remove_device(psdev); scsi_device_put(psdev); } else { MV_DBG(DMSG_SCSI, "__MV__ no disk to remove %d-%d-%d\n", 0, pmsg->param, 0); } break; case EVENT_HOT_PLUG: sata_hotplug(pmsg->data, pmsg->param); break; default: break; } }
/*
 * Start a UD zero-copy rendezvous receive for 'rhandle'.  Falls back to
 * the R3 protocol when no rendezvous QP is free or the destination
 * buffer cannot be registered; otherwise claims a QP, posts the
 * zero-copy receive buffers, and sends the rendezvous reply.
 */
void mvdev_recv_ud_zcopy(MPIR_RHANDLE * rhandle)
{
    mv_qp_pool_entry *pool_qp;

    D_PRINT("got zcopy start -- len: %d\n", rhandle->len);

    /* A NULL buffer can only mean a zero-length receive; point it at
     * the shared placeholder so later code has a valid address. */
    if (NULL == rhandle->buf) {
        rhandle->buf = &nullrbuffer;
    }

    /* No rendezvous QP available: take the R3 path instead. */
    if (NULL == mvdev.rndv_pool_qps_free_head) {
        D_PRINT("No QPs available -- using R3\n");
        rhandle->protocol = MVDEV_PROTOCOL_R3;
        mvdev_recv_r3(rhandle);
        return;
    }

    /* Direct registration of the user buffer; failure also forces R3. */
    rhandle->dreg_entry = dreg_register(rhandle->buf, rhandle->len,
                                        DREG_ACL_WRITE);
    if (NULL == rhandle->dreg_entry) {
        D_PRINT("Cannot register mem -- using R3\n");
        rhandle->protocol = MVDEV_PROTOCOL_R3;
        mvdev_recv_r3(rhandle);
        return;
    }

    GET_RNDV_QP(pool_qp);
    rhandle->qp_entry = pool_qp;
    MV_ASSERT(rhandle->qp_entry != NULL);

    D_PRINT("before posting recv\n");
    mvdev_post_zcopy_recv(rhandle);
    D_PRINT("Finished posting buffers\n");

    MV_Rndv_Send_Reply(rhandle);
}
/*
 * Initialize every registered module, walking the table from the
 * highest module id down to 0.  Module initialization is synchronous.
 */
void Module_InitializeAll(PModule_Manage p_module_manage, MV_U16 max_io)
{
    MV_I8 idx;

    for (idx = MAX_MODULE_NUMBER - 1; idx >= 0; idx--) {
        /* Use this pass to verify module_set[] is ordered by id. */
        MV_ASSERT( module_set[idx].module_id==idx );

        if (NULL == module_set[idx].module_initialize)
            continue;

        module_set[idx].module_initialize(
            p_module_manage->resource[idx].module_extension,
            p_module_manage->resource[idx].extension_size,
            max_io);
    }
}
/*
 * Receive a continuation packet of an in-progress R3 transfer: copy the
 * payload at the current user-buffer offset and complete the request
 * when all bytes have arrived.  The active request was stashed on the
 * connection (c->rhandle) by MV_Rndv_Receive_R3_Data.
 *
 * BUG FIX: removed a stray double semicolon (empty statement) after the
 * rhandle initialization.
 */
void MV_Rndv_Receive_R3_Data_Next(mv_rbuf * v, mvdev_connection_t * c)
{
    MPIR_RHANDLE *rhandle = (MPIR_RHANDLE *) c->rhandle;

    memcpy(((char *) rhandle->buf) + rhandle->bytes_copied_to_user,
           v->data_ptr, v->byte_len_data);
    rhandle->bytes_copied_to_user += v->byte_len_data;

    /* Diagnostic only: an overrun here indicates a protocol error. */
    if(rhandle->bytes_copied_to_user > rhandle->len) {
        fprintf(stderr, "copied: %d, len: %d, this: %d\n",
                rhandle->bytes_copied_to_user, rhandle->len,
                v->byte_len_data);
    }
    MV_ASSERT(rhandle->bytes_copied_to_user <= rhandle->len);

    if (rhandle->bytes_copied_to_user == rhandle->len) {
        RECV_COMPLETE(rhandle);
        D_PRINT("R3 recv complete from rank %d total %d",
                c->global_rank, rhandle->len);
    }
}
/*
 * Position an SG-descriptor iterator at byte 'offset' into the chain
 * starting at 'sgd', with 'count' bytes remaining to iterate.  Whole
 * descriptors smaller than the remaining offset are skipped first.
 */
void sgd_iter_init( sgd_iter_t* iter, sgd_t* sgd, MV_U32 offset, MV_U32 count )
{
    MV_U32 len;

    /* Skip complete descriptors until 'offset' falls inside one. */
    sgd_getsz(sgd, len);
    while (len <= offset) {
        offset -= len;
        /* The requested offset must not run past the end of the chain. */
        MV_ASSERT( !sgd_eot(sgd) );
        sgd_inc(sgd);
        sgd_getsz(sgd, len);
    }

    iter->sgd = sgd;
    iter->offset = offset;
    iter->remainCnt = count;
}
/*
 * Start modules from 'begin_module' downward; the lowest id (the core
 * driver) is started last.  Only ONE module with a module_start routine
 * is started per call — the function returns immediately after invoking
 * it.  Modules without a start routine are marked started in the status
 * bitmap and skipped.
 *
 * BUG FIX: the MV_ASSERT on begin_module was inside the loop, so it was
 * (a) re-evaluated every iteration on a loop-invariant value and
 * (b) never evaluated at all when the loop body did not run.  It is now
 * checked once, up front.
 */
void Module_StartAll(PModule_Manage p_module_manage, MV_U8 begin_module)
{
    MV_I8 i = 0;

    MV_ASSERT(begin_module<MAX_MODULE_NUMBER);

    /*
     * Start module from the lower level, the first one is the core driver.
     * Every time we only start one module.
     */
    for ( i=begin_module; i>=0; i-- ) {
        if ( module_set[i].module_start ) {
            module_set[i].module_start(
                p_module_manage->resource[i].module_extension);
            return;
        }

        /* If the module_start function is NULL, continue to the next. */
        p_module_manage->status |= (1<<i);
    }
}
/*
 * Append a plain physical-address descriptor (single slot) to the SG
 * table and mark it as the new end-of-table entry.
 */
void sgdt_append( sgd_tbl_t* sgdt, MV_U32 address, MV_U32 addressHigh, MV_U32 size )
{
    sgd_t* entry = &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];

    MV_ASSERT( sgdt->Valid_Entry_Count+1<=sgdt->Max_Entry_Count );

    /* Previous tail loses its end-of-table mark. */
    sgdt_clear_eot(sgdt);

    entry->flags = 0;
    entry->baseAddr.parts.low  = address;
    entry->baseAddr.parts.high = addressHigh;
    entry->size = size;
    sgd_mark_eot(entry);

    sgdt->Valid_Entry_Count += 1;
    sgdt->Byte_Count += size;
}
/*
 * Receive the first R3 data packet of a rendezvous transfer: copy the
 * payload into the user buffer at the current offset.  If the transfer
 * is complete the request finishes; otherwise the request is remembered
 * on the connection so continuation packets can find it.
 */
void MV_Rndv_Receive_R3_Data(mv_rbuf * v, mvdev_connection_t * c,
        mvdev_packet_r3_data * h)
{
    MPIR_RHANDLE *req = (MPIR_RHANDLE *) ID_TO_REQ(h->rreq);

    /* Append this packet's payload at the current copy offset. */
    memcpy(((char *) req->buf) + req->bytes_copied_to_user,
           v->data_ptr, v->byte_len_data);
    req->bytes_copied_to_user += v->byte_len_data;

    /* Diagnostic only: an overrun indicates a protocol error. */
    if (req->bytes_copied_to_user > req->len) {
        fprintf(stderr, "copied: %d, len: %d, this: %d\n",
                req->bytes_copied_to_user, req->len, v->byte_len_data);
    }
    MV_ASSERT(req->bytes_copied_to_user <= req->len);

    if (req->bytes_copied_to_user == req->len) {
        RECV_COMPLETE(req);
        D_PRINT("VI %3d R3 RECV COMPLETE total %d\n",
                c->global_rank, req->len);
    } else {
        /* More packets coming: stash the request on the connection. */
        c->rhandle = req;
    }
}
/*
 * Append an already-built descriptor (narrow or wide) to the SG table
 * by raw copy, then re-mark the end-of-table on the new tail.
 */
void sgdt_append_sgd( sgd_tbl_t* sgdt, sgd_t* sgd )
{
    sgd_t* dest = &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];
    /* Wide descriptors span two table slots. */
    MV_U8 slots = (sgd->flags & SGD_WIDE) ? 2 : 1;
    MV_U32 bytes;

    sgd_getsz(sgd, bytes);

    MV_ASSERT( sgdt->Valid_Entry_Count+slots<=sgdt->Max_Entry_Count );
    sgdt_clear_eot(sgdt);

    MV_CopyMemory( dest, sgd, sizeof(sgd_t) * slots );
    sgd_mark_eot(dest);

    sgdt->Valid_Entry_Count += slots;
    sgdt->Byte_Count += bytes;
}
/*
 * Post receive work requests covering the whole zero-copy rendezvous
 * message, one MTU-sized packet each, in chains of up to
 * ZCOPY_RECV_CHUNK WRs per ibv_post_recv call.  Each WR has two SGEs:
 * a 40-byte scratch area for the UD Global Routing Header, then the
 * payload slice that lands directly in the user buffer.
 *
 * Fixes: removed a stray double semicolon after the ceil() call, and
 * replaced the magic chain sizes 50/100 with a named constant.
 */
static inline void mvdev_post_zcopy_recv(MPIR_RHANDLE * rhandle)
{
    /* Number of receive WRs chained per ibv_post_recv call. */
    enum { ZCOPY_RECV_CHUNK = 50 };

    mv_qp_pool_entry *rqp = (mv_qp_pool_entry *) rhandle->qp_entry;
    char * current_buf;
    /* One buffer per MTU-sized packet of the message. */
    int posted_buffers = ceil((double) rhandle->len / mvparams.mtu);

    rhandle->start_seq = 0;

    if(posted_buffers <= 0) {
        fprintf(stderr, "Posted buffers are zero or less! %d\n",
                posted_buffers);
    }

    {
        int current_len = 0, bytes_to_post = 0;
        int i = 0, j = 0;
        struct ibv_recv_wr *bad_wr;
        struct ibv_recv_wr rr[ZCOPY_RECV_CHUNK];
        struct ibv_sge sge_entry[ZCOPY_RECV_CHUNK * 2];

        current_buf = rhandle->buf;

        while(current_len < rhandle->len) {
            for(i = 0; i < ZCOPY_RECV_CHUNK; i++) {
                MV_ASSERT(j < posted_buffers);
                bytes_to_post = MIN(mvparams.mtu,
                                    (rhandle->len - current_len));

                /* Chain this WR onto the previous one. */
                if(i > 0) {
                    rr[i - 1].next = &(rr[i]);
                }
                rr[i].next = NULL;
                rr[i].wr_id = j;
                rr[i].num_sge = 2;
                rr[i].sg_list = &(sge_entry[i * 2]);

                /* SGE 0: scratch space for the 40-byte UD GRH. */
                sge_entry[i * 2].addr = (uintptr_t) mvdev.grh_buf;
                sge_entry[i * 2].length = 40;
                sge_entry[i * 2].lkey = mvdev.grh_mr[0]->lkey;

                /* SGE 1: payload slice, directly in the user buffer. */
                sge_entry[i * 2 + 1].addr =
                    (uintptr_t) (current_buf + current_len);
                sge_entry[i * 2 + 1].length = bytes_to_post;
                sge_entry[i * 2 + 1].lkey =
                    ((dreg_entry *) (rhandle->dreg_entry))->memhandle[0]->lkey;

                current_len += bytes_to_post;
                j++;

                if(current_len >= rhandle->len) {
                    break;
                }
            }

            if(ibv_post_recv(rqp->ud_qp, rr, &bad_wr)) {
                error_abort_all(IBV_RETURN_ERR,"cannot post recv (%d)\n", i);
            }
        }
        MV_ASSERT(current_len == rhandle->len);
    }
}
/*
 * Push the rendezvous payload of shandle 's' to the peer over the UD
 * zero-copy path: repeatedly drain the RNDV completion queue (recycling
 * send WQEs and releasing finished sbufs), then post batches of up to
 * 'malloc_count' MTU-sized SEND_WITH_IMM work requests, each carrying
 * the next payload slice and the running sequence number, until all of
 * s->bytes_total has been sent.
 *
 * BUG FIX: the 'sr' and 'sg_entry' arrays were malloc'd but never
 * freed, leaking on every rendezvous push; they are now freed before
 * the function finishes.
 */
void mvdev_rendezvous_push_zcopy(MPIR_SHANDLE * s, mvdev_connection_t *c)
{
    int pkt_count, malloc_count, bytes_to_send, i, ne;
    struct ibv_send_wr *sr, *bad_wr;
    struct ibv_sge *sg_entry;
    struct ibv_wc wc_list[30];
    mv_qp *qp = &(mvdev.rndv_qp[s->hca_index]);

    MV_ASSERT(s->dreg_entry != NULL);

    pkt_count = ceil((double) s->bytes_total / mvparams.mtu);
    /* Cap the WR batch size at 64 entries. */
    malloc_count = MIN(pkt_count, 64);

    D_PRINT("Sending %u of data for shandle %p\n", s->bytes_total, REQ_TO_ID(s));
    D_PRINT("Local addr: %p\n", s->local_address);

    sr = (struct ibv_send_wr *) malloc(sizeof(struct ibv_send_wr) * malloc_count);
    sg_entry = (struct ibv_sge *) malloc(sizeof(struct ibv_sge) * malloc_count);

    D_PRINT("Entering push zcopy (%d)\n", s->bytes_total);

    /* Rotate over the LMC paths for this connection. */
    c->last_ah = (c->last_ah + 1) % mvparams.max_lmc_total;

    while(s->bytes_sent < s->bytes_total) {
        int empty = 0;

        /* Drain completions until the CQ is empty AND enough send WQEs
         * are available to post the next batch. */
        do {
            ne = ibv_poll_cq(mvdev.rndv_cq[s->hca_index], 30, wc_list);
            if(ne < 0) {
                error_abort_all(IBV_RETURN_ERR, "Error polling RNDV CQ\n");
            } else if (ne > 0) {
                for(i = 0; i < ne; i++) {
                    if(wc_list[i].status != IBV_WC_SUCCESS) {
                        error_abort_all(IBV_STATUS_ERR, "got completion with "
                                "error code %d, wr_id: %lu\n",
                                wc_list[i].status, wc_list[i].wr_id);
                    }
                    qp->send_wqes_avail++;

                    /* Non-zero wr_id marks a tracked sbuf descriptor. */
                    if(wc_list[i].wr_id) {
                        mv_sbuf * v = (mv_sbuf *)
                            ((mv_sdescriptor *) wc_list[i].wr_id)->parent;
                        v->left_to_send--;
                        if(0 == v->left_to_send) {
                            v->in_progress = 0;
                            if(0 == v->seqnum) {
                                release_mv_sbuf(v);
                            }
                        }
                    }
                }
                empty = 0;
            } else {
                empty = 1;
            }
        } while(qp->send_wqes_avail < 500 || !empty);

        /* Build the next chained batch of SEND_WITH_IMM WRs. */
        for(i = 0; i < malloc_count; i++) {
            bytes_to_send = MIN(s->bytes_total - s->bytes_sent, mvparams.mtu);

            if(i > 0) {
                sr[i-1].next = &(sr[i]);
            }
            sr[i].next = NULL;
            sr[i].opcode = IBV_WR_SEND_WITH_IMM;
            sr[i].wr_id = 0;
            sr[i].num_sge = 1;
            sr[i].sg_list = &(sg_entry[i]);
            /* Sequence number rides in the immediate data. */
            sr[i].imm_data = s->seqnum++;
            sr[i].send_flags = IBV_SEND_SIGNALED;
            sr[i].wr.ud.ah = c->data_ud_ah[c->last_ah];
            sr[i].wr.ud.remote_qpn = s->remote_qpn;
            sr[i].wr.ud.remote_qkey = 0;

            sg_entry[i].addr = (uintptr_t) ((char *)
                    (s->local_address) + s->bytes_sent);
            sg_entry[i].length = bytes_to_send;
            sg_entry[i].lkey =
                ((dreg_entry *) s->dreg_entry)->memhandle[0]->lkey;

            s->bytes_sent += bytes_to_send;
            qp->send_wqes_avail--;

            if(s->bytes_total == s->bytes_sent) {
                break;
            }
        }

        if(ibv_post_send(qp->qp, sr, &bad_wr)) {
            error_abort_all(IBV_RETURN_ERR,"Error posting to UD RNDV QP (%d) - %lu\n",
                    qp->send_wqes_avail, bad_wr->wr_id );
        }
    }

    MV_ASSERT(s->bytes_total == s->bytes_sent);

    /* BUG FIX: these were previously leaked on every call. */
    free(sr);
    free(sg_entry);

    mvdev_ud_zcopy_finish(s, c->last_ah);
    s->nearly_complete = 1;
}
/*
 * Produce the next flattened SG descriptor from the iterator into *sgd.
 *
 * Returns 1 when a descriptor was produced, 0 when the iteration is
 * exhausted (remainCnt hit zero or the chain ended).  Reference entries
 * (SGD_REFTBL / SGD_REFSGD) are resolved recursively through a
 * sub-iterator; plain entries are copied out with their base address
 * (and, for SGD_VP/SGD_PCTX, the shadow virtual address / rsvd field)
 * advanced by the current offset and their size clipped to remainCnt.
 */
int sgd_iter_get_next( sgd_iter_t* iter, sgd_t* sgd )
{
    MV_U32 sz;

    if( iter->remainCnt == 0 )
        return 0;

    /* Skip descriptors wholly below the current offset. */
    sgd_getsz(iter->sgd,sz);
    while( iter->offset >= sz ) {
        if( sgd_eot(iter->sgd) ) {
            iter->remainCnt = 0;
            return 0;
        }
        iter->offset -= sz;
        sgd_inc(iter->sgd);
        sgd_getsz(iter->sgd,sz);
    }

again:
    if( iter->sgd->flags & (SGD_REFTBL|SGD_REFSGD) ) {
        /* Reference entry: recurse into the referenced table/descriptor
         * through a temporary sub-iterator. */
        sgd_iter_t sub_iter;
        sgd_t* refSgd;
        sgd_tbl_t* refSgdt;
        MV_U32 sub_cnt = sz - iter->offset;
        MV_U32 offRef;

        if( sub_cnt > iter->remainCnt )
            sub_cnt = iter->remainCnt;

        sgd_get_reftbl(iter->sgd,refSgdt);
        if( iter->sgd->flags & SGD_REFTBL )
            refSgd = refSgdt->Entry_Ptr;          /* ref to a whole table */
        else
            refSgd = (sgd_t*) refSgdt;            /* ref to a single sgd  */

        sgd_get_refoff(iter->sgd,offRef);
        sgd_iter_init( &sub_iter, refSgd, offRef + iter->offset, sub_cnt );

        if( !sgd_iter_get_next( &sub_iter, sgd ) ) {
            /* Referenced chain exhausted: move to the next top-level
             * entry and retry, unless this was the last one. */
            if( sgd_eot(iter->sgd) ) {
                iter->remainCnt = 0;
                return 0;
            }
            sgd_inc(iter->sgd);
            iter->offset = 0;
            goto again;
        } else if( sgd->flags & SGD_NEXT_TBL ) {
            MV_ASSERT( MV_FALSE ); // TODO
        } else {
            /* NOTE(review): 'sz' is re-fetched from the produced sgd
             * before clipping, but the unclipped value is subtracted
             * from remainCnt below — if sz > remainCnt this would wrap
             * the unsigned counter.  Presumably callers never request
             * past the mapped total; verify against call sites. */
            sgd_getsz(sgd,sz);
            if( sz > iter->remainCnt )
                sgd_setsz(sgd,iter->remainCnt);
            iter->offset += sz;
            iter->remainCnt -= sz;
        }
        return 1;
    } else {
        /* Plain entry: copy and advance by the intra-entry offset. */
        sgd_copy( sgd, iter->sgd );
        sgd->baseAddr = U64_ADD_U32(sgd->baseAddr,iter->offset);
        if( sgd->flags & SGD_VP ) {
            /* Keep the shadow virtual address in step. */
            ((sgd_vp_t*)sgd)->u.vaddr =
                ((MV_U8*) ((sgd_vp_t*)sgd)->u.vaddr) + iter->offset;
        }
        if (sgd->flags & SGD_PCTX) {
            ((sgd_pctx_t *)sgd)->rsvd += iter->offset;
        }
        sz -= iter->offset;
        sgd_setsz( sgd, sz );
    }

    /* Clip the produced descriptor to the remaining byte budget. */
    if( sz > iter->remainCnt ) {
        sgd_setsz( sgd, iter->remainCnt );
        sz = iter->remainCnt;
    }
    iter->remainCnt -= sz;

    if( sgd_eot(iter->sgd) || iter->remainCnt == 0 ) {
        iter->remainCnt = 0;
        return 1;
    }

    /* Advance to the next entry for the following call. */
    iter->offset = 0;
    sgd_inc(iter->sgd);
    return 1;
}
/*
 * Append a reference descriptor to the SG table.  A reference entry
 * points at another SG table (refTbl == MV_TRUE → SGD_REFTBL) or at a
 * single descriptor (SGD_REFSGD), at byte 'offset' for 'size' bytes.
 *
 * If the table's current tail is a reference to the same object and
 * this request continues exactly where it left off, the tail entry is
 * simply extended instead of appending a new one.
 *
 * Returns 0 on success, -1 when the table lacks the 2 (64-bit pointer
 * build, wide entry) or 1 (32-bit build) free slot(s) required.
 */
int sgdt_append_ref( sgd_tbl_t* sgdt, MV_PVOID ref, MV_U32 offset, MV_U32 size, MV_BOOLEAN refTbl )
{
    sgd_t* sg;

    if( sgdt->Valid_Entry_Count ) {
        sgdt_get_lastsgd(sgdt,sg);
        if( sg->flags&(SGD_REFTBL|SGD_REFSGD) ) {
            MV_PVOID lastRef;
            MV_U32 lastOffset;
            sgd_get_ref(sg, lastRef);
            sgd_get_refoff(sg, lastOffset);
            if( lastRef == ref && lastOffset + sg->size == offset ) {
                // contiguous items! merge into the existing tail entry
                sg->size += size;
                sgdt->Byte_Count += size;
                return 0;
            }
        }
    }

    sg = &sgdt->Entry_Ptr[sgdt->Valid_Entry_Count];
    {
#ifdef USES_64B_POINTER
        /* 64-bit pointers need a wide (two-slot) reference entry. */
        sgd_ref64_t* rsg = (sgd_ref64_t*) sg;
        MV_ASSERT( sgdt->Valid_Entry_Count+2<=sgdt->Max_Entry_Count );
        if( sgdt->Valid_Entry_Count + 2 > sgdt->Max_Entry_Count )
            return -1; // not enough space
        sgdt_clear_eot(sgdt);
        rsg->u.ref = ref;
        /* First of the two slots; the shared increment below takes the
         * second. */
        sgdt->Valid_Entry_Count++;
        rsg->flags = SGD_WIDE | SGD_EOT | (refTbl ? SGD_REFTBL : SGD_REFSGD);
        rsg->flagsEx = SGD_X64;
#else
        sgd_ref32_t* rsg = (sgd_ref32_t*) sg;
        MV_ASSERT( sgdt->Valid_Entry_Count+1<=sgdt->Max_Entry_Count );
        if( sgdt->Valid_Entry_Count + 1 > sgdt->Max_Entry_Count )
            return -1; // not enough space
        sgdt_clear_eot(sgdt);
        rsg->ref = ref;
        rsg->flags = SGD_EOT | (refTbl ? SGD_REFTBL : SGD_REFSGD);
#endif
        rsg->offset = offset;
        rsg->size = size;
        sgdt->Valid_Entry_Count++;
        sgdt->Byte_Count += size;
    }
    return 0;
}
/*
 * Copy 'size' bytes worth of descriptors from the chain at *ppsgd
 * (starting *poff bytes into the current descriptor) into table 'sgdt',
 * advancing the source cursor and intra-descriptor offset as it goes.
 *
 * When 'sgdt' is NULL nothing is appended — the call only advances
 * *ppsgd / *poff past 'size' bytes (a "seek").  On return *ppsgd/*poff
 * identify the first un-consumed byte of the source chain, so repeated
 * calls continue where the previous one stopped.
 */
void sgdt_copy_partial( sgd_tbl_t* sgdt, sgd_t** ppsgd, MV_PU32 poff, MV_U32 size )
{
    MV_U32 sgdsz;
    MV_U32 tmpSize;
    /* Two slots: enough local space for a wide descriptor copy. */
    sgd_t sgd[2];

    while( size ) {
        sgd_getsz( *ppsgd, sgdsz );
        /* The resume offset must fall inside the current descriptor. */
        MV_ASSERT( sgdsz > *poff );
        tmpSize = MV_MIN( size, sgdsz - *poff );

        if( sgdt ) {
            /* Build a trimmed copy of the current descriptor, shifted
             * forward by *poff, and append it to the destination. */
            sgd_copy( sgd, *ppsgd );
            sgd_setsz( sgd, tmpSize );
            if( *poff ) {
                if( sgd->flags & (SGD_REFTBL|SGD_REFSGD) ) {
                    /* Reference entries advance via their ref offset. */
                    MV_U32 refoff;
                    sgd_get_refoff( sgd, refoff );
                    sgd_set_refoff( sgd, refoff+(*poff) );
                } else {
                    sgd->baseAddr = U64_ADD_U32( sgd->baseAddr, (*poff) );
                    if( sgd->flags & SGD_VP ) {
                        /* Keep the shadow virtual address in step. */
                        sgd_vp_t* vp = (sgd_vp_t*) sgd;
                        vp->u.vaddr = ((MV_U8*)vp->u.vaddr) + (*poff);
                    }
                    if (sgd->flags & SGD_PCTX) {
                        sgd_pctx_t *pctx = (sgd_pctx_t *)sgd;
                        pctx->rsvd += (*poff);
                    }
                }
            }
            sgdt_append_sgd( sgdt, sgd );
        }

        /* Consumed the rest of this descriptor: step to the next and
         * reset the intra-descriptor offset; otherwise just advance it. */
        if( size == sgdsz - *poff || tmpSize == sgdsz - *poff ) {
            sgd_inc( *ppsgd );
            (*poff) = 0;
        }
        else
            (*poff) += tmpSize;

        size -= tmpSize;
    }
}
/*
 * SG-walk visitor that flattens one source descriptor into hardware PRD
 * entries in ctx->pSg, translating virtual descriptors to physical
 * ranges via HBA_ModuleGetPhysicalAddress.
 *
 * Returns 1 to continue the walk, 0 to stop (no room left in ctx or a
 * translation failed).  ctx->avail / ctx->itemCnt track the remaining
 * capacity and the entries produced.
 */
static int PRDTablePrepareVisitor(sgd_t* sg, MV_PVOID _ctx)
{
    PRDTableWalkCtx* ctx = (PRDTableWalkCtx*) _ctx;

    if( !ctx->avail )
        return 0;

    if( sg->flags & (SGD_VIRTUAL|SGD_VWOXCTX) ) {
        /* Virtual descriptor: translate (possibly piecewise) into one
         * or more physically contiguous ranges. */
        MV_U32 totalSize, thisSize;
        MV_PVOID vaddr;
        MV_PVOID xctx;
        MV_U64 paddr;

#ifdef _OS_LINUX
        /* Virtual SG entries are not expected on this path in Linux. */
        MV_ASSERT( 0 );
#endif /* _OS_LINUX */

        sgd_getsz( sg, totalSize );
        if( sg->flags & SGD_VIRTUAL ) {
            sgd_get_vaddr( sg, vaddr );
            sgd_get_xctx( sg, xctx );
        } else {
            /* SGD_VWOXCTX: virtual address only, no translation ctx. */
            vaddr = ((sgd_v_t*)sg)->u.vaddr;
            xctx = 0;
        }

        while( 1 ) {
            /* thisSize is in/out: clipped to the contiguous run found. */
            thisSize = totalSize;
            if( !HBA_ModuleGetPhysicalAddress( ctx->pCore, vaddr, xctx,
                                               &paddr, &thisSize ) )
                return 0;

            ctx->avail--;
            ctx->itemCnt++;
            ctx->pSg->flags = 0;
#ifdef ODIN_DRIVER
            ctx->pSg->size = MV_CPU_TO_LE32(thisSize);
#else
            /* Non-Odin hardware encodes the size minus one. */
            ctx->pSg->size = MV_CPU_TO_LE32(thisSize - 1);
#endif
            ctx->pSg->baseAddr.parts.low = MV_CPU_TO_LE32(paddr.parts.low);
            ctx->pSg->baseAddr.parts.high = MV_CPU_TO_LE32(paddr.parts.high);
            ctx->pSg++;

            totalSize -= thisSize;
            if( totalSize == 0 )
                break;
            if( !ctx->avail )
                return 0;
            vaddr = (MV_PVOID)((MV_PU8) vaddr + thisSize);
        }
    } else {
        // including SGD_VP/SGD_PCTX
        /* Already physical: emit a single PRD entry verbatim. */
        ctx->avail--;
        ctx->itemCnt++;
        ctx->pSg->flags = 0;
#ifdef ODIN_DRIVER
        ctx->pSg->size = MV_CPU_TO_LE32(sg->size);
#else
        ctx->pSg->size = MV_CPU_TO_LE32(sg->size - 1);
#endif
        ctx->pSg->baseAddr.parts.low = MV_CPU_TO_LE32(sg->baseAddr.parts.low);
        ctx->pSg->baseAddr.parts.high = MV_CPU_TO_LE32(sg->baseAddr.parts.high);
        ctx->pSg++;
    }

    return 1;
}
/*
 * Walk 'count' bytes of the SG chain starting 'offset' bytes into
 * 'sgd', invoking 'visitor' once per flattened descriptor (clipped to
 * the byte window).  Reference entries are followed recursively.
 *
 * Returns the number of source descriptors traversed at this level, or
 * 0 if the visitor (or a recursive walk) aborted early.
 */
int sg_iter_walk(
    IN sgd_t* sgd,
    IN MV_U32 offset,
    IN MV_U32 count,
    IN sgd_visitor_t visitor,
    IN MV_PVOID context
    )
{
    /* Two slots: local scratch large enough for a wide descriptor. */
    sgd_t sg[2];
    int sg_cnt = 0;
    MV_U32 sz;

    /* Skip descriptors wholly below the starting offset. */
    sgd_getsz(sgd,sz);
    while( sz <= offset ) {
        offset -= sz;
        /* The offset must not run past the end of the chain. */
        MV_ASSERT( !sgd_eot(sgd) );
        sg_cnt++;
        sgd_inc(sgd);
        sgd_getsz(sgd,sz);
    }

    while(1) {
        if( sgd->flags & (SGD_REFTBL|SGD_REFSGD) ) {
            /* Reference entry: recurse into the referenced table or
             * descriptor for the clipped byte range. */
            MV_U32 copy_count = sz - offset;
            MV_U32 offRef;
            sgd_tbl_t* refSgdt;
            sgd_t* refSgd;

            sgd_get_reftbl(sgd,refSgdt);
            if( sgd->flags & SGD_REFTBL )
                refSgd = refSgdt->Entry_Ptr;      /* ref to a whole table */
            else
                refSgd = (sgd_t*) refSgdt;        /* ref to a single sgd  */
            sgd_get_refoff(sgd,offRef);

            if( copy_count > count )
                copy_count = count;
            if( !sg_iter_walk( refSgd, offRef + offset, copy_count,
                               visitor, context ) )
                return 0;
            count -= copy_count;
        } else if( sgd->flags & SGD_NEXT_TBL ) {
            MV_ASSERT( MV_FALSE ); // TODO
        } else {
            /* Plain entry: hand the visitor a copy shifted by 'offset'
             * and clipped to the remaining byte budget. */
            sgd_copy( sg, sgd );
            if( offset ) {
                sg[0].baseAddr = U64_ADD_U32(sg[0].baseAddr,offset);
                if( sgd->flags & SGD_VP ) {
                    /* Keep the shadow virtual address in step. */
                    ((sgd_vp_t*)sg)->u.vaddr =
                        ((MV_U8*) ((sgd_vp_t*)sg)->u.vaddr) + offset;
                }
                if (sgd->flags & SGD_PCTX) {
                    ((sgd_pctx_t *)sg)->rsvd += offset;
                }
                sg[0].size -= offset;
            }
            if( sg[0].size > count )
                sg[0].size = count;
            if( !visitor( sg, context ) )
                return 0;
            count -= sg[0].size;
        }

        sg_cnt++;

        if( sgd_eot(sgd) || count==0 ) {
            /* Hitting end-of-table early would mean the caller asked
             * for more bytes than the chain holds. */
            MV_ASSERT( count == 0 );
            break;
        }
        offset = 0;
        sgd_inc(sgd);
        sgd_getsz(sgd,sz);
    }

    return sg_cnt;
}