Example no. 1
/* Completion-queue hook: when an RDMA write completes, find the matching
 * posted-length record and subtract it from the HCA's pending counters. */
void __pc_ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
{
  struct pc_length *len;

  /* Nothing to do until the HCA bookkeeping has been initialized. */
  if (init_hcas_q == 0) {
    return;
  }

  /* Note: only the first work completion is inspected, so callers are
   * expected to hand completions to this hook one at a time. */
  if (wc->opcode == IBV_WC_RDMA_WRITE) {
    lq_init_it(&pc_q);

    while ((len = (struct pc_length *) lq_next(&pc_q)) != NULL) {
      struct pc_hca *hca;
      uint16_t lid;

      if (len->ctx == cq->context) {
        lid = get_lid(cq->context);
        hca = get_pc_hca(lid);

        /* Retire the pending descriptor: one fewer outstanding write,
         * and its byte count is no longer in flight. */
        pthread_mutex_lock(&pc_mutex);
        hca->co->pdg_num--;
        hca->co->pdg_size -= len->length;
        pthread_mutex_unlock(&pc_mutex);

        lq_remove(&pc_q, len);
        free(len);
        lq_fin_it(&pc_q);

#ifdef DEBUG
        fprintf(stderr, "[ %f ] post_cq   : pdg_num: %lu , pdg_size: %lu \n",
                get_dtime(), hca->co->pdg_num, hca->co->pdg_size);
#ifdef DETAIL
        struct ibv_port_attr pattr;
        ibv_query_port(cq->context, 1, &pattr);
        fprintf(stderr, "\tcq:%p, context:%p(more_ops:%p, abi_compat:%p, "
                "num_comp_vectors:%d, slid:%u):\n",
                cq, cq->context, cq->context->more_ops,
                cq->context->abi_compat, cq->context->num_comp_vectors,
                pattr.lid);
        fprintf(stderr, "\twr_id:%lu, status:%d, opcode:%d, vendor_err:%u, "
                "byte_len:%u, imm_data:%u, qp_num:%u, src_qp:%u, wc_flags:%d, "
                "pkey_index:%u, slid:%u, sl:%u, dlid_path_bits:%u, num_entries:%d\n",
                wc->wr_id, wc->status, wc->opcode, wc->vendor_err, wc->byte_len,
                wc->imm_data, wc->qp_num, wc->src_qp, wc->wc_flags,
                wc->pkey_index, wc->slid, wc->sl, wc->dlid_path_bits, num_entries);
#endif
#endif
        return;
      }
    }
    lq_fin_it(&pc_q);
  }
}
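
Since the hook only looks at a single work completion, it would typically sit behind the real verbs poll call. The following is a minimal sketch of such a wrapper, not part of the original source: the name pc_ibv_poll_cq is an assumption, and it only relies on the standard libibverbs ibv_poll_cq() call plus the hook defined above.

#include <infiniband/verbs.h>

void __pc_ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);

/* Hypothetical wrapper (illustration only): poll the CQ with the real
 * verbs call, then feed each reaped completion to the accounting hook
 * one entry at a time, matching the hook's single-entry assumption. */
int pc_ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
{
  int n = ibv_poll_cq(cq, num_entries, wc);  /* real libibverbs poll */
  for (int i = 0; i < n; i++)
    __pc_ibv_poll_cq(cq, 1, &wc[i]);         /* per-entry accounting */
  return n;
}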
Example no. 2
static int issue_lq(int core, int thread, int quant)
{
	struct linked_list_t *lq = THREAD.lq;
	struct uop_t *load;

	/* Debug */
	if (esim_debug_file)
		uop_lnlist_check_if_ready(lq);
	
	/* Process lq */
	linked_list_head(lq);
	while (!linked_list_is_end(lq) && quant)
	{
		/* Get element from load queue. If it is not ready, go to the next one */
		load = linked_list_get(lq);
		if (!load->ready && !rf_ready(load))
		{
			linked_list_next(lq);
			continue;
		}
		load->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(THREAD.data_mod, load->phy_addr))
		{
			linked_list_next(lq);
			continue;
		}

		/* Remove from load queue */
		assert(load->uinst->opcode == x86_uinst_load);
		lq_remove(core, thread);

		/* Access memory system */
		mod_access(THREAD.data_mod, mod_entry_cpu, mod_access_read,
			load->phy_addr, NULL, CORE.eventq, load);

		/* The cache system will place the load at the head of the
		 * event queue when it is ready. For now, mark "in_eventq" to
		 * prevent the uop from being freed. */
		load->in_eventq = 1;
		load->issued = 1;
		load->issue_when = cpu->cycle;
		
		/* Instruction issued */
		CORE.issued[load->uinst->opcode]++;
		CORE.lsq_reads++;
		CORE.rf_int_reads += load->ph_int_idep_count;
		CORE.rf_fp_reads += load->ph_fp_idep_count;
		THREAD.issued[load->uinst->opcode]++;
		THREAD.lsq_reads++;
		THREAD.rf_int_reads += load->ph_int_idep_count;
		THREAD.rf_fp_reads += load->ph_fp_idep_count;
		cpu->issued[load->uinst->opcode]++;
		quant--;
		
		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(load->phy_addr, mmu_access_read);

		/* Debug */
		esim_debug("uop action=\"update\", core=%d, seq=%llu,"
			" stg_issue=1, in_lsq=0, issued=1\n",
			load->core, (long long unsigned) load->di_seq);
	}
	
	return quant;
}
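
issue_lq() returns the unspent issue quantum, which lets a per-cycle driver share one issue-width budget across several queues. Below is a minimal sketch of such a driver, not taken from the original source: issue_iq and issue_sq are assumed companion functions with the same (core, thread, quant) contract as issue_lq above.

/* Hypothetical per-thread issue driver (illustration only): drain loads
 * first, then pass whatever budget remains to the other queues. */
static int issue_thread(int core, int thread, int issue_width)
{
  int quant = issue_width;            /* per-cycle issue budget */

  quant = issue_lq(core, thread, quant);  /* loads, as defined above */
  quant = issue_sq(core, thread, quant);  /* assumed: stores */
  quant = issue_iq(core, thread, quant);  /* assumed: remaining uops */

  return quant;  /* slots left unused this cycle */
}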