void opencl_context::softmax(matrix& operand)
{
    opencl::matrix* impl = (opencl::matrix*)operand.implementation();

    // First pass: exponentiate every element of the matrix.
    _matrix_softmax_exp_kernel->setArg(0, *impl->get());
    _matrix_softmax_exp_kernel->setArg(1, operand.row_count());
    _matrix_softmax_exp_kernel->setArg(2, operand.column_count());

    cl::NDRange offset(0U, 0U);
    cl::NDRange exp_size(operand.row_count(), operand.column_count());
    _command_queue->enqueueNDRangeKernel(*_matrix_softmax_exp_kernel, offset, exp_size);

    // Second pass: normalize each row by its sum, one work-item per row.
    _matrix_softmax_normalize_kernel->setArg(0U, *impl->get());
    _matrix_softmax_normalize_kernel->setArg(1U, operand.row_count());
    _matrix_softmax_normalize_kernel->setArg(2U, operand.column_count());

    cl::NDRange normalize_size(operand.row_count(), 1);
    _command_queue->enqueueNDRangeKernel(*_matrix_softmax_normalize_kernel, offset, normalize_size);

    _command_queue->finish();
}
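The host code above enqueues two kernels: an element-wise exponentiation pass over the whole matrix, then a per-row normalization pass. The project's actual kernel sources are not shown here; the following is a minimal device-side sketch of what they could look like, assuming a row-major float buffer and the hypothetical kernel names matrix_softmax_exp and matrix_softmax_normalize.

/* Hypothetical device-side sketch -- not the project's actual kernels. */
__kernel void matrix_softmax_exp(__global float *m, uint rows, uint cols)
{
    size_t r = get_global_id(0);   /* one work-item per element */
    size_t c = get_global_id(1);
    if (r < rows && c < cols)
        m[r * cols + c] = exp(m[r * cols + c]);
}

__kernel void matrix_softmax_normalize(__global float *m, uint rows, uint cols)
{
    size_t r = get_global_id(0);   /* one work-item per row */
    if (r >= rows)
        return;
    float sum = 0.0f;
    for (uint c = 0; c < cols; ++c)
        sum += m[r * cols + c];
    for (uint c = 0; c < cols; ++c)
        m[r * cols + c] /= sum;
}

A numerically robust softmax would subtract the row maximum before exponentiating; whether the real kernels do so cannot be determined from the host code alone.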
/*
 * SUT: rest_request
 * call_rest throws an error while in HA mode
 * and the failover method finds an active IP so the second
 * call to call_rest does not throw an exception
 */
void test__rest_request__callRestThrowsHAFirstTime(void **state)
{
	GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
	hadoop_uri->host = pstrdup("host1");
	hadoop_uri->port = pstrdup("port1");

	NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
	hadoop_uri->ha_nodes = ha_nodes;
	ha_nodes->nodes = (char *[]){"host1", "host2"};
	ha_nodes->restports = (char *[]){"port1", "port2"};
	ha_nodes->numn = 2;

	ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
	char *restMsg = "empty message";

	expect_any(call_rest, hadoop_uri);
	expect_any(call_rest, client_context);
	expect_any(call_rest, rest_msg);
	will_be_called_with_sideeffect(call_rest, &FirstException, NULL);

	/* the second call from ha_failover */
	expect_any(call_rest, hadoop_uri);
	expect_any(call_rest, client_context);
	expect_any(call_rest, rest_msg);
	will_be_called(call_rest);

	/* test */
	rest_request(hadoop_uri, client_context, restMsg);

	pfree(hadoop_uri);
	pfree(client_context);
}

/*
 * SUT: rest_request
 * call_rest throws an error while in HA mode
 * and the failover method finds an active IP so the second
 * call to call_rest is issued on the second IP. This call also throws
 * an exception - but this time the exception is not caught.
 */
void test__rest_request__callRestThrowsHASecondTime(void **state)
{
	GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
	hadoop_uri->host = pstrdup("host1");
	hadoop_uri->port = pstrdup("port1");

	NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
	hadoop_uri->ha_nodes = ha_nodes;
	ha_nodes->nodes = (char *[]){"host1", "host2"};
	ha_nodes->restports = (char *[]){"port1", "port2"};
	ha_nodes->numn = 2;

	ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
	char *restMsg = "empty message";

	expect_any(call_rest, hadoop_uri);
	expect_any(call_rest, client_context);
	expect_any(call_rest, rest_msg);
	will_be_called_with_sideeffect(call_rest, &FirstException, NULL);

	/* the second call from ha_failover */
	expect_any(call_rest, hadoop_uri);
	expect_any(call_rest, client_context);
	expect_any(call_rest, rest_msg);
	will_be_called_with_sideeffect(call_rest, &SecondException, NULL);

	/* test */
	PG_TRY();
	{
		rest_request(hadoop_uri, client_context, restMsg);
	}
	PG_CATCH();
	{
		pfree(hadoop_uri->host);
		pfree(hadoop_uri->port);
		pfree(hadoop_uri);
		pfree(client_context);

		CurrentMemoryContext = 1;
		ErrorData *edata = CopyErrorData();

		/* Validate the type of expected error */
		assert_string_equal(edata->message, "second exception");
		/* the first exception was caught by rest_request() */
		return;
	}
	PG_END_TRY();

	assert_true(false);
}

/*
 * SUT: rest_request
 * the first time call_rest is called we succeed, since the first IP is valid
 * No exceptions are thrown
 */
void test__rest_request__callRestHASuccessFromTheFirstCall(void **state)
{
	GPHDUri *hadoop_uri = (GPHDUri*) palloc0(sizeof(GPHDUri));
	hadoop_uri->host = pstrdup("host1");
	hadoop_uri->port = pstrdup("port1");

	NNHAConf *ha_nodes = (NNHAConf*) palloc0(sizeof(NNHAConf));
	hadoop_uri->ha_nodes = ha_nodes;
	ha_nodes->nodes = (char *[]){"host1", "host2"};
	ha_nodes->restports = (char *[]){"port1", "port2"};
	ha_nodes->numn = 2;

	ClientContext* client_context = (ClientContext*) palloc0(sizeof(ClientContext));
	char *restMsg = "empty message";

	expect_any(call_rest, hadoop_uri);
	expect_any(call_rest, client_context);
	expect_any(call_rest, rest_msg);
	will_be_called(call_rest);

	/* test */
	rest_request(hadoop_uri, client_context, restMsg);

	pfree(hadoop_uri->host);
	pfree(hadoop_uri->port);
	pfree(hadoop_uri);
	pfree(client_context);
}

void test__normalize_size(void **state)
{
	float4 result = normalize_size(10000000, "B");
	assert_int_equal(result, 10000000);

	result = normalize_size(10000000, "KB");
	assert_int_equal(result, 10240000000);

	result = normalize_size(500, "MB");
	assert_int_equal(result, 524288000);

	result = normalize_size(10, "GB");
	assert_int_equal(result, 10737418240);

	result = normalize_size(10000, "TB");
	assert_int_equal(result, 10995116277760000);
}

int main(int argc, char *argv[])
{
	cmockery_parse_arguments(argc, argv);

	const UnitTest tests[] = {
		unit_test(test__rest_request__callRestThrowsNoHA),
		unit_test(test__rest_request__callRestThrowsHAFirstTime),
		unit_test(test__rest_request__callRestThrowsHASecondTime),
		unit_test(test__rest_request__callRestHASuccessFromTheFirstCall),
		unit_test(test__normalize_size)
	};

	return run_tests(tests);
}
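The expected values in test__normalize_size imply binary multipliers (1 KB = 1024 B, 1 MB = 1024 KB, and so on). A minimal sketch of a conversion routine consistent with those assertions might look like the following; normalize_size_sketch is a hypothetical name used for illustration, not PXF's actual implementation.

#include <string.h>

/* Hypothetical sketch, not the real normalize_size(): converts a size plus a
 * unit string ("B", "KB", "MB", "GB", "TB") into bytes using 1024-based
 * multipliers, matching the values asserted in test__normalize_size. */
static double normalize_size_sketch(double size, const char *unit)
{
	double factor = 1.0;

	if (strcmp(unit, "KB") == 0)
		factor = 1024.0;
	else if (strcmp(unit, "MB") == 0)
		factor = 1024.0 * 1024.0;
	else if (strcmp(unit, "GB") == 0)
		factor = 1024.0 * 1024.0 * 1024.0;
	else if (strcmp(unit, "TB") == 0)
		factor = 1024.0 * 1024.0 * 1024.0 * 1024.0;

	return size * factor;               /* e.g. 500 MB -> 524288000 bytes */
}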
/* Handle the TX ring */
static int dev_pos_oc3_handle_txring(struct pos_oc3_data *d)
{
   u_char pkt[POS_OC3_MAX_PKT_SIZE],*pkt_ptr;
   m_uint32_t clen,tot_len,norm_len;
   m_uint32_t tx_start,addr;
   struct tx_desc txd0,ctxd,*ptxd;
   int i,done = FALSE;

   if ((d->tx_start == 0) || (d->nio == NULL))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = d->tx_current;
   ptxd = &txd0;
   txdesc_read(d,d->tx_current,ptxd);

   /* If we don't own the descriptor, we cannot transmit */
   if (!(txd0.tdes[0] & POS_OC3_TXDESC_OWN))
      return(FALSE);

#if DEBUG_TRANSMIT
   POS_LOG(d,"pos_oc3_handle_txring: 1st desc: tdes[0]=0x%x, tdes[1]=0x%x\n",
           ptxd->tdes[0],ptxd->tdes[1]);
#endif

   pkt_ptr = pkt;
   tot_len = 0;
   i = 0;

   do {
#if DEBUG_TRANSMIT
      POS_LOG(d,"pos_oc3_handle_txring: loop: tdes[0]=0x%x, tdes[1]=0x%x\n",
              ptxd->tdes[0],ptxd->tdes[1]);
#endif

      if (!(ptxd->tdes[0] & POS_OC3_TXDESC_OWN)) {
         POS_LOG(d,"pos_oc3_handle_txring: descriptor not owned!\n");
         return(FALSE);
      }

      clen = ptxd->tdes[0] & POS_OC3_TXDESC_LEN_MASK;

      /* Be sure that the length is not null */
      if (clen != 0) {
         addr = ptxd->tdes[1];
         norm_len = normalize_size(clen,4,0);
         physmem_copy_from_vm(d->vm,pkt_ptr,addr,norm_len);
         mem_bswap32(pkt_ptr,norm_len);
      }

      pkt_ptr += clen;
      tot_len += clen;

      /* Clear the OWN bit if this is not the first descriptor */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,d->tx_current,0);

      /* Go to the next descriptor */
      txdesc_set_next(d,ptxd);

      /* Copy the next txring descriptor */
      if (ptxd->tdes[0] & POS_OC3_TXDESC_CONT) {
         txdesc_read(d,d->tx_current,&ctxd);
         ptxd = &ctxd;
         i++;
      } else
         done = TRUE;
   } while(!done);

   if (tot_len != 0) {
#if DEBUG_TRANSMIT
      POS_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
              tot_len,txd0.tdes[0]);
      mem_dump(log_file,pkt,tot_len);
#endif
      /* send it on the wire */
      netio_send(d->nio,pkt,tot_len);
   }

   /* Clear the OWN flag of the first descriptor */
   txd0.tdes[0] &= ~POS_OC3_TXDESC_OWN;
   physmem_copy_u32_to_vm(d->vm,tx_start,txd0.tdes[0]);

   /* Interrupt on completion */
   pci_dev_trigger_irq(d->vm,d->pci_dev);
   return(TRUE);
}