Example #1
void MyServer::incomingConnection(qintptr socketDescriptor)
{
    MyClient *client = new MyClient(_loader, this);
    client->setSocket(socketDescriptor);
    _clients.push_back(client);
    DeleteNotActive();
}
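The setSocket() call is where the raw descriptor is typically wrapped in a QTcpSocket. A minimal sketch of such a slot, assuming MyClient keeps a QTcpSocket* member named _socket and slots onReadyRead/onDisconnected (all three names are assumptions, not taken from the snippet):

void MyClient::setSocket(qintptr descriptor)
{
    // Adopt the OS-level descriptor accepted by MyServer.
    _socket = new QTcpSocket(this);
    _socket->setSocketDescriptor(descriptor);
    connect(_socket, &QTcpSocket::readyRead,    this, &MyClient::onReadyRead);
    connect(_socket, &QTcpSocket::disconnected, this, &MyClient::onDisconnected);
}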
Example #2
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    MyClient w;
    w.show();

    return a.exec();
}
Example #3
	/**
	 * Called for each advertising BLE server.
	 */
	void onResult(BLEAdvertisedDevice advertisedDevice) {
		ESP_LOGD(LOG_TAG, "Advertised Device: %s", advertisedDevice.toString().c_str());

		if (advertisedDevice.haveServiceUUID() && advertisedDevice.isAdvertisingService(serviceUUID)) {
			advertisedDevice.getScan()->stop();

			ESP_LOGD(LOG_TAG, "Found our device!  address: %s", advertisedDevice.getAddress().toString().c_str());
			MyClient* pMyClient = new MyClient();
			pMyClient->setStackSize(18000);
			pMyClient->start(new BLEAddress(*advertisedDevice.getAddress().getNative()));
		} // Found our server
	} // onResult
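onResult() only fires once a scan is running; the setup that installs the callback usually looks roughly like this (the function name startScanning and the callback class name MyAdvertisedDeviceCallbacks are assumptions based on the snippet):

void startScanning() {
	BLEDevice::init("");
	BLEScan* pScan = BLEDevice::getScan();
	// MyAdvertisedDeviceCallbacks is the (assumed) class that holds onResult() above.
	pScan->setAdvertisedDeviceCallbacks(new MyAdvertisedDeviceCallbacks());
	pScan->setActiveScan(true);  // active scanning also fetches the scan response
	pScan->start(30);            // scan for 30 seconds
}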
Example #4
int main(int argc, char *argv[])
{
    MyClient client;
    client.open();

    while (true)
    {
        ACE_Reactor::instance()->handle_events();
    }

    return 0;
}
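client.open() presumably registers the client with the singleton reactor so that handle_events() can dispatch to it. A minimal sketch of that pattern, assuming MyClient derives from ACE_Event_Handler (connection setup omitted):

class MyClient : public ACE_Event_Handler
{
public:
    bool open()
    {
        // Ask the reactor to call handle_input() whenever our handle is readable.
        return ACE_Reactor::instance()->register_handler(
                   this, ACE_Event_Handler::READ_MASK) == 0;
    }

    virtual int handle_input(ACE_HANDLE fd)
    {
        // ... read from fd and process the data ...
        return 0;
    }

    virtual ACE_HANDLE get_handle() const { return peer_handle_; }

private:
    ACE_HANDLE peer_handle_;  // assumed member holding the connected socket
};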
Example #5
void Initialize()
{
    MyClient* client = new MyClient;
    client->initialize( 1, Nesting(), ClientStackSize[0] );
    MyFactory factory;
    memset( &factory, 0, sizeof(factory) );
    factory.open();
    MyFactory::server_type* server; 
    factory.make_server( server, *client );
    client->set_server( server );
    n_available_hw_threads = server->default_concurrency();
    client->expect_close_connection = true;
    server->request_close_connection();
    // Client deletes itself when it sees call to acknowledge_close_connection from server.
    factory.close();
}
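The comment above refers to a shutdown handshake: after request_close_connection(), the server makes one final call back into the client, which is the safe point for self-deletion. A sketch of that callback, assuming only the expect_close_connection flag visible in the snippet:

void MyClient::acknowledge_close_connection() {
    // Last call the server will ever make into this client.
    ASSERT( expect_close_connection, "unexpected close acknowledgement" );
    delete this;
}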
Example #6
void MyApp::OnIdle(wxIdleEvent& event)
{
    if ( m_client )
        m_client->StartNextTestIfNecessary();

    event.Skip();
}
Example #7
int reactor_test(int argc, char *argv[])
{
    MyClient client;
    bool ret = client.open();
    if (!ret) return -1;

    // Pump the reactor a bounded number of times, sleeping between events.
    int count = 0;
    while (true)
    {
        ACE_Reactor::instance()->handle_events();
        ++count;
        if (count > argc) break;
        else ACE_OS::sleep(1);
    }

    return 0;
}
Example #8
void MyNetNode::doIO()
{
    while (doIOOver)
    {
        mynet::EventBase *evt = pool.pullEvent();
        if (evt)
        {
            if (evt->isErr())
            {
                MyClient *conn = (MyClient*)evt->target;
                if (conn)
                {
                    printf("删除网络连接%p\n", evt->target);
                    conn->destroy();
                }
                continue;
            }
            if (evt->isOut())
            {
                mynet::Connection *conn = (mynet::Connection*) evt->target;
                if (conn)
                {
                    conn->doSend(evt);
                }
                printf("out\n");
            }
            if (evt->isIn())
            {
                mynet::Connection *conn = (mynet::Connection*) evt->target;
                if (conn)
                {
                    conn->doRead(evt);
                }
                else
                {
                    printf("error!!!!!\n");
                }
                printf("in\n");
            }
        }
    }
    printf("over\n");
}
Example #9
void DoClientSpecificVerification( MyServer&, int n_thread )
{
    MyClient* client = new MyClient;
    client->initialize( n_thread, Nesting(), ClientStackSize[0] );
    MyFactory factory;
    memset( &factory, 0, sizeof(factory) );
    MyFactory::status_type status = factory.open();
    ASSERT( status!=MyFactory::st_not_found, "could not find RML library" );
    ASSERT( status!=MyFactory::st_incompatible, NULL );
    ASSERT( status==MyFactory::st_success, NULL );
    MyFactory::server_type* server; 
    status = factory.make_server( server, *client );
    ASSERT( status==MyFactory::st_success, NULL );
    client->set_server( server );
    client->expect_close_connection = true;
    server->request_close_connection();
    // Client deletes itself when it sees call to acknowledge_close_connection from server.
    factory.close();
}
Example #10
// The `main program' equivalent: create the client and try to connect
// to the server; returning false here terminates the application.
bool MyApp::OnInit()
{
    if ( !wxApp::OnInit() )
        return false;

    // Create a new client
    m_client = new MyClient;
    bool retval = m_client->Connect("localhost", "4242", "IPC TEST");

    wxLogMessage("Client host=\"localhost\" port=\"4242\" topic=\"IPC TEST\" %s",
                 retval ? "connected" : "failed to connect");

    return retval;
}
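Connect() is not a wxClient method; it is presumably a thin wrapper over wxClient::MakeConnection(). A sketch of what it likely does (the m_connection member is an assumption):

bool MyClient::Connect(const wxString& host,
                       const wxString& service,
                       const wxString& topic)
{
    // MakeConnection() returns NULL on failure, or a wxConnectionBase-derived
    // object representing the live conversation with the server.
    m_connection = MakeConnection(host, service, topic);
    return m_connection != NULL;
}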
Example #11
extern "C" void Cplusplus() {
    MyClient client;
    Version = client.version();
    REPORT("done\n");
}
Example #12
void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team(server,size_t(max_thread));
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 is same code as the default, but has effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // Protocol requires that master wait until workers have called "done_processing"
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK("client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
}
Example #13
void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) {
    REMARK("client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(),n_thread);
    // Exercise independent_thread_number_changed, even for zero values.
    server.independent_thread_number_changed( n_extra );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    // Experiments indicate that when oversubscribing, the main thread should wait a little
    // while for the RML worker threads to do some work. 
    if( checker ) {
        // Give RML time to respond to change in number of threads.
        Harness::Sleep(1);
        for( int k=0; k<n_thread; ++k )
            client.job_array[k].processing_count = 0;
    }
    //close the gate to keep worker threads from returning to RML until a snapshot is taken
    client.close_the_gate();
    server.adjust_job_count_estimate( n_thread );
    int n_used = 0;
    if( checker ) {
        Harness::Sleep(100);
        for( int k=0; k<n_thread; ++k )
            if( client.job_array[k].processing_count )
                ++n_used;
    }
    // open the gate
    client.open_the_gate();
    // Logic further below presumes that jobs never starve, so undo previous call
    // to independent_thread_number_changed before waiting on those jobs.
    server.independent_thread_number_changed( -n_extra );
    REMARK("client %d: wait for each job to be processed at least once\n",client.client_id());
    // Calculate the number of jobs that are expected to get threads.
    int expected = n_thread;
    // Wait for expected number of jobs to be processed.
#if RML_USE_WCRM
    int default_concurrency = server.default_concurrency();
    if( N_TestConnections>0 ) {
        if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3 && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) {
#endif /* RML_USE_WCRM */
            for(;;) {
                int n = 0;
                for( int k=0; k<n_thread; ++k )
                    if( client.job_array[k].processing_count!=0 )
                        ++n;
                if( n>=expected ) break;
                server.yield();
            }
#if RML_USE_WCRM
        } else if( n_thread>0 ) {
            for( int m=0; m<20; ++m ) {
                int n = 0;
                for( int k=0; k<n_thread; ++k ) 
                    if( client.job_array[k].processing_count!=0 ) 
                        ++n;
                if( n>=expected ) break;
                Harness::Sleep(1);
            }
        }
    }
#endif /* RML_USE_WCRM */
    server.adjust_job_count_estimate(-n_thread);
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
    // Give RML some time to respond
    if( checker ) {
        Harness::Sleep(1);
        checker->check_number_of_threads_delivered( n_used, n_thread, n_extra );
    }
}
Example #14
void MyServer::incomingConnection(qintptr handle)
{
    MyClient *client = new MyClient(this);
    client->SetSocket(handle);
}