void handlesession(){ //handle a session once it's established unsigned int rsize,strncmpval; rsize=recv_size(); printf("**rsz=%d\n",rsize); if (rsize>0){ if (recv0(buf,rsize/*min(24,rsize)*/)>0){ //get enough characters to distinguish the request buf[rsize]=0; printf("%s\n",buf); if (strncmp((char *)buf,"POST /",6)==0){ bagelspturn(); //players turn with type=post } else if (strncmp((char *)buf,"GET /favicon",12)==0){ sendnak(); //no favicon here } else if (strncmp((char *)buf,"GET /?G=",8)==0){ bagelsturn(); //give player his turn } else if (strncmp((char *)buf,"GET /",5)==0){ bagelsinit(0); //initialize game, send the form } else{ printf("\nmystery meat\n"); bagelsinit(0); //initialize game, send the form } } } if (rsize>0) flush(rsize); //get rid of the received data disconnect0(); //in any case, we're done here printf("done\n>\n"); }
inline void W5100_poll(void) { uint8_t sockstat; uint16_t rsize; uint8_t sockreg = 0; sockstat = SPI_Read(W5100_S0_SR); switch (sockstat) { case SOCK_CLOSED: if (socket(sockreg, MR_TCP, W5100_TCP_PORT) > 0) { // Listen to Socket 0 if (listen(sockreg) <= 0) _delay_ms(1); } break; case SOCK_ESTABLISHED: rsize = recv_size(); if (rsize > 0) { // Now read the client Request if (recv(sockreg, W5100_buffer, rsize) <= 0) break; // Create the HTTP Response Header http_header_xml(W5100_buffer); //Start with a http header D.get_xml_data(W5100_buffer); // sensor data if (send(sockreg, W5100_buffer, strlen((char *) W5100_buffer)) <= 0) break; // send out _delay_us(1); disconnect(sockreg); } else { _delay_us(10); // Wait for request } break; case SOCK_FIN_WAIT: case SOCK_CLOSING: case SOCK_TIME_WAIT: case SOCK_CLOSE_WAIT: case SOCK_LAST_ACK: // Force to close the socket close(sockreg); break; }; };
/*
 * Default POST handler: read Content-Length, receive that many bytes of
 * body, NUL-terminate it, parse it as form parameters into the session's
 * post hash, then fall through to the default GET handler.
 *
 * Responses sent on failure: 411 (no Content-Length), 400 (bad length,
 * receive failure, or unparsable body).
 *
 * Fix: a negative Content-Length (including LONG_MIN underflow from
 * strtol) previously slipped past the LONG_MAX-only check and was handed
 * to recv_size() as a size; now rejected with 400.
 */
AW_Result aw_default_post (AW_Session *sess, AW_Method method) {
    const AW_Char *len_str, *data_str;
    AW_Size len;
    AW_Result r;

    len_str = aw_session_get_request(sess, "Content-Length");
    if (!len_str) {
        AW_ERROR(("cannot get content length"));
        send_resp(sess, 411);
        return AW_ERR_SYNTAX;
    }

    len = strtol(len_str, NULL, 10);
    /* Reject strtol overflow (LONG_MAX) and any negative value
     * (covers LONG_MIN underflow as well). */
    if (len < 0 || len == LONG_MAX) {
        AW_ERROR(("illegal content length"));
        send_resp(sess, 400);
        return AW_ERR_SYNTAX;
    }

    r = recv_size(sess, len);
    if (r <= 0) {
        AW_ERROR(("receive post data failed"));
        send_resp(sess, 400);
        return AW_ERR_SYNTAX;
    }

    /* NUL-terminate the body so it can be parsed as a C string. */
    append_char(sess, 0);
    data_str = sess->buf + sess->pos;

    if ((r = aw_parse_params(data_str, &sess->post_hash)) != AW_OK) {
        send_resp(sess, 400);
        return r;
    }

    /* POST renders the same page as GET once parameters are absorbed. */
    return aw_default_get(sess, AW_METHOD_GET);
}
// Communicate field data for all entities in the mesh's communication list.
// The owning process packs each entity's field bytes and sends them to every
// sharing/ghosting process; non-owners receive and unpack from the owner, so
// every copy ends up with the owner's values.
void communicate_field_data(
  const stk::mesh::BulkData & mesh ,
  const std::vector< const stk::mesh::FieldBase * > & fields )
{
  if ( fields.empty() ) { return; }

  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );
  std::vector<unsigned> procs ;

  // First pass: accumulate per-processor byte counts. Owners add to
  // send_size for each communicating proc; non-owners add to recv_size
  // indexed by the owning rank.
  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;

    // Total bytes of all requested fields on this entity.
    unsigned size = 0 ;
    for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
          fi = fields.begin() ; fi != fields.end() ; ++fi ) {
      const stk::mesh::FieldBase & f = **fi ;
      size += stk::mesh::field_data_size( f , e );
    }

    if ( size ) {
      if ( e.owner_rank() == parallel_rank ) { // owner sends
        stk::mesh::comm_procs( e , procs );
        for ( std::vector<unsigned>::iterator
              ip = procs.begin() ; ip != procs.end() ; ++ip ) {
          send_size[ *ip ] += size ;
        }
      }
      else { // non-owner receives
        recv_size[ e.owner_rank() ] += size ;
      }
    }
  }

  // Allocate send and receive buffers:
  stk::CommAll sparse ;
  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    // parallel_size / 4 is CommAll's dense-vs-sparse exchange hint.
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing: NOTE the traversal order (entities, then fields) must
  // match the sizing pass exactly so buffer offsets line up.
  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;
    if ( e.owner_rank() == parallel_rank ) {
      stk::mesh::comm_procs( e , procs );
      for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
            fi = fields.begin() ; fi != fields.end() ; ++fi ) {
        const stk::mesh::FieldBase & f = **fi ;
        const unsigned size = stk::mesh::field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(stk::mesh::field_data( f , e ));
          // The same bytes go to every sharing/ghosting processor.
          for ( std::vector<unsigned>::iterator
                ip = procs.begin() ; ip != procs.end() ; ++ip ) {
            stk::CommBuffer & b = sparse.send_buffer( *ip );
            b.pack<unsigned char>( ptr , size );
          }
        }
      }
    }
  }

  // Communicate:
  sparse.communicate();

  // Unpack for recv: non-owned entities overwrite local field storage with
  // the bytes received from the owning processor.
  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;
    if ( e.owner_rank() != parallel_rank ) {
      for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
            fi = fields.begin() ; fi != fields.end() ; ++fi ) {
        const stk::mesh::FieldBase & f = **fi ;
        const unsigned size = stk::mesh::field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(stk::mesh::field_data( f , e ));
          stk::CommBuffer & b = sparse.recv_buffer( e.owner_rank() );
          b.unpack<unsigned char>( ptr , size );
        }
      }
    }
  }
}
// Communicate field data restricted to one Ghosting: only comm-map entries
// whose ghost_id matches this ghosting participate. Owners pack and send to
// each ghost-holding proc; ghost copies receive and unpack from the owner.
void communicate_field_data(
  const Ghosting & ghosts ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const BulkData & mesh = BulkData::get(ghosts);
  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    const bool owned = e.owner_rank() == parallel_rank ;

    // Total bytes of all requested fields on this entity.
    unsigned e_size = 0 ;
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;
      e_size += field_data_size( f , e );
    }

    // Only comm-map entries belonging to this ghosting count toward sizes.
    for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
      if ( ghosts.ordinal() == ec->ghost_id ) {
        if ( owned ) {
          send_size[ ec->proc ] += e_size ;
        }
        else {
          recv_size[ ec->proc ] += e_size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:
  CommAll sparse ;
  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    // parallel_size / 4 is CommAll's dense-vs-sparse exchange hint.
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing: traversal order must match the sizing pass exactly so
  // buffer offsets line up.
  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() == parallel_rank ) {
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));
          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.send_buffer( ec->proc );
              b.pack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }

  // Communicate:
  sparse.communicate();

  // Unpack for recv: ghost copies overwrite local field storage with the
  // owner's bytes, one unpack per matching comm-map entry.
  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() != parallel_rank ) {
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));
          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.recv_buffer( ec->proc );
              b.unpack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }
}
// Communicate field data for explicit (entity, processor) send/receive
// lists: each pair in `domain` is packed and sent to its processor, each
// pair in `range` is received from its processor and unpacked.
void communicate_field_data(
  ParallelMachine machine,
  const std::vector<EntityProc> & domain ,
  const std::vector<EntityProc> & range ,
  const std::vector<const FieldBase *> & fields)
{
  if ( fields.empty() ) { return; }

  const unsigned parallel_size = parallel_machine_size( machine );
  const unsigned parallel_rank = parallel_machine_rank( machine );

  // Distinct domain/range vectors: every pair participates (asymmetric).
  // When domain and range alias the same vector, only owner-consistent
  // pairs participate (send what we own, receive what the sender owns).
  const bool asymmetric = & domain != & range ;

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  std::vector<EntityProc>::const_iterator i ;

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;
    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      send_size[ p ] += e_size ;
    }
  }

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;
    if ( asymmetric || p == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      recv_size[ p ] += e_size ;
    }
  }

  // Allocate send and receive buffers:
  CommAll sparse ;
  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    // parallel_size / 4 is CommAll's dense-vs-sparse exchange hint.
    sparse.allocate_buffers( machine, parallel_size / 4 , s_size, r_size);
  }

  // Pack for send: traversal order must match the sizing pass exactly so
  // buffer offsets line up.
  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;
    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      CommBuffer & b = sparse.send_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:
  sparse.communicate();

  // Unpack for recv: overwrite local field storage with received bytes.
  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;
    if ( asymmetric || p == e.owner_rank() ) {
      CommBuffer & b = sparse.recv_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.unpack<unsigned char>( ptr , size );
        }
      }
    }
  }
}
//----------------------------------------------------------------------
// Communicate field data for one Ghosting (bucket-based variant): owners
// pack and send each entity's field bytes to every proc holding a ghost
// copy with a matching ghost_id; ghost copies receive and unpack from the
// one-and-only owner. Packing (phase 0) and unpacking (phase 1) share a
// single loop body so the field/entity traversal order matches exactly.
void communicate_field_data(
  const Ghosting & ghosts ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const BulkData & mesh = ghosts.mesh();
  const int parallel_size = mesh.parallel_size();
  const int parallel_rank = mesh.parallel_rank();
  const unsigned ghost_id = ghosts.ordinal();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  for ( EntityCommListInfoVector::const_iterator
        i = mesh.internal_comm_list().begin() ,
        iend = mesh.internal_comm_list().end(); i != iend ; ++i ) {
    Entity e = i->entity;
    const MeshIndex meshIdx = mesh.mesh_index(e);
    const unsigned bucketId = meshIdx.bucket->bucket_id();

    const bool owned = i->owner == parallel_rank ;

    // Total bytes of all rank-matching fields on this entity.
    unsigned e_size = 0 ;
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;
      if(is_matching_rank(f, *meshIdx.bucket)) {
        e_size += field_bytes_per_entity( f , bucketId );
      }
    }

    if (e_size == 0) {
      continue;
    }

    const EntityCommInfoVector& infovec = i->entity_comm->comm_map;
    PairIterEntityComm ec(infovec.begin(), infovec.end());
    if ( owned ) {
      // Owner sends one copy per matching ghost entry.
      for ( ; ! ec.empty() ; ++ec ) {
        if (ec->ghost_id == ghost_id) {
          send_size[ ec->proc ] += e_size ;
        }
      }
    }
    else {
      for ( ; ! ec.empty() ; ++ec ) {
        if (ec->ghost_id == ghost_id) {
          recv_size[ i->owner ] += e_size ;
          break;//jump out since we know we're only recving 1 msg from the 1-and-only owner
        }
      }
    }
  }

  // Allocate send and receive buffers:
  CommAll sparse ;
  {
    const unsigned * const snd_size = send_size.data() ;
    const unsigned * const rcv_size = recv_size.data() ;
    sparse.allocate_buffers( mesh.parallel(), snd_size, rcv_size);
  }

  // Send packing: phase 0 packs owned entities and communicates; phase 1
  // unpacks into non-owned (ghost) entities. Same loop body guarantees
  // identical traversal order on both sides.
  for (int phase = 0; phase < 2; ++phase) {
    for ( EntityCommListInfoVector::const_iterator
          i = mesh.internal_comm_list().begin(),
          iend = mesh.internal_comm_list().end() ; i != iend ; ++i ) {
      if ( (i->owner == parallel_rank && phase == 0) || (i->owner != parallel_rank && phase == 1) ) {
        Entity e = i->entity;
        const MeshIndex meshIdx = mesh.mesh_index(e);
        const unsigned bucketId = meshIdx.bucket->bucket_id();

        for ( fi = fb ; fi != fe ; ++fi ) {
          const FieldBase & f = **fi ;
          if(!is_matching_rank(f, e)) continue;
          const unsigned size = field_bytes_per_entity( f , e );
          if ( size ) {
            unsigned char * ptr =
              reinterpret_cast<unsigned char *>(stk::mesh::field_data( f , bucketId, meshIdx.bucket_ordinal, size ));

            const EntityCommInfoVector& infovec = i->entity_comm->comm_map;
            PairIterEntityComm ec(infovec.begin(), infovec.end());

            if (phase == 0) { // send
              for ( ; !ec.empty() ; ++ec ) {
                if (ec->ghost_id == ghost_id) {
                  CommBuffer & b = sparse.send_buffer( ec->proc );
                  b.pack<unsigned char>( ptr , size );
                }
              }
            }
            else { //recv
              for ( ; !ec.empty(); ++ec ) {
                if (ec->ghost_id == ghost_id) {
                  CommBuffer & b = sparse.recv_buffer( i->owner );
                  b.unpack<unsigned char>( ptr , size );
                  break; // only one message comes from the single owner
                }
              }
            }
          }
        }
      }
    }
    // Exchange all buffers once, between the pack and unpack phases.
    if (phase == 0) { sparse.communicate(); }
  }
}
int main(void) { /* Ledstrips inits */ /* W5100 defines */ unsigned char sockstat; unsigned int rsize; char radiostat0[10], radiostat1[10]; int postidx, getidx; /* Initial variable used */ sockreg = 0; tempvalue = 0; ledmode = 0; Init_timer1(); Init_timers(); /*Init_shift();*/ OSCTUN = 21; PLLFBD = 38; /* M=40 */ CLKDIVbits.PLLPOST = 0; /* N1=2 */ CLKDIVbits.PLLPRE = 0; /* N2=2 */ /* Eraseleds();*/ /* even ledstrips have to be mirrored */ /* Mirror(patt); */ /* LCD inits */ Init_mcp(); Init_LCD(); Write_LCD(startup); /* W5100 inits */ Init_pin_SPI(); Init_SPI(); W5100_Init(gtw_addr,mac_addr,sub_mask,ip_addr); T_SPI_CS; SPI_CS = 1; Init_UART(); for (;;) { sockstat = SPI_Read(S0_SR); switch (sockstat) { case SOCK_CLOSED: if (socket(sockreg, MR_TCP, TCP_PORT) > 0) { /* Listen to Socket 0 */ if (listen(sockreg) <= 0) Delayms(1); } break; case SOCK_ESTABLISHED: /* Get the client request size */ rsize = recv_size(); if (rsize > 0) { /* Now read the client Request */ if (recv(sockreg, buf, rsize) <= 0) break; Putstr(buf); /* printf("%s",buf);*/ /* Check the Request Header */ getidx = strindex((char *) buf, "GET /"); postidx = strindex((char *) buf, "POST /"); if (getidx >= 0 || postidx >= 0) { /* Now check the Radio Button for POST request */ if (postidx >= 0) { if (strindex((char *) buf, "uBoard new color") > 0) ledmode++; } /* Create the HTTP Response Header */ strncpy((char *)buf,("HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n" "<body style=\"background-color:FFFFFF;\">\r\n"),96); strcat((char *)buf,("[\n" " {\n" " \"id\": \"1\",\n" " \"name\": \"uBoard webboard\",\n" " \"ipaddr\": \"192.168.0.102\",\n" " \"subnetmask\": \"255.255.255.0\",\n" " \"gateway\": \"192.168.0.1\",\n" " \"adjustSpeedOfPattern\": \"int\",\n" " \"turnLedsOnOff\": \"boolean\"\n" " }\n" "]\n")); /* Now Send the HTTP Response */ if (send(sockreg,buf,strlen((char *)buf)) <= 0) break; /* TODO: add status */ LCD_Clear(); LCD_PutByte(ledmode); if (ledmode == 1) { strncpy(radiostat0,"",0); 
strncpy(radiostat1,("checked"),7); } else { strncpy(radiostat0,("checked"),7); strncpy(radiostat1,"",0); } /* Create the HTTP Radio Button Response */ strncpy((char *)buf,("<p><input type=\"radio\" name=\"radio\" value=\"0\" "),52); strcat((char *)buf,radiostat0); strcat((char *)buf,(">Turn off\r\n")); strcat((char *)buf,("<br><input type=\"radio\" name=\"radio\" value=\"1\" ")); strcat((char *)buf,radiostat1); strcat((char *)buf,(">Lounge mode\r\n")); strcat((char *)buf,("</strong><p>\r\n")); strcat((char *)buf,("<input type=\"submit\">\r\n")); strcat((char *)buf,("</form></span></body></html>\r\n")); /* Now Send the HTTP Remaining Response */ if (send(sockreg,buf,strlen((char *)buf)) <= 0) break; } /* Disconnect the socket */ disconnect(sockreg); } else Delayms(1); /* Wait for request */ break; case SOCK_FIN_WAIT: case SOCK_CLOSING: case SOCK_TIME_WAIT: case SOCK_CLOSE_WAIT: case SOCK_LAST_ACK: /* Force to close the socket */ close(sockreg); break; } } return 0; }
// Assemble (sum) field contributions from ghosted copies onto the owning
// process, then copy the owner's summed values back out to all copies via
// copy_from_owned(). NOTE the send/recv direction is reversed relative to
// the communicate_field_data functions: here ghost copies SEND to the
// owner and the owner RECEIVES one contribution per comm-map entry.
inline void parallel_sum_including_ghosts(
  const BulkData & mesh ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const int parallel_size = mesh.parallel_size();
  const int parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  const EntityCommListInfoVector& comm_info_vec = mesh.internal_comm_list();
  size_t comm_info_vec_size = comm_info_vec.size();

  // Sizing pass: fields outermost, entities innermost -- the pack/unpack
  // pass below must use the same nesting so buffer offsets line up.
  for ( fi = fb ; fi != fe ; ++fi ) {
    const FieldBase & f = **fi ;

    for (size_t i=0; i<comm_info_vec_size; ++i) {
      if (!mesh.is_valid(comm_info_vec[i].entity)) {
        ThrowAssertMsg(mesh.is_valid(comm_info_vec[i].entity),"parallel_sum_including_ghosts found invalid entity");
      }
      const Bucket* bucket = comm_info_vec[i].bucket;

      unsigned e_size = 0 ;
      if(is_matching_rank(f, *bucket)) {
        const unsigned bucketId = bucket->bucket_id();
        e_size += field_bytes_per_entity( f , bucketId );
      }

      if (e_size == 0) {
        continue;
      }

      const bool owned = comm_info_vec[i].owner == parallel_rank ;
      if ( !owned ) {
        // Ghost copy: send this entity's contribution to the owner.
        send_size[ comm_info_vec[i].owner ] += e_size ;
      }
      else {
        // Owner: expect one contribution per comm-map entry.
        const EntityCommInfoVector& infovec = comm_info_vec[i].entity_comm->comm_map;
        size_t info_vec_size = infovec.size();
        for (size_t j=0; j<info_vec_size; ++j ) {
          recv_size[ infovec[j].proc ] += e_size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:
  CommAll sparse ;
  {
    const unsigned * const snd_size = & send_size[0] ;
    const unsigned * const rcv_size = & recv_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), snd_size, rcv_size);
  }

  // Send packing: phase 0, ghost copies pack and send; phase 1, owners
  // receive and accumulate (dispatched by scalar type).
  for (int phase = 0; phase < 2; ++phase) {
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;

      for (size_t i=0; i<comm_info_vec_size; ++i) {
        const bool owned = comm_info_vec[i].owner == parallel_rank;
        if ( (!owned && phase == 0) || (owned && phase == 1) ) {
          const Bucket* bucket = comm_info_vec[i].bucket;
          if(!is_matching_rank(f, *bucket)) continue;
          const unsigned bucketId = bucket->bucket_id();
          const size_t bucket_ordinal = comm_info_vec[i].bucket_ordinal;
          const unsigned scalars_per_entity = field_scalars_per_entity(f, bucketId);

          if ( scalars_per_entity > 0 ) {
            int owner = comm_info_vec[i].owner;

            // Dispatch on the field's scalar type (double/float/unsigned/int).
            if (f.data_traits().is_floating_point && f.data_traits().size_of == 8) {
              send_or_recv_field_data_for_assembly<double>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
            }
            else if (f.data_traits().is_floating_point && f.data_traits().size_of == 4) {
              send_or_recv_field_data_for_assembly<float>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
            }
            else if (f.data_traits().is_integral && f.data_traits().size_of == 4 && f.data_traits().is_unsigned) {
              send_or_recv_field_data_for_assembly<unsigned>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
            }
            else if (f.data_traits().is_integral && f.data_traits().size_of == 4 && f.data_traits().is_signed) {
              send_or_recv_field_data_for_assembly<int>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
            }
            else {
              ThrowRequireMsg(false,"Unsupported field type in parallel_sum_including_ghosts");
            }
          }
        }
      }
    }

    // Exchange all buffers once, between the pack and unpack phases.
    if (phase == 0) { sparse.communicate(); }
  }

  // Push the owner's assembled values back out to every ghost copy.
  copy_from_owned(mesh, fields);
}