/**
 * pka_subscription_deliver_sample:
 * @subscription: A #PkaSubscription.
 * @source: A #PkaSource.
 * @manifest: A #PkaManifest; the current manifest for @source, which must
 *   already have been sent via pka_subscription_deliver_manifest().
 * @sample: A #PkaSample.
 *
 * Delivers @sample from @source to the @subscription by encoding it and
 * invoking the subscription's sample closure with the encoded buffer.
 *
 * Returns: None.
 * Side effects: None.
 */
void
pka_subscription_deliver_sample (PkaSubscription *subscription, /* IN */
                                 PkaSource       *source,       /* IN */
                                 PkaManifest     *manifest,     /* IN */
                                 PkaSample       *sample)       /* IN */
{
	GValue params[3] = { { 0 } };
	guint8 *buffer = NULL;
	gsize buffer_len = 0;
	PkaSample *samples[1] = { sample };

	g_return_if_fail(subscription != NULL);
	/* FIX: manifest was dereferenced by the encoder without a guard;
	 * check it here like pka_subscription_deliver_manifest() does. */
	g_return_if_fail(manifest != NULL);
	g_return_if_fail(sample != NULL);
	g_return_if_fail(PKA_IS_SOURCE(source));

	/*
	 * TODO: In the recent rewrite of this, we didn't implement buffering.
	 *   We need to add back support for buffering based on timeouts or
	 *   size of raw-data.
	 */

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	if (G_LIKELY(subscription->sample_closure)) {
		if (!pka_encoder_encode_samples(NULL, manifest, samples, 1,
		                                &buffer, &buffer_len)) {
			WARNING(Subscription,
			        "Subscription %d failed to encode sample.",
			        subscription->id);
			GOTO(failed);
		}
		DUMP_BYTES(Sample, buffer, buffer_len);
		/*
		 * XXX: It should be obvious that this marshalling isn't very fast.
		 *   But I've certainly done worse.
		 */
		/* FIX: the source had mojibake "¶ms" (HTML-decoded "&para;ms")
		 * everywhere "&params" was intended; restored below. */
		g_value_init(&params[0], PKA_TYPE_SUBSCRIPTION);
		g_value_init(&params[1], G_TYPE_POINTER);
		g_value_init(&params[2], G_TYPE_ULONG);
		g_value_set_boxed(&params[0], subscription);
		g_value_set_pointer(&params[1], buffer);
		g_value_set_ulong(&params[2], buffer_len);
		g_closure_invoke(subscription->sample_closure, NULL, 3,
		                 &params[0], NULL);
		g_value_unset(&params[0]);
		g_value_unset(&params[1]);
		g_value_unset(&params[2]);
		g_free(buffer);
	}
  failed:
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	EXIT;
}
/**
 * pka_subscription_deliver_manifest:
 * @subscription: A #PkaSubscription.
 * @source: A #PkaSource.
 * @manifest: A #PkaManifest.
 *
 * Delivers @manifest from @source to the subscription's handlers by
 * encoding it and invoking the manifest closure with the encoded buffer.
 *
 * Returns: None.
 * Side effects: None.
 */
void
pka_subscription_deliver_manifest (PkaSubscription *subscription, /* IN */
                                   PkaSource       *source,       /* IN */
                                   PkaManifest     *manifest)     /* IN */
{
	GValue params[3] = { { 0 } };
	guint8 *buffer = NULL;
	gsize buffer_len = 0;

	g_return_if_fail(subscription != NULL);
	g_return_if_fail(manifest != NULL);
	g_return_if_fail(PKA_IS_SOURCE(source));

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	if (G_LIKELY(subscription->manifest_closure)) {
		if (!pka_encoder_encode_manifest(NULL, manifest,
		                                 &buffer, &buffer_len)) {
			WARNING(Subscription,
			        "Subscription %d failed to encode manifest.",
			        subscription->id);
			GOTO(failed);
		}
		DUMP_BYTES(Manifest, buffer, buffer_len);
		/*
		 * XXX: It should be obvious that this marshalling isn't very fast.
		 *   But I've certainly done worse. At least it handles things
		 *   cleanly with regard to using libffi.
		 */
		/* FIX: the source had mojibake "¶ms" (HTML-decoded "&para;ms")
		 * everywhere "&params" was intended; restored below. */
		g_value_init(&params[0], PKA_TYPE_SUBSCRIPTION);
		g_value_init(&params[1], G_TYPE_POINTER);
		g_value_init(&params[2], G_TYPE_ULONG);
		g_value_set_boxed(&params[0], subscription);
		g_value_set_pointer(&params[1], buffer);
		g_value_set_ulong(&params[2], buffer_len);
		g_closure_invoke(subscription->manifest_closure, NULL, 3,
		                 &params[0], NULL);
		g_value_unset(&params[0]);
		g_value_unset(&params[1]);
		g_value_unset(&params[2]);
		g_free(buffer);
	}
  failed:
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	EXIT;
}
/*
 * mock_server_worker:
 * @data: a two-element array allocated by the spawner:
 *   [0] = mock_server_t *, [1] = mongoc_stream_t * (this connection).
 *
 * Per-connection worker loop for the mock server.  Repeatedly reads one
 * wire-protocol message from the stream, dispatches it, and compacts the
 * read buffer.  On any read/parse failure the connection is torn down and
 * the closure array freed; ownership of @data transfers to this thread.
 *
 * NOTE(review): ENTRY/GOTO/RETURN appear to be project trace macros — the
 * control flow below is plain "read header, read body, dispatch, loop".
 */
static void *
mock_server_worker (void *data)
{
   mongoc_buffer_t buffer;
   mongoc_stream_t *stream;
   mock_server_t *server;
   mongoc_rpc_t rpc;
   bson_error_t error;
   int32_t msg_len;
   void **closure = data;

   ENTRY;

   BSON_ASSERT(closure);

   server = closure[0];
   stream = closure[1];

   _mongoc_buffer_init(&buffer, NULL, 0, NULL, NULL);

again:
   /* First 4 bytes of every message are the little-endian total length. */
   if (_mongoc_buffer_fill (&buffer, stream, 4, -1, &error) == -1) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__, error.message);
      GOTO (failure);
   }
   assert (buffer.len >= 4);

   memcpy (&msg_len, buffer.data + buffer.off, 4);
   msg_len = BSON_UINT32_FROM_LE (msg_len);
   /* 16 bytes is the minimum wire header size; anything less is garbage. */
   if (msg_len < 16) {
      MONGOC_WARNING ("No data");
      GOTO (failure);
   }

   /* Read the remainder of the message body. */
   if (_mongoc_buffer_fill (&buffer, stream, msg_len, -1, &error) == -1) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__, error.message);
      GOTO (failure);
   }
   assert (buffer.len >= (unsigned)msg_len);

   DUMP_BYTES (buffer, buffer.data + buffer.off, buffer.len);

   /* Parse the raw bytes into an RPC structure (still little-endian). */
   if (!_mongoc_rpc_scatter(&rpc, buffer.data + buffer.off, msg_len)) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__,
                      "Failed to scatter");
      GOTO (failure);
   }

   _mongoc_rpc_swab_from_le(&rpc);

   /* Built-in command handling first; otherwise delegate to the
    * user-installed handler. */
   if (!handle_command(server, stream, &rpc)) {
      server->handler(server, stream, &rpc, server->handler_data);
   }

   /* Compact: shift any bytes of the next message to the front. */
   memmove (buffer.data, buffer.data + buffer.off + msg_len,
            buffer.len - msg_len);
   buffer.off = 0;
   buffer.len -= msg_len;

   GOTO (again);

failure:
   mongoc_stream_close (stream);
   mongoc_stream_destroy (stream);
   bson_free(closure);
   _mongoc_buffer_destroy (&buffer);

   RETURN (NULL);
}
static void * mock_server_worker (void *data) { mongoc_buffer_t buffer; mongoc_stream_t *stream; mock_server_t *server; mongoc_rpc_t rpc; bson_error_t error; int32_t msg_len; void **closure = data; BSON_ASSERT(closure); server = closure[0]; stream = closure[1]; _mongoc_buffer_init(&buffer, NULL, 0, NULL); again: if (_mongoc_buffer_fill (&buffer, stream, 4, INT_MAX, &error) == -1) { MONGOC_WARNING ("%s", error.message); goto failure; } assert (buffer.len >= 4); memcpy(&msg_len, buffer.data + buffer.off, 4); msg_len = BSON_UINT32_FROM_LE(msg_len); if (msg_len < 16) { MONGOC_WARNING ("No data"); goto failure; } if (_mongoc_buffer_fill (&buffer, stream, msg_len, INT_MAX, &error) == -1) { MONGOC_WARNING ("%s", error.message); goto failure; } assert (buffer.len >= msg_len); DUMP_BYTES (buffer, buffer.data + buffer.off, buffer.len); if (!_mongoc_rpc_scatter(&rpc, buffer.data + buffer.off, msg_len)) { MONGOC_WARNING ("Failed to scatter"); goto failure; } _mongoc_rpc_swab_from_le(&rpc); if (!handle_command(server, stream, &rpc)) { server->handler(server, stream, &rpc, server->handler_data); } memmove (buffer.data, buffer.data + buffer.off + msg_len, buffer.len - msg_len); buffer.off = 0; buffer.len -= msg_len; goto again; failure: mongoc_stream_close(stream); mongoc_stream_destroy(stream); bson_free(closure); return NULL; }
/*
 * mms_data_fixup - rewrite the server address string embedded in an MMS
 * control packet after NAT has rewritten the connection's addresses.
 *
 * The MMS server sends its media endpoint as a UTF-16-ish string
 * ("\\ip\proto\port") inside the TCP payload; after NAT the client must
 * see the translated address, so we rebuild the string, re-pad it to the
 * protocol's 8-byte alignment, patch the three length fields, and mangle
 * the packet in place.
 *
 * Returns 1 on success, 0 if no free port could be reserved for the
 * expected data connection.
 */
static int mms_data_fixup(const struct ip_ct_mms_expect *ct_mms_info,
                          struct ip_conntrack *ct,
                          struct sk_buff **pskb,
                          enum ip_conntrack_info ctinfo,
                          struct ip_conntrack_expect *expect)
{
	u_int32_t newip;
	struct ip_conntrack_tuple t;
	struct iphdr *iph = (*pskb)->nh.iph;
	struct tcphdr *tcph = (void *) iph + iph->ihl * 4;
	char *data = (char *)tcph + tcph->doff * 4;
	int i, j, k, port;
	u_int16_t mms_proto;

	/* Pointers into the packet at the three length fields we patch. */
	u_int32_t *mms_chunkLenLV = (u_int32_t *)(data + MMS_SRV_CHUNKLENLV_OFFSET);
	u_int32_t *mms_chunkLenLM = (u_int32_t *)(data + MMS_SRV_CHUNKLENLM_OFFSET);
	u_int32_t *mms_messageLength = (u_int32_t *)(data + MMS_SRV_MESSAGELENGTH_OFFSET);

	int zero_padding;

	/* NOTE(review): "\\255.255.255.255\UDP\65635" (sic — 65535?) needs up
	 * to 2+15+1+5+1+5+1 = 30 bytes in the worst case, which exceeds 28;
	 * verify buffer sizing against possible inputs. */
	char buffer[28];         /* "\\255.255.255.255\UDP\65635" * 2 (for unicode) */
	char unicode_buffer[75]; /* 27*2 (unicode) + 20 + 1 */
	char proto_string[6];

	/* what was the protocol again ? */
	mms_proto = expect->tuple.dst.protonum;
	sprintf(proto_string, "%u", mms_proto);

	DEBUGP("ip_nat_mms: mms_data_fixup: info (seq %u + %u) in %u, proto %s\n",
	       expect->seq, ct_mms_info->len, ntohl(tcph->seq),
	       mms_proto == IPPROTO_UDP ? "UDP"
	       : mms_proto == IPPROTO_TCP ? "TCP":proto_string);

	/* Translate to the address the client actually reaches us on. */
	newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;

	/* Alter conntrack's expectations: scan upward from the advertised
	 * port until a free one is reserved.
	 * NOTE(review): the loop terminates via "port != 0", i.e. it relies
	 * on the int wrapping to zero after exhausting ports — confirm the
	 * intended upper bound. */
	t = expect->tuple;
	t.dst.ip = newip;
	for (port = ct_mms_info->port; port != 0; port++) {
		t.dst.u.tcp.port = htons(port);
		if (ip_conntrack_change_expect(expect, &t) == 0) {
			DEBUGP("ip_nat_mms: mms_data_fixup: using port %d\n", port);
			break;
		}
	}

	if(port == 0)
		return 0;

	/* Rebuild the "\\ip\proto\port" string with the NATed address. */
	sprintf(buffer, "\\\\%u.%u.%u.%u\\%s\\%u",
	        NIPQUAD(newip),
	        expect->tuple.dst.protonum == IPPROTO_UDP ? "UDP"
	        : expect->tuple.dst.protonum == IPPROTO_TCP ? "TCP":proto_string,
	        port);
	DEBUGP("ip_nat_mms: new unicode string=%s\n", buffer);

	/* Poor man's UTF-16LE: ASCII byte in even slots, zero in odd slots. */
	memset(unicode_buffer, 0, sizeof(char)*75);
	for (i=0; i<strlen(buffer); ++i)
		*(unicode_buffer+i*2)=*(buffer+i);

	DEBUGP("ip_nat_mms: mms_data_fixup: padding: %u len: %u\n",
	       ct_mms_info->padding, ct_mms_info->len);
	DEBUGP("ip_nat_mms: mms_data_fixup: offset: %u\n",
	       MMS_SRV_UNICODE_STRING_OFFSET+ct_mms_info->len);
	DUMP_BYTES(data+MMS_SRV_UNICODE_STRING_OFFSET, 60);

	/* add end of packet to it */
	for (j=0; j<ct_mms_info->padding; ++j) {
		DEBUGP("ip_nat_mms: mms_data_fixup: i=%u j=%u byte=%u\n",
		       i, j,
		       (u8)*(data+MMS_SRV_UNICODE_STRING_OFFSET+ct_mms_info->len+j));
		*(unicode_buffer+i*2+j) = *(data+MMS_SRV_UNICODE_STRING_OFFSET+ct_mms_info->len+j);
	}

	/* pad with zeroes at the end ? see explanation of weird math below */
	zero_padding = (8-(strlen(buffer)*2 + ct_mms_info->padding + 4)%8)%8;
	for (k=0; k<zero_padding; ++k)
		*(unicode_buffer+i*2+j+k)= (char)0;

	DEBUGP("ip_nat_mms: mms_data_fixup: zero_padding = %u\n", zero_padding);

	DEBUGP("ip_nat_mms: original=> chunkLenLV=%u chunkLenLM=%u messageLength=%u\n",
	       *mms_chunkLenLV, *mms_chunkLenLM, *mms_messageLength);

	/* explanation, before I forget what I did: strlen(buffer)*2 +
	   ct_mms_info->padding + 4 must be divisable by 8; divide by 8 and
	   add 3 to compute the mms_chunkLenLM field, but note that things
	   may have to be padded with zeroes to align by 8 bytes, hence we
	   add 7 and divide by 8 to get the correct length */
	*mms_chunkLenLM = (u_int32_t) (3+(strlen(buffer)*2+ct_mms_info->padding+11)/8);
	*mms_chunkLenLV = *mms_chunkLenLM+2;
	*mms_messageLength = *mms_chunkLenLV*8;

	DEBUGP("ip_nat_mms: modified=> chunkLenLV=%u chunkLenLM=%u messageLength=%u\n",
	       *mms_chunkLenLV, *mms_chunkLenLM, *mms_messageLength);

	/* Splice the rebuilt, re-padded string over the original bytes. */
	ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
	                         expect->seq - ntohl(tcph->seq),
	                         ct_mms_info->len + ct_mms_info->padding,
	                         unicode_buffer,
	                         strlen(buffer)*2 + ct_mms_info->padding + zero_padding);
	DUMP_BYTES(unicode_buffer, 60);

	return 1;
}
static guint8 * mongo_message_query_save_to_data (MongoMessage *message, gsize *length) { static const guint8 empty_bson[] = { 5, 0, 0, 0, 0 }; MongoMessageQueryPrivate *priv; MongoMessageQuery *query = (MongoMessageQuery *)message; GByteArray *bytes; guint32 v32; guint8 *ret; ENTRY; g_assert(MONGO_IS_MESSAGE_QUERY(query)); g_assert(length); priv = query->priv; bytes = g_byte_array_sized_new(64); v32 = 0; g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); v32 = GINT32_TO_LE(mongo_message_get_request_id(message)); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); v32 = GINT32_TO_LE(mongo_message_get_response_to(message)); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); v32 = GUINT32_TO_LE(MONGO_OPERATION_QUERY); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); /* Query flags. */ v32 = GUINT32_TO_LE(priv->flags); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); /* Collection name */ g_byte_array_append(bytes, (guint8 *)(priv->collection ?: ""), strlen(priv->collection ?: "") + 1); /* Number to skip */ v32 = GUINT32_TO_LE(priv->skip); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); /* Number to return */ v32 = GUINT32_TO_LE(priv->limit); g_byte_array_append(bytes, (guint8 *)&v32, sizeof v32); /* Query */ if (priv->query) { g_byte_array_append(bytes, priv->query->data, priv->query->len); } else { g_byte_array_append(bytes, empty_bson, G_N_ELEMENTS(empty_bson)); } /* Fields */ if (priv->fields) { g_byte_array_append(bytes, priv->fields->data, priv->fields->len); } /* Update the message length */ v32 = GUINT32_TO_LE(bytes->len); memcpy(bytes->data, &v32, sizeof v32); *length = bytes->len; DUMP_BYTES(buf, bytes->data, bytes->len); ret = g_byte_array_free(bytes, FALSE); RETURN(ret); }