Example #1
static gssize
socket_send_message (NiceSocket *sock,
    const NiceOutputMessage *message, gboolean reliable)
{
  TcpPriv *priv = sock->priv;
  gssize ret;
  GError *gerr = NULL;
  gsize message_len;

  /* Socket has been closed: */
  if (sock->priv == NULL)
    return -1;

  /* Don't try to access the socket if it had an error, otherwise we risk a
   * crash with SIGPIPE (Broken pipe) */
  if (priv->error)
    return -1;

  message_len = output_message_get_size (message);

  /* First try to send the data right away; only queue it if it cannot be
   * sent now. This way we avoid allocating memory on every send. */
  if (g_queue_is_empty (&priv->send_queue)) {
    ret = g_socket_send_message (sock->fileno, NULL, message->buffers,
        message->n_buffers, NULL, 0, G_SOCKET_MSG_NONE, NULL, &gerr);

    if (ret < 0) {
      if (g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK) ||
          g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_FAILED)) {
        /* Queue the message and send it later. */
        nice_socket_queue_send_with_callback (&priv->send_queue,
            message, 0, message_len, FALSE, sock->fileno, &priv->io_source,
            priv->context, (GSourceFunc) socket_send_more, sock);
        ret = message_len;
      }

      g_error_free (gerr);
    } else if ((gsize) ret < message_len) {
      /* Partial send. */
      nice_socket_queue_send_with_callback (&priv->send_queue,
          message, ret, message_len, TRUE, sock->fileno, &priv->io_source,
          priv->context, (GSourceFunc) socket_send_more, sock);
      ret = message_len;
    }
  } else {
    /* Only queue if we're sending reliably. */
    if (reliable) {
      /* Queue the message and send it later. */
      nice_socket_queue_send_with_callback (&priv->send_queue,
          message, 0, message_len, FALSE, sock->fileno, &priv->io_source,
          priv->context, (GSourceFunc) socket_send_more, sock);
      ret = message_len;
    } else {
      /* Non-reliable send, so we shouldn't queue the message. */
      ret = 0;
    }
  }

  return ret;
}
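
A caller typically wraps its payload in a NiceOutputMessage before reaching this function. The sketch below is a hypothetical caller, not part of the original source; it assumes the public NiceOutputMessage/GOutputVector layout and a single buffer vector.

/* Hypothetical caller sketch: wrap one flat buffer in a single-vector
 * NiceOutputMessage and attempt a reliable send. */
static gssize
send_flat_buffer (NiceSocket *sock, const guint8 *buf, gsize len)
{
  GOutputVector vec = { buf, len };
  NiceOutputMessage message = { &vec, 1 };

  /* With reliable == TRUE, data that cannot be written immediately is
   * queued instead of being dropped. */
  return socket_send_message (sock, &message, TRUE);
}
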
Example #2
static gssize
socket_send_message (NiceSocket *sock, const NiceOutputMessage *message)
{
  TcpPriv *priv = sock->priv;
  gssize ret;
  GError *gerr = NULL;
  gsize message_len;

  /* Don't try to access the socket if it had an error, otherwise we risk a
   * crash with SIGPIPE (Broken pipe) */
  if (priv->error)
    return -1;

  message_len = output_message_get_size (message);

  /* First try to send the data right away; only queue it if it cannot be
   * sent now. This way we avoid allocating memory on every send. */
  if (g_queue_is_empty (&priv->send_queue)) {
    ret = g_socket_send_message (sock->fileno, NULL, message->buffers,
        message->n_buffers, NULL, 0, G_SOCKET_MSG_NONE, NULL, &gerr);

    if (ret < 0) {
      if (g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK) ||
          g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_FAILED)) {
        /* Queue the message and send it later. */
        add_to_be_sent (sock, message, 0, message_len, FALSE);
        ret = message_len;
      }

      g_error_free (gerr);
    } else if ((gsize) ret < message_len) {
      /* Partial send. */
      add_to_be_sent (sock, message, ret, message_len, TRUE);
      ret = message_len;
    }
  } else {
    /* FIXME: This dropping will break http/socks5/etc.
     * We probably need a way for the upper layer to control reliability.
     */
    /* If the queue is too long, drop whatever packets we can. */
    if (g_queue_get_length (&priv->send_queue) >= MAX_QUEUE_LENGTH) {
      guint peek_idx = 0;
      struct to_be_sent *tbs = NULL;

      while ((tbs = g_queue_peek_nth (&priv->send_queue, peek_idx)) != NULL) {
        if (tbs->can_drop) {
          tbs = g_queue_pop_nth (&priv->send_queue, peek_idx);
          free_to_be_sent (tbs);
          break;
        } else {
          peek_idx++;
        }
      }
    }

    /* Queue the message and send it later. */
    add_to_be_sent (sock, message, 0, message_len, FALSE);
    ret = message_len;
  }

  return ret;
}
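
The queue entries handled above (struct to_be_sent, add_to_be_sent, free_to_be_sent) are declared elsewhere and not shown in this example. The snippet below is only a plausible shape inferred from how the fields are used here; it is an illustrative assumption, not the library's actual definition.

/* Assumed queue-entry layout, inferred from the can_drop check above. */
struct to_be_sent {
  guint8 *buf;        /* copy of the queued payload */
  gsize length;       /* number of bytes in buf */
  gboolean can_drop;  /* TRUE if this entry may be discarded on overflow */
};

/* Assumed destructor matching the free_to_be_sent() calls above. */
static void
free_to_be_sent (struct to_be_sent *tbs)
{
  g_free (tbs->buf);
  g_slice_free (struct to_be_sent, tbs);
}
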
Example #3
static gssize
socket_send_message (NiceSocket *sock, const NiceAddress *to,
    const NiceOutputMessage *message)
{
  TurnTcpPriv *priv = sock->priv;
  guint8 padbuf[3] = {0, 0, 0};
  GOutputVector *local_bufs;
  NiceOutputMessage local_message;
  guint j;
  gint ret;
  guint n_bufs;
  guint16 header_buf;
  guint offset = 0;

  /* Count the number of buffers. */
  if (message->n_buffers == -1) {
    n_bufs = 0;

    for (j = 0; message->buffers[j].buffer != NULL; j++)
      n_bufs++;
  } else {
    n_bufs = message->n_buffers;
  }

  /* Allocate a new array of buffers, covering all the buffers in the input
   * @message, plus one extra slot used for either a header or a footer,
   * depending on the compatibility mode. */
  local_bufs = g_malloc_n (n_bufs + 1, sizeof (GOutputVector));
  local_message.buffers = local_bufs;
  local_message.n_buffers = n_bufs + 1;

  if (priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_GOOGLE) {
    header_buf = htons (output_message_get_size (message));
    local_bufs[0].buffer = &header_buf;
    local_bufs[0].size = sizeof (header_buf);
    offset = 1;
  } else if (priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_DRAFT9 ||
      priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_RFC5766) {
    gsize message_len = output_message_get_size (message);
    gsize padlen = (message_len % 4) ? 4 - (message_len % 4) : 0;

    local_bufs[n_bufs].buffer = &padbuf;
    local_bufs[n_bufs].size = padlen;
  } else {
    local_message.n_buffers = n_bufs;
  }

  /* Copy the existing buffers across. */
  for (j = 0; j < n_bufs; j++) {
    local_bufs[j + offset].buffer = message->buffers[j].buffer;
    local_bufs[j + offset].size = message->buffers[j].size;
  }


  ret = nice_socket_send_messages (priv->base_socket, to, &local_message, 1);

  if (ret == 1)
    ret = output_message_get_size (&local_message);

  g_free (local_bufs);

  return ret;
}
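
The DRAFT9/RFC5766 branch pads each frame up to a 4-byte boundary. The helper below merely isolates that arithmetic for clarity; it is not part of the original file.

/* Illustration of the padding rule used above: TURN-over-TCP frames are
 * padded to a multiple of 4 bytes. */
static gsize
turn_tcp_pad_length (gsize message_len)
{
  return (message_len % 4) ? 4 - (message_len % 4) : 0;
}

/* e.g. turn_tcp_pad_length (5) == 3, turn_tcp_pad_length (8) == 0 */
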
Example #4
static gssize
socket_send_message (NiceSocket *sock, const NiceAddress *to,
    const NiceOutputMessage *message, gboolean reliable)
{
  TurnTcpPriv *priv = sock->priv;
  guint8 padbuf[3] = {0, 0, 0};
  GOutputVector *local_bufs;
  NiceOutputMessage local_message;
  guint j;
  gint ret;
  guint n_bufs;
  union {
    guint16 google_len;
    struct {
      guint8 pt;
      guint8 zero;
    } msoc;
  } header_buf;
  guint offset = 0;

  /* Socket has been closed: */
  if (sock->priv == NULL)
    return -1;

  /* Count the number of buffers. */
  if (message->n_buffers == -1) {
    n_bufs = 0;

    for (j = 0; message->buffers[j].buffer != NULL; j++)
      n_bufs++;
  } else {
    n_bufs = message->n_buffers;
  }

  /* Allocate a new array of buffers, covering all the buffers in the input
   * @message, plus one extra slot used for either a header or a footer,
   * depending on the compatibility mode. */
  local_bufs = g_malloc_n (n_bufs + 1, sizeof (GOutputVector));
  local_message.buffers = local_bufs;
  local_message.n_buffers = n_bufs + 1;

  if (priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_GOOGLE) {
    header_buf.google_len = htons (output_message_get_size (message));
    local_bufs[0].buffer = &header_buf;
    local_bufs[0].size = sizeof (guint16);
    offset = 1;
  } else if (priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_DRAFT9 ||
      priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_RFC5766) {
    gsize message_len = output_message_get_size (message);
    gsize padlen = (message_len % 4) ? 4 - (message_len % 4) : 0;

    local_bufs[n_bufs].buffer = &padbuf;
    local_bufs[n_bufs].size = padlen;
  } else if (priv->compatibility == NICE_TURN_SOCKET_COMPATIBILITY_OC2007) {
    union {
      guint32 u32;
      guint8 u8[4];
    } cookie;
    guint16 len = output_message_get_size (message);

    /* Copy the cookie from possibly split messages */
    cookie.u32 = 0;
    if (len > sizeof (TURN_MAGIC_COOKIE) + MAGIC_COOKIE_OFFSET) {
      guint16 buf_offset = 0;
      guint i;

      for (i = 0; i < n_bufs; i++) {
        if (message->buffers[i].size >
            (gsize) (MAGIC_COOKIE_OFFSET - buf_offset)) {
          /* If the cookie is split, we assume it's data */
          if (message->buffers[i].size > sizeof (TURN_MAGIC_COOKIE) +
              MAGIC_COOKIE_OFFSET - buf_offset) {
            const guint8 *buf = message->buffers[i].buffer;
            memcpy (&cookie.u8, buf + MAGIC_COOKIE_OFFSET - buf_offset,
                sizeof (TURN_MAGIC_COOKIE));
          }
          break;
        } else {
          buf_offset += message->buffers[i].size;
        }
      }
    }

    cookie.u32 = ntohl(cookie.u32);
    header_buf.msoc.zero = 0;
    if (cookie.u32 == TURN_MAGIC_COOKIE)
      header_buf.msoc.pt = MS_TURN_CONTROL_MESSAGE;
    else
      header_buf.msoc.pt = MS_TURN_END_TO_END_DATA;

    local_bufs[0].buffer = &header_buf;
    local_bufs[0].size = sizeof(header_buf.msoc);
    offset = 1;
  } else {
    local_message.n_buffers = n_bufs;
  }

  /* Copy the existing buffers across. */
  for (j = 0; j < n_bufs; j++) {
    local_bufs[j + offset].buffer = message->buffers[j].buffer;
    local_bufs[j + offset].size = message->buffers[j].size;
  }


  if (reliable)
    ret = nice_socket_send_messages_reliable (priv->base_socket, to,
        &local_message, 1);
  else
    ret = nice_socket_send_messages (priv->base_socket, to, &local_message, 1);

  if (ret == 1)
    ret = output_message_get_size (&local_message);

  g_free (local_bufs);

  return ret;
}
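
Both TURN examples rely on output_message_get_size() and on the n_buffers == -1 convention marking a NULL-terminated buffer array. A plausible implementation of that helper, written here purely as an illustrative sketch rather than copied from the library, could look like this:

/* Assumed helper: total payload size of a message, honouring the
 * n_buffers == -1 (NULL-terminated buffer array) convention used above. */
static gsize
output_message_get_size (const NiceOutputMessage *message)
{
  guint i;
  gsize message_len = 0;

  for (i = 0;
       (message->n_buffers >= 0 && i < (guint) message->n_buffers) ||
       (message->n_buffers < 0 && message->buffers[i].buffer != NULL);
       i++)
    message_len += message->buffers[i].size;

  return message_len;
}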