Example #1
/**
 * Directly insert all skbs from @skb_list into the TCP write queue of @sk,
 * regardless of the write buffer size. This allows modified packets to be
 * forwarded directly, without copying. See do_tcp_sendpages() and
 * tcp_sendmsg() in linux/net/ipv4/tcp.c.
 *
 * Can be called in softirq context as well as from a kernel thread.
 *
 * TODO use MSG_MORE until we reach the end of the message.
 */
int
ss_send(struct sock *sk, SsSkbList *skb_list, bool pass_skb)
{
	int r = 0;
	struct sk_buff *skb, *skb_copy;
	SsWork sw = {
		.sk	= sk,
		.action	= SS_SEND,
	};

	BUG_ON(!sk);
	BUG_ON(ss_skb_queue_empty(skb_list));
	SS_DBG("%s: cpu=%d sk=%p (cpu=%d) state=%s\n", __func__,
	       smp_processor_id(), sk, sk->sk_incoming_cpu,
	       ss_statename[sk->sk_state]);

	/*
	 * Remove the skbs from Tempesta lists if we won't use them,
	 * or copy them if they're going to be used by Tempesta during
	 * and after the transmission.
	 */
	if (pass_skb) {
		sw.skb_list = *skb_list;
		ss_skb_queue_head_init(skb_list);
	} else {
		ss_skb_queue_head_init(&sw.skb_list);
		for (skb = ss_skb_peek(skb_list); skb; skb = ss_skb_next(skb)) {
			/* tcp_transmit_skb() will clone the skb. */
			skb_copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!skb_copy) {
				SS_WARN("Unable to copy an egress SKB.\n");
				r = -ENOMEM;
				goto err;
			}
			ss_skb_queue_tail(&sw.skb_list, skb_copy);
		}
	}

	/*
	 * Schedule the socket for TX softirq processing.
	 * Note that only a part of @skb_list may end up in the send queue.
	 */
	if (ss_wq_push(&sw)) {
		SS_WARN("Cannot schedule socket %p for transmission\n", sk);
		r = -EBUSY;
		goto err;
	}

	return 0;
err:
	if (!pass_skb)
		while ((skb = ss_skb_dequeue(&sw.skb_list)))
			kfree_skb(skb);
	return r;
}
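
/*
 * A minimal, illustrative caller of ss_send(). Only ss_send() and
 * ss_skb_queue_head_init() come from the code above; the helper
 * tfw_build_response() is hypothetical and stands for whatever code
 * fills @skb_list with ready-to-send skbs.
 */
static int
tfw_send_response(struct sock *sk)
{
	SsSkbList skb_list;
	int r;

	ss_skb_queue_head_init(&skb_list);
	if (tfw_build_response(&skb_list))	/* hypothetical */
		return -ENOMEM;

	/*
	 * pass_skb == true: ownership of the skbs moves to the TCP
	 * stack, so ss_send() doesn't have to copy them with
	 * pskb_copy_for_clone().
	 */
	r = ss_send(sk, &skb_list, true);
	if (r)
		SS_WARN("cannot send response: %d\n", r);
	return r;
}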
Example #2
static void
ss_sock_cpu_check(struct sock *sk)
{
	if (unlikely(sk->sk_incoming_cpu != smp_processor_id()))
		SS_WARN("Bad socket cpu locality:"
			" sk=%p old_cpu=%d curr_cpu=%d\n",
			sk, sk->sk_incoming_cpu, smp_processor_id());
}
Example #3
/**
 * Fragment @skb to add room for @len bytes if @len > 0, or to delete
 * -@len bytes of data otherwise.
 */
static int
__skb_fragment(struct sk_buff *skb, struct sk_buff *pskb,
	       char *pspt, int len, TfwStr *it)
{
	int i, ret;
	long offset;
	unsigned int d_size;
	struct sk_buff *f_skb;

	SS_DBG("[%d]: %s: in: len [%d] pspt [%p], skb [%p]: head [%p]"
		" data [%p] tail [%p] end [%p] len [%u] data_len [%u]"
		" truesize [%u] nr_frags [%u]\n",
		smp_processor_id(), __func__, len, pspt, skb, skb->head,
		skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len, skb->data_len, skb->truesize,
		skb_shinfo(skb)->nr_frags);
	BUG_ON(!len);

	if (abs(len) > PAGE_SIZE) {
		SS_WARN("Attempt to add or delete too much data: %d\n", len);
		return -EINVAL;
	}

	/*
	 * Use @it to hold the return values from __split_pgfrag()
	 * and __split_linear_data(). @it->ptr, @it->skb, and
	 * @it->flags may be set to actual values. If a new SKB is
	 * allocated, then it is stored in @it->skb. @it->ptr holds
	 * the pointer either to data after the deleted data, or to
	 * the area for new data. @it->flags is set when @it->ptr
	 * points to data in @it->skb. Otherwise, @it->ptr points
	 * to data in @skb.
	 *
	 * Determine where the split begins within the SKB, then do
	 * the job using the right function.
	 */

	/* See if the split starts in the linear data. */
	d_size = skb_headlen(skb);
	offset = pspt - (char *)skb->data;

	if ((offset >= 0) && (offset < d_size)) {
		int t_size = d_size - offset;
		len = max(len, -t_size);
		ret = __split_linear_data(skb, pspt, len, it);
		goto done;
	}

	/* See if the split starts in the page fragments data. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		d_size = skb_frag_size(frag);
		offset = pspt - (char *)skb_frag_address(frag);

		if ((offset >= 0) && (offset < d_size)) {
			int t_size = d_size - offset;
			len = max(len, -t_size);
			ret = __split_pgfrag(skb, i, offset, len, it);
			goto done;
		}
	}

	/* See if the split starts in the SKB fragments data. */
	skb_walk_frags(skb, f_skb) {
		ret = __skb_fragment(f_skb, skb, pspt, len, it);
		if (ret != -ENOENT)
			return ret;
	}

	/* The split point was not found in any data area of this SKB. */
	return -ENOENT;

done:
	/* @it has been filled by the split helper; pass its result up. */
	return ret;
}
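
/*
 * Sketch of how a caller might consume the @it contract described in
 * the comment above: open room for @len bytes at @pos in @skb and
 * return the address of that room. The wrapper name is hypothetical;
 * passing NULL for the parent SKB reflects a top-level call.
 */
static char *
tfw_skb_open_room(struct sk_buff *skb, char *pos, int len)
{
	TfwStr it = { 0 };

	if (__skb_fragment(skb, NULL, pos, len, &it))
		return NULL;
	/*
	 * @it.flags set means @it.ptr points into the newly allocated
	 * SKB (@it.skb) rather than into @skb itself.
	 */
	return it.ptr;
}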
Example #4
/**
 * Delete @len (the value is positive here) bytes from @frag.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if a new SKB is allocated.
 * @return pointer to the data after the deleted area in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in @it->skb.
 */
static int
__split_pgfrag_del(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	struct skb_shared_info *si = skb_shinfo(skb);

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
		smp_processor_id(), __func__,
		skb, i, off, len, skb_frag_size(frag));

	if (unlikely(off + len > skb_frag_size(frag))) {
		SS_WARN("Attempt to delete too much\n");
		return -EFAULT;
	}

	/* Fast path: delete a full fragment. */
	if (!off && len == skb_frag_size(frag)) {
		ss_skb_adjust_data_len(skb, -len);
		__skb_frag_unref(frag);
		if (i + 1 < si->nr_frags)
			memmove(&si->frags[i], &si->frags[i + 1],
				(si->nr_frags - i - 1) * sizeof(skb_frag_t));
		--si->nr_frags;
		goto lookup_next_ptr;
	}
	/* Fast path: delete the head part of a fragment. */
	if (!off) {
		frag->page_offset += len;
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		it->ptr = skb_frag_address(frag);
		return 0;
	}
	/* Fast path: delete the tail part of a fragment. */
	if (off + len == skb_frag_size(frag)) {
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		++i;
		goto lookup_next_ptr;
	}

	/*
	 * Delete data in the middle of a fragment. After the data
	 * is deleted the fragment will contain only the head part,
	 * and the tail part is moved to another fragment.
	 * [frag @i] [frag @i+1 - tail data]
	 *
	 * Make room for a fragment right after the @i fragment
	 * to move the tail part of data there.
	 */
	if (__extend_pgfrags(skb, i + 1, 1, it))
		return -EFAULT;

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 1) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off - len;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, len + tail_len);

	/* Make the fragment with the tail part. */
	i = (i + 1) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off + len, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	ss_skb_adjust_data_len(skb, -len);
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address of data after the deleted area. */
	it->flags = (skb != skb_dst);
	it->ptr = skb_frag_address(&skb_shinfo(skb_dst)->frags[i]);
	return 0;

lookup_next_ptr:
	/* Get the next fragment after the deleted fragment. */
	if (i < si->nr_frags)
		it->ptr = skb_frag_address(&si->frags[i]);
	return 0;
}
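
/*
 * Illustrative use of the @it outputs documented above: delete @len
 * bytes at @off in fragment @i, then locate the data that now follows
 * the deleted area. Only __split_pgfrag_del() and the @it contract
 * come from the code above; the wrapper itself is hypothetical.
 */
static void *
tfw_frag_del(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	if (__split_pgfrag_del(skb, i, off, len, it))
		return NULL;
	/* The tail data may have moved to a newly allocated SKB. */
	if (it->flags)
		SS_DBG("tail data moved to new skb %p\n", it->skb);
	return it->ptr;
}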
Example #5
/**
 * This is the main body of the socket close function in Sync Sockets.
 *
 * inet_release() can sleep (as well as tcp_close()), so we make our own
 * non-sleepable socket closing.
 *
 * This function must be used only for data sockets.
 * Use the standard sock_release() for listening sockets.
 *
 * In most cases it is called in softirq context and from ksoftirqd which
 * processes data from the socket (RSS and RPS distribute packets that way).
 *
 * Note: it used to be called in process context as well, at the time when
 * Tempesta starts or stops. That's not the case right now, but it may change.
 *
 * TODO In some cases we need to close the socket aggressively, without the
 * FIN_WAIT_2 state, e.g. by sending RST. So we need to add a second
 * parameter to the function which says how to close the socket.
 * One of the examples is rcl_req_limit() (it should reset connections).
 * See the tcp_sk(sk)->linger2 processing in the standard tcp_close().
 *
 * Called with a locked socket.
 */
static void
ss_do_close(struct sock *sk)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	if (unlikely(!sk))
		return;
	SS_DBG("Close socket %p (%s): cpu=%d account=%d refcnt=%d\n",
	       sk, ss_statename[sk->sk_state], smp_processor_id(),
	       sk_has_account(sk), atomic_read(&sk->sk_refcnt));
	assert_spin_locked(&sk->sk_lock.slock);
	ss_sock_cpu_check(sk);
	BUG_ON(sk->sk_state == TCP_LISTEN);
	/* We must return immediately, so LINGER option is meaningless. */
	WARN_ON(sock_flag(sk, SOCK_LINGER));
	/* We don't support virtual containers, so TCP_REPAIR is prohibited. */
	WARN_ON(tcp_sk(sk)->repair);
	/* The socket must have atomic allocation mask. */
	WARN_ON(!(sk->sk_allocation & GFP_ATOMIC));

	/* The below is mostly copy-paste from tcp_close(). */
	sk->sk_shutdown = SHUTDOWN_MASK;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		SS_DBG("free rcv skb %p\n", skb);
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	if (data_was_unread) {
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	}
	else if (tcp_close_state(sk)) {
		/* The code below is taken from tcp_send_fin(). */
		struct tcp_sock *tp = tcp_sk(sk);
		int mss_now = tcp_current_mss(sk);

		skb = tcp_write_queue_tail(sk);

		if (tcp_send_head(sk) != NULL) {
			/* Send FIN with data if we have any. */
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
			TCP_SKB_CB(skb)->end_seq++;
			tp->write_seq++;
		}
		else {
			/* No data to send in the socket, allocate new skb. */
			skb = alloc_skb_fclone(MAX_TCP_HEADER,
					       sk->sk_allocation);
			if (!skb) {
				SS_WARN("can't send FIN due to bad alloc");
			} else {
				skb_reserve(skb, MAX_TCP_HEADER);
				tcp_init_nondata_skb(skb, tp->write_seq,
						     TCPHDR_ACK | TCPHDR_FIN);
				tcp_queue_skb(sk, skb);
			}
		}
		__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
	}

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * SS sockets are processed in softirq only,
	 * so backlog queue should be empty.
	 */
	WARN_ON(sk->sk_backlog.tail);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		return;

	if (sk->sk_state == TCP_FIN_WAIT2) {
		const int tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			return;
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
}
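
/*
 * Sketch of a plausible non-sleepable close wrapper around
 * ss_do_close(); the exact wrapper is not shown in this listing.
 * ss_do_close() requires the socket lock (see assert_spin_locked()
 * above) and takes its own reference via sock_hold(), so the caller
 * drops that reference once the socket is unlocked.
 */
static void
ss_close(struct sock *sk)
{
	bh_lock_sock(sk);
	ss_do_close(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}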
Example #6
  void
  Mesh::maybe_bind_vertices ()
  {
    size_t static_offset = 0;
    size_t dynamic_offset = 0;

    for (VertexFormat::const_iterator it = priv->vf.begin ();
        it != priv->vf.end (); it++)
    {
      const VertexFormat::DataMemberSpec &spec = *it;

      boost::shared_ptr<VertexDataBuffer> buf =
        spec.dynamic ? priv->dynamic_buf : priv->static_buf;

      assert (buf);

      const void *base =
        buf->get_address_of (0);
      
      GLenum gl_type =
        VertexFormat::to_gl_type (spec.type);
      
      GLsizei gl_stride =
        spec.dynamic ? priv->dynamic_size : priv->static_size;

      size_t &offset =
        spec.dynamic ? dynamic_offset : static_offset;

      const GLvoid *gl_ptr =
        (const GLvoid *) ((char *) base + offset);

      if (bound_vertices != priv.get ())
        buf->bind ();

      switch (spec.role)
      {
        case VertexFormat::DATA_ROLE_POS:

          if (bound_vertices != priv.get ())
            glVertexPointer (spec.count, gl_type, gl_stride, gl_ptr);
          glEnableClientState (GL_VERTEX_ARRAY);
          break;

        case VertexFormat::DATA_ROLE_NORMAL:

          if (bound_vertices != priv.get ())
            glNormalPointer (gl_type, gl_stride, gl_ptr);
          glEnableClientState (GL_NORMAL_ARRAY);
          break;

        case VertexFormat::DATA_ROLE_COLOR:

          if (bound_vertices != priv.get ())
            glColorPointer (spec.count, gl_type, gl_stride, gl_ptr);
          glEnableClientState (GL_COLOR_ARRAY);
          break;

        default:

          SS_WARN ("unhandled role %u, implement it NOW", spec.role);
          break;
      }

      offset += spec.count * VertexFormat::size_of_type (spec.type);
    }

    bound_vertices = priv.get ();
  }
Example #7
  void
  TextRenderer::populate (const std::string &text)
  {
    Text cached_text =
      Text (Texture2D (priv->proxy, Texture2D::FORMAT_ALPHA_UBYTE, true),
            boost::shared_ptr<GLDisplayList> (new GLDisplayList ()));

    unsigned curr_x = 0;
    unsigned char prev = 0;
    unsigned height = 0;
    for (unsigned i = 0; i < text.length (); i++)
    {
      unsigned char ch = text[i];

      if (!priv->glyphs[ch].ft_glyph)
      {
        SS_WARN ("unknown char %c", ch);
        continue;
      }

      if (prev && priv->kern)
      {
        FT_Vector d = {0, 0};

        FT_Error err = FT_Get_Kerning (priv->ft_face,
            priv->glyphs[prev].glyph_index,
            priv->glyphs[ch].glyph_index, FT_KERNING_DEFAULT, &d);

        if (err)
        {
          SS_WARN ("failed to get kerning due to FreeType error %d", err);
          d.x = d.y = 0;
        }

        curr_x += d.x / 64;
      }

      FT_BitmapGlyph glyph = FT_BitmapGlyph (priv->glyphs[ch].ft_glyph);

      ptrdiff_t stride = -glyph->bitmap.pitch;
      unsigned char *buf =
        glyph->bitmap.buffer + glyph->bitmap.pitch * (glyph->bitmap.rows - 1);

      if (stride >= 0)
      {
        buf = glyph->bitmap.buffer;
      }

      cached_text.tex.blit (curr_x + glyph->left, 0, glyph->bitmap.width,
          glyph->bitmap.rows, buf, stride);

      height = std::max (height, unsigned (glyph->bitmap.rows));

      /* ft_glyph->advance is in 16.16 fixed-point; convert to pixels. */
      curr_x += priv->glyphs[ch].ft_glyph->advance.x >> 16;

      prev = ch;
    }

    priv->height = std::max (height, priv->height);

    float height_ratio = 0.5f * float (height) / float (priv->height);
    float width = 0.5f * float (curr_x) / float (priv->height);

    float tex_x, tex_y;
    cached_text.tex.get_tex_coord (tex_x, tex_y, curr_x, height);
    
    glNewList (*cached_text.dl, GL_COMPILE);

    glPushAttrib (GL_ENABLE_BIT | GL_COLOR_BUFFER_BIT);
    glEnable (GL_TEXTURE_2D);

    glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnable (GL_BLEND);

    glDisable (GL_LIGHTING);

    glBegin (GL_QUADS);
    {
      glTexCoord2f (0, 0);
      glVertex2f (-width, -height_ratio);

      glTexCoord2f (tex_x, 0);
      glVertex2f (width, -height_ratio);

      glTexCoord2f (tex_x, tex_y);
      glVertex2f (width, height_ratio);

      glTexCoord2f (0, tex_y);
      glVertex2f (-width, height_ratio);
    }
    glEnd ();

    glPopAttrib ();

    glEndList ();

    priv->cache.insert (std::make_pair (text, cached_text));
  }
Example #8
  TextRenderer::TextRenderer (const std::string &font,
                              unsigned height,
                              boost::shared_ptr<GLExtensionProxy> proxy)
    : priv (new Private (proxy))
  {
    priv->height = 0;

    FT_Error err;
    err = FT_New_Face (priv->lib,
                       font.c_str (),
                       0,
                       &priv->ft_face);

    if (err)
    {
      throw std::logic_error ("Failed to load font");
    }

    err = FT_Set_Pixel_Sizes (priv->ft_face,
                              0,
                              height);

    if (err)
    {
      throw std::logic_error ("Failed to set font height");
    }

    priv->glyphs.resize (256);
    priv->kern = FT_HAS_KERNING (priv->ft_face);

    FT_UInt glyph_index;
    for (FT_ULong char_code = FT_Get_First_Char (priv->ft_face, &glyph_index);
          glyph_index != 0;
          char_code = FT_Get_Next_Char (priv->ft_face, char_code, &glyph_index))
    {
      /* Only the Latin-1 range is cached in priv->glyphs. */
      if (char_code >= 256)
        continue;

      err = FT_Load_Glyph (priv->ft_face, glyph_index, FT_LOAD_DEFAULT);

      if (err)
      {
        SS_WARN ("Ignoring char %c because of FreeType error: %d",
                 (char) char_code, err);
        continue;
      }

      FT_Glyph tmp;
      err = FT_Get_Glyph (priv->ft_face->glyph, &tmp);

      if (err)
      {
        SS_WARN ("Ignoring char %c because of Freetype error: %d", (char) char_code, err);
        continue;
      }

      err = FT_Glyph_To_Bitmap (&tmp,
          FT_RENDER_MODE_LIGHT, 0, 1);

      if (err)
      {
        SS_WARN ("Ignoring char %c because of Freetype error: %d", (char) char_code, err);
        continue;
      }

      priv->glyphs[char_code].ft_glyph = tmp;
      priv->glyphs[char_code].glyph_index = glyph_index;
    }
  }