Example #1
static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}
Example #2
HIDDEN int
dwarf_extract_proc_info_from_fde (unw_addr_space_t as, unw_accessors_t *a,
				  unw_word_t *addrp, unw_proc_info_t *pi,
				  int need_unwind_info, unw_word_t base,
				  void *arg)
{
  unw_word_t fde_end_addr, cie_addr, cie_offset_addr, aug_end_addr = 0;
  unw_word_t start_ip, ip_range, aug_size, addr = *addrp;
  int ret, ip_range_encoding;
  struct dwarf_cie_info dci;
  uint64_t u64val;
  uint32_t u32val;

  Debug (12, "FDE @ 0x%lx\n", (long) addr);

  memset (&dci, 0, sizeof (dci));

  if ((ret = dwarf_readu32 (as, a, &addr, &u32val, arg)) < 0)
    return ret;

  if (u32val != 0xffffffff)
    {
      int32_t cie_offset;

      /* In some configurations, an FDE with a 0 length indicates the
	 end of the FDE-table.  */
      if (u32val == 0)
	return -UNW_ENOINFO;

      /* the FDE is in the 32-bit DWARF format */

      *addrp = fde_end_addr = addr + u32val;
      cie_offset_addr = addr;

      if ((ret = dwarf_reads32 (as, a, &addr, &cie_offset, arg)) < 0)
	return ret;

      if (is_cie_id (cie_offset, base != 0))
	/* ignore CIEs (happens during linear searches) */
	return 0;

      if (base != 0)
        cie_addr = base + cie_offset;
      else
	/* DWARF says that the CIE_pointer in the FDE is a
	   .debug_frame-relative offset, but the GCC-generated .eh_frame
	   sections instead store a "pcrelative" offset, which is just
	   as fine as it's self-contained.  */
	cie_addr = cie_offset_addr - cie_offset;
    }
  else
    {
      int64_t cie_offset;

      /* the FDE is in the 64-bit DWARF format */

      if ((ret = dwarf_readu64 (as, a, &addr, &u64val, arg)) < 0)
	return ret;

      *addrp = fde_end_addr = addr + u64val;
      cie_offset_addr = addr;

      if ((ret = dwarf_reads64 (as, a, &addr, &cie_offset, arg)) < 0)
	return ret;

      if (is_cie_id (cie_offset, base != 0))
	/* ignore CIEs (happens during linear searches) */
	return 0;

      if (base != 0)
	cie_addr = base + cie_offset;
      else
	/* DWARF says that the CIE_pointer in the FDE is a
	   .debug_frame-relative offset, but the GCC-generated .eh_frame
	   sections instead store a "pcrelative" offset, which is just
	   as fine as it's self-contained.  */
	cie_addr = (unw_word_t) ((uint64_t) cie_offset_addr - cie_offset);
    }

  Debug (15, "looking for CIE at address %lx\n", (long) cie_addr);

  if ((ret = parse_cie (as, a, cie_addr, pi, &dci, base, arg)) < 0)
    return ret;

  /* IP-range has same encoding as FDE pointers, except that it's
     always an absolute value: */
  ip_range_encoding = dci.fde_encoding & DW_EH_PE_FORMAT_MASK;

  if ((ret = dwarf_read_encoded_pointer (as, a, &addr, dci.fde_encoding,
					 pi, &start_ip, arg)) < 0
      || (ret = dwarf_read_encoded_pointer (as, a, &addr, ip_range_encoding,
					    pi, &ip_range, arg)) < 0)
    return ret;
  pi->start_ip = start_ip;
  pi->end_ip = start_ip + ip_range;
  pi->handler = dci.handler;

  if (dci.sized_augmentation)
    {
      if ((ret = dwarf_read_uleb128 (as, a, &addr, &aug_size, arg)) < 0)
	return ret;
      aug_end_addr = addr + aug_size;
    }

  if ((ret = dwarf_read_encoded_pointer (as, a, &addr, dci.lsda_encoding,
					 pi, &pi->lsda, arg)) < 0)
    return ret;

  Debug (15, "FDE covers IP 0x%lx-0x%lx, LSDA=0x%lx\n",
	 (long) pi->start_ip, (long) pi->end_ip, (long) pi->lsda);

  if (need_unwind_info)
    {
      pi->format = UNW_INFO_FORMAT_TABLE;
      pi->unwind_info_size = sizeof (dci);
      pi->unwind_info = mempool_alloc (&dwarf_cie_info_pool);
      if (!pi->unwind_info)
	return -UNW_ENOMEM;

      if (dci.have_abi_marker)
	{
	  if ((ret = dwarf_readu16 (as, a, &addr, &dci.abi, arg)) < 0
	      || (ret = dwarf_readu16 (as, a, &addr, &dci.tag, arg)) < 0)
	    return ret;
	  Debug (13, "Found ABI marker = (abi=%u, tag=%u)\n",
		 dci.abi, dci.tag);
	}

      if (dci.sized_augmentation)
	dci.fde_instr_start = aug_end_addr;
      else
	dci.fde_instr_start = addr;
      dci.fde_instr_end = fde_end_addr;

      memcpy (pi->unwind_info, &dci, sizeof (dci));
    }
  return 0;
}
Example #3
static inline struct metapage *alloc_metapage(int gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}
Example #4
static int send_pptp_start_ctrl_conn_rply(struct pptp_conn_t *conn, int res_code, int err_code)
{
	struct pptp_start_ctrl_conn msg = {
		.header = PPTP_HEADER_CTRL(PPTP_START_CTRL_CONN_RPLY),
		.version = htons(PPTP_VERSION),
		.result_code = res_code,
		.error_code = err_code,
		.framing_cap = htonl(3),
		.bearer_cap = htonl(3),
		.max_channels = htons(1),
		.firmware_rev = htons(PPTP_FIRMWARE_VERSION),
	};

	memset(msg.hostname, 0, sizeof(msg.hostname));
	strcpy((char*)msg.hostname, PPTP_HOSTNAME);

	memset(msg.vendor, 0, sizeof(msg.vendor));
	strcpy((char*)msg.vendor, PPTP_VENDOR);

	if (conf_verbose)
		log_ppp_info2("send [PPTP Start-Ctrl-Conn-Reply <Version %i> <Result %i> <Error %i> <Framing %x> <Bearer %x> <Max-Chan %i>]\n", msg.version, msg.result_code, msg.error_code, ntohl(msg.framing_cap), ntohl(msg.bearer_cap), ntohs(msg.max_channels));

	return post_msg(conn, &msg, sizeof(msg));
}

static int pptp_start_ctrl_conn_rqst(struct pptp_conn_t *conn)
{
	struct pptp_start_ctrl_conn *msg = (struct pptp_start_ctrl_conn *)conn->in_buf;

	if (conf_verbose)
		log_ppp_info2("recv [PPTP Start-Ctrl-Conn-Request <Version %i> <Framing %x> <Bearer %x> <Max-Chan %i>]\n", msg->version, ntohl(msg->framing_cap), ntohl(msg->bearer_cap), ntohs(msg->max_channels));

	if (conn->state != STATE_IDLE) {
		log_ppp_warn("unexpected PPTP_START_CTRL_CONN_RQST\n");
		if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_EXISTS, 0))
			return -1;
		return 0;
	}

	if (msg->version != htons(PPTP_VERSION)) {
		log_ppp_warn("PPTP version mismatch: expecting %x, received %s\n", PPTP_VERSION, msg->version);
		if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_PROTOCOL, 0))
			return -1;
		return 0;
	}
	/*if (!(ntohl(msg->framing_cap) & PPTP_FRAME_SYNC)) {
		log_ppp_warn("connection does not supports sync mode\n");
		if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_GE, 0))
			return -1;
		return 0;
	}*/
	if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_SUCCESS, 0))
		return -1;

	triton_timer_mod(&conn->timeout_timer, 0);

	conn->state = STATE_ESTB;

	return 0;
}

static int send_pptp_out_call_rply(struct pptp_conn_t *conn, struct pptp_out_call_rqst *rqst, int call_id, int res_code, int err_code)
{
	struct pptp_out_call_rply msg = {
		.header = PPTP_HEADER_CTRL(PPTP_OUT_CALL_RPLY),
		.call_id = htons(call_id),
		.call_id_peer = rqst->call_id,
		.result_code = res_code,
		.error_code = err_code,
		.cause_code = 0,
		.speed = rqst->bps_max,
		.recv_size = rqst->recv_size,
		.delay = 0,
		.channel = 0,
	};

	if (conf_verbose)
		log_ppp_info2("send [PPTP Outgoing-Call-Reply <Call-ID %x> <Peer-Call-ID %x> <Result %i> <Error %i> <Cause %i> <Speed %i> <Window-Size %i> <Delay %i> <Channel %x>]\n", ntohs(msg.call_id), ntohs(msg.call_id_peer), msg.result_code, msg.error_code, ntohs(msg.cause_code), ntohl(msg.speed), ntohs(msg.recv_size), ntohs(msg.delay), ntohl(msg.channel));

	return post_msg(conn, &msg, sizeof(msg));
}

static int pptp_out_call_rqst(struct pptp_conn_t *conn)
{
	struct pptp_out_call_rqst *msg = (struct pptp_out_call_rqst *)conn->in_buf;
	struct sockaddr_pppox src_addr, dst_addr;
	struct sockaddr_in addr;
	socklen_t addrlen;
	int pptp_sock;

	if (conf_verbose)
		log_ppp_info2("recv [PPTP Outgoing-Call-Request <Call-ID %x> <Call-Serial %x> <Min-BPS %i> <Max-BPS %i> <Bearer %x> <Framing %x> <Window-Size %i> <Delay %i>]\n", ntohs(msg->call_id), ntohs(msg->call_sernum), ntohl(msg->bps_min), ntohl(msg->bps_max), ntohl(msg->bearer), ntohl(msg->framing), ntohs(msg->recv_size), ntohs(msg->delay));

	if (conn->state != STATE_ESTB) {
		log_ppp_warn("unexpected PPTP_OUT_CALL_RQST\n");
		if (send_pptp_out_call_rply(conn, msg, 0, PPTP_CALL_RES_GE, PPTP_GE_NOCONN))
			return -1;
		return 0;
	}

	memset(&src_addr, 0, sizeof(src_addr));
	src_addr.sa_family = AF_PPPOX;
	src_addr.sa_protocol = PX_PROTO_PPTP;
	src_addr.sa_addr.pptp.call_id = 0;
	addrlen = sizeof(addr);
	getsockname(conn->hnd.fd, (struct sockaddr*)&addr, &addrlen);
	src_addr.sa_addr.pptp.sin_addr = addr.sin_addr;

	memset(&dst_addr, 0, sizeof(dst_addr));
	dst_addr.sa_family = AF_PPPOX;
	dst_addr.sa_protocol = PX_PROTO_PPTP;
	dst_addr.sa_addr.pptp.call_id = htons(msg->call_id);
	addrlen = sizeof(addr);
	getpeername(conn->hnd.fd, (struct sockaddr*)&addr, &addrlen);
	dst_addr.sa_addr.pptp.sin_addr = addr.sin_addr;

	pptp_sock = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);
	if (pptp_sock < 0) {
		log_ppp_error("failed to create PPTP socket (%s)\n", strerror(errno));
		return -1;
	}

	fcntl(pptp_sock, F_SETFD, fcntl(pptp_sock, F_GETFD) | FD_CLOEXEC);

	if (bind(pptp_sock, (struct sockaddr*)&src_addr, sizeof(src_addr))) {
		log_ppp_error("failed to bind PPTP socket (%s)\n", strerror(errno));
		close(pptp_sock);
		return -1;
	}
	addrlen = sizeof(src_addr);
	getsockname(pptp_sock, (struct sockaddr*)&src_addr, &addrlen);

	if (connect(pptp_sock, (struct sockaddr*)&dst_addr, sizeof(dst_addr))) {
		log_ppp_error("failed to connect PPTP socket (%s)\n", strerror(errno));
		close(pptp_sock);
		return -1;
	}

	if (send_pptp_out_call_rply(conn, msg, src_addr.sa_addr.pptp.call_id, PPTP_CALL_RES_OK, 0))
		return -1;

	conn->call_id = src_addr.sa_addr.pptp.call_id;
	conn->peer_call_id = msg->call_id;
	conn->ppp.fd = pptp_sock;
	conn->ppp.chan_name = _strdup(inet_ntoa(dst_addr.sa_addr.pptp.sin_addr));

	triton_event_fire(EV_CTRL_STARTED, &conn->ppp);

	if (establish_ppp(&conn->ppp)) {
		close(pptp_sock);
		//if (send_pptp_stop_ctrl_conn_rqst(conn, 0, 0))
		conn->state = STATE_FIN;
		return -1;
	}
	conn->state = STATE_PPP;
	__sync_sub_and_fetch(&stat_starting, 1);
	__sync_add_and_fetch(&stat_active, 1);
	
	if (conn->timeout_timer.tpd)
		triton_timer_del(&conn->timeout_timer);

	if (conf_echo_interval) {
		conn->echo_timer.period = conf_echo_interval * 1000;
		triton_timer_add(&conn->ctx, &conn->echo_timer, 0);
	}

	return 0;
}

static int send_pptp_call_disconnect_notify(struct pptp_conn_t *conn, int result)
{
	struct pptp_call_clear_ntfy msg = {
		.header = PPTP_HEADER_CTRL(PPTP_CALL_CLEAR_NTFY),
		.call_id = htons(conn->peer_call_id),
		.result_code = result,
		.error_code = 0,
		.cause_code = 0,
	};

	if (conf_verbose)
		log_ppp_info2("send [PPTP Call-Disconnect-Notify <Call-ID %x> <Result %i> <Error %i> <Cause %i>]\n", ntohs(msg.call_id), msg.result_code, msg.error_code, msg.cause_code);
	
	return post_msg(conn, &msg, sizeof(msg));
}

static int pptp_call_clear_rqst(struct pptp_conn_t *conn)
{
	struct pptp_call_clear_rqst *rqst = (struct pptp_call_clear_rqst *)conn->in_buf;

	if (conf_verbose)
		log_ppp_info2("recv [PPTP Call-Clear-Request <Call-ID %x>]\n", ntohs(rqst->call_id));

	if (conn->echo_timer.tpd)
		triton_timer_del(&conn->echo_timer);

	if (conn->state == STATE_PPP) {
		__sync_sub_and_fetch(&stat_active, 1);
		conn->state = STATE_CLOSE;
		ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1);
	}

	return send_pptp_call_disconnect_notify(conn, 4);
}

static int pptp_echo_rqst(struct pptp_conn_t *conn)
{
	struct pptp_echo_rqst *in_msg = (struct pptp_echo_rqst *)conn->in_buf;
	struct pptp_echo_rply out_msg = {
		.header = PPTP_HEADER_CTRL(PPTP_ECHO_RPLY),
		.identifier = in_msg->identifier,
		.result_code = 1,
	};

	if (conf_verbose) {
		log_ppp_debug("recv [PPTP Echo-Request <Identifier %x>]\n", in_msg->identifier);
		log_ppp_debug("send [PPTP Echo-Reply <Identifier %x>]\n", out_msg.identifier);
	}

	return post_msg(conn, &out_msg, sizeof(out_msg));
}

static int pptp_echo_rply(struct pptp_conn_t *conn)
{
	struct pptp_echo_rply *msg = (struct pptp_echo_rply *)conn->in_buf;
	
	if (conf_verbose)
		log_ppp_debug("recv [PPTP Echo-Reply <Identifier %x>]\n", msg->identifier);

	/*if (msg->identifier != conn->echo_sent) {
		log_ppp_warn("pptp:echo: identifier mismatch\n");
		//return -1;
	}*/
	conn->echo_sent = 0;
	return 0;
}
static void pptp_send_echo(struct triton_timer_t *t)
{
	struct pptp_conn_t *conn = container_of(t, typeof(*conn), echo_timer);
	struct pptp_echo_rqst msg = {
		.header = PPTP_HEADER_CTRL(PPTP_ECHO_RQST),
	};

	if (++conn->echo_sent == conf_echo_failure) {
		log_ppp_warn("pptp: no echo reply\n");
		disconnect(conn);
		return;
	}

	conn->echo_sent = random();
	msg.identifier = conn->echo_sent;

	if (conf_verbose)
		log_ppp_debug("send [PPTP Echo-Request <Identifier %x>]\n", msg.identifier);

	if (post_msg(conn, &msg, sizeof(msg)))
		disconnect(conn);
}

static int process_packet(struct pptp_conn_t *conn)
{
	struct pptp_header *hdr = (struct pptp_header *)conn->in_buf;
	switch(ntohs(hdr->ctrl_type))
	{
		case PPTP_START_CTRL_CONN_RQST:
			return pptp_start_ctrl_conn_rqst(conn);
		case PPTP_STOP_CTRL_CONN_RQST:
			return pptp_stop_ctrl_conn_rqst(conn);
		case PPTP_STOP_CTRL_CONN_RPLY:
			return pptp_stop_ctrl_conn_rply(conn);
		case PPTP_OUT_CALL_RQST:
			return pptp_out_call_rqst(conn);
		case PPTP_ECHO_RQST:
			return pptp_echo_rqst(conn);
		case PPTP_ECHO_RPLY:
			return pptp_echo_rply(conn);
		case PPTP_CALL_CLEAR_RQST:
			return pptp_call_clear_rqst(conn);
		case PPTP_SET_LINK_INFO:
			if (conf_verbose)
				log_ppp_info2("recv [PPTP Set-Link-Info]\n");
			return 0;
		default:
			log_ppp_warn("recv [PPTP Unknown (%x)]\n", ntohs(hdr->ctrl_type));
	}
	return 0;
}

static int pptp_read(struct triton_md_handler_t *h)
{
	struct pptp_conn_t *conn=container_of(h,typeof(*conn),hnd);
	struct pptp_header *hdr=(struct pptp_header *)conn->in_buf;
	int n;

	while(1) {
		n = read(h->fd, conn->in_buf + conn->in_size, PPTP_CTRL_SIZE_MAX - conn->in_size);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN)
				return 0;
			log_ppp_error("pptp: read: %s\n",strerror(errno));
			goto drop;
		}
		if (n == 0) {
			if (conf_verbose)
				log_ppp_info2("pptp: disconnect by peer\n");
			goto drop;
		}
		conn->in_size += n;
		if (conn->in_size >= sizeof(*hdr)) {
			if (hdr->magic != htonl(PPTP_MAGIC)) {
				log_ppp_error("pptp: invalid magic\n");
				goto drop;
			}
			if (ntohs(hdr->length) >= PPTP_CTRL_SIZE_MAX) {
				log_ppp_error("pptp: message is too long\n");
				goto drop;
			}
			if (ntohs(hdr->length) > conn->in_size)
				continue;
			if (ntohs(hdr->length) <= conn->in_size) {
				if (ntohs(hdr->length) != PPTP_CTRL_SIZE(ntohs(hdr->ctrl_type))) {
					log_ppp_error("pptp: invalid message length\n");
					goto drop;
				}
				if (process_packet(conn))
					goto drop;
				conn->in_size -= ntohs(hdr->length);
				if (conn->in_size)
					memmove(conn->in_buf, conn->in_buf + ntohs(hdr->length), conn->in_size);
			}
		}
	}
drop:
	disconnect(conn);
	return 1;
}
static int pptp_write(struct triton_md_handler_t *h)
{
	struct pptp_conn_t *conn = container_of(h, typeof(*conn), hnd);
	int n;

	while (1) {
		n = write(h->fd, conn->out_buf+conn->out_pos, conn->out_size-conn->out_pos);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN)
				n = 0;
			else {
				if (errno != EPIPE) {
					if (conf_verbose)
						log_ppp_info2("pptp: post_msg: %s\n", strerror(errno));
				}
				disconnect(conn);
				return 1;
			}
		}

		conn->out_pos += n;
		if (conn->out_pos == conn->out_size) {
			conn->out_pos = 0;
			conn->out_size = 0;
			triton_md_disable_handler(h, MD_MODE_WRITE);
			return 0;
		}
	}
}
static void pptp_timeout(struct triton_timer_t *t)
{
	struct pptp_conn_t *conn = container_of(t, typeof(*conn), timeout_timer);
	disconnect(conn);
}
static void pptp_close(struct triton_context_t *ctx)
{
	struct pptp_conn_t *conn = container_of(ctx, typeof(*conn), ctx);
	if (conn->state == STATE_PPP) {
		__sync_sub_and_fetch(&stat_active, 1);
		conn->state = STATE_CLOSE;
		ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 1);
		if (send_pptp_call_disconnect_notify(conn, 3)) {
			triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn);
			return;
		}
	} else {
		if (send_pptp_stop_ctrl_conn_rqst(conn, 0)) {
			triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn);
			return;
		}
	}

	if (conn->timeout_timer.tpd)
		triton_timer_mod(&conn->timeout_timer, 0);
	else
		triton_timer_add(ctx, &conn->timeout_timer, 0);
}
static void ppp_started(struct ppp_t *ppp)
{
	log_ppp_debug("pptp: ppp started\n");
}
static void ppp_finished(struct ppp_t *ppp)
{
	struct pptp_conn_t *conn = container_of(ppp, typeof(*conn), ppp);

	if (conn->state != STATE_CLOSE) {
		log_ppp_debug("pptp: ppp finished\n");
		conn->state = STATE_CLOSE;
		__sync_sub_and_fetch(&stat_active, 1);

		if (send_pptp_call_disconnect_notify(conn, 3))
			triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn);
		else if (send_pptp_stop_ctrl_conn_rqst(conn, 0))
			triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn);
		else {
			if (conn->timeout_timer.tpd)
				triton_timer_mod(&conn->timeout_timer, 0);
			else
				triton_timer_add(&conn->ctx, &conn->timeout_timer, 0);
		}
	}
}

//==================================

struct pptp_serv_t
{
	struct triton_context_t ctx;
	struct triton_md_handler_t hnd;
};

static int pptp_connect(struct triton_md_handler_t *h)
{
	struct sockaddr_in addr;
	socklen_t size = sizeof(addr);
	int sock;
	struct pptp_conn_t *conn;

	while(1) {
		sock = accept(h->fd, (struct sockaddr *)&addr, &size);
		if (sock < 0) {
			if (errno == EAGAIN)
				return 0;
			log_error("pptp: accept failed: %s\n", strerror(errno));
			continue;
		}

		if (ppp_shutdown) {
			close(sock);
			continue;
		}

		if (triton_module_loaded("connlimit") && connlimit_check(cl_key_from_ipv4(addr.sin_addr.s_addr))) {
			close(sock);
			return 0;
		}

		log_info2("pptp: new connection from %s\n", inet_ntoa(addr.sin_addr));

		if (iprange_client_check(addr.sin_addr.s_addr)) {
			log_warn("pptp: IP is out of client-ip-range, droping connection...\n");
			close(sock);
			continue;
		}

		if (fcntl(sock, F_SETFL, O_NONBLOCK)) {
			log_error("pptp: failed to set nonblocking mode: %s, closing connection...\n", strerror(errno));
			close(sock);
			continue;
		}

		conn = mempool_alloc(conn_pool);
		memset(conn, 0, sizeof(*conn));
		conn->hnd.fd = sock;
		conn->hnd.read = pptp_read;
		conn->hnd.write = pptp_write;
		conn->ctx.close = pptp_close;
		conn->ctx.before_switch = log_switch;
		conn->in_buf = _malloc(PPTP_CTRL_SIZE_MAX);
		conn->out_buf = _malloc(PPTP_CTRL_SIZE_MAX);
		conn->timeout_timer.expire = pptp_timeout;
		conn->timeout_timer.period = conf_timeout * 1000;
		conn->echo_timer.expire = pptp_send_echo;
		conn->ctrl.ctx = &conn->ctx;
		conn->ctrl.started = ppp_started;
		conn->ctrl.finished = ppp_finished;
		conn->ctrl.max_mtu = PPTP_MAX_MTU;
		conn->ctrl.type = CTRL_TYPE_PPTP;
		conn->ctrl.name = "pptp";
		
		conn->ctrl.calling_station_id = _malloc(17);
		conn->ctrl.called_station_id = _malloc(17);
		u_inet_ntoa(addr.sin_addr.s_addr, conn->ctrl.calling_station_id);
		getsockname(sock, &addr, &size);
		u_inet_ntoa(addr.sin_addr.s_addr, conn->ctrl.called_station_id);
	
		ppp_init(&conn->ppp);
		conn->ppp.ctrl = &conn->ctrl;

		triton_context_register(&conn->ctx, &conn->ppp);
		triton_md_register_handler(&conn->ctx, &conn->hnd);
		triton_md_enable_handler(&conn->hnd,MD_MODE_READ);
		triton_timer_add(&conn->ctx, &conn->timeout_timer, 0);
		triton_context_wakeup(&conn->ctx);

		triton_event_fire(EV_CTRL_STARTING, &conn->ppp);

		__sync_add_and_fetch(&stat_starting, 1);
	}
	return 0;
}
static void pptp_serv_close(struct triton_context_t *ctx)
{
	struct pptp_serv_t *s=container_of(ctx,typeof(*s),ctx);
	triton_md_unregister_handler(&s->hnd);
	close(s->hnd.fd);
	triton_context_unregister(ctx);
}

static struct pptp_serv_t serv=
{
	.hnd.read = pptp_connect,
	.ctx.close = pptp_serv_close,
	.ctx.before_switch = log_switch,
};

static int show_stat_exec(const char *cmd, char * const *fields, int fields_cnt, void *client)
{
	cli_send(client, "pptp:\r\n");
	cli_sendv(client,"  starting: %u\r\n", stat_starting);
	cli_sendv(client,"  active: %u\r\n", stat_active);

	return CLI_CMD_OK;
}

void __export pptp_get_stat(unsigned int **starting, unsigned int **active)
{
	*starting = &stat_starting;
	*active = &stat_active;
}

static void load_config(void)
{
	char *opt;

	opt = conf_get_opt("pptp", "timeout");
	if (opt && atoi(opt) > 0)
		conf_timeout = atoi(opt);
	
	opt = conf_get_opt("pptp", "echo-interval");
	if (opt && atoi(opt) >= 0)
		conf_echo_interval = atoi(opt);

	opt = conf_get_opt("pptp", "echo-failure");
	if (opt && atoi(opt) > 0)
		conf_echo_failure = atoi(opt);

	opt = conf_get_opt("pptp", "verbose");
	if (opt && atoi(opt) > 0)
		conf_verbose = 1;
}

static void pptp_init(void)
{
	struct sockaddr_in addr;
	char *opt;

	system("modprobe pptp");

	serv.hnd.fd = socket(PF_INET, SOCK_STREAM, 0);
	if (serv.hnd.fd < 0) {
		log_emerg("pptp: failed to create server socket: %s\n", strerror(errno));
		return;
	}

	fcntl(serv.hnd.fd, F_SETFD, fcntl(serv.hnd.fd, F_GETFD) | FD_CLOEXEC);

	addr.sin_family = AF_INET;
	addr.sin_port = htons(PPTP_PORT);

	opt = conf_get_opt("pptp", "bind");
	if (opt)
		addr.sin_addr.s_addr = inet_addr(opt);
	else
		addr.sin_addr.s_addr = htonl(INADDR_ANY);

	setsockopt(serv.hnd.fd, SOL_SOCKET, SO_REUSEADDR, &serv.hnd.fd, 4);
	if (bind(serv.hnd.fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		log_emerg("pptp: failed to bind socket: %s\n", strerror(errno));
		close(serv.hnd.fd);
		return;
	}

	if (listen(serv.hnd.fd, 100) < 0) {
		log_emerg("pptp: failed to listen socket: %s\n", strerror(errno));
		close(serv.hnd.fd);
		return;
	}

	if (fcntl(serv.hnd.fd, F_SETFL, O_NONBLOCK)) {
		log_emerg("pptp: failed to set nonblocking mode: %s\n", strerror(errno));
		close(serv.hnd.fd);
		return;
	}
	
	conn_pool = mempool_create(sizeof(struct pptp_conn_t));

	load_config();

	triton_context_register(&serv.ctx, NULL);
	triton_md_register_handler(&serv.ctx, &serv.hnd);
	triton_md_enable_handler(&serv.hnd, MD_MODE_READ);
	triton_context_wakeup(&serv.ctx);

	cli_register_simple_cmd2(show_stat_exec, NULL, 2, "show", "stat");
	
	triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config);
}

DEFINE_INIT(20, pptp_init);
Example #5
File: klog.c Project: GoodOkk/tfs
struct klog_msg * klog_msg_alloc(void)
{
	return mempool_alloc(klog_msg_pool, GFP_ATOMIC);
}
Example #6
static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}
Example #7
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool, int num_reply)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	struct ceph_osd_request_head *head;
	struct ceph_osd_op *op;
	void *p;
	int num_op = 1 + do_sync;
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int i;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), GFP_NOFS);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	req->r_flags = flags;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, GFP_NOFS);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	/* create request message; allow space for oid */
	msg_size += 40;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, GFP_NOFS);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
	memset(msg->front.iov_base, 0, msg->front.iov_len);
	head = msg->front.iov_base;
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_request = msg;
	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	head->num_ops = cpu_to_le16(num_op);
	op->op = cpu_to_le16(opcode);

	/* calculate max write size */
	calc_layout(osdc, vino, layout, off, plen, req);
	req->r_file_layout = *layout;  /* keep a copy */

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(*plen);
		op->payload_len = cpu_to_le32(*plen);
	}
	op->extent.truncate_size = cpu_to_le64(truncate_size);
	op->extent.truncate_seq = cpu_to_le32(truncate_seq);

	/* fill in oid */
	head->object_len = cpu_to_le32(req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	if (do_sync) {
		op++;
		op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
	}
	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);
	return req;
}
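
The use_mempool branch above is the interesting part: when the caller runs in a context where allocation must not fail (e.g. writeback under memory pressure), the request comes from a pre-filled reserve pool rather than the general heap. Below is a minimal user-space sketch of that pattern; the struct req type, the pool array, and req_alloc are illustrative stand-ins, not the kernel's mempool API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req { int id; };

#define POOL_MAX 4
static struct req *pool[POOL_MAX];
static int pool_top;

/* Take an object from the reserve when allocation must not fail under
 * pressure; otherwise use the ordinary heap, which may return NULL. */
static struct req *req_alloc(bool use_pool)
{
	struct req *r;

	if (use_pool && pool_top > 0) {
		r = pool[--pool_top];
		memset(r, 0, sizeof(*r));	/* pool memory is recycled, so clear it */
	} else {
		r = calloc(1, sizeof(*r));
	}
	return r;
}

int main(void)
{
	/* pre-fill the reserve once, up front */
	for (pool_top = 0; pool_top < POOL_MAX; pool_top++)
		pool[pool_top] = malloc(sizeof(struct req));

	struct req *r = req_alloc(true);
	printf("req from pool: %p\n", (void *)r);
	return 0;
}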
Example #8
static uint8_t *mpc_alloc(uint8_t *const size) {
	*size = mpc_monostate.mempool->block_size;
	return mempool_alloc(mpc_monostate.mempool);
}
Example #9
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_rw & REQ_HARDBARRIER) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}
Example #10
static inline struct pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}
Example #11
static inline struct page_beancounter *io_pb_alloc(void)
{
	return mempool_alloc(pb_pool, GFP_ATOMIC);
}
Example #12
int exploredir(struct direntrylist* list, const char* path)
{
	struct stat fattr;                      /* Used to store attributes of a file entry */
	struct direntry* list_entry;    /* Used to capture information about file entry */
	struct dirent** entries;        /* Stores each file entry's name and stuff */
	int n;                                                  /* How many file entries are in the directory */
	int i;                                                  /* Used to traverse file entries */

	// Alphabetize the entries in the directory since Linux is stupid
	// and doesn't do this by default, which Mac OS X does...
	if ((n = scandir(path, &entries, 0, alphasort)) < 0) {
		// Send error message to all clients and then exit
		kill_clients(remove_client_pipes[1], "Cannot open directory! ; Exiting now!");
		syslog(LOG_ERR, "Cannot open directory: %s", path);
		exit(1);
	}

	// Delete reference to current directory (.)
	free(entries[0]);
	// Delete reference to parent directory (..)
	free(entries[1]);

	// Start after . and ..
	i = 2;
	while (i < n) {
		list_entry = (struct direntry*)mempool_alloc(direntry_pool, sizeof(struct direntry));

		if (list_entry == NULL) {
			// Mempool has no free nodes and malloc failed
			kill_clients(remove_client_pipes[1], "Unrecoverable server error! ; Exiting now!");
			syslog(LOG_ERR, "Cannot malloc direntry");
			exit(1);
		}

		// Zero the whole entry (sizeof(*list_entry), not the pointer) before use;
		// this also sets list_entry->next to NULL
		memset(list_entry, 0, sizeof(*list_entry));

		// Make sure absolute path is not too long
		if ((strlen(path) + strlen(entries[i]->d_name) + 1) >= PATH_MAX) {
			kill_clients(remove_client_pipes[1], "Unrecoverable server error! ; Exiting now!");
			syslog(LOG_ERR, "Path is too long.");
			exit(1);
		} else {
			strcpy(abspath, path);
			strcat(abspath, "/");
			strcat(abspath, entries[i]->d_name);
		}

		// Get the attributes of the file entry
		if (stat(abspath, &fattr) < 0) {
			kill_clients(remove_client_pipes[1], "Unrecoverable server error! ; Exiting now!");
			syslog(LOG_ERR, "Cannot get stats on file: %s", entries[i]->d_name);
			exit(1);
		}

		// Make sure just the file name is not too long
		if (strlen(entries[i]->d_name) > MAX_FILENAME) {
			syslog(LOG_ERR, "Filename is too long to be saved.");
			exit(1);
		}

		// Copy the file entry name into direntry representation
		strcpy(list_entry->filename, entries[i]->d_name);
		list_entry->attrs = fattr;

		// Add the list entry now
		add_direntry(list, list_entry);

		free(entries[i]);
		i++;
	}

	free(entries);

	return 0;
}
Example #13
    INC_STAT(size_each_type[mt], alloc_size);

#ifdef DEBUG
    //memset(alloc_ptr, 0x33, size);
#endif

    return alloc_ptr;
}

char *
mempool_strdup(struct mempool *p,
               const char *str
               MT_ARG)
{
    int len = strlen(str);
    char *mem = (char*)mempool_alloc(p, len+1, MT_OR_ZERO);
    memcpy(mem, str, len);
    mem[len] = '\0';
    return mem;
}

void *
_mempool_copy(struct mempool *p,
              void *data,
              int len
              MT_ARG)
{
    void *mem = mempool_alloc(p, len, MT_OR_ZERO);
    memcpy(mem, data, len);
    return mem;
}
Example #14
static inline struct metapage *alloc_metapage(int no_wait)
{
	return mempool_alloc(metapage_mempool, no_wait ? GFP_ATOMIC : GFP_NOFS);
}
Example #15
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
Example #16
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
	void *buf = NULL;
	int index;

	index = 0;
	if (pool_type == POOL_TYPE_COPY) {
		if (driver->diagpool) {
			mutex_lock(&driver->diagmem_mutex);
			if (driver->count < driver->poolsize) {
				atomic_add(1, (atomic_t *)&driver->count);
				buf = mempool_alloc(driver->diagpool,
								 GFP_ATOMIC);
			}
			mutex_unlock(&driver->diagmem_mutex);
		}
	} else if (pool_type == POOL_TYPE_HDLC) {
		if (driver->diag_hdlc_pool) {
			if (driver->count_hdlc_pool < driver->poolsize_hdlc) {
				atomic_add(1,
					 (atomic_t *)&driver->count_hdlc_pool);
				buf = mempool_alloc(driver->diag_hdlc_pool,
								 GFP_ATOMIC);
			}
		}
	} else if (pool_type == POOL_TYPE_USER) {
		if (driver->diag_user_pool) {
			if (driver->count_user_pool < driver->poolsize_user) {
				atomic_add(1,
					(atomic_t *)&driver->count_user_pool);
				buf = mempool_alloc(driver->diag_user_pool,
					GFP_ATOMIC);
			}
		}
	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
		if (driver->diag_write_struct_pool) {
			if (driver->count_write_struct_pool <
					 driver->poolsize_write_struct) {
				atomic_add(1,
				 (atomic_t *)&driver->count_write_struct_pool);
				buf = mempool_alloc(
				driver->diag_write_struct_pool, GFP_ATOMIC);
			}
		}
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	} else if (pool_type == POOL_TYPE_HSIC ||
				pool_type == POOL_TYPE_HSIC_2) {
		index = pool_type - POOL_TYPE_HSIC;
		if (diag_hsic[index].diag_hsic_pool) {
			if (diag_hsic[index].count_hsic_pool <
					diag_hsic[index].poolsize_hsic) {
				atomic_add(1, (atomic_t *)
					&diag_hsic[index].count_hsic_pool);
				buf = mempool_alloc(
					diag_hsic[index].diag_hsic_pool,
					GFP_ATOMIC);
			}
		}
	} else if (pool_type == POOL_TYPE_HSIC_WRITE ||
					pool_type == POOL_TYPE_HSIC_2_WRITE) {
		index = pool_type - POOL_TYPE_HSIC_WRITE;
		if (diag_hsic[index].diag_hsic_write_pool) {
			if (diag_hsic[index].count_hsic_write_pool <
				diag_hsic[index].poolsize_hsic_write) {
				atomic_add(1, (atomic_t *)
					&diag_hsic[index].
					count_hsic_write_pool);
				buf = mempool_alloc(
					diag_hsic[index].diag_hsic_write_pool,
					GFP_ATOMIC);
			}
		}
#endif
	}
	return buf;
}
Example #17
void *osHeapAlloc(u_int size)
{
    return mempool_alloc(&systemHeapMemory, (unsigned)size);
}
Example #18
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
						gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}
Example #19
static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
    return mempool_alloc(md->tio_pool, GFP_NOIO);
}
Example #20
static struct request *alloc_old_clone_request(struct mapped_device *md,
					       gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}
Example #21
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_pgs_per_blk)) < nr_pgs_per_blk) {

		/* Lock laddr */
		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}
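
The loop above is driven entirely by the block's invalid_pages bitmap: every clear bit is a page that still holds live data and must be read and rewritten before the erase. Here is a minimal user-space sketch of that scan, with a toy 16-page bitmap and an illustrative find_first_zero helper standing in for the kernel's find_first_zero_bit:

#include <stdio.h>

#define PGS_PER_BLK 16

/* return index of first clear bit, or nbits if all bits are set */
static int find_first_zero(unsigned bits, int nbits)
{
	for (int i = 0; i < nbits; i++)
		if (!(bits & (1u << i)))
			return i;
	return nbits;
}

int main(void)
{
	unsigned invalid = 0xFFF5;	/* pages 1 and 3 still hold live data */
	int slot;

	while ((slot = find_first_zero(invalid, PGS_PER_BLK)) < PGS_PER_BLK) {
		printf("migrating live page %d\n", slot);
		invalid |= 1u << slot;	/* mark the migrated page invalid */
	}
	return 0;	/* bitmap is now full, so the block can be erased */
}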

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;

	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto done;

	nvm_erase_blk(dev, rblk->parent);
	rrpc_put_blk(rrpc, rblk);
done:
	mempool_free(gcb, rrpc->gcb_pool);
}

/* the block with highest number of invalid pages, will be in the beginning
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&lun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&lun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
Example #22
/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is ok since we use operations which would be safe even without
   locking, given that the `prune_cache' function never runs.  Using
   the readlock reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
	   bool first, struct database_dyn *table,
	   uid_t owner, bool prune_wakeup)
{
  if (__glibc_unlikely (debug_level >= 2))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
	str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
			 key, buf, sizeof (buf));
      else
	str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
	       str, serv2str[type], dbnames[table - dbs],
	       first ? _(" (first)") : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry), 0);
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      /* If necessary mark the entry as unusable so that lookups will
	 not use it.  */
      if (first)
	packet->usable = false;

      return -1;
    }

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;
  assert ((newp->packet & BLOCK_ALIGN_M1) == 0);

  /* Put the new entry in the first position.  */
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_rel (&table->head->array[hash],
					       (ref_t) ((char *) newp
							- table->data),
					       (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value, this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  if (table->persistent)
    // XXX async OK?
    msync ((void *) table->head,
	   (char *) &table->head->array[hash] - (char *) table->head
	   + sizeof (ref_t), MS_ASYNC);

  /* We do not have to worry about the pruning thread if we are
     re-adding the data since this is done by the pruning thread.  We
     also do not have to do anything in case this is not the first
     time the data is entered since different data heads all have the
     same timeout.  */
  if (first && prune_wakeup)
    {
      /* Perhaps the prune thread for the table is not running in a long
	 time.  Wake it if necessary.  */
      pthread_mutex_lock (&table->prune_lock);
      time_t next_wakeup = table->wakeup_time;
      bool do_wakeup = false;
      if (next_wakeup > packet->timeout + CACHE_PRUNE_INTERVAL)
	{
	  table->wakeup_time = packet->timeout;
	  do_wakeup = true;
	}
      pthread_mutex_unlock (&table->prune_lock);
      if (do_wakeup)
	pthread_cond_signal (&table->prune_cond);
    }

  return 0;
}
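
The do/while around atomic_compare_and_exchange_bool_rel is the heart of the comment at the top: the new entry can be pushed onto the hash chain under only a read lock because a failed compare-and-swap simply reloads the head and retries. A minimal user-space sketch of the same lock-free prepend using C11 atomics; the node and bucket names are illustrative, not from nscd:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

static _Atomic(struct node *) bucket;	/* head of one hash chain */

static void bucket_prepend(struct node *newp)
{
	/* Publish the new head only if no other thread raced us;
	 * on failure, old is reloaded with the current head and we retry. */
	struct node *old = atomic_load(&bucket);
	do
		newp->next = old;
	while (!atomic_compare_exchange_weak(&bucket, &old, newp));
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	n->key = 42;
	bucket_prepend(n);
	printf("head key: %d\n", atomic_load(&bucket)->key);
	return 0;
}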
Example #23
static int disc_read(struct triton_md_handler_t *h)
{
	struct disc_net *net = container_of(h, typeof(*net), hnd);
	uint8_t *pack = NULL;
	struct ethhdr *ethhdr;
	struct pppoe_hdr *hdr;
	int n;
	struct sockaddr_ll src;
	socklen_t slen = sizeof(src);

	while (1) {
		if (!pack)
			pack = mempool_alloc(pkt_pool);

		n = net->net->recvfrom(h->fd, pack + 4, ETHER_MAX_LEN, MSG_DONTWAIT, (struct sockaddr *)&src, &slen);

		if (n < 0) {
			if (errno == EAGAIN)
				break;

			if (errno == ENETDOWN) {
				notify_down(net, src.sll_ifindex);
				continue;
			}

			log_error("pppoe: disc: read: %s\n", strerror(errno));
			continue;
		}

		ethhdr = (struct ethhdr *)(pack + 4);
		hdr = (struct pppoe_hdr *)(pack + 4 + ETH_HLEN);

		if (n < ETH_HLEN + sizeof(*hdr)) {
			if (conf_verbose)
				log_warn("pppoe: short packet received (%i)\n", n);
			continue;
		}

		if (mac_filter_check(ethhdr->h_source)) {
			__sync_add_and_fetch(&stat_filtered, 1);
			continue;
		}

		//if (memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN) && memcmp(ethhdr->h_dest, serv->hwaddr, ETH_ALEN))
		//	continue;

		if (!memcmp(ethhdr->h_source, bc_addr, ETH_ALEN)) {
			if (conf_verbose)
				log_warn("pppoe: discarding packet (host address is broadcast)\n");
			continue;
		}

		if ((ethhdr->h_source[0] & 1) != 0) {
			if (conf_verbose)
				log_warn("pppoe: discarding packet (host address is not unicast)\n");
			continue;
		}

		if (n < ETH_HLEN + sizeof(*hdr) + ntohs(hdr->length)) {
			if (conf_verbose)
				log_warn("pppoe: short packet received\n");
			continue;
		}

		if (hdr->ver != 1) {
			if (conf_verbose)
				log_warn("pppoe: discarding packet (unsupported version %i)\n", hdr->ver);
			continue;
		}

		if (hdr->type != 1) {
			if (conf_verbose)
				log_warn("pppoe: discarding packet (unsupported type %i)\n", hdr->type);
		}

		if (forward(net, src.sll_ifindex, pack, n))
			pack = NULL;
	}

	mempool_free(pack);

	return 0;
}
Example #24
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}
Example #25
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
 *   able to allocate a bio. This is due to the mempool guarantees. To make this
 *   work, callers must never allocate more than 1 bio at a time from this pool.
 *   Callers that need to allocate more than 1 bio must always submit the
 *   previously allocated bio for IO before attempting to allocate a new one.
 *   Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_WAIT; if that fails, we punt those bios we
		 * would be blocking to the rescuer workqueue before we retry
		 * with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_WAIT;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= 1 << BIO_OWNS_VEC;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
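
The key move in bio_alloc_bioset is the two-phase allocation: strip the blocking flag for the first attempt, and only retry with the original flags after punting already-built bios to the rescuer so the mempool reserve can refill. A minimal user-space sketch of that shape, where flush_pending() and the single-slot reserve stand in (purely illustratively) for punt_bios_to_rescuer and the mempool:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical single-slot "reserve" pool: the blocking path succeeds
 * once pending work has been flushed. */
static void *reserve_slot;
static bool reserve_busy;

static void flush_pending(void)		/* stands in for punt_bios_to_rescuer */
{
	reserve_busy = false;		/* pretend queued work freed the slot */
}

static void *pool_alloc(bool may_wait)
{
	if (reserve_busy) {
		if (!may_wait)
			return NULL;	/* opportunistic path: fail fast */
		/* a real pool would sleep here until an object is freed;
		 * in this sketch flush_pending() has already freed it */
	}
	reserve_busy = true;
	return reserve_slot;
}

static void *alloc_two_phase(void)
{
	void *p = pool_alloc(false);	/* first try without waiting */
	if (!p) {
		flush_pending();	/* make forward progress ... */
		p = pool_alloc(true);	/* ... then block on the reserve */
	}
	return p;
}

int main(void)
{
	reserve_slot = malloc(64);
	reserve_busy = true;		/* simulate pool exhaustion */
	printf("got %p\n", alloc_two_phase());
	return 0;
}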
Example #26
File: recv.c Project: weizn11/C
int recv_data_from_client(IO_OPERATION_DATA *pIOData)
{
    int recvSize;
    char *recvPointer=NULL;
    MAIN_PACKET *pPacket=NULL;
    P2P_CONN_INFO *pConnInfo=NULL;
    P2P_CREATE_CONN_THREAD_PARA *pP2PCreateConnThreadPara=NULL;
    pthread_t threadID;

    memset(&threadID,NULL,sizeof(pthread_t));

    if(pIOData->recvBuffer==NULL)
    {
        pIOData->recvSize=0;
        pIOData->recvBuffer=(char *)mempool_alloc(Mempool_RecvBuffer);
        if(pIOData->recvBuffer==NULL) return 0;
        memset(pIOData->recvBuffer,NULL,sizeof(MAIN_PACKET));
        recvPointer=pIOData->recvBuffer;
    }
    recvPointer=pIOData->recvBuffer+pIOData->recvSize;

    recvSize=recv(pIOData->Socket,recvPointer,sizeof(MAIN_PACKET)-pIOData->recvSize,0);
    if(recvSize<=0)
    {
        // socket closed or recv error
        printf("recv failed! Error code: %d\n",errno);
        return -2;
    }
    if(recvSize+pIOData->recvSize>sizeof(MAIN_PACKET))
    {
        mempool_free(Mempool_RecvBuffer,pIOData->recvBuffer);
        pIOData->recvBuffer=NULL;
        pIOData->recvSize=0;
        return -3;
    }
    pIOData->recvSize+=recvSize;

    if(pIOData->recvSize==sizeof(MAIN_PACKET))
    {
        pPacket=(MAIN_PACKET *)pIOData->recvBuffer;
        switch(pPacket->proto)
        {
        case 1:
            printf("recv p2p conn request\n");
            pConnInfo=(P2P_CONN_INFO *)pPacket->data;
            pP2PCreateConnThreadPara=(P2P_CREATE_CONN_THREAD_PARA *)malloc(sizeof(P2P_CREATE_CONN_THREAD_PARA));
            if(pP2PCreateConnThreadPara==NULL) break;
            memset(pP2PCreateConnThreadPara,0,sizeof(P2P_CREATE_CONN_THREAD_PARA));
            pP2PCreateConnThreadPara->connInfo=*pConnInfo;
            pP2PCreateConnThreadPara->index=pIOData->index;
            pP2PCreateConnThreadPara->pIONode=pIOData->pIONode;
            if(pthread_create(&threadID,NULL,p2p_create_conn_thread,(void *)pP2PCreateConnThreadPara)!=0)
            {
                //create thread failed.
            }
            break;
        default:
            break;
        }
        mempool_free(Mempool_RecvBuffer,pIOData->recvBuffer);
        pIOData->recvBuffer=NULL;
        pIOData->recvSize=0;
    }

    return recvSize;
}
Example #27
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}
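
The switch at the top of bvec_alloc is just a size-class lookup: round the requested vector count up to the nearest slab size and use that pool's index. A minimal sketch of the same mapping, assuming (as an illustration) that BIO_MAX_PAGES is 256; the function name is not from the kernel:

#include <stdio.h>

/* slab sizes mirroring the cases above: 1, 2...4, 5...16, 17...64,
 * 65...128, 129...256 */
static const int class_size[] = { 1, 4, 16, 64, 128, 256 };

static int size_class(int nr)
{
	for (unsigned i = 0; i < sizeof(class_size)/sizeof(class_size[0]); i++)
		if (nr <= class_size[i])
			return i;	/* smallest class that fits */
	return -1;			/* too large for any pool */
}

int main(void)
{
	printf("17 vecs -> class %d\n", size_class(17));	/* 3: the 64-entry slab */
	return 0;
}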
Example #28
int main(void)
{
    MemPool test;
    MemBucket *bucks[SIZE];
    MemBucket *bucket = NULL;
    int i;

    //char *stuffs[4] = { "eenie", "meenie", "minie", "moe" };
    char *stuffs2[36] =
    {   "1eenie", "2meenie", "3minie", " 4moe",
        "1xxxxx", "2yyyyyy", "3zzzzz", " 4qqqq",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe",
        "1eenie", "2meenie", "3minie", " 4moe"
    };

    if(mempool_init(&test, 36, 256))
    {
        printf("error in mempool initialization\n");
    }

    for(i = 0; i < 36; i++)
    {
        if((bucks[i] = mempool_alloc(&test)) == NULL)
        {
            printf("error in mempool_alloc: i=%d\n", i);
            continue;
        }

        bucket = bucks[i];

        bucket->data = strncpy(bucket->data, stuffs2[i], 256);
        printf("bucket->key: %p\n", bucket->key);
        printf("bucket->data: %s\n", (char *) bucket->data);
    }

    for(i = 0; i < 2; i++)
    {
        mempool_free(&test, bucks[i]);
        bucks[i] = NULL;
    }

    for(i = 0; i < 14; i++)
    {
        if((bucks[i] = mempool_alloc(&test)) == NULL)
        {
            printf("error in mempool_alloc: i=%d\n", i);
            continue;
        }

        bucket = bucks[i];

        bucket->data = strncpy(bucket->data, stuffs2[i], 256);
        printf("bucket->key: %p\n", bucket->key);
        printf("bucket->data: %s\n", (char *) bucket->data);
    }

    printf("free: %u, used: %u\n", test.free_list.size, test.used_list.size);


    return 0;
}