static int chap_server_compute_md5(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	char *endptr;
	unsigned long id;
	unsigned char id_as_uchar;
	unsigned char digest[MD5_SIGNATURE_SIZE];
	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
	unsigned char identifier[10], *challenge = NULL;
	unsigned char *challenge_binhex = NULL;
	unsigned char client_digest[MD5_SIGNATURE_SIZE];
	unsigned char server_digest[MD5_SIGNATURE_SIZE];
	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
	size_t compare_len;
	struct iscsi_chap *chap = conn->auth_protocol;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int auth_ret = -1, ret, challenge_len;

	memset(identifier, 0, 10);
	memset(chap_n, 0, MAX_CHAP_N_SIZE);
	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
	memset(digest, 0, MD5_SIGNATURE_SIZE);
	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
	memset(server_digest, 0, MD5_SIGNATURE_SIZE);

	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge) {
		pr_err("Unable to allocate challenge buffer\n");
		goto out;
	}

	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge_binhex) {
		pr_err("Unable to allocate challenge_binhex buffer\n");
		goto out;
	}
	/*
	 * Extract CHAP_N.
	 */
	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
				&type) < 0) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	if (type == HEX) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}

	/* Include the terminating NULL in the compare */
	compare_len = strlen(auth->userid) + 1;
	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
		pr_err("CHAP_N values do not match!\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
	/*
	 * Extract CHAP_R.
	 */
	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
				&type) < 0) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	if (type != HEX) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}

	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, &chap->id, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, &auth->password, strlen(auth->password));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for password\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, server_digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for server digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);

	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
	pr_debug("[server] MD5 Server Digest: %s\n", response);

	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
		pr_debug("[server] MD5 Digests do not match!\n\n");
		goto out;
	} else
		pr_debug("[server] MD5 Digests match, CHAP connetication"
				" successful.\n\n");
	/*
	 * One way authentication has succeeded, return now if mutual
	 * authentication is not enabled.
	 */
	if (!auth->authenticate_target) {
		kfree(challenge);
		kfree(challenge_binhex);
		return 0;
	}
	/*
	 * Get CHAP_I.
	 */
	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
		pr_err("Could not find CHAP_I.\n");
		goto out;
	}

	if (type == HEX)
		id = simple_strtoul(&identifier[2], &endptr, 0);
	else
		id = simple_strtoul(identifier, &endptr, 0);
	if (id > 255) {
		pr_err("chap identifier: %lu greater than 255\n", id);
		goto out;
	}
	/*
	 * RFC 1994 says Identifier is no more than one octet (8 bits).
	 */
	pr_debug("[server] Got CHAP_I=%lu\n", id);
	/*
	 * Get CHAP_C.
	 */
	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
			challenge, &type) < 0) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}

	if (type != HEX) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_C=%s\n", challenge);
	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
				strlen(challenge));
	if (!challenge_len) {
		pr_err("Unable to convert incoming challenge\n");
		goto out;
	}
	/*
	 * During mutual authentication, the CHAP_C generated by the
	 * initiator must not match the original CHAP_C generated by
	 * the target.
	 */
	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
		pr_err("initiator CHAP_C matches target CHAP_C, failing"
		       " login attempt\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R for mutual authentication.
	 */
	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* To handle both endiannesses */
	id_as_uchar = id;
	sg_init_one(&sg, &id_as_uchar, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, auth->password_mutual,
				strlen(auth->password_mutual));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for"
				" password_mutual\n");
		crypto_free_hash(tfm);
		goto out;
	}
	/*
	 * Convert received challenge to binary hex.
	 */
	sg_init_one(&sg, challenge_binhex, challenge_len);
	ret = crypto_hash_update(&desc, &sg, challenge_len);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for ma challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for ma digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);
	/*
	 * Generate CHAP_N and CHAP_R.
	 */
	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
	/*
	 * Convert response from binary hex to ascii hex.
	 */
	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
			response);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
	auth_ret = 0;
out:
	kfree(challenge);
	kfree(challenge_binhex);
	return auth_ret;
}
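
For reference, every hashing sequence in this collection follows the same pattern as the function above. Below is a minimal sketch of that pattern, assuming a kernel old enough to still export the legacy crypto_hash API (crypto_alloc_hash(), struct hash_desc) and the usual <linux/crypto.h>, <linux/scatterlist.h> and <linux/err.h> headers; the function name and arguments are illustrative, not part of the original driver.

/*
 * Illustrative only: one-buffer MD5 digest with the legacy crypto_hash API.
 * Newer kernels replace this interface with crypto_shash.
 */
static int example_md5_digest(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, len);

	/* init -> update -> final, as chap_server_compute_md5() does */
	ret = crypto_hash_init(&desc);
	if (!ret)
		ret = crypto_hash_update(&desc, &sg, len);
	if (!ret)
		ret = crypto_hash_final(&desc, out);

	crypto_free_hash(tfm);
	return ret;
}
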
Example 2
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 * @conn: iscsi connection
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 * @conn: iscsi connection
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	int rc;

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0)
			return -EAGAIN;
		if (rc < 0)
			return rc;
	}

	return 0;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
		goto free_conn;

	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
		goto free_tx_tfm;
	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	if (tcp_sw_conn->tx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
	if (tcp_sw_conn->rx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	spin_lock_bh(&session->lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	spin_unlock_bh(&session->lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sockaddr_in6 addr;
	int rc, len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
			spin_unlock_bh(&conn->session->lock);
			return -ENOTCONN;
		}
		rc = kernel_getpeername(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&conn->session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	int rc, len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;

		tcp_sw_conn = tcp_conn->dd_data;
		if (!tcp_sw_conn->sock) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}

		rc = kernel_getsockname(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example 3
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
Example 4
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_hash *tfm;

	if (!x->aalg)
		goto error;

	/* null auth can use a zero length key */
	if (x->aalg->alg_key_len > 512)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	ahp->key = x->aalg->alg_key;
	ahp->key_len = (x->aalg->alg_key_len+7)/8;
	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;

	ahp->tfm = tfm;
	if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_hash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_hash_digestsize(tfm)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
	if (!ahp->work_icv)
		goto error;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		kfree(ahp->work_icv);
		crypto_free_hash(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;
}
static u32 qfprom_secdat_read(void)
{
  struct file *fp;
  int cnt=0;
  u32 ret = RET_OK;
  mm_segment_t old_fs=get_fs();

#ifdef CONFIG_LGE_QFPROM_SECHASH
  struct crypto_hash *tfm = NULL;
  struct hash_desc desc;
  struct scatterlist sg[FUSEPROV_SEC_STRUCTURE_MAX_NUM];
  char result[32]={0};
  unsigned char temp_buf[4]={0};
  unsigned char config_hash[32]={0};
  int i=0;
  int temp=0;
  int sg_idx=0;
  int segment_size=0;
#else
  printk(KERN_ERR "[QFUSE]%s : CONFIG_LGE_QFPROM_SECHASH is not exist\n", __func__);
  return RET_ERR;
#endif

  printk(KERN_INFO "[QFUSE]%s start\n", __func__);

  mutex_lock(&secdat_lock);
  if(secdat.pentry != NULL){
    printk(KERN_INFO "[QFUSE]%s : secdata file already loaded \n", __func__);
    mutex_unlock(&secdat_lock);
    return RET_OK;
  }


  set_fs(KERNEL_DS);
  fp=filp_open(SEC_PATH, O_RDONLY, S_IRUSR);

  if(IS_ERR(fp)){
    int temp_err=0;
    temp_err =PTR_ERR(fp);
    printk(KERN_ERR "[QFUSE]%s : secdata file open error : %d\n", __func__, temp_err);
    ret = RET_ERR;
    goto err;
  }

  tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
  if (IS_ERR(tfm)){
    printk(KERN_ERR "[QFUSE]%s :hash alloc error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }
  desc.tfm=tfm;
  desc.flags=0;

  if (crypto_hash_init(&desc) != 0){
    printk(KERN_ERR "[QFUSE]%s : hash init error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }

  sg_init_table(sg, ARRAY_SIZE(sg));

  fp->f_pos = 0;
  cnt = vfs_read(fp,(char*)&secdat.hdr, sizeof(secdat.hdr),&fp->f_pos);
  if(cnt != sizeof(secdat.hdr)){
    printk(KERN_ERR "[QFUSE]%s : hdr read error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }
  sg_set_buf(&sg[sg_idx++], (const char*)&secdat.hdr, sizeof(fuseprov_secdat_hdr_type));

  if(secdat.hdr.revision >= 2 && secdat.hdr.segment_number !=0)
  {
    for(i=0; i < secdat.hdr.segment_number ; i++)
    {
      cnt = vfs_read(fp, (char*)&secdat.segment, sizeof(secdat.segment),&fp->f_pos);
      if(cnt != sizeof(secdat.segment)){
        printk(KERN_ERR "[QFUSE]%s : segment read error\n", __func__);
        ret = RET_ERR;
        goto err_mem;
      }
      sg_set_buf(&sg[sg_idx++], (const char*)&secdat.segment, sizeof(fuseprov_secdat_hdr_segment_type));
    }
    segment_size = cnt;
  }

  cnt = vfs_read(fp, (char*)&secdat.list_hdr, sizeof(secdat.list_hdr),&fp->f_pos);
  if(cnt != sizeof(secdat.list_hdr)){
    printk(KERN_ERR "[QFUSE]%s : list_hdr read error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }
  sg_set_buf(&sg[sg_idx++], (const char*)&secdat.list_hdr, sizeof(fuseprov_qfuse_list_hdr_type));

  if(secdat.list_hdr.size > 0 && secdat.list_hdr.fuse_count > 0 && secdat.list_hdr.fuse_count <= FUSEPROV_MAX_FUSE_COUNT){
    secdat.pentry = kmalloc(secdat.list_hdr.size, GFP_KERNEL);
    if(secdat.pentry != NULL){
      memset(secdat.pentry, 0, secdat.list_hdr.size);
      cnt = vfs_read(fp, (char *)secdat.pentry, secdat.list_hdr.size,&fp->f_pos);
      if(cnt != secdat.list_hdr.size){
        printk(KERN_ERR "[QFUSE]%s : fuseprov_pentry read error\n", __func__);
        kfree(secdat.pentry);
        ret = RET_ERR;
        goto err_mem;
      }
      sg_set_buf(&sg[sg_idx++], (const char*)secdat.pentry, secdat.list_hdr.size);
    }else{
       printk(KERN_ERR "[QFUSE]%s : kmalloc pentry error\n", __func__);
       ret = RET_ERR;
       goto err_mem;
    }
  }else{
    printk(KERN_ERR "[QFUSE]%s : invalid header", __func__);
    printk(KERN_ERR "[QFUSE]hdr.magic1      : 0x%08X\n", secdat.hdr.magic1);
    printk(KERN_ERR "[QFUSE]   .magic2      : 0x%08X\n", secdat.hdr.magic2);
    printk(KERN_ERR "[QFUSE]   .revision    : 0x%08X\n", secdat.hdr.revision);
    printk(KERN_ERR "[QFUSE]   .size        : 0x%08X\n", secdat.hdr.size);
    printk(KERN_ERR "[QFUSE]   .segment_num : 0x%08X\n", secdat.hdr.segment_number);

    if(secdat.hdr.revision >= 2 && secdat.hdr.segment_number !=0){
      printk(KERN_ERR "[QFUSE]segment.offset    : 0x%08X\n", secdat.segment.offset);
      printk(KERN_ERR "[QFUSE]       .type      : 0x%08X\n", secdat.segment.type);
      printk(KERN_ERR "[QFUSE]       .attribute : 0x%08X\n", secdat.segment.attribute);
    }
    printk(KERN_ERR "[QFUSE]list_hdr.revision : 0x%08X\n", secdat.list_hdr.revision);
    printk(KERN_ERR "[QFUSE]        .size     : 0x%08X\n", secdat.list_hdr.size);
    printk(KERN_ERR "[QFUSE]        .fuse_cnt : 0x%08X, %d\n", secdat.list_hdr.fuse_count, secdat.list_hdr.fuse_count);

    ret = RET_ERR;
    goto err_mem;
  }

  cnt = vfs_read(fp,(char*)&secdat.footer, sizeof(secdat.footer),&fp->f_pos);
  if(cnt != sizeof(secdat.footer)){
    printk(KERN_ERR "[QFUSE]%s : fuseprov_footer read error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }
  sg_set_buf(&sg[sg_idx], (const char*)&secdat.footer, sizeof(fuseprov_secdat_footer_type));

  if(crypto_hash_digest(&desc, sg, sizeof(fuseprov_secdat_hdr_type)+segment_size+secdat.hdr.size, result) != 0){
    printk(KERN_ERR "[QFUSE]%s : hash_digest error\n", __func__);
    ret = RET_ERR;
    goto err_mem;
  }

  for(i=0;i<64;i=i+2){
    memset(temp_buf, 0, 4);
    memcpy(temp_buf, CONFIG_LGE_QFPROM_SECHASH+i, 2);
    sscanf(temp_buf, "%x", &temp);
    config_hash[i/2] = temp;
  }

  if(memcmp(result, config_hash, sizeof(result))!=0){
    printk(KERN_ERR "[QFUSE]%s : sec hash different\n", __func__);
    printk(KERN_ERR "[QFUSE]partition hash : %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
          result[0],result[1],result[2],result[3],result[4],result[5],result[6],result[7],
          result[8],result[9],result[10],result[11],result[12],result[13],result[14],result[15],
          result[16],result[17],result[18],result[19],result[20],result[21],result[22],result[23],
          result[24],result[25],result[26],result[27],result[28],result[29],result[30],result[31]);
    printk(KERN_ERR "[QFUSE]config hash : %s\n",CONFIG_LGE_QFPROM_SECHASH);
    ret = RET_ERR;
    goto err_mem;
  }

err_mem:
  if(tfm)
    crypto_free_hash(tfm);

  if(ret == RET_ERR && secdat.pentry){
    kfree(secdat.pentry);
    secdat.pentry=NULL;
  }
  if(fp)
    filp_close(fp, NULL);

err:
  set_fs(old_fs);
  mutex_unlock(&secdat_lock);
  printk(KERN_INFO "[QFUSE]%s end\n", __func__);
  return ret;
}
Example 6
File: sha2.c  Project: aeppert/km
int plaintext_to_sha1(unsigned char *hash,
		const char *plaintext, unsigned int len)
{
	struct page *page = NULL;
	char *data = NULL;
	int offset = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)

	struct crypto_hash *tfm = NULL;
	struct scatterlist sg = {0};
	struct hash_desc desc = {0};

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		return -ENOMEM;
	}
	data = (char *)page_address(page);

	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		__free_page(page);
		return -EINVAL;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	crypto_hash_init(&desc);
	sg_init_one(&sg, (void *)data, PAGE_SIZE);

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		/* Check if the data is page size or part of page */
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_hash_update(&desc, &sg, PAGE_SIZE);
		} else {
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (void *)data, (len - offset));
			crypto_hash_update(&desc, &sg, (len - offset));
		}
	}

	crypto_hash_final(&desc, hash);

	crypto_free_hash(tfm);

#else

	struct crypto_tfm *tfm;
	struct scatterlist sg;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		return -ENOMEM;
	}
	data = (char *)page_address(page);

	tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL) {
		__free_page(page);
		return -EINVAL;
	}

	crypto_digest_init(tfm);
	sg_init_one(&sg, (u8 *)data, PAGE_SIZE);

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_digest_update(tfm, &sg, 1);
		} else {
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (u8 *)data, (len - offset));
			crypto_digest_update(tfm, &sg, 1);
		}
	}

	crypto_digest_final(tfm, hash);

	crypto_free_tfm(tfm);

#endif

	__free_page(page);
	return 0;

}
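
A hypothetical caller of plaintext_to_sha1(), shown only as a usage sketch: example_hash_password(), its password argument, and the hex dump are assumptions, not part of the original sha2.c.

/* Illustrative caller only: hash a NUL-terminated string and dump the
 * 20-byte SHA-1 digest (SHA1_DIGEST_SIZE comes from <crypto/sha.h>). */
static int example_hash_password(const char *password)
{
	unsigned char digest[SHA1_DIGEST_SIZE];
	int err;

	err = plaintext_to_sha1(digest, password, strlen(password));
	if (err)
		return err;

	print_hex_dump_bytes("sha1: ", DUMP_PREFIX_NONE, digest,
			     sizeof(digest));
	return 0;
}
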
Example 7
int calculate_integrity(struct file *filp,
			char *ibuf,
			int ilen)
{
	int r = -1, ret = -1;
	ssize_t vfs_read_retval = 0;
	loff_t file_offset = 0;
	mm_segment_t oldfs = get_fs();

	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	struct scatterlist sg;
	struct crypto_hash *tfm = NULL;
	struct hash_desc desc;
	char *algo = kmalloc(1024, GFP_KERNEL);
	if (!algo) {
		ret = -ENOMEM;
		goto out;
	}
	__initialize_with_null(algo, 1024);

#ifdef EXTRA_CREDIT
	ret = vfs_getxattr(filp->f_path.dentry,
				 INT_TYPE_XATTR,
				 algo,
				 1024);
	if (ret <= 0)
		__initialize_with_null(algo, 1024);

#endif
	if (*algo == '\0')
		strcpy(algo, DEFAULT_ALGO);


	if (!buf)
		goto out;
	__initialize_with_null(ibuf, ilen);

	if (!filp->f_op->read) {
		r = -2;
		goto out;
	}
	filp->f_pos = 0;
	set_fs(KERNEL_DS);

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		r = -EINVAL;
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	if (crypto_hash_digestsize(tfm) > ilen) {
		r = -EINVAL;
		goto out;
	}
	crypto_hash_setkey(tfm, key, strlen(key));
	ret = crypto_hash_init(&desc);
	if (ret) {
		r = ret;
		goto out;
	}
	sg_init_table(&sg, 1);
	file_offset = 0;
	do {
		vfs_read_retval = vfs_read(filp, buf, PAGE_SIZE, &file_offset);

		if (vfs_read_retval < 0) {
			ret = vfs_read_retval;
			goto out;
		}
		sg_set_buf(&sg, (u8 *)buf, vfs_read_retval);
		ret = crypto_hash_update(&desc, &sg, sg.length);

		if (ret) {
			r = ret;
			goto out;
		}
		if (vfs_read_retval < ksize(buf))
			break;
	} while (1);
	ret = crypto_hash_final(&desc, ibuf);
	if (ret) {
		r = ret;
		goto out;
	}
out:
	kfree(buf);
	kfree(algo);
	if (!IS_ERR(tfm))
		crypto_free_hash(tfm);
	set_fs(oldfs);
	return ret;
}

void do_integrity_check(void)
{
	u8 *rbuf = 0;
	u32 len;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct hash_desc desc;
	struct scatterlist sg;
	u8 *key = "12345678";
    int i, step_len = PAGE_SIZE, err;
    u8 *pAllocBuf = 0;

    printk(KERN_INFO "FIPS: integrity start\n");

	if (unlikely(!need_integrity_check || in_fips_err())) {
        printk(KERN_INFO "FIPS: integrity check not needed\n");
		return;
	}
	rbuf = (u8*)phys_to_virt((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS);

	if (*((u32 *) &rbuf[36]) != 0x016F2818) {
		printk(KERN_ERR "FIPS: invalid zImage magic number.");
		set_in_fips_err();
		goto err1;
	}

	if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) {
		printk(KERN_ERR "FIPS: invalid zImage calculated len");
		set_in_fips_err();
		goto err1;
	}

	len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40];

    printk(KERN_INFO "FIPS: integrity actual zImageLen = %d\n", len);
    printk(KERN_INFO "FIPS: do kernel integrity check address: %lx \n", (unsigned long)rbuf);

	desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0);

	if (IS_ERR(desc.tfm)) {
		printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n",
		       PTR_ERR(desc.tfm));
		set_in_fips_err();
		goto err1;
	}
#if FIPS_FUNC_TEST == 2
    rbuf[1024] = rbuf[1024] + 1;
#endif
	crypto_hash_setkey(desc.tfm, key, strlen(key));

	pAllocBuf = kmalloc(step_len,GFP_KERNEL);
	if (!pAllocBuf) {
		printk(KERN_INFO "Fail to alloc memory, length %d\n", step_len);
		set_in_fips_err();
		goto err1;
	}

	err = crypto_hash_init(&desc);
	if (err) {
		printk(KERN_INFO "fail at crypto_hash_init\n");
        set_in_fips_err();
		kfree(pAllocBuf);
		goto err1;
	}

	for (i = 0; i < len; i += step_len) 	{

		//last is reached
		if (i + step_len >= len - 1)  {
			memcpy(pAllocBuf, &rbuf[i], len - i);
			sg_init_one(&sg, pAllocBuf, len - i);
			err = crypto_hash_update(&desc, &sg, len - i);
			if (err) {
				printk(KERN_INFO "Fail to crypto_hash_update1\n");
                set_in_fips_err();
				goto err;
			}
			err = crypto_hash_final(&desc, hmac);
			if (err) {
				printk(KERN_INFO "Fail to crypto_hash_final\n");
                set_in_fips_err();
				goto err;
			}
		} else {
		    memcpy(pAllocBuf, &rbuf[i], step_len);
		    sg_init_one(&sg, pAllocBuf, step_len);
		    err = crypto_hash_update(&desc, &sg, step_len);

		    if (err) {
			    printk(KERN_INFO "Fail to crypto_hash_update\n");
                set_in_fips_err();
			    goto err;
		    }
        }
	}
#if FIPS_FUNC_TEST == 2
    rbuf[1024] = rbuf[1024] - 1;
#endif
	if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) {
		printk(KERN_INFO "FIPS: integrity check passed\n");
	} else {
		printk(KERN_ERR "FIPS: integrity check failed. hmac:%lx, buf:%lx.\n",(long) hmac, (long)rbuf[len] );
		set_in_fips_err();
	}

 err:
	kfree(pAllocBuf);
	crypto_free_hash(desc.tfm);
 err1:
	need_integrity_check = false;

/*	if(integrity_mem_reservoir != 0) {
		printk(KERN_NOTICE "FIPS free integrity_mem_reservoir = %ld\n", integrity_mem_reservoir);
		free_bootmem((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS, integrity_mem_reservoir);
	}
*/
}
Example 9
static int verihmac_calc_hash(struct file* file,char* digest)
{
	struct hash_desc desc;
	struct scatterlist sg;

	int rc;
	char *rbuf;

	loff_t i_size, offset = 0;
	
	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf) {
	        printk(VERI_ERROR "no free memory\n");
		rc = -ENOMEM;
		return rc;
	}
	
	desc.tfm = crypto_alloc_hash(VERI_HMAC, 0,CRYPTO_ALG_ASYNC);
	
	if (IS_ERR(desc.tfm)) {
		printk(VERI_ERROR "failed to load %s transform: %ld\n",
			VERI_HMAC, PTR_ERR(desc.tfm));
		rc = PTR_ERR(desc.tfm);
		kfree(rbuf);
		return rc;
	}
	
	desc.flags = 0;
	
       if(crypto_hash_setkey(desc.tfm,password_str,password_len)!=0) { 
	  printk(VERI_ERROR "setting key error  \n");
	  kfree(rbuf);
	  crypto_free_hash(desc.tfm);
	  return -1;
	}
	
	rc=crypto_hash_init(&desc);   
	
	if(rc)
	{
	  printk(VERI_ERROR "hash_init failed \n");
	  crypto_free_hash(desc.tfm);
	  kfree(rbuf);
	  return rc;
	}
	
	i_size = i_size_read(file->f_dentry->d_inode);
	
	while (offset < i_size) {
		int rbuf_len;
		rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
		
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		
		if (rbuf_len == 0)
			break;
		
		offset += rbuf_len;
		
		sg_set_buf(&sg, rbuf, rbuf_len);
		rc = crypto_hash_update(&desc, &sg, rbuf_len);
		
		if (rc)
			break;
	}

	kfree(rbuf);
	if (!rc)
		rc = crypto_hash_final(&desc, digest);
	
	crypto_free_hash(desc.tfm);
	return rc;
}
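
The keyed variant used by verihmac_calc_hash() above reduces to the sketch below: allocate an HMAC transform, set the key, then hash. Here "hmac(sha1)" merely stands in for whatever VERI_HMAC expands to, and the argument names are illustrative; crypto_hash_digest() is the legacy one-shot helper that combines init, update and final.

static int example_hmac_digest(const u8 *key, unsigned int keylen,
			       const void *data, unsigned int len, u8 *out)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int rc;

	/* "hmac(sha1)" is an assumption standing in for VERI_HMAC */
	desc.tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;

	rc = crypto_hash_setkey(desc.tfm, key, keylen);
	if (!rc) {
		sg_init_one(&sg, data, len);
		/* one-shot: init + update + final over a single sg entry */
		rc = crypto_hash_digest(&desc, &sg, len, out);
	}

	crypto_free_hash(desc.tfm);
	return rc;
}
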
int checksum(char *user_key, char *chksum)
{
  char *buf = NULL,*MD5_digest = NULL,*digest = NULL;
  int err,i,ret,len = 0;
  const int KEY_LENGTH = 33;
  struct hash_desc desc;
  struct crypto_hash *tfm;
  struct scatterlist sg;
  len = strlen(user_key);

  buf = kmalloc(len,GFP_KERNEL);
  if(buf == NULL)
    {
      printk("ERROR in memory allocation\n");
      err = -ENOMEM;
      goto bin;
    }
  memset(buf, 0 , len);

  MD5_digest = kmalloc(KEY_LENGTH, GFP_KERNEL);
  if(MD5_digest == NULL)
    {
      printk("ERROR in memory allocation\n");
      err = -ENOMEM;
      goto bin;	
    }
  memset(MD5_digest,0,(2*MD5_SIGNATURE_SIZE)+1);

  digest = kmalloc((MD5_SIGNATURE_SIZE+1) * sizeof(char), GFP_KERNEL);
  if(digest == NULL)
    {
      printk("ERROR in memory allocation\n");
      err = -ENOMEM;
      goto bin;
    }
  memset(digest, 0 , MD5_SIGNATURE_SIZE+1);	

  strncpy(buf,user_key,len);
  tfm = crypto_alloc_hash("md5",0,CRYPTO_ALG_ASYNC);
  if (IS_ERR(tfm))
    {
      printk("Unable to allocate struct srypto_hash\n");
    }

  desc.tfm = tfm;
  desc.flags = 0;

  ret = crypto_hash_init(&desc);
  if(ret<0)
    {
      printk("crypto_hash_init() failed\n");
      crypto_free_hash(tfm);
    }

  sg_init_one(&sg,(void *) buf,len);
  err = crypto_hash_update(&desc,&sg,len);
  if(err<0)
    {
      printk("crypto_hash_update() failed for id\n");
      crypto_free_hash(tfm);
      err = -EINVAL;
      goto bin;
    }


  err = crypto_hash_final(&desc,digest);
  if (err<0)
    {
      printk("crypto_hash_final() failed\n");
      crypto_free_hash(tfm);
      err = -EINVAL;
      goto bin;
    }
  crypto_free_hash(tfm);
  for(i = 0; i<16; i++)
    {
      sprintf((MD5_digest + i*2),"%02x",digest[i] & 0xFF);
    }	
  memcpy(chksum,MD5_digest,KEY_LENGTH);
  /*	len1 = strlen(chksum);*/

 bin:
  if(buf)
    kfree(buf);
  if(MD5_digest)
    kfree(MD5_digest);
  if(digest)
    kfree(digest);
  return err;
}
Example 11
/*
 * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
 * Well, not what's written there, but rather what they meant.
 */
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct blkcipher_desc desc = { .tfm = state->arc4 };

	get_new_key_from_sha(state);
	if (!initial_key) {
		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
					state->keylen);
		sg_init_table(sg_in, 1);
		sg_init_table(sg_out, 1);
		setup_sg(sg_in, state->sha1_digest, state->keylen);
		setup_sg(sg_out, state->session_key, state->keylen);
		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
					     state->keylen) != 0) {
    		    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
		}
	} else {
		memcpy(state->session_key, state->sha1_digest, state->keylen);
	}
	if (state->keylen == 8) {
		/* See RFC 3078 */
		state->session_key[0] = 0xd1;
		state->session_key[1] = 0x26;
		state->session_key[2] = 0x9e;
	}
	crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
}

/*
 * Allocate space for a (de)compressor.
 */
static void *mppe_alloc(unsigned char *options, int optlen)
{
	struct ppp_mppe_state *state;
	unsigned int digestsize;

	if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		goto out;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		goto out;


	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->arc4)) {
		state->arc4 = NULL;
		goto out_free;
	}

	state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->sha1)) {
		state->sha1 = NULL;
		goto out_free;
	}

	digestsize = crypto_hash_digestsize(state->sha1);
	if (digestsize < MPPE_MAX_KEY_LEN)
		goto out_free;

	state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
	if (!state->sha1_digest)
		goto out_free;

	/* Save keys. */
	memcpy(state->master_key, &options[CILEN_MPPE],
	       sizeof(state->master_key));
	memcpy(state->session_key, state->master_key,
	       sizeof(state->master_key));

	/*
	 * We defer initial key generation until mppe_init(), as mppe_alloc()
	 * is called frequently during negotiation.
	 */

	return (void *)state;

	out_free:
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	out:
	return NULL;
}

/*
 * Deallocate space for a (de)compressor.
 */
static void mppe_free(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	if (state) {
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	}
}

/*
 * Initialize (de)compressor state.
 */
static int
mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
	  const char *debugstr)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	unsigned char mppe_opts;

	if (optlen != CILEN_MPPE ||
	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		return 0;

	MPPE_CI_TO_OPTS(&options[2], mppe_opts);
	if (mppe_opts & MPPE_OPT_128)
		state->keylen = 16;
	else if (mppe_opts & MPPE_OPT_40)
		state->keylen = 8;
	else {
		printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
		       unit);
		return 0;
	}
	if (mppe_opts & MPPE_OPT_STATEFUL)
		state->stateful = 1;

	/* Generate the initial session key. */
	mppe_rekey(state, 1);

	if (debug) {
		int i;
		char mkey[sizeof(state->master_key) * 2 + 1];
		char skey[sizeof(state->session_key) * 2 + 1];

		printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
		       debugstr, unit, (state->keylen == 16) ? 128 : 40,
		       (state->stateful) ? "stateful" : "stateless");

		for (i = 0; i < sizeof(state->master_key); i++)
			sprintf(mkey + i * 2, "%02x", state->master_key[i]);
		for (i = 0; i < sizeof(state->session_key); i++)
			sprintf(skey + i * 2, "%02x", state->session_key[i]);
		printk(KERN_DEBUG
		       "%s[%d]: keys: master: %s initial session: %s\n",
		       debugstr, unit, mkey, skey);
	}

	/*
	 * Initialize the coherency count.  The initial value is not specified
	 * in RFC 3078, but we can make a reasonable assumption that it will
	 * start at 0.  Setting it to the max here makes the comp/decomp code
	 * do the right thing (determined through experiment).
	 */
	state->ccount = MPPE_CCOUNT_SPACE - 1;

	/*
	 * Note that even though we have initialized the key table, we don't
	 * set the FLUSHED bit.  This is contrary to RFC 3078, sec. 3.1.
	 */
	state->bits = MPPE_BIT_ENCRYPTED;

	state->unit = unit;
	state->debug = debug;

	return 1;
}

static int
mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
	       int hdrlen, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
}

/*
 * We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
 * tell the compressor to rekey.  Note that we MUST NOT rekey for
 * every CCP Reset-Request; we only rekey on the next xmit packet.
 * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
 * So, rekeying for every CCP Reset-Request is broken as the peer will not
 * know how many times we've rekeyed.  (If we rekey and THEN get another
 * CCP Reset-Request, we must rekey again.)
 */
static void mppe_comp_reset(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	state->bits |= MPPE_BIT_FLUSHED;
}

/*
 * Compress (encrypt) a packet.
 * It's strange to call this a compressor, since the output is always
 * MPPE_OVHD + 2 bytes larger than the input.
 */
static int
mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
	      int isize, int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	int proto;
	struct scatterlist sg_in[1], sg_out[1];

	/*
	 * Check that the protocol is in the range we handle.
	 */
	proto = PPP_PROTOCOL(ibuf);
	if (proto < 0x0021 || proto > 0x00fa)
		return 0;

	/* Make sure we have enough room to generate an encrypted packet. */
	if (osize < isize + MPPE_OVHD + 2) {
		/* Drop the packet if we should encrypt it, but can't. */
		printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, osize + MPPE_OVHD + 2);
		return -1;
	}

	osize = isize + MPPE_OVHD + 2;

	/*
	 * Copy over the PPP header and set control bits.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);
	obuf[1] = PPP_CONTROL(ibuf);
	put_unaligned_be16(PPP_COMP, obuf + 2);
	obuf += PPP_HDRLEN;

	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
		       state->ccount);
	put_unaligned_be16(state->ccount, obuf);

	if (!state->stateful ||	/* stateless mode     */
	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
	    (state->bits & MPPE_BIT_FLUSHED)) {	/* CCP Reset-Request  */
		/* We must rekey */
		if (state->debug && state->stateful)
			printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
			       state->unit);
		mppe_rekey(state, 0);
		state->bits |= MPPE_BIT_FLUSHED;
	}
	obuf[0] |= state->bits;
	state->bits &= ~MPPE_BIT_FLUSHED;	/* reset for next xmit */

	obuf += MPPE_OVHD;
	ibuf += 2;		/* skip to proto field */
	isize -= 2;

	/* Encrypt packet */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, isize);
	setup_sg(sg_out, obuf, osize);
	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
		printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
		return -1;
	}

	state->stats.unc_bytes += isize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += osize;
	state->stats.comp_packets++;

	return osize;
}

/*
 * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
 * to look bad ... and the longer the link is up the worse it will get.
 */
static void mppe_comp_stats(void *arg, struct compstat *stats)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	*stats = state->stats;
}

static int
mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
		 int hdrlen, int mru, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
}

/*
 * We received a CCP Reset-Ack.  Just ignore it.
 */
static void mppe_decomp_reset(void *arg)
{
	/* ARGSUSED */
	return;
}

/*
 * Decompress (decrypt) an MPPE packet.
 */
static int
mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
		int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	unsigned ccount;
	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
	int sanity = 0;
	struct scatterlist sg_in[1], sg_out[1];

	if (isize <= PPP_HDRLEN + MPPE_OVHD) {
		if (state->debug)
			printk(KERN_DEBUG
			       "mppe_decompress[%d]: short pkt (%d)\n",
			       state->unit, isize);
		return DECOMP_ERROR;
	}

	/*
	 * Make sure we have enough room to decrypt the packet.
	 * Note that for our test we only subtract 1 byte whereas in
	 * mppe_compress() we added 2 bytes (+MPPE_OVHD);
	 * this is to account for possible PFC.
	 */
	if (osize < isize - MPPE_OVHD - 1) {
		printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, isize - MPPE_OVHD - 1);
		return DECOMP_ERROR;
	}
	osize = isize - MPPE_OVHD - 2;	/* assume no PFC */

	ccount = MPPE_CCOUNT(ibuf);
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
		       state->unit, ccount);

	/* sanity checks -- terminate with extreme prejudice */
	if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
		printk(KERN_DEBUG
		       "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
		       state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (!state->stateful && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
		       "stateless mode!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
		       "flag packet!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}

	if (sanity) {
		if (state->sanity_errors < SANITY_MAX)
			return DECOMP_ERROR;
		else
			/*
			 * Take LCP down if the peer is sending too many bogons.
			 * We don't want to do this for a single or just a few
			 * instances since it could just be due to packet corruption.
			 */
			return DECOMP_FATALERROR;
	}

	/*
	 * Check the coherency count.
	 */

	if (!state->stateful) {
		/* RFC 3078, sec 8.1.  Rekey for every packet. */
		while (state->ccount != ccount) {
			mppe_rekey(state, 0);
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
		}
	} else {
		/* RFC 3078, sec 8.2. */
		if (!state->discard) {
			/* normal state */
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
			if (ccount != state->ccount) {
				/*
				 * (ccount > state->ccount)
				 * Packet loss detected, enter the discard state.
				 * Signal the peer to rekey (by sending a CCP Reset-Request).
				 */
				state->discard = 1;
				return DECOMP_ERROR;
			}
		} else {
			/* discard state */
			if (!flushed) {
				/* ccp.c will be silent (no additional CCP Reset-Requests). */
				return DECOMP_ERROR;
			} else {
				/* Rekey for every missed "flag" packet. */
				while ((ccount & ~0xff) !=
				       (state->ccount & ~0xff)) {
					mppe_rekey(state, 0);
					state->ccount =
					    (state->ccount +
					     256) % MPPE_CCOUNT_SPACE;
				}

				/* reset */
				state->discard = 0;
				state->ccount = ccount;
				/*
				 * Another problem with RFC 3078 here.  It implies that the
				 * peer need not send a Reset-Ack packet.  But RFC 1962
				 * requires it.  Hopefully, M$ does send a Reset-Ack; even
				 * though it isn't required for MPPE synchronization, it is
				 * required to reset CCP state.
				 */
			}
		}
		if (flushed)
			mppe_rekey(state, 0);
	}

	/*
	 * Fill in the first part of the PPP header.  The protocol field
	 * comes from the decrypted data.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);	/* +1 */
	obuf[1] = PPP_CONTROL(ibuf);	/* +1 */
	obuf += 2;
	ibuf += PPP_HDRLEN + MPPE_OVHD;
	isize -= PPP_HDRLEN + MPPE_OVHD;	/* -6 */
	/* net osize: isize-4 */

	/*
	 * Decrypt the first byte in order to check if it is
	 * a compressed or uncompressed protocol field.
	 */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, 1);
	setup_sg(sg_out, obuf, 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	/*
	 * Do PFC decompression.
	 * This would be nicer if we were given the actual sk_buff
	 * instead of a char *.
	 */
	if ((obuf[0] & 0x01) != 0) {
		obuf[1] = obuf[0];
		obuf[0] = 0;
		obuf++;
		osize++;
	}

	/* And finally, decrypt the rest of the packet. */
	setup_sg(sg_in, ibuf + 1, isize - 1);
	setup_sg(sg_out, obuf + 1, osize - 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	state->stats.unc_bytes += osize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += isize;
	state->stats.comp_packets++;

	/* good packet credit */
	state->sanity_errors >>= 1;

	return osize;
}
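
For reference, here is a standalone sketch (not part of the driver above) of how the two MPPE overhead bytes consumed by MPPE_BITS()/MPPE_CCOUNT() are laid out per RFC 3078: the high nibble of the first overhead byte carries the FLUSHED and ENCRYPTED flags, and the remaining 12 bits are the coherency count. The macro expansions below are assumptions mirroring the usual ppp_mppe.c definitions, shown here only to make the parsing explicit.

#include <stdio.h>

/* Assumed layout of the MPPE overhead bytes; pkt[0..3] is the PPP header. */
#define MPPE_BIT_FLUSHED	0x80	/* bit A: keys were (re)initialized */
#define MPPE_BIT_ENCRYPTED	0x10	/* bit D: frame is encrypted */
#define MPPE_BITS(p)		((p)[4] & 0xf0)
#define MPPE_CCOUNT(p)		((((p)[4] & 0x0f) << 8) + (p)[5])

int main(void)
{
	/* ff 03 00 fd = PPP address/control + protocol (COMP); 90 2a = MPPE overhead */
	unsigned char pkt[] = { 0xff, 0x03, 0x00, 0xfd, 0x90, 0x2a };

	printf("flushed=%d encrypted=%d ccount=%d\n",
	       !!(MPPE_BITS(pkt) & MPPE_BIT_FLUSHED),
	       !!(MPPE_BITS(pkt) & MPPE_BIT_ENCRYPTED),
	       MPPE_CCOUNT(pkt));	/* prints: flushed=1 encrypted=1 ccount=42 */
	return 0;
}
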

/*
 * Incompressible data has arrived (this should never happen!).
 * We should probably drop the link if the protocol is in the range
 * of what should be encrypted.  At the least, we should drop this
 * packet.  (How to do this?)
 */
static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	if (state->debug &&
	    (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
		printk(KERN_DEBUG
		       "mppe_incomp[%d]: incompressible (unencrypted) data! "
		       "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));

	state->stats.inc_bytes += icnt;
	state->stats.inc_packets++;
	state->stats.unc_bytes += icnt;
	state->stats.unc_packets++;
}
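
A minimal sketch of the protocol-range test used in the debug branch above, assuming the standard PPP_PROTOCOL() accessor (the 16-bit protocol field that follows the address and control bytes); the 0x0021..0x00fa range covers the network-layer protocols that MPPE is expected to have encrypted.

/* Sketch only: open-coded equivalent of the check in mppe_incomp(). */
static inline int mppe_proto_should_be_encrypted(const unsigned char *ibuf)
{
	unsigned int proto = (ibuf[2] << 8) + ibuf[3];	/* PPP_PROTOCOL(ibuf) */

	return proto >= 0x0021 && proto <= 0x00fa;
}
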

/*************************************************************
 * Module interface table
 *************************************************************/

/*
 * Procedures exported to if_ppp.c.
 */
static struct compressor ppp_mppe = {
	.compress_proto = CI_MPPE,
	.comp_alloc     = mppe_alloc,
	.comp_free      = mppe_free,
	.comp_init      = mppe_comp_init,
	.comp_reset     = mppe_comp_reset,
	.compress       = mppe_compress,
	.comp_stat      = mppe_comp_stats,
	.decomp_alloc   = mppe_alloc,
	.decomp_free    = mppe_free,
	.decomp_init    = mppe_decomp_init,
	.decomp_reset   = mppe_decomp_reset,
	.decompress     = mppe_decompress,
	.incomp         = mppe_incomp,
	.decomp_stat    = mppe_comp_stats,
	.owner          = THIS_MODULE,
	.comp_extra     = MPPE_PAD,
};

/*
 * ppp_mppe_init()
 *
 * Prior to allowing load, try to load the arc4 and sha1 crypto
 * libraries.  The actual use will be allocated later, but
 * this way the module will fail to insmod if they aren't available.
 */

static int __init ppp_mppe_init(void)
{
	int answer;

	if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
	      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
		return -ENODEV;

	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
	if (!sha_pad)
		return -ENOMEM;
	sha_pad_init(sha_pad);

	answer = ppp_register_compressor(&ppp_mppe);

	if (answer == 0)
		printk(KERN_INFO "PPP MPPE Compression module registered\n");
	else
		kfree(sha_pad);

	return answer;
}
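
sha_pad_init() is defined earlier in this file and is not shown here. As a hedged sketch, it is assumed to fill the two 40-byte padding constants from RFC 3079 (SHAPad1 = repeated 0x00, SHAPad2 = repeated 0xF2) that are used when deriving MPPE session keys; the names below are illustrative only.

/* Sketch of the assumed sha_pad layout and initializer (see RFC 3079). */
struct sha_pad_sketch {
	unsigned char sha_pad1[40];	/* SHAPad1: 40 octets of 0x00 */
	unsigned char sha_pad2[40];	/* SHAPad2: 40 octets of 0xF2 */
};

static inline void sha_pad_init_sketch(struct sha_pad_sketch *pad)
{
	memset(pad->sha_pad1, 0x00, sizeof(pad->sha_pad1));
	memset(pad->sha_pad2, 0xF2, sizeof(pad->sha_pad2));
}
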

static void __exit ppp_mppe_cleanup(void)
{
	ppp_unregister_compressor(&ppp_mppe);
	kfree(sha_pad);
}

module_init(ppp_mppe_init);
module_exit(ppp_mppe_cleanup);
Example #12
static int tf_self_test_integrity(const char *alg_name, struct module *mod)
{
	unsigned char expected[32];
	unsigned char actual[32];
	struct scatterlist *sg = NULL;
	struct hash_desc desc = {NULL, 0};
	size_t digest_length;
	unsigned char *const key = tf_integrity_hmac_sha256_key;
	size_t const key_length = sizeof(tf_integrity_hmac_sha256_key);
	int error;

	if (mod->raw_binary_ptr == NULL)
		return -ENXIO;
	if (tf_integrity_hmac_sha256_expected_value == NULL)
		return -ENOENT;
	INFO("expected=%s", tf_integrity_hmac_sha256_expected_value);
	error = scan_hex(expected, sizeof(expected),
			 tf_integrity_hmac_sha256_expected_value);
	if (error < 0) {
		pr_err("tf_driver: Badly formatted hmac_sha256 parameter "
		       "(should be a hex string)\n");
		return -EIO;
	}

	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM : (int)PTR_ERR(desc.tfm));
		goto abort;
	}
	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);

	error = crypto_hash_setkey(desc.tfm, key, key_length);
	if (error) {
		ERROR("crypto_hash_setkey(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	sg = vmalloc_to_sg(mod->raw_binary_ptr, mod->raw_binary_size);
	if (IS_ERR_OR_NULL(sg)) {
		ERROR("vmalloc_to_sg(%lu) failed: %d",
		      mod->raw_binary_size, (int)sg);
		error = (sg == NULL ? -ENOMEM : (int)sg);
		goto abort;
	}

	error = crypto_hash_digest(&desc, sg, mod->raw_binary_size, actual);
	if (error) {
		ERROR("crypto_hash_digest(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	kfree(sg);
	crypto_free_hash(desc.tfm);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
	if (tf_fault_injection_mask & TF_CRYPTO_ALG_INTEGRITY) {
		pr_warning("TF: injecting fault in integrity check!\n");
		actual[0] = 0xff;
		actual[1] ^= 0xff;
	}
#endif
	TF_TRACE_ARRAY(expected, digest_length);
	TF_TRACE_ARRAY(actual, digest_length);
	if (memcmp(expected, actual, digest_length)) {
		ERROR("wrong %s digest value", alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}

	return error;

abort:
	if (!IS_ERR_OR_NULL(sg))
		kfree(sg);
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	return error == -ENOMEM ? error : -EIO;
}
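
vmalloc_to_sg() is a driver-local helper that is not shown in this excerpt. A hedged sketch of the pattern it is assumed to follow: walk the vmalloc'ed region page by page with vmalloc_to_page() and describe each page in a kcalloc'ed scatterlist, which is consistent with the caller checking IS_ERR_OR_NULL() and releasing the result with kfree().

/* Sketch only; the driver's real vmalloc_to_sg() may differ in detail. */
static struct scatterlist *vmalloc_to_sg_sketch(unsigned char *buf,
						unsigned int nr_bytes)
{
	unsigned int nr_pages = PAGE_ALIGN(nr_bytes) >> PAGE_SHIFT;
	struct scatterlist *sg;
	struct page *pg;
	unsigned int i, chunk;

	sg = kcalloc(nr_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	sg_init_table(sg, nr_pages);

	for (i = 0; i < nr_pages; i++) {
		pg = vmalloc_to_page(buf + i * PAGE_SIZE);
		if (!pg) {
			kfree(sg);
			return ERR_PTR(-EFAULT);
		}
		/* Last page may be partial. */
		chunk = min_t(unsigned int, nr_bytes - i * PAGE_SIZE,
			      PAGE_SIZE);
		sg_set_page(&sg[i], pg, chunk, 0);
	}
	return sg;
}
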
Example #13
static int tf_self_test_digest(const char *alg_name,
			       const struct digest_test_vector *tv)
{
	unsigned char digest[64];
	unsigned char input[256];
	struct scatterlist sg;
	struct hash_desc desc = {NULL, 0};
	int error;
	size_t digest_length;

	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM : (int)PTR_ERR(desc.tfm));
		goto abort;
	}

	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);
	if (digest_length > sizeof(digest)) {
		ERROR("digest length too large (%zu > %zu)",
		      digest_length, sizeof(digest));
		error = -ENOMEM;
		goto abort;
	}

	if (tv->key != NULL) {
		error = crypto_hash_setkey(desc.tfm, tv->key, tv->key_length);
		if (error) {
			ERROR("crypto_hash_setkey(%s) failed: %d",
			      alg_name, error);
			goto abort;
		}
		TF_TRACE_ARRAY(tv->key, tv->key_length);
	}
	error = crypto_hash_init(&desc);
	if (error) {
		ERROR("crypto_hash_init(%s) failed: %d", alg_name, error);
		goto abort;
	}

	/* The test vector data is in vmalloc'ed memory since it's a module
	   global. Copy it to the stack, since the crypto API doesn't support
	   vmalloc'ed memory. */
	if (tv->length > sizeof(input)) {
		ERROR("data too large (%zu > %zu)",
		      tv->length, sizeof(input));
		error = -ENOMEM;
		goto abort;
	}
	memcpy(input, tv->text, tv->length);
	INFO("sg_init_one(%p, %p, %u)", &sg, input, tv->length);
	sg_init_one(&sg, input, tv->length);

	TF_TRACE_ARRAY(input, tv->length);
	error = crypto_hash_update(&desc, &sg, tv->length);
	if (error) {
		ERROR("crypto_hash_update(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	error = crypto_hash_final(&desc, digest);
	if (error) {
		ERROR("crypto_hash_final(%s) failed: %d", alg_name, error);
		goto abort;
	}

	crypto_free_hash(desc.tfm);
	desc.tfm = NULL;

	if (memcmp(digest, tv->digest, digest_length)) {
		TF_TRACE_ARRAY(digest, digest_length);
		ERROR("wrong %s digest value", alg_name);
		pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n",
		       alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}
	return error;

abort:
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n", alg_name);
	return error;
}
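
Usage sketch: the struct layout below is hypothetical and only mirrors the fields that tf_self_test_digest() actually dereferences (key, key_length, text, length, digest); the driver's real struct digest_test_vector is defined elsewhere. The digest value is the standard FIPS 180 test vector SHA-1("abc").

struct digest_test_vector_sketch {
	const unsigned char *key;	/* NULL for an unkeyed hash */
	size_t key_length;
	const unsigned char *text;
	size_t length;
	const unsigned char *digest;
};

/* SHA-1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d */
static const unsigned char sha1_abc_digest[] = {
	0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
	0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d,
};

static const struct digest_test_vector_sketch sha1_abc = {
	.key = NULL,			/* unkeyed, so setkey is skipped */
	.text = (const unsigned char *)"abc",
	.length = 3,
	.digest = sha1_abc_digest,
};

/*
 * Assuming the real struct digest_test_vector has the same layout, a POST
 * loop would then invoke something like:
 *
 *	error = tf_self_test_digest("sha1", &sha1_abc);
 */
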