/*
 * Process a key_share extension received in the ClientHello. |pkt| contains
 * the raw PACKET data for the extension. Returns 1 on success or 0 on failure.
 * If a failure occurs then |*al| is set to an appropriate alert value.
 */
int tls_parse_ctos_key_share(SSL *s, PACKET *pkt, unsigned int context,
                             X509 *x, size_t chainidx, int *al)
{
#ifndef OPENSSL_NO_TLS1_3
    unsigned int group_id;
    PACKET key_share_list, encoded_pt;
    const unsigned char *clntcurves, *srvrcurves;
    size_t clnt_num_curves, srvr_num_curves;
    int group_nid, found = 0;
    unsigned int curve_flags;

    /*
     * On resumption the key_share only matters if a DHE-based PSK exchange
     * was offered; otherwise we can ignore the extension entirely.
     */
    if (s->hit && (s->ext.psk_kex_mode & TLSEXT_KEX_MODE_FLAG_KE_DHE) == 0)
        return 1;

    /* Sanity check: a peer key must not already have been set up */
    if (s->s3->peer_tmp != NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* The extension body is a 2-byte length-prefixed list of shares */
    if (!PACKET_as_length_prefixed_2(pkt, &key_share_list)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    /* Get our list of supported curves */
    if (!tls1_get_curvelist(s, 0, &srvrcurves, &srvr_num_curves)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* Get the clients list of supported curves. */
    if (!tls1_get_curvelist(s, 1, &clntcurves, &clnt_num_curves)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    if (clnt_num_curves == 0) {
        /*
         * This can only happen if the supported_groups extension was not sent,
         * because we verify that the length is non-zero when we process that
         * extension.
         */
        *al = SSL_AD_MISSING_EXTENSION;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
               SSL_R_MISSING_SUPPORTED_GROUPS_EXTENSION);
        return 0;
    }

    while (PACKET_remaining(&key_share_list) > 0) {
        /* Each entry: 2-byte group id + 2-byte length-prefixed key material */
        if (!PACKET_get_net_2(&key_share_list, &group_id)
                || !PACKET_get_length_prefixed_2(&key_share_list, &encoded_pt)
                || PACKET_remaining(&encoded_pt) == 0) {
            *al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_LENGTH_MISMATCH);
            return 0;
        }

        /*
         * If we already found a suitable key_share we loop through the
         * rest to verify the structure, but don't process them.
         */
        if (found)
            continue;

        /* Check if this share is in supported_groups sent from client */
        if (!check_in_list(s, group_id, clntcurves, clnt_num_curves, 0)) {
            *al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_KEY_SHARE);
            return 0;
        }

        /* Check if this share is for a group we can use */
        if (!check_in_list(s, group_id, srvrcurves, srvr_num_curves, 1)) {
            /* Share not suitable */
            continue;
        }

        /* Map the TLS group id to an OpenSSL NID (and curve type flags) */
        group_nid = tls1_ec_curve_id2nid(group_id, &curve_flags);

        if (group_nid == 0) {
            *al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                   SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
            return 0;
        }

        if ((curve_flags & TLS_CURVE_TYPE) == TLS_CURVE_CUSTOM) {
            /* Can happen for some curves, e.g. X25519 */
            EVP_PKEY *key = EVP_PKEY_new();

            if (key == NULL || !EVP_PKEY_set_type(key, group_nid)) {
                *al = SSL_AD_INTERNAL_ERROR;
                SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_EVP_LIB);
                EVP_PKEY_free(key);
                return 0;
            }
            s->s3->peer_tmp = key;
        } else {
            /* Set up EVP_PKEY with named curve as parameters */
            EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);

            if (pctx == NULL
                    || EVP_PKEY_paramgen_init(pctx) <= 0
                    || EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx,
                                                              group_nid) <= 0
                    || EVP_PKEY_paramgen(pctx, &s->s3->peer_tmp) <= 0) {
                *al = SSL_AD_INTERNAL_ERROR;
                SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_EVP_LIB);
                EVP_PKEY_CTX_free(pctx);
                return 0;
            }
            EVP_PKEY_CTX_free(pctx);
            pctx = NULL;
        }
        /* Remember which group the selected share used */
        s->s3->group_id = group_id;

        /* Load the client's public key material into the peer key */
        if (!EVP_PKEY_set1_tls_encodedpoint(s->s3->peer_tmp,
                                            PACKET_data(&encoded_pt),
                                            PACKET_remaining(&encoded_pt))) {
            *al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_ECPOINT);
            return 0;
        }

        found = 1;
    }
#endif

    return 1;
}
/*
 * Handle the key_share extension in an incoming ClientHello. The raw
 * extension body is supplied in |pkt|. Returns 1 on success. On any failure
 * SSLfatal() is raised and 0 is returned.
 */
int tls_parse_ctos_key_share(SSL *s, PACKET *pkt, unsigned int context,
                             X509 *x, size_t chainidx)
{
#ifndef OPENSSL_NO_TLS1_3
    unsigned int share_group;
    PACKET shares, point;
    const uint16_t *peer_groups, *our_groups;
    size_t num_peer_groups, num_our_groups;
    int selected = 0;

    /* When resuming without a DHE-based PSK mode the key_share is moot */
    if (s->hit && (s->ext.psk_kex_mode & TLSEXT_KEX_MODE_FLAG_KE_DHE) == 0)
        return 1;

    /* Sanity check: no peer key should be in place yet */
    if (s->s3->peer_tmp != NULL) {
        SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                 ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* The extension body is a 2-byte length-prefixed list of shares */
    if (!PACKET_as_length_prefixed_2(pkt, &shares)) {
        SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                 SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    /* The groups we are willing to use ... */
    tls1_get_supported_groups(s, &our_groups, &num_our_groups);
    /* ... and the ones the client advertised */
    tls1_get_peer_groups(s, &peer_groups, &num_peer_groups);

    if (num_peer_groups == 0) {
        /*
         * Only possible when no supported_groups extension arrived at all:
         * a zero-length list is already rejected while parsing that
         * extension.
         */
        SSLfatal(s, SSL_AD_MISSING_EXTENSION,
                 SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                 SSL_R_MISSING_SUPPORTED_GROUPS_EXTENSION);
        return 0;
    }

    while (PACKET_remaining(&shares) > 0) {
        /* Each entry: 2-byte group id + 2-byte length-prefixed key data */
        if (!PACKET_get_net_2(&shares, &share_group)
                || !PACKET_get_length_prefixed_2(&shares, &point)
                || PACKET_remaining(&point) == 0) {
            SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                     SSL_R_LENGTH_MISMATCH);
            return 0;
        }

        /* Once a share is chosen, only syntax-check the remainder */
        if (selected)
            continue;

        /* The share must also appear in the client's supported_groups */
        if (!check_in_list(s, share_group, peer_groups, num_peer_groups, 0)) {
            SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER,
                     SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_KEY_SHARE);
            return 0;
        }

        /* Groups we cannot use ourselves are skipped, not an error */
        if (!check_in_list(s, share_group, our_groups, num_our_groups, 1))
            continue;

        /* Build an EVP_PKEY carrying this group's domain parameters */
        s->s3->peer_tmp = ssl_generate_param_group(share_group);
        if (s->s3->peer_tmp == NULL) {
            SSLfatal(s, SSL_AD_INTERNAL_ERROR,
                     SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                     SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
            return 0;
        }

        s->s3->group_id = share_group;

        /* Install the client's public key material */
        if (!EVP_PKEY_set1_tls_encodedpoint(s->s3->peer_tmp,
                                            PACKET_data(&point),
                                            PACKET_remaining(&point))) {
            SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER,
                     SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_ECPOINT);
            return 0;
        }

        selected = 1;
    }
#endif

    return 1;
}
/*
 * Process a key_share extension received in the ClientHello. |pkt| contains
 * the raw PACKET data for the extension. Returns 1 on success or 0 on failure.
 * If a failure occurs then |*al| is set to an appropriate alert value.
 *
 * Alert selection follows RFC 8446 section 6.2: decode_error for malformed
 * extension contents, illegal_parameter for well-formed but invalid values
 * (a share for a group the client never offered, or a bad public point).
 */
int tls_parse_ctos_key_share(SSL *s, PACKET *pkt, int *al)
{
    unsigned int group_id;
    PACKET key_share_list, encoded_pt;
    const unsigned char *clntcurves, *srvrcurves;
    size_t clnt_num_curves, srvr_num_curves;
    int group_nid, found = 0;
    unsigned int curve_flags;

    /* On resumption the key_share is not used */
    if (s->hit)
        return 1;

    /* Sanity check: a peer key must not already have been set up */
    if (s->s3->peer_tmp != NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (!PACKET_as_length_prefixed_2(pkt, &key_share_list)) {
        /* Malformed data: decode_error, not handshake_failure */
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    /* Get our list of supported curves */
    if (!tls1_get_curvelist(s, 0, &srvrcurves, &srvr_num_curves)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /*
     * Get the clients list of supported curves.
     * TODO(TLS1.3): We should validate that we actually received
     * supported_groups!
     */
    if (!tls1_get_curvelist(s, 1, &clntcurves, &clnt_num_curves)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    while (PACKET_remaining(&key_share_list) > 0) {
        /* Each entry: 2-byte group id + 2-byte length-prefixed key material */
        if (!PACKET_get_net_2(&key_share_list, &group_id)
                || !PACKET_get_length_prefixed_2(&key_share_list, &encoded_pt)
                || PACKET_remaining(&encoded_pt) == 0) {
            /* Malformed data: decode_error, not handshake_failure */
            *al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_LENGTH_MISMATCH);
            return 0;
        }

        /*
         * If we already found a suitable key_share we loop through the
         * rest to verify the structure, but don't process them.
         */
        if (found)
            continue;

        /* Check if this share is in supported_groups sent from client */
        if (!check_in_list(s, group_id, clntcurves, clnt_num_curves, 0)) {
            /* Valid syntax, invalid value: illegal_parameter */
            *al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_KEY_SHARE);
            return 0;
        }

        /* Check if this share is for a group we can use */
        if (!check_in_list(s, group_id, srvrcurves, srvr_num_curves, 1)) {
            /* Share not suitable */
            continue;
        }

        /* Map the TLS group id to an OpenSSL NID (and curve type flags) */
        group_nid = tls1_ec_curve_id2nid(group_id, &curve_flags);

        if (group_nid == 0) {
            *al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE,
                   SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
            return 0;
        }

        if ((curve_flags & TLS_CURVE_TYPE) == TLS_CURVE_CUSTOM) {
            /* Can happen for some curves, e.g. X25519 */
            EVP_PKEY *key = EVP_PKEY_new();

            if (key == NULL || !EVP_PKEY_set_type(key, group_nid)) {
                *al = SSL_AD_INTERNAL_ERROR;
                SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_EVP_LIB);
                EVP_PKEY_free(key);
                return 0;
            }
            s->s3->peer_tmp = key;
        } else {
            /* Set up EVP_PKEY with named curve as parameters */
            EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);

            if (pctx == NULL
                    || EVP_PKEY_paramgen_init(pctx) <= 0
                    || EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx,
                                                              group_nid) <= 0
                    || EVP_PKEY_paramgen(pctx, &s->s3->peer_tmp) <= 0) {
                *al = SSL_AD_INTERNAL_ERROR;
                SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, ERR_R_EVP_LIB);
                EVP_PKEY_CTX_free(pctx);
                return 0;
            }
            EVP_PKEY_CTX_free(pctx);
            pctx = NULL;
        }
        /* Remember which group the selected share used */
        s->s3->group_id = group_id;

        /* Load the client's public key material into the peer key */
        if (!EVP_PKEY_set1_tls_encodedpoint(s->s3->peer_tmp,
                                            PACKET_data(&encoded_pt),
                                            PACKET_remaining(&encoded_pt))) {
            /* A bad point is an invalid value, not a decoding problem */
            *al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PARSE_CTOS_KEY_SHARE, SSL_R_BAD_ECPOINT);
            return 0;
        }

        found = 1;
    }

    return 1;
}
/* * osprd_ioctl(inode, filp, cmd, arg) * Called to perform an ioctl on the named file. */ int osprd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { osprd_info_t *d = file2osprd(filp); // device info pid_t pid; node_t check; int wait, i; unsigned ticket; // is file open for writing? int filp_writable = (filp->f_mode & FMODE_WRITE) != 0; if (cmd == OSPRDIOCACQUIRE) { // The process id trying to get this lock is the unique identifier // in our list pid = current->pid; // Check if this pid has already tried to get a lock on this device check = check_in_list(d->lock_list, pid); if (check) { return -EDEADLK; } /* This whole section is to prevent deadlock. The global lock is necessary to avoid raceconditions, like processes getting inserted into the list after we've already checked it. This is a huge unfortunate bottleneck. I does prevent deadlock, however. */ osp_spin_lock(&g_mutex); for (i = 0; i < NOSPRD; i++) { if(d == &osprds[i]) { //Checks if this our own device continue; } else { node_t traversal = d->lock_list; //Traversal set to beginning of our lock linked list node_t start; // The starting position of our pid in the other list check = NULL; start = check_in_list(osprds[i].lock_list, pid); // We only care if our is also has another lock in this node if(start == NULL) continue; while(traversal != NULL) { // Only care about stuff after our pid check = check_in_list(start, traversal->pid); // We must make sure that our pid does not hold // a lock that would block anything in another queue if(check != NULL) { osp_spin_unlock(&g_mutex); return -EDEADLK; } traversal = traversal->next; } } } // It doesn't cause deadlock, so me must insert it into the list so // that others can check if this process will cause deadlock insert_node (&d->lock_list, pid, d->read_locks, d->write_locks); osp_spin_unlock(&g_mutex); // EXERCISE: Lock the ramdisk. 
// // If *filp is open for writing (filp_writable), then attempt // to write-lock the ramdisk; otherwise attempt to read-lock // the ramdisk. // // This lock request must block using 'd->blockq' until: // 1) no other process holds a write lock; // 2) either the request is for a read lock, or no other process // holds a read lock; and // 3) lock requests should be serviced in order, so no process // that blocked earlier is still blocked waiting for the // lock. // // If a process acquires a lock, mark this fact by setting // 'filp->f_flags |= F_OSPRD_LOCKED'. You also need to // keep track of how many read and write locks are held: // change the 'osprd_info_t' structure to do this. // // Also wake up processes waiting on 'd->blockq' as needed. // // If the lock request would cause a deadlock, return -EDEADLK. // If the lock request blocks and is awoken by a signal, then // return -ERESTARTSYS. // Otherwise, if we can grant the lock request, return 0. // 'd->ticket_head' and 'd->ticket_tail' should help you // service lock requests in order. These implement a ticket // order: 'ticket_tail' is the next ticket, and 'ticket_head' // is the ticket currently being served. You should set a local // variable to 'd->ticket_head' and increment 'd->ticket_head'. // Then, block at least until 'd->ticket_tail == local_ticket'. // (Some of these operations are in a critical section and must // be protected by a spinlock; which ones?) 
if (filp_writable) { // Get the next ticket osp_spin_lock(&d->mutex); ticket = d->ticket_tail; d->ticket_tail++; osp_spin_unlock(&d->mutex); // Wait for the ticket to become the current job wait = wait_event_interruptible( d->blockq, (ticket == d->ticket_head && d->read_locks == 0 && d->write_locks == 0) ); // Interupt handling if (wait != 0) { // Wait until we have the ticket // If we increment too early, another process could wait // forever while(ticket != d->ticket_head) { yield(); continue; } osp_spin_lock(&d->mutex); d->ticket_head++; osp_spin_unlock(&d->mutex); return wait; } // Give the lock for this job osp_spin_lock(&d->mutex); d->write_locks++; filp->f_flags |= F_OSPRD_LOCKED; osp_spin_unlock(&d->mutex); } else { // Read only // Get the next ticket osp_spin_lock(&d->mutex); ticket = d->ticket_tail; d->ticket_tail++; osp_spin_unlock(&d->mutex); // Wait for the ticket to become the current job wait = wait_event_interruptible( d->blockq, // We don't need to check for other read tasks, take note (ticket == d->ticket_head && d->write_locks == 0) ); // Interrupt handling if (wait != 0) { // Wait until we have the ticket // If we increment too early, another process could wait // forever while(ticket != d->ticket_head) { yield(); continue; } osp_spin_lock(&d->mutex); d->ticket_head++; osp_spin_unlock(&d->mutex); wake_up_all(&d->blockq); return wait; } // Give the lock to the current job osp_spin_lock(&d->mutex); d->read_locks++; filp->f_flags |= F_OSPRD_LOCKED; osp_spin_unlock(&d->mutex); // Since the next task might be a read task, we wake everyone wake_up_all(&d->blockq); } return 0; /* */ } else if (cmd == OSPRDIOCTRYACQUIRE) { // EXERCISE: ATTEMPT to lock the ramdisk. // // This is just like OSPRDIOCACQUIRE, except it should never // block. If OSPRDIOCACQUIRE would block or return deadlock, // OSPRDIOCTRYACQUIRE should return -EBUSY. // Otherwise, if we can grant the lock request, return 0. 
if (filp_writable) { // Get the next ticket osp_spin_lock(&d->mutex); ticket = d->ticket_tail; d->ticket_tail++; osp_spin_unlock(&d->mutex); // Check if we can get the ticket if (ticket == d->ticket_head && d->read_locks == 0 && d->write_locks == 0) { osp_spin_lock(&d->mutex); d->write_locks++; filp->f_flags |= F_OSPRD_LOCKED; osp_spin_unlock(&d->mutex); } else { // Clean up the tickets, make sure they are back to // where they should be osp_spin_lock(&d->mutex); d->ticket_head++; osp_spin_unlock(&d->mutex); return -EBUSY; } } else { // Read only // Get the next ticket osp_spin_lock(&d->mutex); ticket = d->ticket_tail; d->ticket_tail++; osp_spin_unlock(&d->mutex); // Check if we can get the ticket if (ticket == d->ticket_head && d->write_locks == 0) { osp_spin_lock(&d->mutex); d->read_locks++; filp->f_flags |= F_OSPRD_LOCKED; osp_spin_unlock(&d->mutex); // Wake up everyone for the same reasons wake_up_all(&d->blockq); } else { // Clean up the tickets, make sure they are back to // where they should be osp_spin_lock(&d->mutex); d->ticket_head++; osp_spin_unlock(&d->mutex); return -EBUSY; } } // Success return 0; } else if (cmd == OSPRDIOCRELEASE) { // EXERCISE: Unlock the ramdisk. // // If the file hasn't locked the ramdisk, return -EINVAL. // Otherwise, clear the lock from filp->f_flags, wake up // the wait queue, perform any additional accounting steps // you need, and return 0. // Your code here (instead of the next line). 
// Check to make sure has lock if (!(filp->f_flags & F_OSPRD_LOCKED)) { return -EINVAL; } // Clear the locked flag filp->f_flags &= ~(F_OSPRD_LOCKED); // Increment the current job and increment the number of finished // writing/reading tasks if(filp_writable) { osp_spin_lock(&d->mutex); d->ticket_head++; d->write_locks--; osp_spin_unlock(&d->mutex); } else { osp_spin_lock(&d->mutex); d->ticket_head++; d->read_locks--; osp_spin_unlock(&d->mutex); } // We're finished, notify everyone wake_up_all(&d->blockq); return 0; } else return -ENOTTY; /* unknown command */ }