Example #1
ERR_STATE *ERR_get_state(void)
{
    ERR_STATE *state;
    int saveerrno = get_last_sys_error();

    if (!OPENSSL_init_crypto(OPENSSL_INIT_BASE_ONLY, NULL))
        return NULL;

    if (!RUN_ONCE(&err_init, err_do_init))
        return NULL;

    state = CRYPTO_THREAD_get_local(&err_thread_local);
    if (state == (ERR_STATE*)-1)
        return NULL;

    if (state == NULL) {
        if (!CRYPTO_THREAD_set_local(&err_thread_local, (ERR_STATE*)-1))
            return NULL;

        if ((state = OPENSSL_zalloc(sizeof(*state))) == NULL) {
            CRYPTO_THREAD_set_local(&err_thread_local, NULL);
            return NULL;
        }

        if (!ossl_init_thread_start(OPENSSL_INIT_THREAD_ERR_STATE)
                || !CRYPTO_THREAD_set_local(&err_thread_local, state)) {
            ERR_STATE_free(state);
            CRYPTO_THREAD_set_local(&err_thread_local, NULL);
            return NULL;
        }

        /* Ignore failures here */
        OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL);
    }

    set_sys_error(saveerrno);
    return state;
}
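
The (ERR_STATE *)-1 value above is a sentinel: it is parked in the thread-local slot so that a re-entrant call made while initialization is still in progress fails cleanly instead of recursing. A minimal sketch of the same pattern, using the real CRYPTO_THREAD_* API but a hypothetical my_state_t object and key setup:

static CRYPTO_THREAD_LOCAL my_key;   /* assume CRYPTO_THREAD_init_local() ran */

my_state_t *my_get_state(void)
{
    my_state_t *st = CRYPTO_THREAD_get_local(&my_key);

    if (st == (my_state_t *)-1)      /* re-entered during init: fail fast */
        return NULL;

    if (st == NULL) {
        /* park the sentinel before doing anything that may re-enter us */
        if (!CRYPTO_THREAD_set_local(&my_key, (my_state_t *)-1))
            return NULL;
        if ((st = OPENSSL_zalloc(sizeof(*st))) == NULL
                || !CRYPTO_THREAD_set_local(&my_key, st)) {
            OPENSSL_free(st);        /* OPENSSL_free(NULL) is a no-op */
            CRYPTO_THREAD_set_local(&my_key, NULL);
            return NULL;
        }
    }
    return st;
}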
Example #2
int RAND_set_rand_engine(ENGINE *engine)
{
    const RAND_METHOD *tmp_meth = NULL;

    if (!RUN_ONCE(&rand_lock_init, do_rand_lock_init))
        return 0;

    if (engine) {
        if (!ENGINE_init(engine))
            return 0;
        tmp_meth = ENGINE_get_RAND(engine);
        if (tmp_meth == NULL) {
            ENGINE_finish(engine);
            return 0;
        }
    }
    CRYPTO_THREAD_write_lock(rand_engine_lock);
    /* This function releases any prior ENGINE so call it first */
    RAND_set_rand_method(tmp_meth);
    funct_ref = engine;
    CRYPTO_THREAD_unlock(rand_engine_lock);
    return 1;
}
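
A caller-side sketch: select an engine's RAND implementation, use it, then restore the default. The engine id "myrng" is hypothetical; ENGINE_by_id() returns a structural reference that stays the caller's responsibility, while RAND_set_rand_engine() manages its own functional reference via ENGINE_init()/ENGINE_finish() as shown above.

ENGINE *e = ENGINE_by_id("myrng");       /* hypothetical engine id */

if (e != NULL) {
    if (RAND_set_rand_engine(e)) {
        unsigned char buf[32];

        RAND_bytes(buf, sizeof(buf));    /* serviced by the engine's RAND */
        RAND_set_rand_engine(NULL);      /* back to the default method */
    }
    ENGINE_free(e);                      /* drop the structural reference */
}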
Example #3
int
sysmon_init(void)
{
	int error;
#ifdef _MODULE
	devmajor_t bmajor, cmajor;
#endif

	error = RUN_ONCE(&once_sm, sm_init_once);

#ifdef _MODULE
	mutex_enter(&sysmon_minor_mtx);
	if (!sm_is_attached) {
		bmajor = cmajor = -1;
		error = devsw_attach("sysmon", NULL, &bmajor,
				&sysmon_cdevsw, &cmajor);
		sm_is_attached = (error == 0);
	}
	mutex_exit(&sysmon_minor_mtx);
#endif

	return error;
}
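
For context, RUN_ONCE here is NetBSD's <sys/once.h> primitive. A sketch of the declarations the function assumes; the body of sm_init_once is a guess at the obvious one-time setup, and it must return 0 on success for RUN_ONCE to report success:

#include <sys/once.h>

static ONCE_DECL(once_sm);              /* once control used above */

static int
sm_init_once(void)
{
	/* hypothetical one-time setup */
	mutex_init(&sysmon_minor_mtx, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}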
Example #4
/*-
 * BIO_lookup - look up the host and service you want to connect to.
 * @host: the host (node) you want to connect to.
 * @service: the service you want to connect to.
 * @lookup_type: declare your intent with the result: client (connect) or
 *  server (bind/listen).
 * @family: the address family you want to use.  Use AF_UNSPEC for any, or
 *  AF_INET, AF_INET6 or AF_UNIX.
 * @socktype: The socket type you want to use.  Can be SOCK_STREAM, SOCK_DGRAM
 *  or 0 for all.
 * @res: Storage place for the resulting list of returned addresses
 *
 * This will do a lookup of the host and service that you want to connect to.
 * It returns a linked list of different addresses you can try to connect to.
 *
 * When no longer needed you should call BIO_ADDRINFO_free() to free the result.
 *
 * The return value is 1 on success or 0 in case of error.
 */
int BIO_lookup(const char *host, const char *service,
               enum BIO_lookup_type lookup_type,
               int family, int socktype, BIO_ADDRINFO **res)
{
    int ret = 0;                 /* Assume failure */

    switch(family) {
    case AF_INET:
#ifdef AF_INET6
    case AF_INET6:
#endif
#ifdef AF_UNIX
    case AF_UNIX:
#endif
#ifdef AF_UNSPEC
    case AF_UNSPEC:
#endif
        break;
    default:
        BIOerr(BIO_F_BIO_LOOKUP, BIO_R_UNSUPPORTED_PROTOCOL_FAMILY);
        return 0;
    }

#ifdef AF_UNIX
    if (family == AF_UNIX) {
        if (addrinfo_wrap(family, socktype, host, strlen(host), 0, res))
            return 1;
        else
            BIOerr(BIO_F_BIO_LOOKUP, ERR_R_MALLOC_FAILURE);
        return 0;
    }
#endif

    if (BIO_sock_init() != 1)
        return 0;

    if (1) {
#ifdef AI_PASSIVE
        int gai_ret = 0;
        struct addrinfo hints;
        memset(&hints, 0, sizeof hints);

        hints.ai_family = family;
        hints.ai_socktype = socktype;

        if (lookup_type == BIO_LOOKUP_SERVER)
            hints.ai_flags |= AI_PASSIVE;

        /* Note that |res| SHOULD be a 'struct addrinfo **' thanks to
         * macro magic in bio_lcl.h
         */
        switch ((gai_ret = getaddrinfo(host, service, &hints, res))) {
# ifdef EAI_SYSTEM
        case EAI_SYSTEM:
            SYSerr(SYS_F_GETADDRINFO, get_last_socket_error());
            BIOerr(BIO_F_BIO_LOOKUP, ERR_R_SYS_LIB);
            break;
# endif
        case 0:
            ret = 1;             /* Success */
            break;
        default:
            BIOerr(BIO_F_BIO_LOOKUP, ERR_R_SYS_LIB);
            ERR_add_error_data(1, gai_strerror(gai_ret));
            break;
        }
    } else {
#endif
        const struct hostent *he;
        /*
         * Because struct hostent is defined for 32-bit pointers only with
         * VMS C, we need to make sure that '&he_fallback_address' and
         * '&he_fallback_addresses' are 32-bit pointers
         */
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size save
# pragma pointer_size 32
#endif
        /* Windows doesn't seem to have in_addr_t */
#ifdef OPENSSL_SYS_WINDOWS
        static uint32_t he_fallback_address;
        static const char *he_fallback_addresses[] =
        { (char *)&he_fallback_address, NULL };
#else
        static in_addr_t he_fallback_address;
        static const char *he_fallback_addresses[] =
        { (char *)&he_fallback_address, NULL };
#endif
        static const struct hostent he_fallback =
        {   NULL, NULL, AF_INET, sizeof(he_fallback_address),
            (char **)&he_fallback_addresses
        };
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size restore
#endif

        struct servent *se;
        /* Apparently, on WIN64, s_proto and s_port have traded places... */
#ifdef _WIN64
        struct servent se_fallback = { NULL, NULL, NULL, 0 };
#else
        struct servent se_fallback = { NULL, NULL, 0, NULL };
#endif

        if (!RUN_ONCE(&bio_lookup_init, do_bio_lookup_init)) {
            BIOerr(BIO_F_BIO_LOOKUP, ERR_R_MALLOC_FAILURE);
            ret = 0;
            goto err;
        }

        CRYPTO_THREAD_write_lock(bio_lookup_lock);
        he_fallback_address = INADDR_ANY;
        if (host == NULL) {
            he = &he_fallback;
            switch(lookup_type) {
            case BIO_LOOKUP_CLIENT:
                he_fallback_address = INADDR_LOOPBACK;
                break;
            case BIO_LOOKUP_SERVER:
                he_fallback_address = INADDR_ANY;
                break;
            default:
                OPENSSL_assert(("We forgot to handle a lookup type!" == 0));
                break;
            }
        } else {
            he = gethostbyname(host);

            if (he == NULL) {
#ifndef OPENSSL_SYS_WINDOWS
                BIOerr(BIO_F_BIO_LOOKUP, ERR_R_SYS_LIB);
                ERR_add_error_data(1, hstrerror(h_errno));
#else
                SYSerr(SYS_F_GETHOSTBYNAME, WSAGetLastError());
#endif
                ret = 0;
                goto err;
            }
        }

        if (service == NULL) {
            se_fallback.s_port = 0;
            se_fallback.s_proto = NULL;
            se = &se_fallback;
        } else {
            char *endp = NULL;
            long portnum = strtol(service, &endp, 10);

            /*
             * Because struct servent is defined for 32-bit pointers only with
             * VMS C, we need to make sure that 'proto' is a 32-bit pointer.
             */
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size save
# pragma pointer_size 32
#endif
            char *proto = NULL;
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size restore
#endif

            switch (socktype) {
            case SOCK_STREAM:
                proto = "tcp";
                break;
            case SOCK_DGRAM:
                proto = "udp";
                break;
            }

            if (endp != service && *endp == '\0'
                    && portnum > 0 && portnum < 65536) {
                se_fallback.s_port = htons(portnum);
                se_fallback.s_proto = proto;
                se = &se_fallback;
            } else if (endp == service) {
                se = getservbyname(service, proto);

                if (se == NULL) {
#ifndef OPENSSL_SYS_WINDOWS
                    BIOerr(BIO_F_BIO_LOOKUP, ERR_R_SYS_LIB);
                    ERR_add_error_data(1, hstrerror(h_errno));
#else
                    SYSerr(SYS_F_GETSERVBYNAME, WSAGetLastError());
#endif
                    goto err;
                }
            } else {
                BIOerr(BIO_F_BIO_LOOKUP, BIO_R_MALFORMED_HOST_OR_SERVICE);
                goto err;
            }
        }

        *res = NULL;

        {
            /*
             * Because hostent::h_addr_list is an array of 32-bit pointers with VMS C,
             * we must make sure our iterator designates the same element type, hence
             * the pointer size dance.
             */
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size save
# pragma pointer_size 32
#endif
            char **addrlistp;
#if defined(OPENSSL_SYS_VMS) && defined(__DECC)
# pragma pointer_size restore
#endif
            size_t addresses;
            BIO_ADDRINFO *tmp_bai = NULL;

            /* The easiest way to create a linked list from an
               array is to start from the back */
            for(addrlistp = he->h_addr_list; *addrlistp != NULL;
                    addrlistp++)
                ;

            for(addresses = addrlistp - he->h_addr_list;
                    addrlistp--, addresses-- > 0; ) {
                if (!addrinfo_wrap(he->h_addrtype, socktype,
                                   *addrlistp, he->h_length,
                                   se->s_port, &tmp_bai))
                    goto addrinfo_malloc_err;
                tmp_bai->bai_next = *res;
                *res = tmp_bai;
                continue;
addrinfo_malloc_err:
                BIO_ADDRINFO_free(*res);
                *res = NULL;
                BIOerr(BIO_F_BIO_LOOKUP, ERR_R_MALLOC_FAILURE);
                ret = 0;
                goto err;
            }

            ret = 1;
        }
err:
        CRYPTO_THREAD_unlock(bio_lookup_lock);
    }

    return ret;
}
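
A usage sketch: resolve a client-side TCP target and walk the resulting list. BIO_ADDRINFO_next(), BIO_ADDRINFO_address() and BIO_ADDRINFO_free() are the real accessors for the returned chain; the host name is illustrative.

BIO_ADDRINFO *res = NULL;

if (BIO_lookup("www.example.com", "443", BIO_LOOKUP_CLIENT,
               AF_UNSPEC, SOCK_STREAM, &res)) {
    const BIO_ADDRINFO *ai;

    for (ai = res; ai != NULL; ai = BIO_ADDRINFO_next(ai)) {
        /* try BIO_socket()/BIO_connect() with BIO_ADDRINFO_address(ai) */
    }
    BIO_ADDRINFO_free(res);
}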
Example #5
/*
 * If this function is called with a non NULL settings value then it must be
 * called prior to any threads making calls to any OpenSSL functions,
 * i.e. passing a non-null settings value is assumed to be single-threaded.
 */
int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings)
{
    static int stoperrset = 0;

    if (stopped) {
        if (!stoperrset) {
            /*
             * We only ever set this once to avoid getting into an infinite
             * loop where the error system keeps trying to init and fails so
             * sets an error etc
             */
            stoperrset = 1;
            CRYPTOerr(CRYPTO_F_OPENSSL_INIT_CRYPTO, ERR_R_INIT_FAIL);
        }
        return 0;
    }

    if (!base_inited && !RUN_ONCE(&base, ossl_init_base))
        return 0;

    if ((opts & OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS)
            && !RUN_ONCE(&load_crypto_strings,
                         ossl_init_no_load_crypto_strings))
        return 0;

    if ((opts & OPENSSL_INIT_LOAD_CRYPTO_STRINGS)
            && !RUN_ONCE(&load_crypto_strings, ossl_init_load_crypto_strings))
        return 0;

    if ((opts & OPENSSL_INIT_NO_ADD_ALL_CIPHERS)
            && !RUN_ONCE(&add_all_ciphers, ossl_init_no_add_algs))
        return 0;

    if ((opts & OPENSSL_INIT_ADD_ALL_CIPHERS)
            && !RUN_ONCE(&add_all_ciphers, ossl_init_add_all_ciphers))
        return 0;

    if ((opts & OPENSSL_INIT_NO_ADD_ALL_DIGESTS)
            && !RUN_ONCE(&add_all_digests, ossl_init_no_add_algs))
        return 0;

    if ((opts & OPENSSL_INIT_ADD_ALL_DIGESTS)
            && !RUN_ONCE(&add_all_digests, ossl_init_add_all_digests))
        return 0;

    if ((opts & OPENSSL_INIT_NO_LOAD_CONFIG)
            && !RUN_ONCE(&config, ossl_init_no_config))
        return 0;

    if (opts & OPENSSL_INIT_LOAD_CONFIG) {
        int ret;
        CRYPTO_THREAD_write_lock(init_lock);
        appname = (settings == NULL) ? NULL : settings->appname;
        ret = RUN_ONCE(&config, ossl_init_config);
        CRYPTO_THREAD_unlock(init_lock);
        if (!ret)
            return 0;
    }

    if ((opts & OPENSSL_INIT_ASYNC)
            && !RUN_ONCE(&async, ossl_init_async))
        return 0;

#ifndef OPENSSL_NO_ENGINE
    if ((opts & OPENSSL_INIT_ENGINE_OPENSSL)
            && !RUN_ONCE(&engine_openssl, ossl_init_engine_openssl))
        return 0;
# if !defined(OPENSSL_NO_HW) && \
    (defined(__OpenBSD__) || defined(__FreeBSD__) || defined(HAVE_CRYPTODEV))
    if ((opts & OPENSSL_INIT_ENGINE_CRYPTODEV)
            && !RUN_ONCE(&engine_cryptodev, ossl_init_engine_cryptodev))
        return 0;
# endif
# ifndef OPENSSL_NO_RDRAND
    if ((opts & OPENSSL_INIT_ENGINE_RDRAND)
            && !RUN_ONCE(&engine_rdrand, ossl_init_engine_rdrand))
        return 0;
# endif
    if ((opts & OPENSSL_INIT_ENGINE_DYNAMIC)
            && !RUN_ONCE(&engine_dynamic, ossl_init_engine_dynamic))
        return 0;
# ifndef OPENSSL_NO_STATIC_ENGINE
#  if !defined(OPENSSL_NO_HW) && !defined(OPENSSL_NO_HW_PADLOCK)
    if ((opts & OPENSSL_INIT_ENGINE_PADLOCK)
            && !RUN_ONCE(&engine_padlock, ossl_init_engine_padlock))
        return 0;
#  endif
#  if defined(OPENSSL_SYS_WIN32) && !defined(OPENSSL_NO_CAPIENG)
    if ((opts & OPENSSL_INIT_ENGINE_CAPI)
            && !RUN_ONCE(&engine_capi, ossl_init_engine_capi))
        return 0;
#  endif
#  if !defined(OPENSSL_NO_AFALGENG)
    if ((opts & OPENSSL_INIT_ENGINE_AFALG)
            && !RUN_ONCE(&engine_afalg, ossl_init_engine_afalg))
        return 0;
#  endif
# endif
    if (opts & (OPENSSL_INIT_ENGINE_ALL_BUILTIN
                | OPENSSL_INIT_ENGINE_OPENSSL
                | OPENSSL_INIT_ENGINE_AFALG)) {
        ENGINE_register_all_complete();
    }
#endif

#ifndef OPENSSL_NO_COMP
    if ((opts & OPENSSL_INIT_ZLIB)
            && !RUN_ONCE(&zlib, ossl_init_zlib))
        return 0;
#endif

    return 1;
}
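
Typical application usage is a single explicit call at startup; a minimal sketch (error strings, all algorithms and the config file requested in one shot, failure treated as fatal):

if (!OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS
                         | OPENSSL_INIT_ADD_ALL_CIPHERS
                         | OPENSSL_INIT_ADD_ALL_DIGESTS
                         | OPENSSL_INIT_LOAD_CONFIG, NULL))
    exit(EXIT_FAILURE);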
Example #6
/*
 * If this function is called with a non NULL settings value then it must be
 * called prior to any threads making calls to any OpenSSL functions,
 * i.e. passing a non-null settings value is assumed to be single-threaded.
 */
int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings)
{
    if (stopped) {
        if (!(opts & OPENSSL_INIT_BASE_ONLY))
            CRYPTOerr(CRYPTO_F_OPENSSL_INIT_CRYPTO, ERR_R_INIT_FAIL);
        return 0;
    }

    /*
     * When the caller specifies OPENSSL_INIT_BASE_ONLY, that should be the
     * *only* option specified.  With that option we return immediately after
     * doing the requested limited initialization.  Note that
     * err_shelve_state() called by us via ossl_init_load_crypto_nodelete()
     * re-enters OPENSSL_init_crypto() with OPENSSL_INIT_BASE_ONLY, but with
     * base already initialized this is a harmless NOOP.
     *
     * If we remain the only caller of err_shelve_state() the recursion should
     * perhaps be removed, but if in doubt, it can be left in place.
     */
    if (!RUN_ONCE(&base, ossl_init_base))
        return 0;

    if (opts & OPENSSL_INIT_BASE_ONLY)
        return 1;

    /*
     * Now that we don't always set up exit handlers, the INIT_BASE_ONLY
     * calls must not have the side-effect of setting up exit handlers;
     * that is why this code block sits below the INIT_BASE_ONLY-conditioned
     * early return above.
     */
    if ((opts & OPENSSL_INIT_NO_ATEXIT) != 0) {
        if (!RUN_ONCE_ALT(&register_atexit, ossl_init_no_register_atexit,
                          ossl_init_register_atexit))
            return 0;
    } else if (!RUN_ONCE(&register_atexit, ossl_init_register_atexit)) {
        return 0;
    }

    if (!RUN_ONCE(&load_crypto_nodelete, ossl_init_load_crypto_nodelete))
        return 0;

    if ((opts & OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS)
            && !RUN_ONCE_ALT(&load_crypto_strings,
                             ossl_init_no_load_crypto_strings,
                             ossl_init_load_crypto_strings))
        return 0;

    if ((opts & OPENSSL_INIT_LOAD_CRYPTO_STRINGS)
            && !RUN_ONCE(&load_crypto_strings, ossl_init_load_crypto_strings))
        return 0;

    if ((opts & OPENSSL_INIT_NO_ADD_ALL_CIPHERS)
            && !RUN_ONCE_ALT(&add_all_ciphers, ossl_init_no_add_all_ciphers,
                             ossl_init_add_all_ciphers))
        return 0;

    if ((opts & OPENSSL_INIT_ADD_ALL_CIPHERS)
            && !RUN_ONCE(&add_all_ciphers, ossl_init_add_all_ciphers))
        return 0;

    if ((opts & OPENSSL_INIT_NO_ADD_ALL_DIGESTS)
            && !RUN_ONCE_ALT(&add_all_digests, ossl_init_no_add_all_digests,
                             ossl_init_add_all_digests))
        return 0;

    if ((opts & OPENSSL_INIT_ADD_ALL_DIGESTS)
            && !RUN_ONCE(&add_all_digests, ossl_init_add_all_digests))
        return 0;

    if ((opts & OPENSSL_INIT_NO_ADD_ALL_MACS)
            && !RUN_ONCE_ALT(&add_all_macs, ossl_init_no_add_all_macs,
                             ossl_init_add_all_macs))
        return 0;

    if ((opts & OPENSSL_INIT_ADD_ALL_MACS)
            && !RUN_ONCE(&add_all_macs, ossl_init_add_all_macs))
        return 0;

    if ((opts & OPENSSL_INIT_ATFORK)
            && !openssl_init_fork_handlers())
        return 0;

    if ((opts & OPENSSL_INIT_NO_LOAD_CONFIG)
            && !RUN_ONCE_ALT(&config, ossl_init_no_config, ossl_init_config))
        return 0;

    if (opts & OPENSSL_INIT_LOAD_CONFIG) {
        int ret;
        CRYPTO_THREAD_write_lock(init_lock);
        conf_settings = settings;
        ret = RUN_ONCE(&config, ossl_init_config);
        conf_settings = NULL;
        CRYPTO_THREAD_unlock(init_lock);
        if (!ret)
            return 0;
    }

    if ((opts & OPENSSL_INIT_ASYNC)
            && !RUN_ONCE(&async, ossl_init_async))
        return 0;

#ifndef OPENSSL_NO_ENGINE
    if ((opts & OPENSSL_INIT_ENGINE_OPENSSL)
            && !RUN_ONCE(&engine_openssl, ossl_init_engine_openssl))
        return 0;
# ifndef OPENSSL_NO_RDRAND
    if ((opts & OPENSSL_INIT_ENGINE_RDRAND)
            && !RUN_ONCE(&engine_rdrand, ossl_init_engine_rdrand))
        return 0;
# endif
    if ((opts & OPENSSL_INIT_ENGINE_DYNAMIC)
            && !RUN_ONCE(&engine_dynamic, ossl_init_engine_dynamic))
        return 0;
# ifndef OPENSSL_NO_STATIC_ENGINE
#  ifndef OPENSSL_NO_DEVCRYPTOENG
    if ((opts & OPENSSL_INIT_ENGINE_CRYPTODEV)
            && !RUN_ONCE(&engine_devcrypto, ossl_init_engine_devcrypto))
        return 0;
#  endif
#  if !defined(OPENSSL_NO_PADLOCKENG)
    if ((opts & OPENSSL_INIT_ENGINE_PADLOCK)
            && !RUN_ONCE(&engine_padlock, ossl_init_engine_padlock))
        return 0;
#  endif
#  if defined(OPENSSL_SYS_WIN32) && !defined(OPENSSL_NO_CAPIENG)
    if ((opts & OPENSSL_INIT_ENGINE_CAPI)
            && !RUN_ONCE(&engine_capi, ossl_init_engine_capi))
        return 0;
#  endif
#  if !defined(OPENSSL_NO_AFALGENG)
    if ((opts & OPENSSL_INIT_ENGINE_AFALG)
            && !RUN_ONCE(&engine_afalg, ossl_init_engine_afalg))
        return 0;
#  endif
# endif
    if (opts & (OPENSSL_INIT_ENGINE_ALL_BUILTIN
                | OPENSSL_INIT_ENGINE_OPENSSL
                | OPENSSL_INIT_ENGINE_AFALG)) {
        ENGINE_register_all_complete();
    }
#endif

#ifndef OPENSSL_NO_COMP
    if ((opts & OPENSSL_INIT_ZLIB)
            && !RUN_ONCE(&zlib, ossl_init_zlib))
        return 0;
#endif

    return 1;
}
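
This later version also honors OPENSSL_INIT_NO_ATEXIT. A sketch of how an embedding library would use it: opt out of the atexit() handler, then shut libcrypto down explicitly at a time of its own choosing.

if (!OPENSSL_init_crypto(OPENSSL_INIT_NO_ATEXIT
                         | OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL))
    return 0;
/* ... use libcrypto ... */
OPENSSL_cleanup();          /* explicit, since the atexit() hook was skipped */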
Example #7
static int rand_bytes(unsigned char *buf, int num, int pseudo)
{
    static volatile int stirred_pool = 0;
    int i, j, k;
    size_t num_ceil, st_idx, st_num;
    int ok;
    long md_c[2];
    unsigned char local_md[MD_DIGEST_LENGTH];
    EVP_MD_CTX *m;
#ifndef GETPID_IS_MEANINGLESS
    pid_t curr_pid = getpid();
#endif
    time_t curr_time = time(NULL);
    int do_stir_pool = 0;
    /* time value for various platforms */
#ifdef OPENSSL_SYS_WIN32
    FILETIME tv;
# ifdef _WIN32_WCE
    SYSTEMTIME t;
    GetSystemTime(&t);
    SystemTimeToFileTime(&t, &tv);
# else
    GetSystemTimeAsFileTime(&tv);
# endif
#elif defined(OPENSSL_SYS_VXWORKS)
    struct timespec tv;
    clock_gettime(CLOCK_REALTIME, &tv);
#elif defined(OPENSSL_SYS_DSPBIOS)
    unsigned long long tv, OPENSSL_rdtsc();
    tv = OPENSSL_rdtsc();
#else
    struct timeval tv;
    gettimeofday(&tv, NULL);
#endif

#ifdef PREDICT
    if (rand_predictable) {
        static unsigned char val = 0;

        for (i = 0; i < num; i++)
            buf[i] = val++;
        return (1);
    }
#endif

    if (num <= 0)
        return 1;

    m = EVP_MD_CTX_new();
    if (m == NULL)
        goto err_mem;

    /* round upwards to multiple of MD_DIGEST_LENGTH/2 */
    num_ceil =
        (1 + (num - 1) / (MD_DIGEST_LENGTH / 2)) * (MD_DIGEST_LENGTH / 2);

    /*
     * (Based on the rand(3) manpage:)
     *
     * For each group of 10 bytes (or less), we do the following:
     *
     * Input into the hash function the local 'md' (which is initialized from
     * the global 'md' before any bytes are generated), the bytes that are to
     * be overwritten by the random bytes, and bytes from the 'state'
     * (incrementing looping index). From this digest output (which is kept
     * in 'md'), the top (up to) 10 bytes are returned to the caller and the
     * bottom 10 bytes are xored into the 'state'.
     *
     * Finally, after we have finished 'num' random bytes for the
     * caller, 'count' (which is incremented) and the local and global 'md'
     * are fed into the hash function and the results are kept in the
     * global 'md'.
     */

    if (!RUN_ONCE(&rand_lock_init, do_rand_lock_init))
        goto err_mem;

    CRYPTO_THREAD_write_lock(rand_lock);
    /*
     * We could end up in an async engine while holding this lock so ensure
     * we don't pause and cause a deadlock
     */
    ASYNC_block_pause();

    /* prevent rand_add() from trying to obtain the lock again */
    CRYPTO_THREAD_write_lock(rand_tmp_lock);
    locking_threadid = CRYPTO_THREAD_get_current_id();
    CRYPTO_THREAD_unlock(rand_tmp_lock);
    crypto_lock_rand = 1;

    if (!initialized) {
        RAND_poll();
        initialized = 1;
    }

    if (!stirred_pool)
        do_stir_pool = 1;

    ok = (entropy >= ENTROPY_NEEDED);
    if (!ok) {
        /*
         * If the PRNG state is not yet unpredictable, then seeing the PRNG
         * output may help attackers to determine the new state; thus we have
         * to decrease the entropy estimate. Once we've had enough initial
         * seeding we don't bother to adjust the entropy count, though,
         * because we're not ambitious to provide *information-theoretic*
         * randomness. NOTE: This approach fails if the program forks before
         * we have enough entropy. Entropy should be collected in a separate
         * input pool and be transferred to the output pool only when the
         * entropy limit has been reached.
         */
        entropy -= num;
        if (entropy < 0)
            entropy = 0;
    }

    if (do_stir_pool) {
        /*
         * In the output function only half of 'md' remains secret, so we
         * better make sure that the required entropy gets 'evenly
         * distributed' through 'state', our randomness pool. The input
         * function (rand_add) chains all of 'md', which makes it more
         * suitable for this purpose.
         */

        int n = STATE_SIZE;     /* so that the complete pool gets accessed */
        while (n > 0) {
#if MD_DIGEST_LENGTH > 20
# error "Please adjust DUMMY_SEED."
#endif
#define DUMMY_SEED "...................." /* at least MD_DIGEST_LENGTH */
            /*
             * Note that the seed does not matter, it's just that
             * rand_add expects to have something to hash.
             */
            rand_add(DUMMY_SEED, MD_DIGEST_LENGTH, 0.0);
            n -= MD_DIGEST_LENGTH;
        }
        if (ok)
            stirred_pool = 1;
    }

    st_idx = state_index;
    st_num = state_num;
    md_c[0] = md_count[0];
    md_c[1] = md_count[1];
    memcpy(local_md, md, sizeof md);

    state_index += num_ceil;
    if (state_index > state_num)
        state_index %= state_num;

    /*
     * state[st_idx], ..., state[(st_idx + num_ceil - 1) % st_num] are now
     * ours (but other threads may use them too)
     */

    md_count[0] += 1;

    /* before unlocking, we must clear 'crypto_lock_rand' */
    crypto_lock_rand = 0;
    ASYNC_unblock_pause();
    CRYPTO_THREAD_unlock(rand_lock);

    while (num > 0) {
        /* num_ceil -= MD_DIGEST_LENGTH/2 */
        j = (num >= MD_DIGEST_LENGTH / 2) ? MD_DIGEST_LENGTH / 2 : num;
        num -= j;
        if (!MD_Init(m))
            goto err;
#ifndef GETPID_IS_MEANINGLESS
        if (curr_pid) {         /* just in the first iteration to save time */
            if (!MD_Update(m, (unsigned char *)&curr_pid, sizeof curr_pid))
                goto err;
            curr_pid = 0;
        }
#endif
        if (curr_time) {        /* just in the first iteration to save time */
            if (!MD_Update(m, (unsigned char *)&curr_time, sizeof curr_time))
                goto err;
            if (!MD_Update(m, (unsigned char *)&tv, sizeof tv))
                goto err;
            curr_time = 0;
            if (!rand_hw_seed(m))
                goto err;
        }
        if (!MD_Update(m, local_md, MD_DIGEST_LENGTH))
            goto err;
        if (!MD_Update(m, (unsigned char *)&(md_c[0]), sizeof(md_c)))
            goto err;

        k = (st_idx + MD_DIGEST_LENGTH / 2) - st_num;
        if (k > 0) {
            if (!MD_Update(m, &(state[st_idx]), MD_DIGEST_LENGTH / 2 - k))
                goto err;
            if (!MD_Update(m, &(state[0]), k))
                goto err;
        } else if (!MD_Update(m, &(state[st_idx]), MD_DIGEST_LENGTH / 2))
            goto err;
        if (!MD_Final(m, local_md))
            goto err;

        for (i = 0; i < MD_DIGEST_LENGTH / 2; i++) {
            /* may compete with other threads */
            state[st_idx++] ^= local_md[i];
            if (st_idx >= st_num)
                st_idx = 0;
            if (i < j)
                *(buf++) = local_md[i + MD_DIGEST_LENGTH / 2];
        }
    }

    if (!MD_Init(m)
            || !MD_Update(m, (unsigned char *)&(md_c[0]), sizeof(md_c))
            || !MD_Update(m, local_md, MD_DIGEST_LENGTH))
        goto err;
    CRYPTO_THREAD_write_lock(rand_lock);
    /*
     * Prevent deadlocks if we end up in an async engine
     */
    ASYNC_block_pause();
    if (!MD_Update(m, md, MD_DIGEST_LENGTH) || !MD_Final(m, md)) {
        CRYPTO_THREAD_unlock(rand_lock);
        goto err;
    }
    ASYNC_unblock_pause();
    CRYPTO_THREAD_unlock(rand_lock);

    EVP_MD_CTX_free(m);
    if (ok)
        return (1);
    else if (pseudo)
        return 0;
    else {
        RANDerr(RAND_F_RAND_BYTES, RAND_R_PRNG_NOT_SEEDED);
        ERR_add_error_data(1, "You need to read the OpenSSL FAQ, "
                           "https://www.openssl.org/docs/faq.html");
        return (0);
    }
err:
    RANDerr(RAND_F_RAND_BYTES, ERR_R_EVP_LIB);
    EVP_MD_CTX_free(m);
    return 0;
err_mem:
    RANDerr(RAND_F_RAND_BYTES, ERR_R_MALLOC_FAILURE);
    EVP_MD_CTX_free(m);
    return 0;

}
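
rand_bytes() sits behind the public RAND API: RAND_bytes() reaches it with pseudo == 0, and the deprecated RAND_pseudo_bytes() path corresponds to pseudo == 1. A caller-side sketch:

unsigned char key[32];

if (RAND_bytes(key, sizeof(key)) != 1) {
    /* the RAND_R_PRNG_NOT_SEEDED path above: seed via RAND_add()/RAND_poll()
       and consult the error queue (ERR_get_error()) for details */
}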
Example #8
static int rand_add(const void *buf, int num, double add)
{
    int i, j, k, st_idx;
    long md_c[2];
    unsigned char local_md[MD_DIGEST_LENGTH];
    EVP_MD_CTX *m;
    int do_not_lock;
    int rv = 0;

    if (!num)
        return 1;

    /*
     * (Based on the rand(3) manpage)
     *
     * The input is chopped up into units of 20 bytes (or less for
     * the last block).  Each of these blocks is run through the hash
     * function as follows:  The data passed to the hash function
     * is the current 'md', the same number of bytes from the 'state'
     * (the location determined by an incremented looping index) as
     * the current 'block', the new key data 'block', and 'count'
     * (which is incremented after each use).
     * The result of this is kept in 'md' and also xored into the
     * 'state' at the same locations that were used as input into the
     * hash function.
     */

    m = EVP_MD_CTX_new();
    if (m == NULL)
        goto err;

    if (!RUN_ONCE(&rand_lock_init, do_rand_lock_init))
        goto err;

    /* check if we already have the lock */
    if (crypto_lock_rand) {
        CRYPTO_THREAD_ID cur = CRYPTO_THREAD_get_current_id();
        CRYPTO_THREAD_read_lock(rand_tmp_lock);
        do_not_lock = CRYPTO_THREAD_compare_id(locking_threadid, cur);
        CRYPTO_THREAD_unlock(rand_tmp_lock);
    } else
        do_not_lock = 0;

    if (!do_not_lock)
        CRYPTO_THREAD_write_lock(rand_lock);
    st_idx = state_index;

    /*
     * use our own copies of the counters so that even if a concurrent thread
     * seeds with exactly the same data and uses the same subarray there's
     * _some_ difference
     */
    md_c[0] = md_count[0];
    md_c[1] = md_count[1];

    memcpy(local_md, md, sizeof md);

    /* state_index <= state_num <= STATE_SIZE */
    state_index += num;
    if (state_index >= STATE_SIZE) {
        state_index %= STATE_SIZE;
        state_num = STATE_SIZE;
    } else if (state_num < STATE_SIZE) {
        if (state_index > state_num)
            state_num = state_index;
    }
    /* state_index <= state_num <= STATE_SIZE */

    /*
     * state[st_idx], ..., state[(st_idx + num - 1) % STATE_SIZE] are what we
     * will use now, but other threads may use them as well
     */

    md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);

    if (!do_not_lock)
        CRYPTO_THREAD_unlock(rand_lock);

    for (i = 0; i < num; i += MD_DIGEST_LENGTH) {
        j = (num - i);
        j = (j > MD_DIGEST_LENGTH) ? MD_DIGEST_LENGTH : j;

        if (!MD_Init(m))
            goto err;
        if (!MD_Update(m, local_md, MD_DIGEST_LENGTH))
            goto err;
        k = (st_idx + j) - STATE_SIZE;
        if (k > 0) {
            if (!MD_Update(m, &(state[st_idx]), j - k))
                goto err;
            if (!MD_Update(m, &(state[0]), k))
                goto err;
        } else if (!MD_Update(m, &(state[st_idx]), j))
            goto err;

        /* DO NOT REMOVE THE FOLLOWING CALL TO MD_Update()! */
        if (!MD_Update(m, buf, j))
            goto err;
        /*
         * We know that this line may cause programs such as purify and
         * valgrind to complain about use of uninitialized data.  The problem
         * is not with this code, it's with the caller.  Removing the call
         * would give you really bad randomness and thereby other problems
         * such as very insecure keys.
         */

        if (!MD_Update(m, (unsigned char *)&(md_c[0]), sizeof(md_c)))
            goto err;
        if (!MD_Final(m, local_md))
            goto err;
        md_c[1]++;

        buf = (const char *)buf + j;

        for (k = 0; k < j; k++) {
            /*
             * Parallel threads may interfere with this, but each byte of the
             * new state is always the XOR of some previous value of itself
             * and local_md (intermediate values may be lost).  Always using
             * locking could hurt performance more than necessary given that
             * conflicts occur only when the total seeding is longer than the
             * random state.
             */
            state[st_idx++] ^= local_md[k];
            if (st_idx >= STATE_SIZE)
                st_idx = 0;
        }
    }

    if (!do_not_lock)
        CRYPTO_THREAD_write_lock(rand_lock);
    /*
     * Don't just copy back local_md into md -- this could mean that other
     * thread's seeding remains without effect (except for the incremented
     * counter).  By XORing it we keep at least as much entropy as fits into
     * md.
     */
    for (k = 0; k < (int)sizeof(md); k++) {
        md[k] ^= local_md[k];
    }
    if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
        entropy += add;
    if (!do_not_lock)
        CRYPTO_THREAD_unlock(rand_lock);

    rv = 1;
err:
    EVP_MD_CTX_free(m);
    return rv;
}
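
rand_add() is reached through the public RAND_add()/RAND_seed() entry points; the add parameter is the caller's entropy estimate in bytes, accumulated above until ENTROPY_NEEDED is reached. A caller-side sketch:

unsigned char seed[64];

/* ... fill seed[] from a platform-specific entropy source ... */
RAND_add(seed, sizeof(seed), 64.0);   /* claim 64 bytes of entropy */
/* RAND_seed(buf, n) is shorthand for RAND_add(buf, n, n) */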
Example #9
void err_free_strings_int(void)
{
    if (!RUN_ONCE(&err_string_init, do_err_strings_init))
        return;
}
Example #10
/*
 * Do "physical I/O" on behalf of a user.  "Physical I/O" is I/O directly
 * from the raw device to user buffers, and bypasses the buffer cache.
 *
 * Comments in brackets are from Leffler, et al.'s pseudo-code implementation.
 */
int
physio(void (*strategy)(struct buf *), struct buf *obp, dev_t dev, int flags,
    void (*min_phys)(struct buf *), struct uio *uio)
{
	struct iovec *iovp;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int i, error;
	struct buf *bp = NULL;
	struct physio_stat *ps;
	int concurrency = PHYSIO_CONCURRENCY - 1;

	error = RUN_ONCE(&physio_initialized, physio_init);
	if (__predict_false(error != 0)) {
		return error;
	}

	DPRINTF(("%s: called: off=%" PRIu64 ", resid=%zu\n",
	    __func__, uio->uio_offset, uio->uio_resid));

	flags &= B_READ | B_WRITE;

	if ((ps = kmem_zalloc(sizeof(*ps), KM_SLEEP)) == NULL)
		return ENOMEM;
	/* ps->ps_running = 0; */
	/* ps->ps_error = 0; */
	/* ps->ps_failed = 0; */
	ps->ps_orig_bp = obp;
	ps->ps_endoffset = -1;
	mutex_init(&ps->ps_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&ps->ps_cv, "physio");

	/* Make sure we have a buffer, creating one if necessary. */
	if (obp != NULL) {
		/* [raise the processor priority level to splbio;] */
		mutex_enter(&bufcache_lock);
		/* Mark it busy, so nobody else will use it. */
		while (bbusy(obp, false, 0, NULL) == EPASSTHROUGH)
			;
		mutex_exit(&bufcache_lock);
		concurrency = 0; /* see "XXXkludge" comment below */
	}

	uvm_lwp_hold(l);

	for (i = 0; i < uio->uio_iovcnt; i++) {
		bool sync = true;

		iovp = &uio->uio_iov[i];
		while (iovp->iov_len > 0) {
			size_t todo;
			vaddr_t endp;

			mutex_enter(&ps->ps_lock);
			if (ps->ps_failed != 0) {
				goto done_locked;
			}
			physio_wait(ps, sync ? 0 : concurrency);
			mutex_exit(&ps->ps_lock);
			if (obp != NULL) {
				/*
				 * XXXkludge
				 * some drivers use "obp" as an identifier.
				 */
				bp = obp;
			} else {
				bp = getiobuf(NULL, true);
				bp->b_cflags = BC_BUSY;
			}
			bp->b_dev = dev;
			bp->b_proc = p;
			bp->b_private = ps;

			/*
			 * [mark the buffer busy for physical I/O]
			 * (i.e. set B_PHYS (because it's an I/O to user
			 * memory, and B_RAW, because B_RAW is to be
			 * "Set by physio for raw transfers.", in addition
			 * to the "busy" and read/write flag.)
			 */
			bp->b_oflags = 0;
			bp->b_cflags = BC_BUSY;
			bp->b_flags = flags | B_PHYS | B_RAW;
			bp->b_iodone = physio_biodone;

			/* [set up the buffer for a maximum-sized transfer] */
			bp->b_blkno = btodb(uio->uio_offset);
			if (dbtob(bp->b_blkno) != uio->uio_offset) {
				error = EINVAL;
				goto done;
			}
			bp->b_bcount = MIN(MAXPHYS, iovp->iov_len);
			bp->b_data = iovp->iov_base;

			/*
			 * [call minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*min_phys)(bp);
			todo = bp->b_bufsize = bp->b_bcount;
#if defined(DIAGNOSTIC)
			if (todo > MAXPHYS)
				panic("todo(%zu) > MAXPHYS; minphys broken",
				    todo);
#endif /* defined(DIAGNOSTIC) */

			sync = false;
			endp = (vaddr_t)bp->b_data + todo;
			if (trunc_page(endp) != endp) {
				/*
				 * following requests can overlap.
				 * note that uvm_vslock does round_page.
				 */
				sync = true;
			}

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 * Beware vmapbuf(); it clobbers b_data and
			 * saves it in b_saveaddr.  However, vunmapbuf()
			 * restores it.
			 */
			error = uvm_vslock(p->p_vmspace, bp->b_data, todo,
			    (flags & B_READ) ?  VM_PROT_WRITE : VM_PROT_READ);
			if (error) {
				goto done;
			}
			vmapbuf(bp, todo);

			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

			mutex_enter(&ps->ps_lock);
			ps->ps_running++;
			mutex_exit(&ps->ps_lock);

			/* [call strategy to start the transfer] */
			(*strategy)(bp);
			bp = NULL;

			iovp->iov_len -= todo;
			iovp->iov_base = (char *)iovp->iov_base + todo;
			uio->uio_offset += todo;
			uio->uio_resid -= todo;
		}
	}

done:
	mutex_enter(&ps->ps_lock);
done_locked:
	physio_wait(ps, 0);
	mutex_exit(&ps->ps_lock);

	if (ps->ps_failed != 0) {
		off_t delta;

		delta = uio->uio_offset - ps->ps_endoffset;
		KASSERT(delta > 0);
		uio->uio_resid += delta;
		/* uio->uio_offset = ps->ps_endoffset; */
	} else {
		KASSERT(ps->ps_endoffset == -1);
	}
	if (bp != NULL && bp != obp) {
		putiobuf(bp);
	}
	if (error == 0) {
		error = ps->ps_error;
	}
	mutex_destroy(&ps->ps_lock);
	cv_destroy(&ps->ps_cv);
	kmem_free(ps, sizeof(*ps));

	/*
	 * [clean up the state of the buffer]
	 * Remember if somebody wants it, so we can wake them up below.
	 * Also, if we had to steal it, give it back.
	 */
	if (obp != NULL) {
		KASSERT((obp->b_cflags & BC_BUSY) != 0);

		/*
		 * [if another process is waiting for the raw I/O buffer,
		 *    wake up processes waiting to do physical I/O;
		 */
		mutex_enter(&bufcache_lock);
		obp->b_cflags &= ~(BC_BUSY | BC_WANTED);
		obp->b_flags &= ~(B_PHYS | B_RAW);
		obp->b_iodone = NULL;
		cv_broadcast(&obp->b_busy);
		mutex_exit(&bufcache_lock);
	}
	uvm_lwp_rele(l);

	DPRINTF(("%s: done: off=%" PRIu64 ", resid=%zu\n",
	    __func__, uio->uio_offset, uio->uio_resid));

	return error;
}
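
A usage sketch: a raw character device's read routine typically hands the whole transfer to physio(), with minphys as the default transfer-bounding function (mydev_strategy is hypothetical):

static int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
	return physio(mydev_strategy, NULL, dev, B_READ, minphys, uio);
}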
Example #11
/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
			return (EINVAL);
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}