/*
 * Resolve apr_time_t @t into the exploded form *@xt (GMT when @use_gmt is
 * non-zero, local time otherwise), using a per-second lock-free cache to
 * avoid the cost of apr_time_exp_gmt()/apr_time_exp_lt() on every call.
 * Returns APR_SUCCESS, or the error from the underlying explode call.
 */
static apr_status_t cached_explode(apr_time_exp_t *xt, apr_time_t t,
                                   struct exploded_time_cache_element *cache,
                                   int use_gmt)
{
    apr_int64_t seconds = apr_time_sec(t);
    struct exploded_time_cache_element *cache_element =
        &(cache[seconds & TIME_CACHE_MASK]);
    struct exploded_time_cache_element cache_element_snapshot;

    /* The cache is implemented as a ring buffer.  Each second,
     * it uses a different element in the buffer.  The timestamp
     * in the element indicates whether the element contains the
     * exploded time for the current second (vs the time
     * 'now - AP_TIME_RECENT_THRESHOLD' seconds ago).  If the
     * cached value is for the current time, we use it.  Otherwise,
     * we compute the apr_time_exp_t and store it in this
     * cache element.  Note that the timestamp in the cache
     * element is updated only after the exploded time.  Thus
     * if two threads hit this cache element simultaneously
     * at the start of a new second, they'll both explode the
     * time and store it.  I.e., the writers will collide, but
     * they'll be writing the same value.
     */
    if (cache_element->t >= seconds) {
        /* There is an intentional race condition in this design:
         * in a multithreaded app, one thread might be reading
         * from this cache_element to resolve a timestamp from
         * TIME_CACHE_SIZE seconds ago at the same time that
         * another thread is copying the exploded form of the
         * current time into the same cache_element.  (I.e., the
         * first thread might hit this element of the ring buffer
         * just as the element is being recycled.)  This can
         * also happen at the start of a new second, if a
         * reader accesses the cache_element after a writer
         * has updated cache_element.t but before the writer
         * has finished updating the whole cache_element.
         *
         * Rather than trying to prevent this race condition
         * with locks, we allow it to happen and then detect
         * and correct it.  The detection works like this:
         *   Step 1: Take a "snapshot" of the cache element by
         *           copying it into a temporary buffer.
         *   Step 2: Check whether the snapshot contains consistent
         *           data: the timestamps at the start and end of
         *           the cache_element should both match the 'seconds'
         *           value that we computed from the input time.
         *           If these three don't match, then the snapshot
         *           shows the cache_element in the middle of an
         *           update, and its contents are invalid.
         *   Step 3: If the snapshot is valid, use it.  Otherwise,
         *           just give up on the cache and explode the
         *           input time.
         */
        memcpy(&cache_element_snapshot, cache_element,
               sizeof(struct exploded_time_cache_element));
        if ((seconds != cache_element_snapshot.t) ||
            (seconds != cache_element_snapshot.t_validate)) {
            /* Invalid snapshot: torn read detected, fall back to a
             * direct (uncached) explode of the input time. */
            if (use_gmt) {
                return apr_time_exp_gmt(xt, t);
            }
            else {
                return apr_time_exp_lt(xt, t);
            }
        }
        else {
            /* Valid snapshot: serve the exploded time from the cache. */
            memcpy(xt, &(cache_element_snapshot.xt),
                   sizeof(apr_time_exp_t));
        }
    }
    else {
        /* Cache miss: explode the time ourselves and refresh this
         * ring-buffer slot for subsequent callers. */
        apr_status_t r;
        if (use_gmt) {
            r = apr_time_exp_gmt(xt, t);
        }
        else {
            r = apr_time_exp_lt(xt, t);
        }
        if (r != APR_SUCCESS) {
            return r;
        }
        /* Write order matters for the torn-read detection above:
         * t first, then the payload, then t_validate last. */
        cache_element->t = seconds;
        memcpy(&(cache_element->xt), xt, sizeof(apr_time_exp_t));
        cache_element->t_validate = seconds;
    }
    /* The cache stores only whole-second granularity; patch in the
     * sub-second component from the original timestamp. */
    xt->tm_usec = (int)apr_time_usec(t);
    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_pollset_poll(apr_pollset_t *pollset, apr_interval_time_t timeout, apr_int32_t *num, const apr_pollfd_t **descriptors) { int rv; apr_uint32_t i, j; struct timeval tv, *tvptr; fd_set readset, writeset, exceptset; if (timeout < 0) { tvptr = NULL; } else { tv.tv_sec = (long) apr_time_sec(timeout); tv.tv_usec = (long) apr_time_usec(timeout); tvptr = &tv; } memcpy(&readset, &(pollset->readset), sizeof(fd_set)); memcpy(&writeset, &(pollset->writeset), sizeof(fd_set)); memcpy(&exceptset, &(pollset->exceptset), sizeof(fd_set)); #ifdef NETWARE if (HAS_PIPES(pollset->set_type)) { rv = pipe_select(pollset->maxfd + 1, &readset, &writeset, &exceptset, tvptr); } else #endif rv = select(pollset->maxfd + 1, &readset, &writeset, &exceptset, tvptr); (*num) = rv; if (rv < 0) { return apr_get_netos_error(); } if (rv == 0) { return APR_TIMEUP; } j = 0; for (i = 0; i < pollset->nelts; i++) { apr_os_sock_t fd; if (pollset->query_set[i].desc_type == APR_POLL_SOCKET) { fd = pollset->query_set[i].desc.s->socketdes; } else { #if !APR_FILES_AS_SOCKETS return APR_EBADF; #else fd = pollset->query_set[i].desc.f->filedes; #endif } if (FD_ISSET(fd, &readset) || FD_ISSET(fd, &writeset) || FD_ISSET(fd, &exceptset)) { pollset->result_set[j] = pollset->query_set[i]; pollset->result_set[j].rtnevents = 0; if (FD_ISSET(fd, &readset)) { pollset->result_set[j].rtnevents |= APR_POLLIN; } if (FD_ISSET(fd, &writeset)) { pollset->result_set[j].rtnevents |= APR_POLLOUT; } if (FD_ISSET(fd, &exceptset)) { pollset->result_set[j].rtnevents |= APR_POLLERR; } j++; } } (*num) = j; if (descriptors) *descriptors = pollset->result_set; return APR_SUCCESS; }
/*
 * Create a process-shared mutex backed by a named POSIX semaphore.
 * The semaphore name is derived from @fname (two hashes) or from the
 * current time when @fname is NULL; the name is saved in new_mutex->fname
 * for debugging.  Returns APR_SUCCESS or a raw errno value on failure.
 */
static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
                                            const char *fname)
{
#define APR_POSIXSEM_NAME_MIN 13
    sem_t *psem;
    char semname[32];
    /*
     * This bogusness is to follow what appears to be the
     * lowest common denominator in Posix semaphore naming:
     *    - start with '/'
     *    - be at most 14 chars
     *    - be unique and not match anything on the filesystem
     *
     * Because of this, we use fname to generate a (unique) hash
     * and use that as the name of the semaphore. If no filename was
     * given, we create one based on the time. We tuck the name
     * away, since it might be useful for debugging. We use 2 hashing
     * functions to try to avoid collisions.
     *
     * To make this as robust as possible, we initially try something
     * larger (and hopefully more unique) and gracefully fail down to the
     * LCD above.
     *
     * NOTE: Darwin (Mac OS X) seems to be the most restrictive
     * implementation. Versions previous to Darwin 6.2 had the 14
     * char limit, but later rev's allow up to 31 characters.
     */
    if (fname) {
        apr_ssize_t flen = strlen(fname);
        char *p = apr_pstrndup(new_mutex->pool, fname, strlen(fname));
        unsigned int h1, h2;
        /* Two independent hashes of the filename reduce the chance of
         * two distinct fnames mapping to the same semaphore name. */
        h1 = (apr_hashfunc_default((const char *)p, &flen) & 0xffffffff);
        h2 = (rshash(p) & 0xffffffff);
        apr_snprintf(semname, sizeof(semname), "/ApR.%xH%x", h1, h2);
    } else {
        /* No filename given: build a (hopefully unique) name from the
         * current wall-clock seconds and microseconds. */
        apr_time_t now;
        unsigned long sec;
        unsigned long usec;
        now = apr_time_now();
        sec = apr_time_sec(now);
        usec = apr_time_usec(now);
        apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
    }
    /* O_CREAT|O_EXCL guarantees we own a fresh semaphore; retry on EINTR. */
    do {
        psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
    } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    if (psem == (sem_t *)SEM_FAILED) {
        if (errno == ENAMETOOLONG) {
            /* Oh well, good try: truncate to the 14-char (incl. '/')
             * lowest-common-denominator length and retry once. */
            semname[APR_POSIXSEM_NAME_MIN] = '\0';
        } else {
            /* NOTE(review): returns raw errno rather than an
             * APR_OS_START_SYSERR-mapped status - confirm this matches
             * the module's error convention. */
            return errno;
        }
        do {
            psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
        } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    }
    if (psem == (sem_t *)SEM_FAILED) {
        return errno;
    }
    /* Ahhh. The joys of Posix sems. Predelete it...
     * (unlinking now means the kernel reclaims the semaphore
     * automatically once every process holding it exits). */
    sem_unlink(semname);
    new_mutex->os.psem_interproc = psem;
    new_mutex->fname = apr_pstrdup(new_mutex->pool, semname);
    apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex,
                              apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
/*
 * One-shot poll over the @num descriptors in @aprset, implemented with
 * select().  Waits up to @timeout (forever when negative), fills each
 * entry's rtnevents, and stores the count of ready descriptors in *@nsds.
 * Returns APR_SUCCESS, APR_TIMEUP, a network errno translation, or
 * APR_EBADF for unsupported descriptor types / out-of-range fds.
 */
APR_DECLARE(apr_status_t) apr_poll(apr_pollfd_t *aprset, int num,
                                   apr_int32_t *nsds,
                                   apr_interval_time_t timeout)
{
    fd_set readset, writeset, exceptset;
    int rv, i;
    int maxfd = -1;
    struct timeval tv, *tvptr;
#ifdef NETWARE
    apr_datatype_e set_type = APR_NO_DESC;
#endif

    /* Negative timeout means block indefinitely (NULL timeval). */
    if (timeout < 0) {
        tvptr = NULL;
    }
    else {
        tv.tv_sec = (long) apr_time_sec(timeout);
        tv.tv_usec = (long) apr_time_usec(timeout);
        tvptr = &tv;
    }

    FD_ZERO(&readset);
    FD_ZERO(&writeset);
    FD_ZERO(&exceptset);

    /* Pass 1: translate the APR descriptor array into fd_sets and
     * track the highest fd for select()'s nfds argument. */
    for (i = 0; i < num; i++) {
        apr_os_sock_t fd;

        aprset[i].rtnevents = 0;

        if (aprset[i].desc_type == APR_POLL_SOCKET) {
#ifdef NETWARE
            /* On NetWare, pipes and sockets cannot be mixed in one call. */
            if (HAS_PIPES(set_type)) {
                return APR_EBADF;
            }
            else {
                set_type = APR_POLL_SOCKET;
            }
#endif
            fd = aprset[i].desc.s->socketdes;
        }
        else if (aprset[i].desc_type == APR_POLL_FILE) {
#if !APR_FILES_AS_SOCKETS
            return APR_EBADF;
#else
#ifdef NETWARE
            if (aprset[i].desc.f->is_pipe && !HAS_SOCKETS(set_type)) {
                set_type = APR_POLL_FILE;
            }
            else
                return APR_EBADF;
#endif /* NETWARE */

            fd = aprset[i].desc.f->filedes;

#endif /* APR_FILES_AS_SOCKETS */
        }
        else {
            /* Unknown desc_type: silently stop processing further
             * entries (deliberate: earlier entries are still polled). */
            break;
        }
#if !defined(WIN32) && !defined(NETWARE) /* socket sets handled with array of handles */
        if (fd >= FD_SETSIZE) {
            /* XXX invent new error code so application has a clue */
            return APR_EBADF;
        }
#endif
        if (aprset[i].reqevents & APR_POLLIN) {
            FD_SET(fd, &readset);
        }
        if (aprset[i].reqevents & APR_POLLOUT) {
            FD_SET(fd, &writeset);
        }
        if (aprset[i].reqevents &
            (APR_POLLPRI | APR_POLLERR | APR_POLLHUP | APR_POLLNVAL)) {
            FD_SET(fd, &exceptset);
        }
        if ((int) fd > maxfd) {
            maxfd = (int) fd;
        }
    }

#ifdef NETWARE
    if (HAS_PIPES(set_type)) {
        rv = pipe_select(maxfd + 1, &readset, &writeset, &exceptset, tvptr);
    }
    else {
#endif

        rv = select(maxfd + 1, &readset, &writeset, &exceptset, tvptr);

#ifdef NETWARE
    }
#endif

    (*nsds) = rv;
    if ((*nsds) == 0) {
        return APR_TIMEUP;
    }
    if ((*nsds) < 0) {
        return apr_get_netos_error();
    }

    /* Pass 2: translate fd_set results back into rtnevents and count
     * how many descriptors actually fired. */
    (*nsds) = 0;
    for (i = 0; i < num; i++) {
        apr_os_sock_t fd;

        if (aprset[i].desc_type == APR_POLL_SOCKET) {
            fd = aprset[i].desc.s->socketdes;
        }
        else if (aprset[i].desc_type == APR_POLL_FILE) {
#if !APR_FILES_AS_SOCKETS
            return APR_EBADF;
#else
            fd = aprset[i].desc.f->filedes;
#endif
        }
        else {
            break;
        }
        if (FD_ISSET(fd, &readset)) {
            aprset[i].rtnevents |= APR_POLLIN;
        }
        if (FD_ISSET(fd, &writeset)) {
            aprset[i].rtnevents |= APR_POLLOUT;
        }
        if (FD_ISSET(fd, &exceptset)) {
            aprset[i].rtnevents |= APR_POLLERR;
        }
        if (aprset[i].rtnevents) {
            (*nsds)++;
        }
    }

    return APR_SUCCESS;
}
char *p = semname;
        int i;

        /* NOTE(review): this is a fragment of an MD5-based variant of the
         * semaphore-naming code; the enclosing function and the
         * declarations of 'digest', 'hex', and APR_POSIXSEM_NAME_MAX are
         * outside the visible span - verify against the full file. */
        apr_md5(digest, fname, strlen(fname));
        /* POSIX semaphore names must begin with '/'. */
        *p++ = '/';   /* must start with /, right? */
        /* Hex-encode the digest, then clamp to the maximum portable
         * semaphore-name length. */
        for (i = 0; i < sizeof(digest); i++) {
            *p++ = hex[digest[i] >> 4];
            *p++ = hex[digest[i] & 0xF];
        }
        semname[APR_POSIXSEM_NAME_MAX] = '\0';
    } else {
        /* No filename: derive a (hopefully unique) name from the clock. */
        apr_time_t now;
        unsigned long sec;
        unsigned long usec;
        now = apr_time_now();
        sec = apr_time_sec(now);
        usec = apr_time_usec(now);
        apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
    }
    /* O_CREAT|O_EXCL guarantees a fresh semaphore; retry on EINTR. */
    do {
        psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
    } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    if (psem == (sem_t *)SEM_FAILED) {
        if (errno == ENAMETOOLONG) {
            /* Oh well, good try: fall back to the short LCD name length
             * and retry once. */
            semname[APR_POSIXSEM_NAME_MIN] = '\0';
        } else {
            return errno;
        }
        do {
            psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
        } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
/*
 * kqueue-backed pollset poll.  Waits up to @timeout (forever when
 * negative) for events, copies ready descriptors into the pollset's
 * result array, stores the count in *@num, and exposes the array via
 * *@descriptors when non-NULL.  A wakeup-pipe event yields APR_EINTR
 * unless other descriptors also fired.
 */
static apr_status_t impl_pollset_poll(apr_pollset_t *pollset,
                                      apr_interval_time_t timeout,
                                      apr_int32_t *num,
                                      const apr_pollfd_t **descriptors)
{
    int ret, i, j;
    struct timespec tv, *tvptr;
    apr_status_t rv = APR_SUCCESS;
    apr_pollfd_t fd;

    /* Negative timeout means block indefinitely (NULL timespec);
     * kevent() takes nanoseconds, so scale the microsecond part. */
    if (timeout < 0) {
        tvptr = NULL;
    }
    else {
        tv.tv_sec = (long) apr_time_sec(timeout);
        tv.tv_nsec = (long) apr_time_usec(timeout) * 1000;
        tvptr = &tv;
    }

    ret = kevent(pollset->p->kqueue_fd, NULL, 0, pollset->p->ke_set,
                 pollset->p->setsize, tvptr);
    (*num) = ret;
    if (ret < 0) {
        rv = apr_get_netos_error();
    }
    else if (ret == 0) {
        rv = APR_TIMEUP;
    }
    else {
        /* Copy fired events to result_set, filtering out the internal
         * wakeup pipe (which signals apr_pollset_wakeup() instead). */
        for (i = 0, j = 0; i < ret; i++) {
            /* Each kevent's udata points at the pfd_elem_t registered
             * at add time; recover the caller's apr_pollfd_t from it. */
            fd = (((pfd_elem_t*)(pollset->p->ke_set[i].udata))->pfd);
            if ((pollset->flags & APR_POLLSET_WAKEABLE) &&
                fd.desc_type == APR_POLL_FILE &&
                fd.desc.f == pollset->wakeup_pipe[0]) {
                apr_pollset_drain_wakeup_pipe(pollset);
                rv = APR_EINTR;
            }
            else {
                pollset->p->result_set[j] = fd;
                pollset->p->result_set[j].rtnevents =
                        get_kqueue_revent(pollset->p->ke_set[i].filter,
                                          pollset->p->ke_set[i].flags);
                j++;
            }
        }
        if ((*num = j)) { /* any event besides wakeup pipe? */
            /* Real events outrank the wakeup signal: report success. */
            rv = APR_SUCCESS;
            if (descriptors) {
                *descriptors = pollset->p->result_set;
            }
        }
    }

    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to the Free Ring so their
     * pfd_elem_t slots can be reused by future adds. */
    APR_RING_CONCAT(&(pollset->p->free_ring), &(pollset->p->dead_ring),
                    pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
APR_DECLARE(apr_status_t) apr_pollset_poll(apr_pollset_t *pollset, apr_interval_time_t timeout, apr_int32_t *num, const apr_pollfd_t **descriptors) { int rv; apr_uint32_t i, j; struct timeval tv, *tvptr; fd_set readset, writeset, exceptset; if (timeout < 0) { tvptr = NULL; } else { tv.tv_sec = (long)apr_time_sec(timeout); tv.tv_usec = (long)apr_time_usec(timeout); tvptr = &tv; } memcpy(&readset, &(pollset->readset), sizeof(fd_set)); memcpy(&writeset, &(pollset->writeset), sizeof(fd_set)); memcpy(&exceptset, &(pollset->exceptset), sizeof(fd_set)); #ifdef NETWARE if (HAS_PIPES(pollset->set_type)) { rv = pipe_select(pollset->maxfd + 1, &readset, &writeset, &exceptset, tvptr); } else #endif rv = select(pollset->maxfd + 1, &readset, &writeset, &exceptset, tvptr); /* Set initial *num now for expected -1 / 0 failures, or errors below */ (*num) = rv; if (rv < 0) { return apr_get_netos_error(); } if (rv == 0) { return APR_TIMEUP; } j = 0; for (i = 0; i < pollset->nelts; i++) { apr_os_sock_t fd; if (pollset->query_set[i].desc_type == APR_POLL_SOCKET) { fd = pollset->query_set[i].desc.s->socketdes; } else { #if !APR_FILES_AS_SOCKETS return APR_EBADF; #else fd = pollset->query_set[i].desc.f->filedes; #endif } if (FD_ISSET(fd, &readset) || FD_ISSET(fd, &writeset) || FD_ISSET(fd, &exceptset)) { pollset->result_set[j] = pollset->query_set[i]; pollset->result_set[j].rtnevents = 0; if (FD_ISSET(fd, &readset)) { pollset->result_set[j].rtnevents |= APR_POLLIN; } if (FD_ISSET(fd, &writeset)) { pollset->result_set[j].rtnevents |= APR_POLLOUT; } if (FD_ISSET(fd, &exceptset)) { pollset->result_set[j].rtnevents |= APR_POLLERR; } j++; } } /* Reset computed *num to account for multiply-polled fd's which * select() - on some platforms, treats as a single fd result. * The *num returned must match the size of result_set[] */ (*num) = j; *descriptors = pollset->result_set; return APR_SUCCESS; }
/*
 * Process one received RTP packet of @size bytes in @buffer for
 * @rtp_stream: validate the header, track SSRC/sequence/timestamp
 * statistics, and hand the payload (codec frame or named event) to the
 * jitter buffer.  Returns TRUE when the packet was accepted, FALSE when
 * it was rejected (invalid header, SSRC probation, or timestamp drift).
 */
static apt_bool_t rtp_rx_packet_receive(mpf_rtp_stream_t *rtp_stream, void *buffer, apr_size_t size)
{
	rtp_receiver_t *receiver = &rtp_stream->receiver;
	/* NOTE(review): rx_descriptor is dereferenced below without a NULL
	 * check - presumably guaranteed non-NULL by the caller once the
	 * stream is opened; confirm against the call site. */
	mpf_codec_descriptor_t *descriptor = rtp_stream->base->rx_descriptor;
	apr_time_t time;
	rtp_ssrc_result_e ssrc_result;
	/* Advances 'buffer' past the RTP header (incl. CSRC/extensions) and
	 * shrinks 'size' accordingly; NULL means a malformed packet. */
	rtp_header_t *header = rtp_rx_header_skip(&buffer,&size);
	if(!header) {
		/* invalid RTP packet */
		receiver->stat.invalid_packets++;
		return FALSE;
	}

	/* Wire format is big-endian: convert the multi-byte fields to host
	 * byte order before using them. */
	header->sequence = ntohs((apr_uint16_t)header->sequence);
	header->timestamp = ntohl(header->timestamp);
	header->ssrc = ntohl(header->ssrc);

	time = apr_time_now();

	RTP_TRACE("RTP time=%6u ssrc=%8x pt=%3u %cts=%9u seq=%5u size=%"APR_SIZE_T_FMT"\n",
					(apr_uint32_t)apr_time_usec(time),
					header->ssrc, header->type,
					(header->marker == 1) ? '*' : ' ',
					header->timestamp, header->sequence, size);

	if(!receiver->stat.received_packets) {
		/* initialization: first packet seeds the receiver statistics */
		rtp_rx_stat_init(receiver,header,&time);
	}

	ssrc_result = rtp_rx_ssrc_update(receiver,header->ssrc);
	if(ssrc_result == RTP_SSRC_PROBATION) {
		/* New/unproven SSRC: drop until it proves itself. */
		receiver->stat.invalid_packets++;
		return FALSE;
	}
	else if(ssrc_result == RTP_SSRC_RESTART) {
		/* Source changed: reset state and re-seed statistics. */
		rtp_rx_restart(receiver);
		rtp_rx_stat_init(receiver,header,&time);
	}

	rtp_rx_seq_update(receiver,(apr_uint16_t)header->sequence);

	if(header->type == descriptor->payload_type) {
		/* codec payload: check for timestamp drift, then enqueue */
		if(rtp_rx_ts_update(receiver,descriptor,&time,header->timestamp) == RTP_TS_DRIFT) {
			rtp_rx_restart(receiver);
			return FALSE;
		}

		if(mpf_jitter_buffer_write(receiver->jb,buffer,size,header->timestamp) != JB_OK) {
			/* Jitter buffer rejected the frame (full/late); repeated
			 * failures may trigger a receiver restart. */
			receiver->stat.discarded_packets++;
			rtp_rx_failure_threshold_check(receiver);
		}
	}
	else if(rtp_stream->base->rx_event_descriptor &&
		header->type == rtp_stream->base->rx_event_descriptor->payload_type) {
		/* named event (e.g. RFC 4733 telephone-event) */
		mpf_named_event_frame_t *named_event = (mpf_named_event_frame_t *)buffer;
		named_event->duration = ntohs((apr_uint16_t)named_event->duration);
		if(mpf_jitter_buffer_event_write(receiver->jb,named_event,header->timestamp,(apr_byte_t)header->marker) != JB_OK) {
			receiver->stat.discarded_packets++;
		}
	}
	else if(header->type == RTP_PT_CN) {
		/* CN (comfort noise) packet: counted but not queued */
		receiver->stat.ignored_packets++;
	}
	else {
		/* invalid payload type */
		receiver->stat.ignored_packets++;
	}

	return TRUE;
}