int netlink_request(int fd, int family, int type) { // send at least 16 bytes of data, some old kernels want it unsigned char buf[sizeof(struct nlmsghdr) + sizeof(struct rtgenmsg)+15]; struct nlmsghdr *nh; struct rtgenmsg *ng; struct sockaddr_nl snl; struct iovec iov; nh = (struct nlmsghdr *)(buf); ng = (struct rtgenmsg *)(buf + sizeof(struct nlmsghdr)); dprintf("Setting up netlink request"); memset(&snl, 0, sizeof(struct sockaddr_nl)); memset(buf, 0, sizeof(buf)); snl.nl_family = AF_NETLINK; // I keep auto typing AF_INET :~( nh->nlmsg_len = NLMSG_LENGTH(sizeof(buf) - sizeof(struct nlmsghdr)); nh->nlmsg_type = type; // NLM_F_ROOT Return the complete table instead of a single entry. // Create, remove or receive information about a network route. These // messages contain an rtmsg structure with an optional sequence of // rtattr structures following. For RTM_GETROUTE setting rtm_dst_len // and rtm_src_len to 0 means you get all entries for the specified // routing table. For the other fields except rtm_table and // rtm_protocol 0 is the wildcard. nh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ROOT; // NLM_F_ACK Request for an acknowledgment on success, maybe an idea. // would need to implement re-sending if it fails, etc. // for now we will assume it's reliable (even though the docs say it's not) //nh->nlmsg_pid = NETLINKID(); // some systems require pid to 0 nh->nlmsg_pid = 0; nh->nlmsg_seq = __atomic_inc(&seqno); ng->rtgen_family = family; dprintf("Sending request"); if(sendto(fd, buf, sizeof(buf), 0, (void *)(&snl), sizeof(struct sockaddr_nl)) == -1) { dprintf("Failed to send netlink request. Got %s", strerror(errno)); return -1; } dprintf("Request sent"); return seqno; // XXX, may wrap, etc. just use zero? }
/*
 * Record that file descriptor fd was opened with the given type.
 * Out-of-range descriptors are ignored; re-registering an fd must
 * use the same type it was first registered with.
 */
__hidden void filefd_opened(int fd, enum filefd_type fd_type)
{
    ALOGV("%s(fd:%d) {", __func__, fd);
    if (fd < 0 || fd >= __FD_SETSIZE) {
        ALOGV("%s: }", __func__);
        return;
    }
    if (filefd_mapped_file[fd] == UNUSED_FD_TYPE) {
        /* First registration of this descriptor: count it and record its type. */
        __atomic_inc(&filefd_mapped_files);
        filefd_mapped_file[fd] = fd_type;
    }
    /* A second registration must not change the recorded type. */
    ASSERT(filefd_mapped_file[fd] == fd_type);
    ALOGV("%s: }", __func__);
}
static int import_fd_env(int verify) { char *type_env_allocated = NULL; char *fd_env_allocated = NULL; char *type_token_saved_ptr; char *fd_token_saved_ptr; enum filefd_type fd_type; char *type_env, *fd_env; int saved_errno; char *type_token; char *fd_token; int rv = 0; int fd; ALOGV("%s:(verify:%d) {", __func__, verify); saved_errno = errno; /* * get file descriptor environment pointer and make a * a copy of the string. */ fd_env = getenv(fd_env_name); if (fd_env == NULL) { ALOGV("%s: fd_env = NULL = getenv('%s');", __func__, fd_env_name); goto done; } else { ALOGV("%s: fd_env = '%s' = getenv('%s');", __func__, fd_env, fd_env_name); fd_env_allocated = malloc(strlen(fd_env)+1); if (fd_env_allocated == NULL) { ALOGE("%s: fd_env_allocated = NULL; malloc failed", __func__); goto done; } strcpy(fd_env_allocated, fd_env); } /* * get file descriptor environment pointer and make a copy of * the string to our stack. */ type_env = getenv(type_env_name); if (type_env == NULL) { ALOGV("%s: type_env = NULL = getenv(type_env_name:'%s');", __func__, type_env_name); goto done; } else { ALOGV("%s: type_env = '%s' = getenv(type_env_name:'%s');", __func__, type_env, type_env_name); type_env_allocated = malloc(strlen(type_env)+1); if (type_env_allocated == NULL) { ALOGE("%s: type_env_allocated = NULL; malloc failed", __func__); goto done; } strcpy(type_env_allocated, type_env); } /* * Setup strtok_r(), use it to parse the env tokens, and * initialise the filefd_mapped_file array. 
*/ fd_token = strtok_r(fd_env_allocated, ",", &fd_token_saved_ptr); type_token = strtok_r(type_env_allocated, ",", &type_token_saved_ptr); while (fd_token && type_token) { fd = atoi(fd_token); ASSERT(fd >= 0 ); ASSERT(fd < __FD_SETSIZE); fd_type = (enum filefd_type) atoi(type_token); ASSERT(fd_type > UNUSED_FD_TYPE); ASSERT(fd_type < MAX_FD_TYPE); if (fd >= 0 && fd < __FD_SETSIZE) { if (fd_type > UNUSED_FD_TYPE && fd_type < MAX_FD_TYPE) { if (verify) { ASSERT(filefd_mapped_file[fd] == fd_type); ALOGV("%s: filefd_mapped_file[fd:%d] == fd_type:%d;", __func__, fd, fd_type); } else { ASSERT(filefd_mapped_file[fd] == UNUSED_FD_TYPE); __atomic_inc(&filefd_mapped_files); ALOGV("%s: ++filefd_mapped_files:%d;", __func__, filefd_mapped_files); filefd_mapped_file[fd] = fd_type; ALOGV("%s: filefd_mapped_file[fd:%d] = fd_type:%d;", __func__, fd, fd_type); } } } fd_token = strtok_r(NULL, ",", &fd_token_saved_ptr); type_token = strtok_r(NULL, ",", &type_token_saved_ptr); } done: if (type_env_allocated) free(type_env_allocated); if (fd_env_allocated) free(fd_env_allocated); errno = saved_errno; ALOGV("%s: return(rv:%d); }", __func__, rv); return rv; }
// Atomically increment the reference count of the given packet.
// NOTE(review): pBuffer is not NULL-checked; callers must pass a valid packet.
void CPacketPool::AddRef(CPacket *pBuffer) { __atomic_inc(&pBuffer->mRefCount); }
// Atomically increment this pool's own reference count.
void CPacketPool::AddRef() { __atomic_inc(&mRefCount); }
/*
 * Start tests, show results.
 *
 * Spawns THREAD_COUNT threads running atomicTest, releases them all at
 * once via waitCond, times the parallel run, prints the accumulated
 * counters, then repeats the same work serially as an SMP sanity check
 * and finishes with simple functional tests of the atomic ops.
 */
bool dvmTestAtomicSpeed()
{
    pthread_t threads[THREAD_COUNT];
    void *(*startRoutine)(void *) = atomicTest;
    int64_t startWhen, endWhen;

#if defined(__ARM_ARCH__)
    dvmFprintf(stdout, "__ARM_ARCH__ is %d\n", __ARM_ARCH__);
#endif
#if defined(ANDROID_SMP)
    dvmFprintf(stdout, "ANDROID_SMP is %d\n", ANDROID_SMP);
#endif

    dvmFprintf(stdout, "Creating threads\n");
    int i;
    for (i = 0; i < THREAD_COUNT; i++) {
        /* Thread index is smuggled through the void* argument.
         * NOTE(review): int->pointer cast; (void *)(uintptr_t)i would be
         * the portable spelling — confirm against atomicTest's unpacking. */
        void *arg = (void *) i;
        if (pthread_create(&threads[i], NULL, startRoutine, arg) != 0) {
            dvmFprintf(stderr, "thread create failed\n");
        }
    }

    /* wait for all the threads to reach the starting line */
    while (1) {
        pthread_mutex_lock(&waitLock);
        if (threadsStarted == THREAD_COUNT) {
            /* Everyone is parked on waitCond; start the clock and fire. */
            dvmFprintf(stdout, "Starting test\n");
            startWhen = getRelativeTimeNsec();
            pthread_cond_broadcast(&waitCond);
            pthread_mutex_unlock(&waitLock);
            break;
        }
        pthread_mutex_unlock(&waitLock);
        usleep(100000);
    }

    for (i = 0; i < THREAD_COUNT; i++) {
        void *retval;
        if (pthread_join(threads[i], &retval) != 0) {
            dvmFprintf(stderr, "thread join (%d) failed\n", i);
        }
    }

    endWhen = getRelativeTimeNsec();
    dvmFprintf(stdout, "All threads stopped, time is %.6fms\n",
        (endWhen - startWhen) / 1000000.0);

    /*
     * Show results; expecting:
     *
     * incTest = 5000000
     * decTest = -5000000
     * addTest = 7500000
     * casTest = 10000000
     * wideCasTest = 0x6600000077000000
     */
    dvmFprintf(stdout, "incTest = %d\n", incTest);
    dvmFprintf(stdout, "decTest = %d\n", decTest);
    dvmFprintf(stdout, "addTest = %d\n", addTest);
    dvmFprintf(stdout, "casTest = %d\n", casTest);
    dvmFprintf(stdout, "wideCasTest = 0x%llx\n", wideCasTest);

    /* do again, serially (SMP check) */
    startWhen = getRelativeTimeNsec();
    for (i = 0; i < THREAD_COUNT; i++) {
        doAtomicTest(i);
    }
    endWhen = getRelativeTimeNsec();
    dvmFprintf(stdout, "Same iterations done serially: time is %.6fms\n",
        (endWhen - startWhen) / 1000000.0);

    /*
     * Hard to do a meaningful thrash test on these, so just do a simple
     * function test.
     */
    andTest = 0xffd7fa96;
    orTest = 0x122221ff;
    android_atomic_and(0xfffdaf96, &andTest);
    android_atomic_or(0xdeaaeb00, &orTest);
    /* This CAS is constructed to fail (old value doesn't match). */
    if (android_atomic_release_cas(failingCasTest + 1, failingCasTest - 1,
            &failingCasTest) == 0)
        dvmFprintf(stdout, "failing test did not fail!\n");

    dvmFprintf(stdout, "andTest = %#x\n", andTest);
    dvmFprintf(stdout, "orTest = %#x\n", orTest);
    dvmFprintf(stdout, "failingCasTest = %d\n", failingCasTest);

#ifdef TEST_BIONIC
    /*
     * Quick function test on the bionic ops.
     */
    int prev;
    int tester = 7;
    prev = __atomic_inc(&tester);
    __atomic_inc(&tester);
    __atomic_inc(&tester);
    dvmFprintf(stdout, "bionic 3 inc: %d -> %d\n", prev, tester);
    prev = __atomic_dec(&tester);
    __atomic_dec(&tester);
    __atomic_dec(&tester);
    dvmFprintf(stdout, "bionic 3 dec: %d -> %d\n", prev, tester);
    prev = __atomic_swap(27, &tester);
    dvmFprintf(stdout, "bionic swap: %d -> %d\n", prev, tester);
    int swapok = __atomic_cmpxchg(27, 72, &tester);
    dvmFprintf(stdout, "bionic cmpxchg: %d (%d)\n", tester, swapok);
#endif

    testAtomicSpeed();

    return 0;
}
static int import_fd_env(int verify) { char *type_env_allocated = NULL; char *fd_env_allocated = NULL; char *type_token_saved_ptr; char *fd_token_saved_ptr; enum filefd_type fd_type; char *type_env, *fd_env; int saved_errno; char *type_token; char *fd_token; int rv = 0; int fd; ALOGV("%s:(verify:%d) {", __func__, verify); saved_errno = *REAL(__errno)(); fd_env = getenv(fd_env_name); if (fd_env == NULL) { ALOGV("%s: fd_env = NULL = getenv('%s');", __func__, fd_env_name); goto done; } else { ALOGV("%s: fd_env = '%s' = getenv('%s');", __func__, fd_env, fd_env_name); fd_env_allocated = malloc(strlen(fd_env)+1); if (fd_env_allocated == NULL) { ALOGE("%s: fd_env_allocated = NULL; malloc failed", __func__); goto done; } strcpy(fd_env_allocated, fd_env); } type_env = getenv(type_env_name); if (type_env == NULL) { ALOGV("%s: type_env = NULL = getenv(type_env_name:'%s');", __func__, type_env_name); goto done; } else { ALOGV("%s: type_env = '%s' = getenv(type_env_name:'%s');", __func__, type_env, type_env_name); type_env_allocated = malloc(strlen(type_env)+1); if (type_env_allocated == NULL) { ALOGE("%s: type_env_allocated = NULL; malloc failed", __func__); goto done; } strcpy(type_env_allocated, type_env); } fd_token = strtok_r(fd_env_allocated, ",", &fd_token_saved_ptr); type_token = strtok_r(type_env_allocated, ",", &type_token_saved_ptr); while (fd_token && type_token) { fd = atoi(fd_token); ASSERT(fd >= 0 ); ASSERT(fd < __FD_SETSIZE); fd_type = (enum filefd_type) atoi(type_token); ASSERT(fd_type > UNUSED_FD_TYPE); ASSERT(fd_type < MAX_FD_TYPE); if (fd >= 0 && fd < __FD_SETSIZE) { if (fd_type > UNUSED_FD_TYPE && fd_type < MAX_FD_TYPE) { if (verify) { ASSERT(filefd_mapped_file[fd] == fd_type); ALOGV("%s: filefd_mapped_file[fd:%d] == fd_type:%d;", __func__, fd, fd_type); } else { ASSERT(filefd_mapped_file[fd] == UNUSED_FD_TYPE); __atomic_inc(&filefd_mapped_files); ALOGV("%s: ++filefd_mapped_files:%d;", __func__, filefd_mapped_files); filefd_mapped_file[fd] = fd_type; 
ALOGV("%s: filefd_mapped_file[fd:%d] = fd_type:%d;", __func__, fd, fd_type); } } } fd_token = strtok_r(NULL, ",", &fd_token_saved_ptr); type_token = strtok_r(NULL, ",", &type_token_saved_ptr); } done: if (type_env_allocated) free(type_env_allocated); if (fd_env_allocated) free(fd_env_allocated); *REAL(__errno)() = saved_errno; ALOGV("%s: return(rv:%d); }", __func__, rv); return rv; }
/* Internal version of pthread_create. See comment in pt-internal.h. */
int
__pthread_create_internal (struct __pthread **thread,
			   const pthread_attr_t *attr,
			   void *(*start_routine)(void *), void *arg)
{
  int err;
  struct __pthread *pthread;
  const struct __pthread_attr *setup;
  sigset_t sigset;

  /* Allocate a new thread structure. */
  err = __pthread_alloc (&pthread);
  if (err)
    goto failed;

  /* Use the default attributes if ATTR is NULL. */
  setup = attr ? attr : &__pthread_default_attr;

  /* Initialize the thread state. */
  pthread->state = (setup->detachstate == PTHREAD_CREATE_DETACHED
		    ? PTHREAD_DETACHED : PTHREAD_JOINABLE);

  /* If the user supplied a stack, it is not our responsibility to
     setup a stack guard. */
  if (setup->stackaddr)
    pthread->guardsize = 0;
  else
    /* Clamp the guard so it never exceeds the stack itself. */
    pthread->guardsize = (setup->guardsize <= setup->stacksize
			  ? setup->guardsize : setup->stacksize);

  /* Find a stack. There are several scenarios: if a detached thread
     kills itself, it has no way to deallocate its stack, thus it
     leaves PTHREAD->stack set to true. We try to reuse it here,
     however, if the user supplied a stack, we cannot use the old
     one. Right now, we simply deallocate it. */
  if (pthread->stack)
    {
      if (setup->stackaddr != __pthread_default_attr.stackaddr)
	{
	  __pthread_stack_dealloc (pthread->stackaddr, pthread->stacksize);
	  pthread->stackaddr = setup->stackaddr;
	  pthread->stacksize = setup->stacksize;
	}
    }
  else
    {
      err = __pthread_stack_alloc (&pthread->stackaddr, setup->stacksize);
      if (err)
	goto failed_stack_alloc;
      pthread->stacksize = setup->stacksize;
      pthread->stack = 1;
    }

  /* Allocate the kernel thread and other required resources. */
  err = __pthread_thread_alloc (pthread);
  if (err)
    goto failed_thread_alloc;

#ifdef ENABLE_TLS
  pthread->tcb = _dl_allocate_tls (NULL);
  if (!pthread->tcb)
    goto failed_thread_tls_alloc;
  pthread->tcb->tcb = pthread->tcb;
#endif /* ENABLE_TLS */

  /* And initialize the rest of the machine context. This may include
     additional machine- and system-specific initializations that
     prove convenient. */
  err = __pthread_setup (pthread, entry_point, start_routine, arg);
  if (err)
    goto failed_setup;

  /* Initialize the system-specific signal state for the new
     thread. */
  err = __pthread_sigstate_init (pthread);
  if (err)
    goto failed_sigstate;

  /* Set the new thread's signal mask and set the pending signals to
     empty. POSIX says: "The signal mask shall be inherited from the
     creating thread. The set of signals pending for the new thread
     shall be empty." If the current thread is not a pthread then we
     just inherit the process' sigmask. */
  if (__pthread_num_threads == 1)
    err = sigprocmask (0, 0, &sigset);
  else
    err = __pthread_sigstate (_pthread_self (), 0, 0, &sigset, 0);
  assert_perror (err);

  err = __pthread_sigstate (pthread, SIG_SETMASK, &sigset, 0, 1);
  assert_perror (err);

  /* Increase the total number of threads. We do this before actually
     starting the new thread, since the new thread might immediately
     call `pthread_exit' which decreases the number of threads and
     calls `exit' if the number of threads reaches zero. Increasing
     the number of threads from within the new thread isn't an option
     since this thread might return and call `pthread_exit' before the
     new thread runs. */
  __atomic_inc (&__pthread_total);

  /* Store a pointer to this thread in the thread ID lookup table. We
     could use __thread_setid, however, we only lock for reading as no
     other thread should be using this entry (we also assume that the
     store is atomic). */
  pthread_rwlock_rdlock (&__pthread_threads_lock);
  __pthread_threads[pthread->thread - 1] = pthread;
  pthread_rwlock_unlock (&__pthread_threads_lock);

  /* At this point it is possible to guess our pthread ID. We have to
     make sure that all functions taking a pthread_t argument can
     handle the fact that this thread isn't really running yet. */

  /* Schedule the new thread. */
  err = __pthread_thread_start (pthread);
  if (err)
    goto failed_starting;

  /* At this point the new thread is up and running. */

  *thread = pthread;

  return 0;

  /* Error unwind: each label releases the resources acquired before
     the failing step, in reverse order of acquisition. */
failed_starting:
  __pthread_setid (pthread->thread, NULL);
  __atomic_dec (&__pthread_total);
failed_sigstate:
  __pthread_sigstate_destroy (pthread);
failed_setup:
#ifdef ENABLE_TLS
  _dl_deallocate_tls (pthread->tcb, 1);
failed_thread_tls_alloc:
#endif /* ENABLE_TLS */
  __pthread_thread_dealloc (pthread);
  __pthread_thread_halt (pthread);
failed_thread_alloc:
  /* NOTE(review): this deallocates PTHREAD->stackaddr unconditionally;
     when the stack was supplied by the user via SETUP->stackaddr that
     looks wrong — confirm against __pthread_stack_dealloc's contract. */
  __pthread_stack_dealloc (pthread->stackaddr, pthread->stacksize);
  pthread->stack = 0;
failed_stack_alloc:
  __pthread_dealloc (pthread);
failed:
  return err;
}
// Atomically increment this buffer's reference count; the previous
// value returned by __atomic_inc is deliberately discarded.
void MediaBuffer::add_ref() { (void) __atomic_inc(&mRefCount); }