Example #1
void test_op_and_fetch (void)
{
  sc = __sync_add_and_fetch (&sc, uc);
  uc = __sync_add_and_fetch (&uc, uc);
  ss = __sync_add_and_fetch (&ss, uc);
  us = __sync_add_and_fetch (&us, uc);
  si = __sync_add_and_fetch (&si, uc);
  ui = __sync_add_and_fetch (&ui, uc);

  sc = __sync_sub_and_fetch (&sc, uc);
  uc = __sync_sub_and_fetch (&uc, uc);
  ss = __sync_sub_and_fetch (&ss, uc);
  us = __sync_sub_and_fetch (&us, uc);
  si = __sync_sub_and_fetch (&si, uc);
  ui = __sync_sub_and_fetch (&ui, uc);

  sc = __sync_or_and_fetch (&sc, uc);
  uc = __sync_or_and_fetch (&uc, uc);
  ss = __sync_or_and_fetch (&ss, uc);
  us = __sync_or_and_fetch (&us, uc);
  si = __sync_or_and_fetch (&si, uc);
  ui = __sync_or_and_fetch (&ui, uc);

  sc = __sync_xor_and_fetch (&sc, uc);
  uc = __sync_xor_and_fetch (&uc, uc);
  ss = __sync_xor_and_fetch (&ss, uc);
  us = __sync_xor_and_fetch (&us, uc);
  si = __sync_xor_and_fetch (&si, uc);
  ui = __sync_xor_and_fetch (&ui, uc);

  sc = __sync_and_and_fetch (&sc, uc);
  uc = __sync_and_and_fetch (&uc, uc);
  ss = __sync_and_and_fetch (&ss, uc);
  us = __sync_and_and_fetch (&us, uc);
  si = __sync_and_and_fetch (&si, uc);
  ui = __sync_and_and_fetch (&ui, uc);

  sc = __sync_nand_and_fetch (&sc, uc);
  uc = __sync_nand_and_fetch (&uc, uc);
  ss = __sync_nand_and_fetch (&ss, uc);
  us = __sync_nand_and_fetch (&us, uc);
  si = __sync_nand_and_fetch (&si, uc);
  ui = __sync_nand_and_fetch (&ui, uc);
}
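This first snippet reads like a compiler-style test that exercises each __sync_*_and_fetch builtin on several integer widths; the globals it updates are not shown. A minimal sketch of file-scope declarations that would let it compile, assuming the types implied by the sc/uc/ss/us/si/ui names:

/* Assumed globals for the snippet above (types inferred from the names). */
static signed char    sc;
static unsigned char  uc;
static short          ss;
static unsigned short us;
static int            si;
static unsigned int   ui;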
Example #2
void h2o_socketpool_dispose(h2o_socketpool_t *pool)
{
    pthread_mutex_lock(&pool->_shared.mutex);
    while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        destroy_attached(entry);
        __sync_sub_and_fetch(&pool->_shared.count, 1);
    }
    pthread_mutex_unlock(&pool->_shared.mutex);
    pthread_mutex_destroy(&pool->_shared.mutex);

    if (pool->_interval_cb.loop != NULL) {
        h2o_timeout_unlink(&pool->_interval_cb.entry);
        h2o_timeout_dispose(pool->_interval_cb.loop, &pool->_interval_cb.timeout);
    }
    switch (pool->type) {
    case H2O_SOCKETPOOL_TYPE_NAMED:
        free(pool->peer.named.host.base);
        free(pool->peer.named.port.base);
        break;
    case H2O_SOCKETPOOL_TYPE_SOCKADDR:
        break;
    }
}
Example #3
void __export triton_md_unregister_handler(struct triton_md_handler_t *ud)
{
    struct _triton_md_handler_t *h = (struct _triton_md_handler_t *)ud->tpd;
    triton_md_disable_handler(ud, MD_MODE_READ | MD_MODE_WRITE);

    spin_lock(&h->ctx->lock);
    h->ud = NULL;
    list_del(&h->entry);
    if (h->pending) {
        list_del(&h->entry2);
        __sync_sub_and_fetch(&triton_stat.md_handler_pending, 1);
    }
    spin_unlock(&h->ctx->lock);

    sched_yield();

    pthread_mutex_lock(&freed_list_lock);
    list_add_tail(&h->entry, &freed_list);
    pthread_mutex_unlock(&freed_list_lock);

    ud->tpd = NULL;

    triton_stat.md_handler_count--;
}
Example #4
TEST(cwlock, contended_release_only_once_done) {
  const unsigned thread_count = 30;
  cwlock_t cwlock;
  unsigned inside = 0;
  unsigned owners = 0;

  cwlock_init(&cwlock);

  measure_time_concurrent(thread_count, [&] (unsigned) {
      if (cwlock_lock(&cwlock)) {
        EXPECT_EQ(__sync_fetch_and_add(&owners, 1), 0);
        /*
         * Sleep for half a second so that all threads will block before we
         * increment the inside count and release the other threads.
         */
        usleep(500000);
        __sync_fetch_and_add(&inside, 1);
        EXPECT_EQ(__sync_sub_and_fetch(&owners, 1), 0);
        cwlock_unlock(&cwlock);
      }

      EXPECT_EQ(inside, 1);
    });
}
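The two EXPECT_EQ checks around the critical section lean on the builtins' return conventions: __sync_fetch_and_add returns the value before the add (0 means no other owner was inside), while __sync_sub_and_fetch returns the value after the subtract (0 means this thread was the only owner). A bare sketch of that single-owner assertion, with hypothetical names:

#include <assert.h>

static unsigned owners = 0;

void critical_section(void)
{
    assert(__sync_fetch_and_add(&owners, 1) == 0);  /* old value: nobody else inside */
    /* ... exclusive work ... */
    assert(__sync_sub_and_fetch(&owners, 1) == 0);  /* new value: we were the only owner */
}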
Example #5
#include <stdio.h>

int main (int argc, char **argv) {
    int x = 0;
    int r;

    r = __sync_fetch_and_add(&x, 1);
    printf("r = %d, x = %d\n", r, x);

    r = __sync_add_and_fetch(&x, 1);
    printf("r = %d, x = %d\n", r, x);

    r = __sync_bool_compare_and_swap(&x, 1, 5);
    printf("r = %d, x = %d\n", r, x);

    r = __sync_bool_compare_and_swap(&x, 2, 5);
    printf("r = %d, x = %d\n", r, x);

    r = __sync_fetch_and_sub(&x, 1);
    printf("r = %d, x = %d\n", r, x);

    r = __sync_sub_and_fetch(&x, 1);
    printf("r = %d, x = %d\n", r, x);

    return(0);
}
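For reference, __sync_fetch_and_add and __sync_fetch_and_sub return the value held before the operation, __sync_add_and_fetch and __sync_sub_and_fetch return the value after it, and __sync_bool_compare_and_swap returns whether the swap took place. Given that, the program above should print:

r = 0, x = 1
r = 2, x = 2
r = 0, x = 2
r = 1, x = 5
r = 5, x = 4
r = 3, x = 3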
Example #6
void
_vproc_transaction_end(void)
{
#if !TARGET_OS_EMBEDDED
	typeof(vproc_shmem->vp_shmem_transaction_cnt) newval;

	if (unlikely(vproc_shmem == NULL)) {
		return;
	}

	newval = __sync_sub_and_fetch(&vproc_shmem->vp_shmem_transaction_cnt, 1);

	runtime_ktrace(RTKT_VPROC_TRANSACTION_DECREMENT, newval, 0, 0);
	if (unlikely(newval < 0)) {
		if (vproc_shmem->vp_shmem_flags & VPROC_SHMEM_EXITING) {
			raise(SIGKILL);
			__crashreporter_info__ = "raise(SIGKILL) failed";
		} else {
			__crashreporter_info__ = "Unbalanced: vproc_transaction_end()";
		}
		abort();
	}
#endif
}
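The counter here is signed, so __sync_sub_and_fetch can return a negative value and the function detects an unbalanced end without any extra bookkeeping. A stripped-down sketch of that underflow check, with hypothetical names:

#include <stdlib.h>

/* Assumed signed transaction counter; abort if end() outruns begin(). */
static volatile int transaction_cnt;

void transaction_end(void)
{
    int newval = __sync_sub_and_fetch(&transaction_cnt, 1);
    if (newval < 0)
        abort();  /* more end() calls than begin() calls */
}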
Example #7
void g_attrib_unref(GAttrib *attrib)
{
	if (!attrib)
		return;

	DBG("%p: g_attrib_unref=%d ", attrib, attrib->ref_count - 1);

	if (__sync_sub_and_fetch(&attrib->ref_count, 1))
		return;

	if (attrib->destroy)
		attrib->destroy(attrib->destroy_user_data);

	bt_att_unref(attrib->att);

	queue_destroy(attrib->callbacks, attrib_callbacks_destroy);
	queue_destroy(attrib->track_ids, free);

	free(attrib->buf);

	g_io_channel_unref(attrib->io);

	free(attrib);
}
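This is the usual reference-counting release idiom: because __sync_sub_and_fetch returns the new count, a non-zero result means other references remain and the function bails out early, while zero means this was the last reference and the object can be torn down. A minimal, self-contained sketch of the same idiom, with hypothetical names:

#include <stdlib.h>

struct object {
    int ref_count;              /* set to 1 when the object is created */
    /* ... payload ... */
};

void object_unref(struct object *obj)
{
    if (__sync_sub_and_fetch(&obj->ref_count, 1) != 0)
        return;                 /* other references still exist */
    free(obj);                  /* last reference: release the object */
}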
Example #8
/*
 * The caller wants to know the ntop hot keys and should provide a string buffer.
 * command specifies the hotkey command type.
 */
int
hot_key_report(Reporter_t *preporter, int ntop, char *rpbuf, int rpsize,
               int command, uint32_t client_ip)
{
    int i                       = 0;
    ReporterInstance_t *rpt     = NULL;
    int trace_ip                = 0;
    char *pos                   = rpbuf;
    int ninstances              = preporter->cur_instance;
    int dump_ops                = 0;
    int dump_num                = __sync_add_and_fetch(&preporter->dump_num, 1);

    if (dump_num > MAX_DUMP_NUM) {
        (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
        return -1;
    }

    if (preporter->client_mode == CLIENT_SEPARATE) {
        trace_ip = 1;
    }

    command = convert_to_hotkey_cmd(command);
    if (command != HOTKEY_CMD_GET && command != HOTKEY_CMD_UPD) {
        plat_log_msg(21052,
                     LOG_CAT_HOTKEY,
                     PLAT_LOG_LEVEL_ERROR,
                     "unexpected command : %d",
                     command);
        (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
        return (-1);
    }

    if (ntop > preporter->maxtop) {
        plat_log_msg(21066,
                     LOG_CAT_HOTKEY,
                     PLAT_LOG_LEVEL_DEBUG,
                     "ntop is larger than maxtop: ntop=%d, maxtop=%d",
                     ntop, preporter->maxtop);
        ntop = preporter->maxtop;
    }

    if (ninstances == MAX_INSTANCES) {
        dump_ops = (ntop + MAX_INSTANCES - 1) / MAX_INSTANCES;
    } else {
        dump_ops = ntop;
    }
    
    for (i = 0; i < ninstances; i++) {
        rpt = &preporter->instances[i];

        /* only dump one hotkey */
        if (i == 1 && ntop == 1) {
            continue;
        } else if (i == 1 && ntop % MAX_INSTANCES != 0) {
            /* FIXME: this only works when MAX_INSTANCES == 2 */
            dump_ops--;
        }

        pos = build_instance_snapshot(rpt, dump_ops, pos, rpsize,  
                                      preporter->last_tm, trace_ip);

        if (pos == NULL) {
            (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
            return -1;
        }
    }
    (void)__sync_sub_and_fetch(&preporter->dump_num, 1);

    return (0);
}
Example #9
auint8_t & auint8_t::operator-=( const abool & aValue )
{
	__sync_sub_and_fetch(&_value, (aValue.getValue() ? 1 : 0));
	return *this;
}
Example #10
void* thread_func(void*) {
    pthread_t thread_id = pthread_self();
    //printf("%d\n", sched_getcpu());
    int k = 0;

    unsigned int *local_queue = (unsigned int*) malloc(sizeof(unsigned int)*num_of_nodes);
    int local_queue_size = 0;

    while(1) {
        if (k%2 ==0) {
            while (current_a_size > 0) {
                unsigned int index;
                int read_pos = __sync_sub_and_fetch(&current_a_size, 1);
                if (read_pos >= 0) {
                    index = current_a[read_pos];
                    Node cur_node = node_list[index];
                    for (int i = cur_node.start; i < (cur_node.start+cur_node.edge_num); i++) {
                        unsigned int id = edge_list[i].dest;

                        int its_color = sync_test_and_set_bit(id, bitmap);
                        if (!its_color) {
                            cost[id] = cost[index] + 1;
                            //int write_pos = __sync_fetch_and_add(&current_b_size, 1);
                            //current_b[write_pos] = id;
                            local_queue[local_queue_size] = id;
                            local_queue_size += 1;
                        }
                    }
                } else {
                    __sync_fetch_and_add(&current_a_size, 1);
                }
            }
            if (local_queue_size) {
                int write_pos = __sync_fetch_and_add(&current_b_size, local_queue_size);
                for (int i=0; i<local_queue_size; i++) {
                    current_b[write_pos+i] = local_queue[i];
                }
                local_queue_size = 0;
            }
            pthread_barrier_wait(&barr);
            if (current_b_size == 0)  break;
            pthread_barrier_wait(&barr2);
        } else {
            while (current_b_size > 0) {
                unsigned int index;
                int read_pos = __sync_sub_and_fetch(&current_b_size, 1);
                if (read_pos >= 0) {
                    index = current_b[read_pos];
                    Node cur_node = node_list[index];
                    for (int i = cur_node.start; i < (cur_node.start+cur_node.edge_num); i++) {
                        unsigned int id = edge_list[i].dest;
                        int its_color = sync_test_and_set_bit(id, bitmap);
                        if (!its_color) {
                            cost[id] = cost[index] + 1;
                            //int write_pos = __sync_fetch_and_add(&current_a_size, 1);
                            //current_a[write_pos] = id;
                            local_queue[local_queue_size] = id;
                            local_queue_size += 1;
                        }
                    }
                } else {
                    __sync_fetch_and_add(&current_b_size, 1);
                }
            }
            if (local_queue_size) {
                int write_pos = __sync_fetch_and_add(&current_a_size, local_queue_size);
                for (int i=0; i<local_queue_size; i++) {
                    current_a[write_pos+i] = local_queue[i];
                }
                local_queue_size = 0;
            }
            pthread_barrier_wait(&barr);
            if (current_a_size == 0)  break;
            pthread_barrier_wait(&barr2);
        }
        k++;
    }
    free(local_queue);
    return NULL;
}
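The inner loops above reserve a queue slot by decrementing the shared size first and checking the result afterwards; if the decrement overshoots below zero, the thread immediately adds one back. A stripped-down sketch of that reserve-then-compensate pattern, with hypothetical names:

/* Try to claim one item from a shared counter. */
int read_pos = __sync_sub_and_fetch(&queue_size, 1);
if (read_pos >= 0) {
    /* read_pos is an index now owned exclusively by this thread */
} else {
    __sync_fetch_and_add(&queue_size, 1);  /* queue was empty: undo the over-decrement */
}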
Example #11
 os_uint32 pa_decrement(os_uint32 *count)
 {
    return __sync_sub_and_fetch (count, 1);
 }
Example #12
 //! Performs an atomic decrement by 'val', returning the new value
 T dec(const T val) { return __sync_sub_and_fetch(&value, val);  }
Example #13
Qiniu_Count Qiniu_Count_Dec(Qiniu_Count* self)
{
	return __sync_sub_and_fetch(self, 1);
}
Example #14
    void Daemon::pruneTraceEntries(bool i_all)
    {
        ComponentList::List::iterator component;

        size_t pruned = 0;

        // Iterate through the components...
        bool more = iv_service->iv_compList->first(component);
        while(more)
        {
            Entry* entry = component->iv_last;
            Entry* orig_entry = entry;

            // Invalidate entries until the component is small enough.
            while((entry) &&
                    ((component->iv_curSize > component->iv_maxSize) ||
                     i_all)
                 )
            {
                if (!reinterpret_cast<BufferPage*>(
                        ALIGN_PAGE_DOWN(
                            reinterpret_cast<uint64_t>(entry)))->commonPage)
                {
                    break;
                }

                entry->comp = NULL; // Invalidate entry.

                __sync_sub_and_fetch(&component->iv_curSize, entry->size);
                pruned += entry->size;

                entry = entry->prev;
            }

            if (entry != orig_entry)
            {
                printkd("%s,", component->iv_compName);

                // Break chain of linked list.
                if (entry != NULL)
                {
                    entry->next = NULL;
                }

                // Update component pointers.
                Buffer* b =
                    iv_service->iv_buffers[component->iv_bufferType];

                // consumerOp pseudo-code:
                //    if (entry == NULL) component->first = NULL;
                //    component->last = entry;
                b->consumerOp(&entry, NULL,
                              &component->iv_first, NULL,
                              &component->iv_last, entry);
            }

            // Get next component.
            more = iv_service->iv_compList->next(component);
        }

        // Record size of pruned entries in a global.
        if (pruned)
        {
            printkd(": pruned %ld\n", pruned);
            iv_totalPruned += pruned;
        }
    }
Example #15
void global_unlock_read(global_lock* gl){
    __sync_sub_and_fetch(&(gl->lock.global_read),1);
}
Example #16
void local_unlock_write(global_lock* gl){
    __sync_sub_and_fetch(&(gl->lock.local_write),1);
}
Example #17
int nx_atomic_dec(volatile nxint32* value)
{
	return __sync_sub_and_fetch(value,1);
}
Example #18
	int AtomicInteger::DecrementAndGet(volatile int *ptr)
	{
		return __sync_sub_and_fetch(ptr, 1);
	}
Example #19
void code_release(code_t * code) {
  if (!__sync_sub_and_fetch(&code->retainCount, 1)) {
    _free_code(code);
  }
}
void *thread_sub_and_fetch(void *arg)
{
	for(int i = 0; i < 10000; ++i)
		__sync_sub_and_fetch((T*)arg, 0x0000000100000001ULL);
	pthread_exit(0);
}
int main()
{
	{
		T x = HILO(5, 3);
		T y = __sync_add_and_fetch(&x, DUP(1));
		assert(y == HILO(6, 4));
		assert(x == HILO(6, 4));
		volatile T n = HILO(2, 1);
		if (emscripten_has_threading_support())
		{
			for(int i = 0; i < NUM_THREADS; ++i) pthread_create(&thread[i], NULL, thread_add_and_fetch, (void*)&n);
			for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
			printf("n: %llx\n", n);
			assert(n == HILO(NUM_THREADS*10000ULL+2ULL, NUM_THREADS*10000ULL+1ULL));
		}
	}
	{
		T x = HILO(15, 13);
		T y = __sync_sub_and_fetch(&x, HILO(10, 10));
		assert(y == HILO(5, 3));
		assert(x == HILO(5, 3));
		volatile T n = HILO(NUM_THREADS*10000ULL+5ULL, NUM_THREADS*10000ULL+3ULL);
		if (emscripten_has_threading_support())
		{
			for(int i = 0; i < NUM_THREADS; ++i) pthread_create(&thread[i], NULL, thread_sub_and_fetch, (void*)&n);
			for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
			printf("n: %llx\n", n);
			assert(n == HILO(5,3));
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_or_and_fetch(&x, HILO(65536 + 9, 9));
		assert(y == HILO(32768 + 65536 + 13, 13));
		assert(x == HILO(32768 + 65536 + 13, 13));
		for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
		{
			or_and_fetch_data = HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS);
			if (emscripten_has_threading_support())
			{
				for(int i = 0; i < NUM_THREADS; ++i)
				{
					threadArg[i] = DUP(1 << i);
					pthread_create(&thread[i], NULL, thread_or_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(or_and_fetch_data == HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1));
			}
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_and_and_fetch(&x, HILO(32768 + 9, 9));
		assert(y == HILO(32768 + 1, 1));
		assert(x == HILO(32768 + 1, 1));
		if (emscripten_has_threading_support())
		{
			for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
			{
				and_and_fetch_data = HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1);
				for(int i = 0; i < NUM_THREADS; ++i)
				{
					threadArg[i] = DUP(~(1UL<<i));
					pthread_create(&thread[i], NULL, thread_and_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(and_and_fetch_data == HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS));
			}
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_xor_and_fetch(&x, HILO(16384 + 9, 9));
		assert(y == HILO(32768 + 16384 + 12, 12));
		assert(x == HILO(32768 + 16384 + 12, 12));
		if (emscripten_has_threading_support())
		{
			for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
			{
				xor_and_fetch_data = HILO(32768 + (1<<NUM_THREADS), 1<<NUM_THREADS);
				for(int i = 0; i < NUM_THREADS; ++i)
				{
					threadArg[i] = DUP(~(1UL<<i));
					pthread_create(&thread[i], NULL, thread_xor_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(xor_and_fetch_data == HILO(32768 + ((1<<(NUM_THREADS+1))-1), (1<<(NUM_THREADS+1))-1));
			}
		}
	}
// XXX NAND support does not exist in Atomics API.
#if 0
	{
		T x = 5;
		T y = __sync_nand_and_fetch(&x, 9);
		assert(y == 5);
		assert(x == -2);
		const int oddNThreads = NUM_THREADS-1;
		for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
		{
			nand_and_fetch_data = 0;
			for(int i = 0; i < oddNThreads; ++i) pthread_create(&thread[i], NULL, thread_nand_and_fetch, (void*)-1);
			for(int i = 0; i < oddNThreads; ++i) pthread_join(thread[i], NULL);
			assert(nand_and_fetch_data == -1);
		}
	}
#endif

#ifdef REPORT_RESULT
	REPORT_RESULT(0);
#endif
}
Example #22
/*
 * This creates a bunch of threads to simulate arriving trains and passengers.
 */
int
main()
{
	struct station station;
	station_init(&station);

	srandom(getpid() ^ time(NULL));

	signal(SIGALRM, alarm_handler);

	// Make sure station_load_train() returns immediately if no waiting passengers.
	_alarm(1, "station_load_train() did not return immediately when no waiting passengers");
	station_load_train(&station, 0);
	station_load_train(&station, 10);
	_alarm(0, NULL);

	// Create a bunch of 'passengers', each in their own thread.
	int i;
	const int total_passengers = 100;//edit
	int passengers_left = total_passengers;
	for (i = 0; i < total_passengers; i++) {
		pthread_t tid;
		int ret = pthread_create(&tid, NULL, passenger_thread, &station);
		if (ret != 0) {
			// If this fails, perhaps we exceeded some system limit.
			// Try reducing 'total_passengers'.
			perror("pthread_create");
			exit(1);
		}
	}

	// Make sure station_load_train() returns immediately if no free seats.
	_alarm(2, "station_load_train() did not return immediately when no free seats");
	station_load_train(&station, 0);
	_alarm(0, NULL);

	// Tons of random tests.
	int total_passengers_boarded = 0;
	const int max_free_seats_per_train = 50;
	int pass = 0;
	while (passengers_left > 0) {
		_alarm(2, "Some more complicated issue appears to have caused passengers "
			"not to board when given the opportunity");

		int free_seats = random() % max_free_seats_per_train;

		printf("Train entering station with %d free seats\n", free_seats);
		load_train_returned = 0;
		struct load_train_args args = { &station, free_seats };
		pthread_t lt_tid;
		int ret = pthread_create(&lt_tid, NULL, load_train_thread, &args);
		if (ret != 0) {
			perror("pthread_create");
			exit(1);
		}

		int threads_to_reap = MIN(passengers_left, free_seats);
		int threads_reaped = 0;
		while (threads_reaped < threads_to_reap) {
			if (load_train_returned) {
				fprintf(stderr, "Error: station_load_train returned early!\n");
				exit(1);
			}
			if (threads_completed > 0) {
				if ((pass % 2) == 0)
					usleep(random() % 2);
				threads_reaped++;
				station_on_board(&station);
				__sync_sub_and_fetch(&threads_completed, 1);
			}
		}

		// Wait a little bit longer. Give station_load_train() a chance to return
		// and ensure that no additional passengers board the train. One second
		// should be tons of time, but if you're on a horribly overloaded system,
		// this may need to be tweaked.
		for (i = 0; i < 1000; i++) {
			if (i > 50 && load_train_returned)
				break;
			usleep(1000);
		}

		if (!load_train_returned) {
			fprintf(stderr, "Error: station_load_train failed to return\n");
			exit(1);
		}

		while (threads_completed > 0) {
			threads_reaped++;
			__sync_sub_and_fetch(&threads_completed, 1);
		}

		passengers_left -= threads_reaped;
		total_passengers_boarded += threads_reaped;
		printf("Train departed station with %d new passenger(s) (expected %d)%s\n",
			threads_to_reap, threads_reaped,
			(threads_to_reap != threads_reaped) ? " *****" : "");

		if (threads_to_reap != threads_reaped) {
			fprintf(stderr, "Error: Too many passengers on this train!\n");
			exit(1);
		}

		pass++;
	}

	if (total_passengers_boarded == total_passengers) {
		printf("Looks good!\n");
		return 0;
	} else {
		// I don't think this is reachable, but just in case.
		fprintf(stderr, "Error: expected %d total boarded passengers, but got %d!\n",
			total_passengers, total_passengers_boarded);
		return 1;
	}
}
Example #23
 //! Performs an atomic decrement by 1, returning the new value
 T dec() { return __sync_sub_and_fetch(&value, 1);  }
Example #24
static inline void
globalmq_dec(struct global_queue *q) {
	__sync_sub_and_fetch(&q->total,1);
}
Example #25
static void *
cache_bg_thread(void *arg)
{
	struct cache *c = (struct cache *)arg;
	int i, dud;

	while (1) {
		enif_mutex_lock(c->ctrl_lock);

		/* if we've been told to die, quit this loop and start cleaning up */
		if (c->flags & FL_DYING) {
			enif_mutex_unlock(c->ctrl_lock);
			break;
		}

		/* sleep until there is work to do */
		enif_cond_wait(c->check_cond, c->ctrl_lock);

		__sync_add_and_fetch(&(c->wakeups), 1);
		dud = 1;

		/* we have to let go of ctrl_lock so we can take cache_lock then
		   ctrl_lock again to get them back in the right order */
		enif_mutex_unlock(c->ctrl_lock);
		enif_rwlock_rwlock(c->cache_lock);
		enif_mutex_lock(c->ctrl_lock);

		/* first process the promotion queue before we do any evicting */
		for (i = 0; i < N_INCR_BKT; ++i) {
			enif_mutex_lock(c->incr_lock[i]);
			while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
				struct cache_incr_node *n;
				n = TAILQ_FIRST(&(c->incr_head[i]));
				TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
				__sync_sub_and_fetch(&(c->incr_count), 1);

				dud = 0;

				/* let go of the ctrl_lock here, we don't need it when we aren't looking
				   at the incr_queue, and this way other threads can use it while we shuffle
				   queue nodes around */
				enif_mutex_unlock(c->incr_lock[i]);
				enif_mutex_unlock(c->ctrl_lock);

				if (n->node->q == &(c->q1)) {
					TAILQ_REMOVE(&(c->q1.head), n->node, entry);
					c->q1.size -= n->node->size;
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
					n->node->q = &(c->q2);
					c->q2.size += n->node->size;

				} else if (n->node->q == &(c->q2)) {
					TAILQ_REMOVE(&(c->q2.head), n->node, entry);
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
				}

				enif_free(n);

				/* take the ctrl_lock back again for the next loop around */
				enif_mutex_lock(c->ctrl_lock);
				enif_mutex_lock(c->incr_lock[i]);
			}
			enif_mutex_unlock(c->incr_lock[i]);
		}

		/* let go of the ctrl_lock here for two reasons:
		   1. avoid lock inversion, because if we have evictions to do we
		      will need to take lookup_lock, and we must take lookup_lock
		      before taking ctrl_lock
		   2. if we don't need to do evictions, we're done with the structures
		      that are behind ctrl_lock so we should give it up for others */
		enif_mutex_unlock(c->ctrl_lock);

		/* do timed evictions -- if anything has expired, nuke it */
		{
			struct cache_node *n;
			if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
				struct timespec now;
				clock_now(&now);
				while (n && n->expiry.tv_sec < now.tv_sec) {
					enif_mutex_lock(c->ctrl_lock);
					dud = 0;
					destroy_cache_node(n);
					enif_mutex_unlock(c->ctrl_lock);
					n = RB_MIN(expiry_tree, &(c->expiry_head));
				}
			}
		}

		/* now check if we need to do ordinary size limit evictions */
		if (c->q1.size + c->q2.size > c->max_size) {
			enif_rwlock_rwlock(c->lookup_lock);
			enif_mutex_lock(c->ctrl_lock);

			while ((c->q1.size + c->q2.size > c->max_size) &&
					(c->q1.size > c->min_q1_size)) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q1.head), cache_q);
				destroy_cache_node(n);
			}

			while (c->q1.size + c->q2.size > c->max_size) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q2.head), cache_q);
				destroy_cache_node(n);
			}

			dud = 0;

			enif_mutex_unlock(c->ctrl_lock);
			enif_rwlock_rwunlock(c->lookup_lock);
		}

		if (dud)
			__sync_add_and_fetch(&(c->dud_wakeups), 1);
		/* now let go of the cache_lock that we took right back at the start of
		   this iteration */
		enif_rwlock_rwunlock(c->cache_lock);
	}

	/* first remove us from the atom_tree, so we get no new operations coming in */
	enif_rwlock_rwlock(gbl->atom_lock);
	RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_free(c->atom_node);

	/* now take all of our locks, to make sure any pending operations are done */
	enif_rwlock_rwlock(c->cache_lock);
	enif_rwlock_rwlock(c->lookup_lock);
	enif_mutex_lock(c->ctrl_lock);

	c->atom_node = NULL;

	/* free the actual cache queues */
	{
		struct cache_node *n, *nextn;
		nextn = TAILQ_FIRST(&(c->q1.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
		nextn = TAILQ_FIRST(&(c->q2.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
	}

	for (i = 0; i < N_INCR_BKT; ++i)
		enif_mutex_lock(c->incr_lock[i]);

	/* free the incr_queue */
	for (i = 0; i < N_INCR_BKT; ++i) {
		struct cache_incr_node *in, *nextin;
		nextin = TAILQ_FIRST(&(c->incr_head[i]));
		while ((in = nextin)) {
			nextin = TAILQ_NEXT(in, entry);
			TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
			in->node = 0;
			enif_free(in);
		}
		enif_mutex_unlock(c->incr_lock[i]);
		enif_mutex_destroy(c->incr_lock[i]);
	}

	/* unlock and destroy! */
	enif_cond_destroy(c->check_cond);

	enif_mutex_unlock(c->ctrl_lock);
	enif_mutex_destroy(c->ctrl_lock);

	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_destroy(c->lookup_lock);

	enif_rwlock_rwunlock(c->cache_lock);
	enif_rwlock_destroy(c->cache_lock);

	enif_free(c);

	return 0;
}
Example #26
errno_t sss_nss_mc_getpwnam(const char *name, size_t name_len,
                            struct passwd *result,
                            char *buffer, size_t buflen)
{
    struct sss_mc_rec *rec = NULL;
    struct sss_mc_pwd_data *data;
    char *rec_name;
    uint32_t hash;
    uint32_t slot;
    int ret;
    const size_t strs_offset = offsetof(struct sss_mc_pwd_data, strs);
    size_t data_size;

    ret = sss_nss_mc_get_ctx("passwd", &pw_mc_ctx);
    if (ret) {
        return ret;
    }

    /* Get max size of data table. */
    data_size = pw_mc_ctx.dt_size;

    /* hashes are calculated including the NULL terminator */
    hash = sss_nss_mc_hash(&pw_mc_ctx, name, name_len + 1);
    slot = pw_mc_ctx.hash_table[hash];

    /* If the slot is not within the bounds of the mmapped region and
     * its value is not MC_INVALID_VAL, then the cache is
     * probably corrupted. */
    while (MC_SLOT_WITHIN_BOUNDS(slot, data_size)) {
        /* free record from previous iteration */
        free(rec);
        rec = NULL;

        ret = sss_nss_mc_get_record(&pw_mc_ctx, slot, &rec);
        if (ret) {
            goto done;
        }

        /* check record matches what we are searching for */
        if (hash != rec->hash1) {
            /* if name hash does not match we can skip this immediately */
            slot = sss_nss_mc_next_slot_with_hash(rec, hash);
            continue;
        }

        data = (struct sss_mc_pwd_data *)rec->data;
        /* Integrity check
         * - name_len cannot be longer than all strings
         * - data->name cannot point outside strings
         * - all strings must be within copy of record
         * - size of record must be lower than data table size */
        if (name_len > data->strs_len
            || (data->name + name_len) > (strs_offset + data->strs_len)
            || data->strs_len > rec->len
            || rec->len > data_size) {
            ret = ENOENT;
            goto done;
        }

        rec_name = (char *)data + data->name;
        if (strcmp(name, rec_name) == 0) {
            break;
        }

        slot = sss_nss_mc_next_slot_with_hash(rec, hash);
    }

    if (!MC_SLOT_WITHIN_BOUNDS(slot, data_size)) {
        ret = ENOENT;
        goto done;
    }

    ret = sss_nss_mc_parse_result(rec, result, buffer, buflen);

done:
    free(rec);
    __sync_sub_and_fetch(&pw_mc_ctx.active_threads, 1);
    return ret;
}
Example #27
/* ==================== server_handle_write() ==================== */ 
int server_handle_write(session_t *session, message_t *request)
{
    int ret = 0;
    msgidx_t msgidx;
    msgidx_init(&msgidx);

    /** ----------------------------------------
     *    Parse request
     *  ---------------------------------------- */

    if ( parse_write_request(session, request, &msgidx) != 0 ){
        error_log("parse_write_request() failed. key:%s", msgidx.key);
        ret = -1;
    } else {

        if ( session->total_writed == 0 ){
            __sync_add_and_fetch(&session->running_tasks, 1);
        }
        /** ----------------------------------------
         *    Write cache
         *  ---------------------------------------- */

        object_t *object = server_write_to_cache(session, &msgidx);
        if (  object == NULL ) {
            error_log("session_write_to_cache() failed.");
            __sync_add_and_fetch(&session->finished_works, 1);
            session->total_writed = 0;
            ret = -1;
        } else {

            /** ----------------------------------------
             *    Write to storage & Response to client.
             *  ---------------------------------------- */

            if ( session->total_writed >= msgidx.object_size ){

                /* FIXME 2014-10-23 15:49:37 */
                /*vnode_t *vnode = get_vnode_by_key(SERVER(session), &object->key_md5);*/
                /*vnode_enqueue_write_queue(vnode, session, object);*/

                server_write_to_storage(SERVER(session), object);
                session_response(session, RESULT_SUCCESS);

                __sync_add_and_fetch(&session->finished_works, 1);
                __sync_sub_and_fetch(&session->running_tasks, 1);

                /*message_t *response = alloc_response_message(RESULT_SUCCESS);*/
                /*listAddNodeTail(session->responseQueue, response);*/

                session->total_writed = 0;

                /* FIXME */
                /*vnode_write_queue_entry_t *entry = (vnode_write_queue_entry_t*)zmalloc(sizeof(vnode_write_queue_entry_t));*/
                /*memset(entry, 0, sizeof(vnode_write_queue_entry_t));*/
                /*entry->session = session;*/
                /*entry->object = object;*/

                /*enqueue_work(vnode->write_queue, entry);*/


                /*pthread_yield();*/
                /*sched_yield();*/
            } 
        }
    }

    return ret;
}
Example #28
static inline LONG WINAPI InterlockedDecrement( LONG volatile *dest )
{
    return __sync_sub_and_fetch(dest, 1);
}
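Win32's InterlockedDecrement returns the decremented value, which is exactly what __sync_sub_and_fetch provides, so the mapping is one line. A hedged sketch of the same wrapper on top of C11 atomics (using long in place of Win32's LONG; atomic_fetch_sub returns the previous value, so the operand is subtracted once more):

#include <stdatomic.h>

static inline long interlocked_decrement_c11(_Atomic long *dest)
{
    /* atomic_fetch_sub returns the old value; subtract 1 to get the new one. */
    return atomic_fetch_sub(dest, 1) - 1;
}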
Example #29
errno_t sss_nss_mc_getpwuid(uid_t uid,
                            struct passwd *result,
                            char *buffer, size_t buflen)
{
    struct sss_mc_rec *rec = NULL;
    struct sss_mc_pwd_data *data;
    char uidstr[11];
    uint32_t hash;
    uint32_t slot;
    int len;
    int ret;

    ret = sss_nss_mc_get_ctx("passwd", &pw_mc_ctx);
    if (ret) {
        return ret;
    }

    len = snprintf(uidstr, 11, "%ld", (long)uid);
    if (len > 10) {
        ret = EINVAL;
        goto done;
    }

    /* hashes are calculated including the NULL terminator */
    hash = sss_nss_mc_hash(&pw_mc_ctx, uidstr, len+1);
    slot = pw_mc_ctx.hash_table[hash];

    /* If the slot is not within the bounds of the mmapped region and
     * its value is not MC_INVALID_VAL, then the cache is
     * probably corrupted. */
    while (MC_SLOT_WITHIN_BOUNDS(slot, pw_mc_ctx.dt_size)) {
        /* free record from previous iteration */
        free(rec);
        rec = NULL;

        ret = sss_nss_mc_get_record(&pw_mc_ctx, slot, &rec);
        if (ret) {
            goto done;
        }

        /* check record matches what we are searching for */
        if (hash != rec->hash2) {
            /* if uid hash does not match we can skip this immediately */
            slot = sss_nss_mc_next_slot_with_hash(rec, hash);
            continue;
        }

        data = (struct sss_mc_pwd_data *)rec->data;
        if (uid == data->uid) {
            break;
        }

        slot = sss_nss_mc_next_slot_with_hash(rec, hash);
    }

    if (!MC_SLOT_WITHIN_BOUNDS(slot, pw_mc_ctx.dt_size)) {
        ret = ENOENT;
        goto done;
    }

    ret = sss_nss_mc_parse_result(rec, result, buffer, buflen);

done:
    free(rec);
    __sync_sub_and_fetch(&pw_mc_ctx.active_threads, 1);
    return ret;
}
Example #30
auint8_t & auint8_t::operator-=( int64_t aValue )
{
	__sync_sub_and_fetch(&_value, (uint8_t)aValue);
	return *this;
}