Example #1
/*
 * Read the GET result from the remote web server.
 * Apply trigger check to this result.
 */
int
http_response_thread(thread_t * thread)
{
	SOCK *sock_obj = THREAD_ARG(thread);

	/* Handle read timeout */
	if (thread->type == THREAD_READ_TIMEOUT)
		return epilog(thread);

	/* Allocate & clean the get buffer */
	sock_obj->buffer = (char *) MALLOC(MAX_BUFFER_LENGTH);

	/* Initialize the hash context */
	sock_obj->hash = &hashes[req->hash];
	HASH_INIT(sock_obj);

	/* Register asynchronous http/ssl read thread */
	if (req->ssl)
		thread_add_read(thread->master, ssl_read_thread, sock_obj,
				thread->u.fd, HTTP_CNX_TIMEOUT);
	else
		thread_add_read(thread->master, http_read_thread, sock_obj,
				thread->u.fd, HTTP_CNX_TIMEOUT);
	return 0;
}
void TestGen::resultat()
{
	ofstream results("resultat.txt");
	results << "Le test de " << Ntest << " algorithmes génétiques a généré : "<<endl;
	results << "cylce:  Wmoy:  Wmax:  "<<endl;
	for(int i=1;i<34;i++) 
		results << i << "  " << Wmoy[i] << Wmax[i] << endl;
	

	ofstream epilog("epi.txt");
	Objectif* objmoy;
	objmoy = new Objectif();
	objmoy->recupgenome(getWmoy());
	Objectif* objmax;
	objmax = new Objectif();
	objmax->recupgenome(getWmax());
	Objectif* objtemoin;
	objtemoin = new Objectif();
	double* Wt;
	Wt = new double[34];
	for(int i=1;i<34;i++) 
		Wt[i]=1;
	objtemoin->recupgenome(Wt);
	epilog << "masses de l'épi à chaque cycle pour Wmoy et Wmax obtenus à l'aide de TestGen sur " << Ntest << " algorithmes génétiques." << endl;
	epilog << "cycle:  Wmoy:  Wmax:  temoin: " << endl;
	for(int i=1;i<34;i++)
		epilog << i << "  " << objmoy->masse(i) << "  " << objmax->masse(i) << "  " << objtemoin->masse(i) << endl;
	system("gnuplot ScriptGraphe");
	delete[] Wt;
	delete objtemoin;
	delete objmax;
	delete objmoy;
}
Example #3
/* Asynchronous HTTP stream reader */
int
http_read_thread(thread_t * thread)
{
	SOCK *sock_obj = THREAD_ARG(thread);
	int r = 0;

	/* Handle read timeout */
	if (thread->type == THREAD_READ_TIMEOUT)
		return epilog(thread);

	/* read the HTTP stream */
	r = MAX_BUFFER_LENGTH - sock_obj->size;
	if (r <= 0) {
		/* defensive check, should not occur */
		fprintf(stderr, "HTTP socket buffer overflow (not consumed)\n");
		r = MAX_BUFFER_LENGTH;
	}
	memset(sock_obj->buffer + sock_obj->size, 0, r);
	r = read(thread->u.fd, sock_obj->buffer + sock_obj->size, r);

	DBG(" [l:%d,fd:%d]\n", r, sock_obj->fd);

	if (r == -1 || r == 0) {	/* -1:error , 0:EOF */
		if (r == -1) {
			/* We have encountered a real read error */
			DBG("Read error with server [%s]:%d: %s\n",
			    req->ipaddress, ntohs(req->addr_port),
			    strerror(errno));
			return epilog(thread);
		}

		/* All the HTTP stream has been parsed */
		finalize(thread);
	} else {
		/* Handle the response stream */
		http_process_stream(sock_obj, r);

		/*
		 * Register the next HTTP stream reader.
		 * Re-register ourselves so as not to perturb the global I/O multiplexer.
		 */
		thread_add_read(thread->master, http_read_thread, sock_obj,
				thread->u.fd, HTTP_CNX_TIMEOUT);
	}

	return 0;
}
Example #4
File: Main.cpp Project: elliot627/CS2270
void MacroMain(int argc, char *argv[])
{
	prolog(0);						// No local variables.

	push(argv[1]);					// Push <fileName>.
	call(MacroDisplayFileContent());// Call <MacroDisplayFileContent()>.

	epilog(8);						// Pop <argc> & <argv> parameters (8 bytes).
}
/* Register device that userspace will read to store results in log file */
int vmon_p_init(dev_t dev)
{
	int result, devno;

	prolog ("");

	vmon_p_devno = dev;

	vmon_p_devices = \
		kmalloc(VMON_P_NR_DEVS * sizeof(struct vmon_pipe), GFP_KERNEL);

	if (vmon_p_devices == NULL) {
		critical ( "vmon_pipe couldn't be allocated!" );
		unregister_chrdev_region(dev, VMON_P_NR_DEVS);
		return 0;
	}

	prolog ( "vmon_p_devices=0x%p", vmon_p_devices );
	
	memset(vmon_p_devices, 0, VMON_P_NR_DEVS * sizeof(struct vmon_pipe));
	init_waitqueue_head(&vmon_p_devices->buffer_wait);
	vmon_p_devices->buffer_watershed = VMON_P_BUFFER_WATERSHED;

	if (vmon_p_devices->buffer_watershed >= VMON_P_BUFFERSIZE)
		return -EINVAL;

	if (!vmon_p_devices->buffer) {
		/*
		 * kmalloc (kernel)
		 * allocates contiguous memory, up to 128KB
		 * vmalloc (virtual)
		 * allocates non continuous memory, can go above 128KB
		 */

		vmon_p_devices->buffer = \
		kmalloc(sizeof(u64) * VMON_P_BUFFERSIZE, GFP_KERNEL);

		if (!vmon_p_devices->buffer) {
			critical ( "couldn't allocate vmon_p_buffer!" );
			return -ENOMEM;
		}
	}
	prolog ( "vmon_p_devices->buffer=0x%p", vmon_p_devices->buffer );
	vmon_p_devices->buffersize = VMON_P_BUFFERSIZE;

	devno = MKDEV(vmon_major, vmon_minor + 0);
	cdev_init(&vmon_p_devices->cdev, &vmon_pipe_fops);
	vmon_p_devices->cdev.owner = THIS_MODULE;
	result = cdev_add(&vmon_p_devices->cdev, devno, 1);

	/* Fail gracefully if need be */
	if (result)
		critical("error %d adding /dev/vmon", result);

	epilog ( "device vmon major=%d, minor=%d", vmon_major, vmon_minor );
	return result;
}
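
The kmalloc/vmalloc comment above hints at a common pattern: for result buffers that may outgrow what kmalloc() serves comfortably, fall back to vmalloc() (virtually contiguous) and record which allocator was used so the cleanup path frees with the matching call. A minimal sketch under that assumption; the helper below is hypothetical and not part of the original vmon driver (recent kernels also offer kvmalloc()/kvfree() for exactly this):

/* Hypothetical helper: choose kmalloc() for small buffers, vmalloc()
 * for large ones, and remember the choice so the caller can pair the
 * free correctly (kfree() vs vfree()). Not part of the vmon driver. */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/types.h>

static void *vmon_alloc_buffer(size_t bytes, bool *is_vmalloc)
{
	void *buf = NULL;

	if (bytes <= KMALLOC_MAX_SIZE)
		buf = kmalloc(bytes, GFP_KERNEL);
	if (buf) {
		*is_vmalloc = false;
		return buf;
	}
	*is_vmalloc = true;
	return vmalloc(bytes);	/* virtually contiguous, larger size limit */
}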
/*
 * This is called whenever a process attempts to read the device file
 */
static ssize_t vmon_p_read(struct file *filp, \
		char __user *buf, size_t count, loff_t *f_pos)
{
	struct vmon_pipe *dev = filp->private_data;
	int retval = -EINVAL;
	size_t const max = dev->buffersize * sizeof(unsigned long long);

	prolog ("");

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *f_pos)
		return -EINVAL;
	
	wait_event_interruptible(dev->buffer_wait, atomic_read(&buffer_ready));
	if (signal_pending(current))
		return -EINTR;
	
	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	/* buffer_pos counts unsigned long long entries (8 bytes each);
	 * count is in bytes */
	count = dev->buffer_pos * sizeof(unsigned long long);
 
	if (copy_to_user(buf, dev->buffer, count))
		goto out;

	epilog("\"%s\" did read %li bytes\n", current->comm, (long long)count);
	/* we expect the user always reads the entire buffer */
	
	retval = count;
	dev->buffer_pos = 0;
 
out:
	mutex_unlock(&buffer_mutex);
	epilog ("");
	return retval;
}
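
vmon_p_read() above rejects partial reads: userspace must ask for exactly dev->buffersize * sizeof(unsigned long long) bytes at offset 0, and gets back however many samples are currently buffered. A minimal userspace sketch, assuming the node is /dev/vmon (as in the cdev_add() error message above) and that the hypothetical VMON_BUFSIZE constant matches the kernel's VMON_P_BUFFERSIZE:

/* Userspace sketch only; VMON_BUFSIZE is an assumed value that must
 * match the kernel's VMON_P_BUFFERSIZE for the read to be accepted. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define VMON_BUFSIZE 4096	/* assumption */

int main(void)
{
	size_t max = VMON_BUFSIZE * sizeof(uint64_t);
	uint64_t *buf = malloc(max);
	int fd = open("/dev/vmon", O_RDONLY);
	ssize_t n;

	if (fd < 0 || buf == NULL) {
		perror("setup");
		return 1;
	}
	/* The driver insists on a full-buffer read from offset 0. */
	n = read(fd, buf, max);
	if (n < 0)
		perror("read");
	else
		printf("got %zd bytes (%zu samples)\n", n, (size_t)n / sizeof(uint64_t));
	free(buf);
	close(fd);
	return 0;
}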
Example #7
File: codegen.c Project: ras52/bootstrap
static
fn_decl(stream, name, decl, block, frame_sz) {
    auto ret = new_label();
    start_fn(decl);
    prolog(stream, name, frame_sz);
    do_block(stream, block, -1, -1, ret);
    emit_label( stream, ret );
    epilog(stream, frame_sz);
    end_fn();
}
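
fn_decl() shows the usual shape of per-function code generation: emit the prologue, generate the body against a return label, bind the label, then emit the epilogue. As a rough illustration of what such a prolog()/epilog() pair typically writes to the output stream (x86-64 AT&T syntax; the real bootstrap compiler's conventions may differ), consider:

/* Illustrative sketch only; not taken from ras52/bootstrap. */
#include <stdio.h>

static void prolog_sketch(FILE *stream, const char *name, int frame_sz)
{
	fprintf(stream, "%s:\n", name);
	fprintf(stream, "\tpush\t%%rbp\n");			/* save caller's frame pointer */
	fprintf(stream, "\tmov\t%%rsp, %%rbp\n");		/* establish our frame */
	fprintf(stream, "\tsub\t$%d, %%rsp\n", frame_sz);	/* reserve space for locals */
}

static void epilog_sketch(FILE *stream)
{
	fprintf(stream, "\tleave\n");				/* restore %rsp and %rbp */
	fprintf(stream, "\tret\n");
}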
Example #8
File: ssl.c Project: Addision/LVS
/* Asynchronous SSL stream reader */
int
ssl_read_thread(thread_t * thread)
{
	SOCK *sock_obj = THREAD_ARG(thread);
	int r = 0;
	int error;

	/* Handle read timeout */
	if (thread->type == THREAD_READ_TIMEOUT)
		return epilog(thread);

	/*
	 * The design implemented here is a workaround for use
	 * with OpenSSL. The goto loop below is a "read until end
	 * of stream" loop, which slightly breaks our global I/O
	 * multiplexer thread framework because it performs a
	 * synchronous read for each GET reply. A little nasty!
	 *
	 * Why doesn't OpenSSL expose readiness of the underlying
	 * fd? That breaks the select()-based I/O approach. If you
	 * read this and know the answer, please reply -- I am
	 * probably missing something. My tests show that sometimes
	 * it returns from select() and sometimes not.
	 */

      read_stream:

	/* read the SSL stream */
	memset(sock_obj->buffer, 0, MAX_BUFFER_LENGTH);
	r = SSL_read(sock_obj->ssl, sock_obj->buffer, MAX_BUFFER_LENGTH);
	error = SSL_get_error(sock_obj->ssl, r);

	DBG(" [l:%d,fd:%d]\n", r, sock_obj->fd);

	if (error) {
		/* The whole SSL stream has been parsed */
		/* Handle response stream */
		if (error != SSL_ERROR_NONE)
			return finalize(thread);
	} else if (r > 0 && error == 0) {

		/* Handle the response stream */
		http_process_stream(sock_obj, r);

		/*
		 * Read the next chunk of the SSL stream.
		 * Loop here synchronously (see the comment above) instead of
		 * re-registering with the global I/O multiplexer.
		 */
		goto read_stream;
	}

	return 0;
}
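
The comment in ssl_read_thread() above asks how to keep SSL reads inside the select()-based multiplexer. The usual answer is to put the socket in non-blocking mode and treat SSL_ERROR_WANT_READ/SSL_ERROR_WANT_WRITE as "try again when the fd is ready", re-registering the read thread instead of looping; Example #24 below does exactly that. A minimal sketch in the same style, assuming the thread_add_read()/finalize() helpers behave as in the surrounding examples and that thread->u.fd is already O_NONBLOCK:

/* Sketch: non-blocking SSL read that cooperates with the I/O
 * multiplexer instead of looping synchronously. Assumes the same
 * sock_obj/thread helpers as the surrounding examples. */
static int ssl_read_once(thread_t *thread)
{
	SOCK *sock_obj = THREAD_ARG(thread);
	int r, err;

	memset(sock_obj->buffer, 0, MAX_BUFFER_LENGTH);
	r = SSL_read(sock_obj->ssl, sock_obj->buffer, MAX_BUFFER_LENGTH);
	err = SSL_get_error(sock_obj->ssl, r);

	switch (err) {
	case SSL_ERROR_NONE:
		http_process_stream(sock_obj, r);	/* consume this chunk */
		/* fall through: wait for the next chunk without blocking */
	case SSL_ERROR_WANT_READ:
	case SSL_ERROR_WANT_WRITE:
		thread_add_read(thread->master, ssl_read_once, sock_obj,
				thread->u.fd, HTTP_CNX_TIMEOUT);
		return 0;
	default:
		/* EOF (SSL_ERROR_ZERO_RETURN) or a fatal error */
		return finalize(thread);
	}
}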
Example #9
static void p9_xos_start_write_pump(unsigned long int data)
{
	struct p9_xos_driver *drv = (struct p9_xos_driver *)data;

	prolog("d=%p", drv);

	if (!test_and_set_bit(WE_BIT, &drv->state))
		queue_work(drv->workqueue, &drv->wwork);

	epilog();
}
Example #10
static void p9_xos_close(struct p9_client *client)
{
	struct p9_xos_device *device;

	prolog("c=%p", client);

	client->status = Disconnected;
	device = (struct p9_xos_device *)client->conn;
	device->client = NULL;
	device->c_name[0] = 0;

	epilog();
}
Example #11
static void p9_xos_flow(unsigned int event, void *cookie)
{
	struct p9_xos_driver *drv = cookie;
	struct p9_xos_endpoint *ep;
	unsigned long flags;
	long int state = 0;

	prolog("e=%u c=%p", event, cookie);

	hw_raw_local_irq_save(flags);

	BUG_ON(event != 0);

	/* get empty packets */
	ep = &drv->ep[WR_EP];
	p9_xos_deque_move(ep->lqueue, &ep->regs[LHEAD], ep);

	if (ep->regs[STARVATION]) { /* Linux needs empty packets */
		ep->regs[STARVATION] = 0;
		set_bit(MUST_WRITE, &state);
	}

	/* get data */
	ep = &drv->ep[RD_EP];
	p9_xos_deque_move(ep->lqueue, &ep->regs[LHEAD], ep);
	if (ep->regs[STARVATION]) { /* RTK needs empty packets */
		drv->wake_status = 2;
		set_bit(MUST_SYNC, &state);
	}

	if (deque_head(ep->lqueue) != deque_null) {
		drv->wake_status = 2;
		set_bit(MUST_READ, &state);
	}

	hw_raw_local_irq_restore(flags);

	if (test_bit(MUST_SYNC, &state))
		queue_work(drv->workqueue, &drv->swork);

	if (test_bit(MUST_WRITE, &state))
		queue_work(drv->workqueue, &drv->wwork);

	if (test_bit(MUST_READ, &state))
		queue_work(drv->workqueue, &drv->rwork);

	if (drv->wake_status == 2)
		wake_lock(&drv->wake_lock);

	epilog();
}
Example #12
int main( int argc, char *argv[] )
{
    N = 10000;

    if( argc > 1 )
        N = atoi(argv[1]);

    setbuf(stdout, NULL);

    calculate();

    epilog();

    return 0;
}
Example #13
File: stub.cpp Project: pombreda/main
	int mainCRTStartup()
	{
		TraceFunc();

		prolog();

		int argc = 0;
		wchar_t ** argv = ::CommandLineToArgvW(::GetCommandLineW(), &argc);

		int ret = wmain(argc, argv);

		::LocalFree(argv);

		TraceFunc();
		return epilog(ret);
	}
Example #14
static void p9_xos_init(unsigned int event, void *cookie)
{
	struct p9_xos_driver *drv = cookie;

	prolog("e=%u c=%p", event, cookie);

	if (drv->ep[event].regs[FAIL])
		panic("9P2000 inter-OS transport initialization failed");

	if (event)
		xos_ctrl_unregister(drv->ctrl, event);
	else
		xos_ctrl_register(drv->ctrl, P9_XOS_EVENT, p9_xos_flow, drv, 0);

	epilog();
}
Example #15
File: Main.cpp Project: elliot627/CS2270
void MacroDisplayFileContent(/*const char* fileName*/)
{
	prolog(16);						// 16 bytes in local variables.

	memset(var(16), 0, 16);			// char content[16] = { 0 };

	push(var(16));					// Push <content>.
	push(param(0));					// Push <fileName>.
	call(LoadFile());				// Call <LoadFile()>.

	push(var(16));					// Push <content>.
	push("Content: '%s'\n");		// Push <format>.
	call(Print());					// Call <Print()>.

	epilog(4);						// Pop <fileName> parameter (4 bytes).
}
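
Read together with Example #4, the macro sequence above (push the output buffer, push the file name, call, then pop the parameters) corresponds to roughly the following plain C. This is an illustrative rendering only; the LoadFile() and Print() prototypes are assumptions inferred from the push order, not taken from the CS2270 project.

/* Illustrative plain-C equivalent of the macro-based routine above. */
#include <string.h>

void LoadFile(const char *fileName, char *out);	/* assumed prototype */
void Print(const char *format, ...);		/* assumed printf-like prototype */

void DisplayFileContent(const char *fileName)
{
	char content[16];

	memset(content, 0, sizeof(content));	/* char content[16] = { 0 }; */
	LoadFile(fileName, content);		/* assumed: fills 'content' from the file */
	Print("Content: '%s'\n", content);
}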
Example #16
File: stub.cpp Project: pombreda/main
	int	WinMainCRTStartup() // -mwindows
	{
		TraceFunc();

		prolog();

		STARTUPINFOW startupInfo;
		::RtlSecureZeroMemory(&startupInfo, sizeof(startupInfo));
		startupInfo.cb = sizeof(startupInfo);
		::GetStartupInfoW(&startupInfo);

		int ret = wWinMain(::GetModuleHandleW(nullptr), nullptr, ::GetCommandLineW(), startupInfo.dwFlags & STARTF_USESHOWWINDOW ? startupInfo.wShowWindow : SW_SHOWDEFAULT);

		TraceFunc();
		return epilog(ret);
	}
Example #17
static int p9_xos_parse_opts(char *params, struct p9_xos_device *device)
{
	enum { opt_latency, opt_err, };
	static match_table_t tokens = {
		{opt_latency, "latency=%u"},
		{opt_err, NULL},
	};
	int retval = 0;
	char *options, *tmp_options, *p;

	prolog("p=%s c=%p", params, device);

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (NULL == tmp_options) {
		retval = -ENOMEM;
		goto done;
	}

	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;
		substring_t args[MAX_OPT_ARGS];

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case opt_latency:
			if (!match_int(&args[0], &option))
				device->latency = msecs_to_jiffies(option);
			else
				warning("ignoring malformed latency option");
			break;
		default:
			break;
		}
	}

	kfree(tmp_options);

done:
	epilog("%d", retval);

	return retval;
}
Example #18
static int p9_xos_create(struct p9_client *client, const char *addr, char *args)
{
	struct p9_xos_device *device = NULL;	/* avoid compiler warning */
	unsigned int id;
	int error;

	prolog("c=%p a=%s a=%s", client, addr, args);

	if (unlikely(!addr))
		addr = "anonymous";

	id = ARRAY_SIZE(driver.device);
	spin_lock(&driver.c_lock);
	while (id--) {
		device = &driver.device[id];
		if (device->client == NULL) {
			device->client = client;
			break;
		}
	}
	spin_unlock(&driver.c_lock);
	if (id > ARRAY_SIZE(driver.device)) {
		error = -ENOMEM;
		goto bail_out;
	}

	strncpy(device->c_name, addr, ARRAY_SIZE(device->c_name) - 1);
	device->c_name[ARRAY_SIZE(device->c_name) - 1] = 0;

	client->status = Connected;
	client->conn = (struct p9_conn *)device;
	device->latency = 0;

	error = p9_xos_parse_opts(args, device);
	if (error)
		warning("bad options for client '%s' (%d)", addr, error);

	info("client '%s' got device %u", addr, id);
	error = 0;

bail_out:
	epilog("%d", error);

	return error;
}
Example #19
static int p9_xos_request(struct p9_client *client, struct p9_req_t *req)
{
	int retval = 0;
	struct p9_xos_device *device;
	struct p9_xos_driver *drv;

	prolog("c=%p r=%p", client, req);

	device = (struct p9_xos_device *)client->conn;
	drv = device->driver;

	req->aux = device;
	p9_xos_add_write_request(req, device->latency);

	epilog("%d", retval);

	return retval;
}
/*
 * This is called by cleanup_module or on failure.
 * It is required to never fail, even if nothing was initialized first
 */
void vmon_p_cleanup(void)
{
	prolog ("");

	if (!vmon_p_devices) {
		critical ( "vmon_p_devices already freed!" );
		return; /* nothing else to release */
	}

	if (vmon_p_devices->buffer) {
		kfree(vmon_p_devices->buffer);
		vmon_p_devices->buffer = NULL;
	}
	vmon_p_devices->buffer_pos = 0;
	atomic_set(&buffer_ready, 0);

	cdev_del(&vmon_p_devices->cdev);
	kfree(vmon_p_devices);
	unregister_chrdev_region(vmon_p_devno, VMON_P_NR_DEVS);
	vmon_p_devices = NULL; /* pedantic */
	epilog ("");
}
Example #21
static void p9_xos_sync_work(struct work_struct *work)
{
	struct p9_xos_driver *drv;
	struct p9_xos_endpoint *ep;
	unsigned long flags;

	prolog("w=%p", work);

	drv = container_of(work, struct p9_xos_driver, swork);

	/* send data */
	ep = &drv->ep[WR_EP];
	hw_raw_local_irq_save(flags);
	p9_xos_deque_move(&ep->regs[RHEAD], ep->rqueue, ep);
	hw_raw_local_irq_restore(flags);

	/* send empty packets if needed */
	ep = &drv->ep[RD_EP];
	if (nb_free_packets >= MIN_PACKETS_TO_RELEASE || ep->regs[STARVATION]) {
		nb_free_packets = 0;
		hw_raw_local_irq_save(flags);
		p9_xos_deque_move(&ep->regs[RHEAD], ep->rqueue, ep);
		hw_raw_local_irq_restore(flags);
	}

	xos_ctrl_raise(drv->ctrl, P9_XOS_EVENT);

	if ((!drv->wake_count) && (drv->wake_status == 1)) {
		drv->wake_status = 0;
		wake_unlock(&drv->wake_lock);
		wmb();
		if (drv->wake_status == 2)
			wake_lock(&drv->wake_lock);
	}

	epilog();
}
Example #22
int main(int argc,char *argv[]) {
    int number_processes = 0,
        process_deeper = 0,
        m_0_res = 0;
    char epilog_text[255];
    FILE *of;



	if(argc < 4){
		error("Invocation:\n\tmodel_generator <number_proc> <process_deeper> <initial_res_marking> <file>\n");
	}
	number_processes = atoi(argv[1]);
	process_deeper = atoi(argv[2]);
	m_0_res = atoi(argv[3]);
	of = fopen(argv[4],"w");

	/* Check that the invocation parameters are correct */
	if(of == NULL)
		error("I am having problems with the output file");
	if(number_processes<2)
		error("At least 2 processes are required");
	if(process_deeper<1)
		error("Process deeper must be of at leats 1 state");
	if(m_0_res<1)
		error("Resource initial marking must be positive");

	/* Parameters are correct: go */
    preface(of);
    generate_model(of,number_processes,process_deeper,m_0_res);

    sprintf(epilog_text,"RAS_%1d_%1d_%1d",number_processes,process_deeper,m_0_res);
    epilog(of,epilog_text);


    return 0;
}
Example #23
  address generate_call_stub(address& return_address)
  {
    assert (!TaggedStackInterpreter, "not supported");
    
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ enter();

    const Register call_wrapper    = r3;
    const Register result          = r4;
    const Register result_type     = r5;
    const Register method          = r6;
    const Register entry_point     = r7;
    const Register parameters      = r8;
    const Register parameter_words = r9;
    const Register thread          = r10;

#ifdef ASSERT
    // Make sure we have no pending exceptions
    {
      StackFrame frame;
      Label label;

      __ load (r0, Address(thread, Thread::pending_exception_offset()));
      __ compare (r0, 0);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT

    // Calculate the frame size
    StackFrame frame;
    for (int i = 0; i < StackFrame::max_crfs; i++)
      frame.get_cr_field();
    for (int i = 0; i < StackFrame::max_gprs; i++)
      frame.get_register();
    StubRoutines::set_call_stub_base_size(frame.unaligned_size() + 3*wordSize);
    // the 3 extra words are for call_wrapper, result and result_type

    const Register parameter_bytes = parameter_words;

    __ shift_left (parameter_bytes, parameter_words, LogBytesPerWord);    

    const Register frame_size = r11;
    const Register padding    = r12;

    __ addi (frame_size, parameter_bytes, StubRoutines::call_stub_base_size());
    __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes);
    __ add (frame_size, frame_size, padding);

    // Save the link register and create the new frame
    __ mflr (r0);
    __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
    __ neg (r0, frame_size);
    __ store_update_indexed (r1, r1, r0);
#ifdef PPC64
    __ mfcr (r0);
    __ store (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
#endif // PPC64

    // Calculate the address of the interpreter's local variables
    const Register locals = frame_size;

    __ addi (locals, r1, frame.start_of_locals() - wordSize);
    __ add (locals, locals, padding);
    __ add (locals, locals, parameter_bytes);

    // Store the call wrapper address and the result stuff
    const int initial_offset = 1;
    int offset = initial_offset;

    __ store (call_wrapper, Address(locals, offset++ * wordSize));
    __ store (result,       Address(locals, offset++ * wordSize));
    __ store (result_type,  Address(locals, offset++ * wordSize));

    // Store the registers
#ifdef PPC32
    __ mfcr (r0);
    __ store (r0, Address(locals, offset++ * wordSize));
#endif // PPC32
    for (int i = 14; i < 32; i++) {
      __ store (as_Register(i), Address(locals, offset++ * wordSize));
    }
    const int final_offset = offset;

    // Store the location of call_wrapper
    frame::set_call_wrapper_offset((final_offset - initial_offset) * wordSize);

#ifdef ASSERT
    // Check that we wrote all the way to the end of the frame.
    // The frame may have been resized when we return from the
    // interpreter, so the start of the frame may have moved
    // but the end will be where we left it and we rely on this
    // to find our stuff.
    {
      StackFrame frame;
      Label label;

      __ load (r3, Address(r1, 0));
      __ subi (r3, r3, final_offset * wordSize);
      __ compare (r3, locals);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT

    // Pass parameters if any
    {
      Label loop, done;

      __ compare (parameter_bytes, 0);
      __ ble (done);

      const Register src = parameters;
      const Register dst = padding;

      __ mr (dst, locals);
      __ shift_right (r0, parameter_bytes, LogBytesPerWord);      
      __ mtctr (r0);
      __ bind (loop);
      __ load (r0, Address(src, 0));
      __ store (r0, Address(dst, 0));
      __ addi (src, src, wordSize);
      __ subi (dst, dst, wordSize);
      __ bdnz (loop);

      __ bind (done);
    }

    // Make the call
    __ mr (Rmethod, method);
    __ mr (Rlocals, locals);
    __ mr (Rthread, thread);
    __ mtctr (entry_point);
    __ bctrl();

    // This is used to identify call_stub stack frames
    return_address = __ pc();

    // Figure out where our stuff is stored
    __ load (locals, Address(r1, 0));
    __ subi (locals, locals, final_offset * wordSize);

#ifdef ASSERT
    // Rlocals should contain the address we just calculated.
    {
      StackFrame frame;
      Label label;

      __ compare (Rlocals, locals);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT
 
    // Is an exception being thrown?
    Label exit;

    __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ bne (exit);

    // Store result depending on type
    const Register result_addr = r6;

    Label is_int, is_long, is_object;

    offset = initial_offset + 1; // skip call_wrapper
    __ load (result_addr, Address(locals, offset++ * wordSize));
    __ load (result_type, Address(locals, offset++ * wordSize));
    __ compare (result_type, T_INT);
    __ beq (is_int);
    __ compare (result_type, T_LONG);
    __ beq (is_long);
    __ compare (result_type, T_OBJECT);
    __ beq (is_object);
    
    __ should_not_reach_here (__FILE__, __LINE__);

    __ bind (is_int);
    __ stw (r3, Address(result_addr, 0));
    __ b (exit);
    
    __ bind (is_long);
#ifdef PPC32
    __ store (r4, Address(result_addr, wordSize));
#endif
    __ store (r3, Address(result_addr, 0));
    __ b (exit);
    
    __ bind (is_object);
    __ store (r3, Address(result_addr, 0));
    //__ b (exit);

    // Restore the registers
    __ bind (exit);
#ifdef PPC32
    __ load (r0, Address(locals, offset++ * wordSize));
    __ mtcr (r0);
#endif // PPC32
    for (int i = 14; i < 32; i++) {
      __ load (as_Register(i), Address(locals, offset++ * wordSize));
    }
#ifdef PPC64
    __ load (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
    __ mtcr (r0);
#endif // PPC64
    assert (offset == final_offset, "save and restore must match");

    // Unwind and return
    __ load (r1, Address(r1, StackFrame::back_chain_offset * wordSize));
    __ load (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
    __ mtlr (r0);
    __ blr ();
    
    return start;
  }
Example #24
File: check_ssl.c Project: lark/keepalived
/* Asynchronous SSL stream reader */
int
ssl_read_thread(thread_t * thread)
{
	checker_t *checker = THREAD_ARG(thread);
	http_checker_t *http_get_check = CHECKER_ARG(checker);
	http_arg_t *http_arg = HTTP_ARG(http_get_check);
	request_t *req = HTTP_REQ(http_arg);
	unsigned char digest[16];
	int r = 0;
	int val;

	/* Handle read timeout */
	if (thread->type == THREAD_READ_TIMEOUT && !req->extracted)
		return timeout_epilog(thread, "=> SSL CHECK failed on service"
				      " : recevice data <=\n\n", "SSL read");

	/* Set descriptor non blocking */
	val = fcntl(thread->u.fd, F_GETFL, 0);
	fcntl(thread->u.fd, F_SETFL, val | O_NONBLOCK);

	/* read the SSL stream */
	r = SSL_read(req->ssl, req->buffer + req->len,
		     MAX_BUFFER_LENGTH - req->len);

	/* restore descriptor flags */
	fcntl(thread->u.fd, F_SETFL, val);

	req->error = SSL_get_error(req->ssl, r);

	if (req->error == SSL_ERROR_WANT_READ) {
		 /* async read unfinished */ 
		thread_add_read(thread->master, ssl_read_thread, checker,
				thread->u.fd, http_get_check->connection_to);
	} else if (r > 0 && req->error == 0) {
		/* Handle response stream */
		http_process_response(req, r);

		/*
		 * Register next ssl stream reader.
		 * Re-register ourselves so as not to perturb the global I/O multiplexer.
		 */
		thread_add_read(thread->master, ssl_read_thread, checker,
				thread->u.fd, http_get_check->connection_to);
	} else if (req->error) {

		/* The whole SSL stream has been parsed */
		MD5_Final(digest, &req->context);
		SSL_set_quiet_shutdown(req->ssl, 1);

		r = (req->error == SSL_ERROR_ZERO_RETURN) ? SSL_shutdown(req->ssl) : 0;

		if (r && !req->extracted) {
			/* check if server is currently alive */
			if (svr_checker_up(checker->id, checker->rs)) {
				smtp_alert(checker->rs, NULL, NULL,
					   "DOWN",
					   "=> SSL CHECK failed on service"
					   " : cannot receive data <=\n\n");
				update_svr_checker_state(DOWN, checker->id
							     , checker->vs
							     , checker->rs);
			}
			return epilog(thread, 1, 0, 0);
		}

		/* Handle response stream */
		http_handle_response(thread, digest, (!req->extracted) ? 1 : 0);

	}

	return 0;
}
Example #25
int aufs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int err, rmdir_later;
	aufs_bindex_t bwh, bindex, bstart;
	struct au_dtime dt;
	struct au_pin pin;
	struct inode *inode;
	struct dentry *parent, *wh_dentry, *h_dentry;
	struct au_whtmp_rmdir *args;

	IMustLock(dir);

	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
	if (unlikely(err))
		goto out;
	err = au_alive_dir(dentry);
	if (unlikely(err))
		goto out_unlock;
	inode = dentry->d_inode;
	IMustLock(inode);
	err = -ENOTDIR;
	if (unlikely(!S_ISDIR(inode->i_mode)))
		goto out_unlock; /* possible? */

	err = -ENOMEM;
	args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
	if (unlikely(!args))
		goto out_unlock;

	parent = dentry->d_parent; /* dir inode is locked */
	di_write_lock_parent(parent);
	err = au_test_empty(dentry, &args->whlist);
	if (unlikely(err))
		goto out_parent;

	bstart = au_dbstart(dentry);
	bwh = au_dbwh(dentry);
	bindex = -1;
	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &dt, &pin);
	err = PTR_ERR(wh_dentry);
	if (IS_ERR(wh_dentry))
		goto out_parent;

	h_dentry = au_h_dptr(dentry, bstart);
	dget(h_dentry);
	rmdir_later = 0;
	if (bindex == bstart) {
		err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
		if (err > 0) {
			rmdir_later = err;
			err = 0;
		}
	} else {
		/* stop monitoring */
		au_hn_free(au_hi(inode, bstart));

		/* dir inode is locked */
		IMustLock(wh_dentry->d_parent->d_inode);
		err = 0;
	}

	if (!err) {
		vfsub_dead_dir(inode);
		au_set_dbdiropq(dentry, -1);
		epilog(dir, dentry, bindex);

		if (rmdir_later) {
			au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
			args = NULL;
		}

		goto out_unpin; /* success */
	}

	/* revert */
	AuLabel(revert);
	if (wh_dentry) {
		int rerr;

		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, &dt);
		if (rerr)
			err = rerr;
	}

out_unpin:
	au_unpin(&pin);
	dput(wh_dentry);
	dput(h_dentry);
out_parent:
	di_write_unlock(parent);
	if (args)
		au_whtmp_rmdir_free(args);
out_unlock:
	aufs_read_unlock(dentry, AuLock_DW);
out:
	AuTraceErr(err);
	return err;
}
Example #26
/*
 * when an error happened, remove the created whiteout and revert everything.
 */
static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
		     aufs_bindex_t bwh, struct dentry *wh_dentry,
		     struct dentry *dentry, struct au_dtime *dt)
{
	int rerr;
	struct path h_path = {
		.dentry	= wh_dentry,
		.mnt	= au_sbr_mnt(dir->i_sb, bindex)
	};

	rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
	if (!rerr) {
		au_set_dbwh(dentry, bwh);
		au_dtime_revert(dt);
		return 0;
	}

	AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
		AuDLNPair(dentry), err, rerr);
	return -EIO;
}

/* ---------------------------------------------------------------------- */

int aufs_unlink(struct inode *dir, struct dentry *dentry)
{
	int err;
	aufs_bindex_t bwh, bindex, bstart;
	struct au_dtime dt;
	struct au_pin pin;
	struct path h_path;
	struct inode *inode, *h_dir;
	struct dentry *parent, *wh_dentry;

	IMustLock(dir);

	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
	if (unlikely(err))
		goto out;
	err = au_d_hashed_positive(dentry);
	if (unlikely(err))
		goto out_unlock;
	inode = dentry->d_inode;
	IMustLock(inode);
	err = -EISDIR;
	if (unlikely(S_ISDIR(inode->i_mode)))
		goto out_unlock; /* possible? */

	bstart = au_dbstart(dentry);
	bwh = au_dbwh(dentry);
	bindex = -1;
	parent = dentry->d_parent; /* dir inode is locked */
	di_write_lock_parent(parent);
	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &dt, &pin);
	err = PTR_ERR(wh_dentry);
	if (IS_ERR(wh_dentry))
		goto out_parent;

	h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
	h_path.dentry = au_h_dptr(dentry, bstart);
	dget(h_path.dentry);
	if (bindex == bstart) {
		h_dir = au_pinned_h_dir(&pin);
		err = vfsub_unlink(h_dir, &h_path, /*force*/0);
	} else {
		/* dir inode is locked */
		h_dir = wh_dentry->d_parent->d_inode;
		IMustLock(h_dir);
		err = 0;
	}

	if (!err) {
		vfsub_drop_nlink(inode);
		epilog(dir, dentry, bindex);

		/* update target timestamps */
		if (bindex == bstart) {
			vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
			inode->i_ctime = h_path.dentry->d_inode->i_ctime;
		} else
			/* todo: this timestamp may be reverted later */
			inode->i_ctime = h_dir->i_ctime;
		goto out_unpin; /* success */
	}

	/* revert */
	if (wh_dentry) {
		int rerr;

		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, &dt);
		if (rerr)
			err = rerr;
	}

out_unpin:
	au_unpin(&pin);
	dput(wh_dentry);
	dput(h_path.dentry);
out_parent:
	di_write_unlock(parent);
out_unlock:
	aufs_read_unlock(dentry, AuLock_DW);
out:
	return err;
}
Example #27
int GGLAssembler::scanline_core(const needs_t& needs, context_t const* c)
{
    int64_t duration = ggl_system_time();

    mBlendFactorCached = 0;
    mBlending = 0;
    mMasking = 0;
    mAA        = GGL_READ_NEEDS(P_AA, needs.p);
    mDithering = GGL_READ_NEEDS(P_DITHER, needs.p);
    mAlphaTest = GGL_READ_NEEDS(P_ALPHA_TEST, needs.p) + GGL_NEVER;
    mDepthTest = GGL_READ_NEEDS(P_DEPTH_TEST, needs.p) + GGL_NEVER;
    mFog       = GGL_READ_NEEDS(P_FOG, needs.p) != 0;
    mSmooth    = GGL_READ_NEEDS(SHADE, needs.n) != 0;
    mBuilderContext.needs = needs;
    mBuilderContext.c = c;
    mBuilderContext.Rctx = reserveReg(R0); // context always in R0
    mCbFormat = c->formats[ GGL_READ_NEEDS(CB_FORMAT, needs.n) ];

    // ------------------------------------------------------------------------

    decodeLogicOpNeeds(needs);

    decodeTMUNeeds(needs, c);

    mBlendSrc  = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRC, needs.n));
    mBlendDst  = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DST, needs.n));
    mBlendSrcA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRCA, needs.n));
    mBlendDstA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DSTA, needs.n));

    if (!mCbFormat.c[GGLFormat::ALPHA].h) {
        if ((mBlendSrc == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendSrc == GGL_DST_ALPHA)) {
            mBlendSrc = GGL_ONE;
        }
        if ((mBlendSrcA == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendSrcA == GGL_DST_ALPHA)) {
            mBlendSrcA = GGL_ONE;
        }
        if ((mBlendDst == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendDst == GGL_DST_ALPHA)) {
            mBlendDst = GGL_ONE;
        }
        if ((mBlendDstA == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendDstA == GGL_DST_ALPHA)) {
            mBlendDstA = GGL_ONE;
        }
    }

    // if we need the framebuffer, read it now
    const int blending =    blending_codes(mBlendSrc, mBlendDst) |
                            blending_codes(mBlendSrcA, mBlendDstA);

    // XXX: handle special cases, destination not modified...
    if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) &&
        (mBlendDst==GGL_ONE) && (mBlendDstA==GGL_ONE)) {
        // Destination unmodified (beware of logic ops)
    } else if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) &&
        (mBlendDst==GGL_ZERO) && (mBlendDstA==GGL_ZERO)) {
        // Destination is zero (beware of logic ops)
    }
    
    int fbComponents = 0;
    const int masking = GGL_READ_NEEDS(MASK_ARGB, needs.n);
    for (int i=0 ; i<4 ; i++) {
        const int mask = 1<<i;
        component_info_t& info = mInfo[i];
        int fs = i==GGLFormat::ALPHA ? mBlendSrcA : mBlendSrc;
        int fd = i==GGLFormat::ALPHA ? mBlendDstA : mBlendDst;
        if (fs==GGL_SRC_ALPHA_SATURATE && i==GGLFormat::ALPHA)
            fs = GGL_ONE;
        info.masked =   !!(masking & mask);
        info.inDest =   !info.masked && mCbFormat.c[i].h && 
                        ((mLogicOp & LOGIC_OP_SRC) || (!mLogicOp));
        if (mCbFormat.components >= GGL_LUMINANCE &&
                (i==GGLFormat::GREEN || i==GGLFormat::BLUE)) {
            info.inDest = false;
        }
        info.needed =   (i==GGLFormat::ALPHA) && 
                        (isAlphaSourceNeeded() || mAlphaTest != GGL_ALWAYS);
        info.replaced = !!(mTextureMachine.replaced & mask);
        info.iterated = (!info.replaced && (info.inDest || info.needed)); 
        info.smooth =   mSmooth && info.iterated;
        info.fog =      mFog && info.inDest && (i != GGLFormat::ALPHA);
        info.blend =    (fs != int(GGL_ONE)) || (fd > int(GGL_ZERO));

        mBlending |= (info.blend ? mask : 0);
        mMasking |= (mCbFormat.c[i].h && info.masked) ? mask : 0;
        fbComponents |= mCbFormat.c[i].h ? mask : 0;
    }

    mAllMasked = (mMasking == fbComponents);
    if (mAllMasked) {
        mDithering = 0;
    }
    
    fragment_parts_t parts;

    // ------------------------------------------------------------------------
    prolog();
    // ------------------------------------------------------------------------

    build_scanline_prolog(parts, needs);

    if (registerFile().status())
        return registerFile().status();

    // ------------------------------------------------------------------------
    label("fragment_loop");
    // ------------------------------------------------------------------------
    {
        Scratch regs(registerFile());

        if (mDithering) {
            // update the dither index.
            MOV(AL, 0, parts.count.reg,
                    reg_imm(parts.count.reg, ROR, GGL_DITHER_ORDER_SHIFT));
            ADD(AL, 0, parts.count.reg, parts.count.reg,
                    imm( 1 << (32 - GGL_DITHER_ORDER_SHIFT)));
            MOV(AL, 0, parts.count.reg,
                    reg_imm(parts.count.reg, ROR, 32 - GGL_DITHER_ORDER_SHIFT));
        }

        // XXX: could we do an early alpha-test here in some cases?
        // It would probably be used only with smooth-alpha and no texture
        // (or no alpha component in the texture).

        // Early z-test
        if (mAlphaTest==GGL_ALWAYS) {
            build_depth_test(parts, Z_TEST|Z_WRITE);
        } else {
            // we cannot do the z-write here, because
            // it might be killed by the alpha-test later
            build_depth_test(parts, Z_TEST);
        }

        { // texture coordinates
            Scratch scratches(registerFile());

            // texel generation
            build_textures(parts, regs);
            if (registerFile().status())
                return registerFile().status();
        }

        if ((blending & (FACTOR_DST|BLEND_DST)) || 
                (mMasking && !mAllMasked) ||
                (mLogicOp & LOGIC_OP_DST)) 
        {
            // blending / logic_op / masking need the framebuffer
            mDstPixel.setTo(regs.obtain(), &mCbFormat);

            // load the framebuffer pixel
            comment("fetch color-buffer");
            load(parts.cbPtr, mDstPixel);
        }

        if (registerFile().status())
            return registerFile().status();

        pixel_t pixel;
        int directTex = mTextureMachine.directTexture;
        if (directTex | parts.packed) {
            // note: we can't have both here
            // iterated color or direct texture
            pixel = directTex ? parts.texel[directTex-1] : parts.iterated;
            pixel.flags &= ~CORRUPTIBLE;
        } else {
            if (mDithering) {
                const int ctxtReg = mBuilderContext.Rctx;
                const int mask = GGL_DITHER_SIZE-1;
                parts.dither = reg_t(regs.obtain());
                AND(AL, 0, parts.dither.reg, parts.count.reg, imm(mask));
                ADDR_ADD(AL, 0, parts.dither.reg, ctxtReg, parts.dither.reg);
                LDRB(AL, parts.dither.reg, parts.dither.reg,
                        immed12_pre(GGL_OFFSETOF(ditherMatrix)));
            }
        
            // allocate a register for the resulting pixel
            pixel.setTo(regs.obtain(), &mCbFormat, FIRST);

            build_component(pixel, parts, GGLFormat::ALPHA,    regs);

            if (mAlphaTest!=GGL_ALWAYS) {
                // only handle the z-write part here. We know z-test
                // was successful, as well as alpha-test.
                build_depth_test(parts, Z_WRITE);
            }

            build_component(pixel, parts, GGLFormat::RED,      regs);
            build_component(pixel, parts, GGLFormat::GREEN,    regs);
            build_component(pixel, parts, GGLFormat::BLUE,     regs);

            pixel.flags |= CORRUPTIBLE;
        }

        if (registerFile().status())
            return registerFile().status();
        
        if (pixel.reg == -1) {
            // be defensive here. if we're here it's probably
            // that this whole fragment is a no-op.
            pixel = mDstPixel;
        }
        
        if (!mAllMasked) {
            // logic operation
            build_logic_op(pixel, regs);
    
            // masking
            build_masking(pixel, regs); 
    
            comment("store");
            store(parts.cbPtr, pixel, WRITE_BACK);
        }
    }

    if (registerFile().status())
        return registerFile().status();

    // update the iterated color...
    if (parts.reload != 3) {
        build_smooth_shade(parts);
    }

    // update iterated z
    build_iterate_z(parts);

    // update iterated fog
    build_iterate_f(parts);

    SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
    B(PL, "fragment_loop");
    label("epilog");
    epilog(registerFile().touched());

    if ((mAlphaTest!=GGL_ALWAYS) || (mDepthTest!=GGL_ALWAYS)) {
        if (mDepthTest!=GGL_ALWAYS) {
            label("discard_before_textures");
            build_iterate_texture_coordinates(parts);
        }
        label("discard_after_textures");
        build_smooth_shade(parts);
        build_iterate_z(parts);
        build_iterate_f(parts);
        if (!mAllMasked) {
            ADDR_ADD(AL, 0, parts.cbPtr.reg, parts.cbPtr.reg, imm(parts.cbPtr.size>>3));
        }
        SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
        B(PL, "fragment_loop");
        epilog(registerFile().touched());
    }
Example #28
int aufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int err, rerr;
	aufs_bindex_t bindex;
	unsigned char diropq;
	struct path h_path;
	struct dentry *wh_dentry, *parent, *opq_dentry;
	struct mutex *h_mtx;
	struct super_block *sb;
	struct {
		struct au_pin pin;
		struct au_dtime dt;
	} *a; /* reduce the stack usage */
	struct au_wr_dir_args wr_dir_args = {
		.force_btgt	= -1,
		.flags		= AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
	};

	IMustLock(dir);

	err = -ENOMEM;
	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (unlikely(!a))
		goto out;

	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
	if (unlikely(err))
		goto out_free;
	err = au_d_may_add(dentry);
	if (unlikely(err))
		goto out_unlock;

	parent = dentry->d_parent; /* dir inode is locked */
	di_write_lock_parent(parent);
	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
				      &a->pin, &wr_dir_args);
	err = PTR_ERR(wh_dentry);
	if (IS_ERR(wh_dentry))
		goto out_parent;

	sb = dentry->d_sb;
	bindex = au_dbstart(dentry);
	h_path.dentry = au_h_dptr(dentry, bindex);
	h_path.mnt = au_sbr_mnt(sb, bindex);
	err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
	if (unlikely(err))
		goto out_unpin;

	/* make the dir opaque */
	diropq = 0;
	h_mtx = &h_path.dentry->d_inode->i_mutex;
	if (wh_dentry
	    || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
		opq_dentry = au_diropq_create(dentry, bindex);
		mutex_unlock(h_mtx);
		err = PTR_ERR(opq_dentry);
		if (IS_ERR(opq_dentry))
			goto out_dir;
		dput(opq_dentry);
		diropq = 1;
	}

	err = epilog(dir, bindex, wh_dentry, dentry);
	if (!err) {
		inc_nlink(dir);
		goto out_unpin; /* success */
	}

	/* revert */
	if (diropq) {
		AuLabel(revert opq);
		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
		rerr = au_diropq_remove(dentry, bindex);
		mutex_unlock(h_mtx);
		if (rerr) {
			AuIOErr("%.*s reverting diropq failed(%d, %d)\n",
				AuDLNPair(dentry), err, rerr);
			err = -EIO;
		}
	}

out_dir:
	AuLabel(revert dir);
	rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
	if (rerr) {
		AuIOErr("%.*s reverting dir failed(%d, %d)\n",
			AuDLNPair(dentry), err, rerr);
		err = -EIO;
	}
	au_dtime_revert(&a->dt);
out_unpin:
	au_unpin(&a->pin);
	dput(wh_dentry);
out_parent:
	di_write_unlock(parent);
out_unlock:
	if (unlikely(err)) {
		au_update_dbstart(dentry);
		d_drop(dentry);
	}
	aufs_read_unlock(dentry, AuLock_DW);
out_free:
	kfree(a);
out:
	return err;
}
Example #29
/*
 * initial procedure of adding a new entry.
 * prepare writable branch and the parent dir, lock it,
 * and lookup whiteout for the new entry.
 */
static struct dentry*
lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
		  struct dentry *src_dentry, struct au_pin *pin,
		  struct au_wr_dir_args *wr_dir_args)
{
	struct dentry *wh_dentry, *h_parent;
	struct super_block *sb;
	struct au_branch *br;
	int err;
	unsigned int udba;
	aufs_bindex_t bcpup;

	AuDbg("%.*s\n", AuDLNPair(dentry));

	err = au_wr_dir(dentry, src_dentry, wr_dir_args);
	bcpup = err;
	wh_dentry = ERR_PTR(err);
	if (unlikely(err < 0))
		goto out;

	sb = dentry->d_sb;
	udba = au_opt_udba(sb);
	err = au_pin(pin, dentry, bcpup, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	wh_dentry = ERR_PTR(err);
	if (unlikely(err))
		goto out;

	h_parent = au_pinned_h_parent(pin);
	if (udba != AuOpt_UDBA_NONE
	    && au_dbstart(dentry) == bcpup)
		err = au_may_add(dentry, bcpup, h_parent,
				 au_ftest_wrdir(wr_dir_args->flags, ISDIR));
	else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
		err = -ENAMETOOLONG;
	wh_dentry = ERR_PTR(err);
	if (unlikely(err))
		goto out_unpin;

	br = au_sbr(sb, bcpup);
	if (dt) {
		struct path tmp = {
			.dentry	= h_parent,
			.mnt	= br->br_mnt
		};
		au_dtime_store(dt, au_pinned_parent(pin), &tmp);
	}

	wh_dentry = NULL;
	if (bcpup != au_dbwh(dentry))
		goto out; /* success */

	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);

out_unpin:
	if (IS_ERR(wh_dentry))
		au_unpin(pin);
out:
	return wh_dentry;
}

/* ---------------------------------------------------------------------- */

enum { Mknod, Symlink, Creat };
struct simple_arg {
	int type;
	union {
		struct {
			int mode;
			struct nameidata *nd;
		} c;
		struct {
			const char *symname;
		} s;
		struct {
			int mode;
			dev_t dev;
		} m;
	} u;
};

static int add_simple(struct inode *dir, struct dentry *dentry,
		      struct simple_arg *arg)
{
	int err;
	aufs_bindex_t bstart;
	unsigned char created;
	struct au_dtime dt;
	struct au_pin pin;
	struct path h_path;
	struct dentry *wh_dentry, *parent;
	struct inode *h_dir;
	struct au_wr_dir_args wr_dir_args = {
		.force_btgt	= -1,
		.flags		= AuWrDir_ADD_ENTRY
	};

	AuDbg("%.*s\n", AuDLNPair(dentry));
	IMustLock(dir);

	parent = dentry->d_parent; /* dir inode is locked */
	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
	if (unlikely(err))
		goto out;
	err = au_d_may_add(dentry);
	if (unlikely(err))
		goto out_unlock;
	di_write_lock_parent(parent);
	wh_dentry = lock_hdir_lkup_wh(dentry, &dt, /*src_dentry*/NULL, &pin,
				      &wr_dir_args);
	err = PTR_ERR(wh_dentry);
	if (IS_ERR(wh_dentry))
		goto out_parent;

	bstart = au_dbstart(dentry);
	h_path.dentry = au_h_dptr(dentry, bstart);
	h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
	h_dir = au_pinned_h_dir(&pin);
	switch (arg->type) {
	case Creat:
		err = vfsub_create(h_dir, &h_path, arg->u.c.mode);
		break;
	case Symlink:
		err = vfsub_symlink(h_dir, &h_path, arg->u.s.symname);
		break;
	case Mknod:
		err = vfsub_mknod(h_dir, &h_path, arg->u.m.mode, arg->u.m.dev);
		break;
	default:
		BUG();
	}
	created = !err;
	if (!err)
		err = epilog(dir, bstart, wh_dentry, dentry);

	/* revert */
	if (unlikely(created && err && h_path.dentry->d_inode)) {
		int rerr;
		rerr = vfsub_unlink(h_dir, &h_path, /*force*/0);
		if (rerr) {
			AuIOErr("%.*s revert failure(%d, %d)\n",
				AuDLNPair(dentry), err, rerr);
			err = -EIO;
		}
		au_dtime_revert(&dt);
	}

	au_unpin(&pin);
	dput(wh_dentry);

out_parent:
	di_write_unlock(parent);
out_unlock:
	if (unlikely(err)) {
		au_update_dbstart(dentry);
		d_drop(dentry);
	}
	aufs_read_unlock(dentry, AuLock_DW);
out:
	return err;
}

int aufs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct simple_arg arg = {
		.type = Mknod,
		.u.m = {
			.mode	= mode,
			.dev	= dev
		}
	};
	return add_simple(dir, dentry, &arg);
}

int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct simple_arg arg = {
		.type = Symlink,
		.u.s.symname = symname
	};
	return add_simple(dir, dentry, &arg);
}

int aufs_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	struct simple_arg arg = {
		.type = Creat,
		.u.c = {
			.mode	= mode,
			.nd	= nd
		}
	};
	return add_simple(dir, dentry, &arg);
}

/* ---------------------------------------------------------------------- */

struct au_link_args {
	aufs_bindex_t bdst, bsrc;
	struct au_pin pin;
	struct path h_path;
	struct dentry *src_parent, *parent;
};

static int au_cpup_before_link(struct dentry *src_dentry,
			       struct au_link_args *a)
{
	int err;
	struct dentry *h_src_dentry;
	struct mutex *h_mtx;
	struct file *h_file;

	di_read_lock_parent(a->src_parent, AuLock_IR);
	err = au_test_and_cpup_dirs(src_dentry, a->bdst);
	if (unlikely(err))
		goto out;

	h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
	h_mtx = &h_src_dentry->d_inode->i_mutex;
	err = au_pin(&a->pin, src_dentry, a->bdst,
		     au_opt_udba(src_dentry->d_sb),
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out;
	mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
	h_file = au_h_open_pre(src_dentry, a->bsrc);
	if (IS_ERR(h_file)) {
		err = PTR_ERR(h_file);
		h_file = NULL;
	} else
		err = au_sio_cpup_simple(src_dentry, a->bdst, a->bsrc,
					 AuCpup_DTIME /* | AuCpup_KEEPLINO */);
	mutex_unlock(h_mtx);
	au_h_open_post(src_dentry, a->bsrc, h_file);
	au_unpin(&a->pin);

out:
	di_read_unlock(a->src_parent, AuLock_IR);
	return err;
}

static int au_cpup_or_link(struct dentry *src_dentry, struct au_link_args *a)
{
	int err;
	unsigned char plink;
	struct inode *h_inode, *inode;
	struct dentry *h_src_dentry;
	struct super_block *sb;
	struct file *h_file;

	plink = 0;
	h_inode = NULL;
	sb = src_dentry->d_sb;
	inode = src_dentry->d_inode;
	if (au_ibstart(inode) <= a->bdst)
		h_inode = au_h_iptr(inode, a->bdst);
	if (!h_inode || !h_inode->i_nlink) {
		/* copyup src_dentry as the name of dentry. */
		au_set_dbstart(src_dentry, a->bdst);
		au_set_h_dptr(src_dentry, a->bdst, dget(a->h_path.dentry));
		h_inode = au_h_dptr(src_dentry, a->bsrc)->d_inode;
		mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
		h_file = au_h_open_pre(src_dentry, a->bsrc);
		if (IS_ERR(h_file)) {
			err = PTR_ERR(h_file);
			h_file = NULL;
		} else
			err = au_sio_cpup_single(src_dentry, a->bdst, a->bsrc,
						 -1, AuCpup_KEEPLINO,
						 a->parent);
		mutex_unlock(&h_inode->i_mutex);
		au_h_open_post(src_dentry, a->bsrc, h_file);
		au_set_h_dptr(src_dentry, a->bdst, NULL);
		au_set_dbstart(src_dentry, a->bsrc);
	} else {
		/* the inode of src_dentry already exists on a.bdst branch */
		h_src_dentry = d_find_alias(h_inode);
		if (!h_src_dentry && au_plink_test(inode)) {
			plink = 1;
			h_src_dentry = au_plink_lkup(inode, a->bdst);
			err = PTR_ERR(h_src_dentry);
			if (IS_ERR(h_src_dentry))
				goto out;

			if (unlikely(!h_src_dentry->d_inode)) {
				dput(h_src_dentry);
				h_src_dentry = NULL;
			}

		}
		if (h_src_dentry) {
			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
					 &a->h_path);
			dput(h_src_dentry);
		} else {
			AuIOErr("no dentry found for hi%lu on b%d\n",
				h_inode->i_ino, a->bdst);
			err = -EIO;
		}
	}

	if (!err && !plink)
		au_plink_append(inode, a->bdst, a->h_path.dentry);

out:
	AuTraceErr(err);
	return err;
}
Example #30
static void p9_xos_read_work(struct work_struct *work)
{
	struct p9_xos_driver *drv;
	struct p9_xos_endpoint *ep;
	int n;
	unsigned long flags;

	prolog("w=%p", work);

	drv = container_of(work, struct p9_xos_driver, rwork);
	ep = &drv->ep[RD_EP];

	drv->wake_status = 1;

	spin_lock_irqsave(&drv->ep_lock, flags);
	n = p9_xos_deque_pop(ep->lqueue, ep);
	spin_unlock_irqrestore(&drv->ep_lock, flags);
	if (n == deque_null)
		goto done;

	do {
		u16 tag;
		int id;
		unsigned int size;
		struct p9_xos_device *device;
		struct p9_req_t *req;
		u8 *ptr;
		u8 type;

		ptr = n2a(n, ep) + 4;

		id = *(int *)ptr;
		ptr += 4;

		size = le32_to_cpu(*(__le32 *) ptr);
		if (size < 7) {
			critical("ignoring too short request");
			break;
		}

		type = *(ptr + 4);

		__log_event(drv, id, type, RD_EP);

		device = &drv->device[id];

		if (type & 1) {
			if (size >= device->client->msize) {
				warning("requested packet size too big: %d\n",
					size);
				goto ignore;
			}
			tag = le16_to_cpu(*(__le16 *) (ptr + 5));
			req = p9_tag_lookup(device->client, tag);

			if (req == NULL) {
				warning("ignoring unexpected response");
				goto ignore;
			}

			BUG_ON(!req->rc);

			if (likely(req->aio_cb != NULL)) {
				req->rc->sdata = ptr;
				req->status = REQ_STATUS_RCVD;
				p9_client_notify_aio(device->client, req);
			} else {
				req->rc->sdata =
				    (char *)req->rc + sizeof(*req->rc);
				memcpy(req->rc->sdata, ptr, size);
				p9_client_cb(device->client, req);
			}
ignore:
			spin_lock_irqsave(&drv->ep_lock, flags);
			p9_xos_deque_push(ep->rqueue, n, ep);
			nb_free_packets++;
			spin_unlock_irqrestore(&drv->ep_lock, flags);
		} else {
			/*
			 * Dirty hack for the pmu_int server:
			 * pmu_int is on channel 1 and its client always has
			 * a request pending, so do not keep the wake lock
			 * when only a pmu_int request is pending.
			 */
			if (likely(device != &drv->device[1]))
				drv->wake_count++;

			if (unlikely(!device->open)) {
				warning("DEVICE %d NOT OPENED, ignoring req",
					device->id);
				goto ignore2;
			}
			req = kmem_cache_alloc(drv->cache, GFP_KERNEL);
			req->tc = kmalloc(sizeof(struct p9_fcall), GFP_KERNEL);
			req->tc->size = size;
			req->tc->sdata = ptr;
			req->aux = device;

			spin_lock(&device->lock);
			list_add_tail(&req->req_list, &device->req_list);
			spin_unlock(&device->lock);

			if (device->rd_cb)
				device->rd_cb(device, req);
		}
ignore2:
		spin_lock_irqsave(&drv->ep_lock, flags);
		n = p9_xos_deque_pop(ep->lqueue, ep);
		spin_unlock_irqrestore(&drv->ep_lock, flags);
	} while (n != deque_null);

done:
	if ((!drv->wake_count) && (drv->wake_status == 1)) {
		drv->wake_status = 0;
		wake_unlock(&drv->wake_lock);
		wmb();
		if (drv->wake_status == 2)
			wake_lock(&drv->wake_lock);
	}
	epilog();
}