Example #1
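/* Client write request: if this node holds the object's metadata it acts
 * as the master and performs the write itself; otherwise the request is
 * forwarded to each peer in turn until one of them accepts it. */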
void reqc_write(int sock, struct write_packet *p, const void *buffer) {
	struct object_descriptor obj;
	int err, wsize;

	lock_object(p->obj);

	if (read_metadata(p->obj, &obj)) {
		reqm_write(sock, p, buffer, 1);

		return;
	}

	for (unsigned int i = 0; i < peers.size(); i++) {
		pthread_mutex_lock(&peer_locks[i]);
		forward_data(peer_srv_socks[i], OP_WRITE, OS_MASTER, p, sizeof(*p), buffer, p->size);
		err = receive_reply(peer_srv_socks[i], &wsize, sizeof(wsize));	/* status in err, peer's write size in wsize */
		pthread_mutex_unlock(&peer_locks[i]);

		if (err >= 0) {
			unlock_object(p->obj);

			send_reply(sock, &wsize, sizeof(wsize));
			return;
		}
	}

	unlock_object(p->obj);

	send_error(sock, ENOENT);
}
Example #2
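/* Master-side create: create the object locally, record a replica on a
 * randomly chosen peer, and forward the create to that slave node. */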
void reqm_create(int sock, struct create_packet *p) {
	struct replica_info repl;
	int err, node = rand() % peers.size();

	lock_object(p->dir_obj);
	lock_object(p->obj);

	err = create_object_mst(p->dir_obj, p->obj, (const char*)p->name, p->type);

	if (err) {
		unlock_object(p->dir_obj);
		unlock_object(p->obj);

		send_error(sock, err);
		return;
	}

	repl.id = p->obj;
	repl.node = node;

	pthread_mutex_lock(&replicas_lock);
	replicas.push_back(repl);
	pthread_mutex_unlock(&replicas_lock);

	pthread_mutex_lock(&peer_locks[node]);
	forward_packet(peer_srv_socks[node], OP_CREATE, OS_SLAVE, p, sizeof(*p));
	receive_reply(peer_srv_socks[node], NULL, 0);
	pthread_mutex_unlock(&peer_locks[node]);

	unlock_object(p->dir_obj);
	unlock_object(p->obj);

	send_reply(sock, NULL, 0);
}
Example #3
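/* Push a full copy of an existing object (create followed by a write of
 * its data) to the given node, which will hold it as a slave replica. */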
void create_replica(int node, oid_t id) {
	struct create_packet pc;
	struct write_packet pw;
	struct object_descriptor obj;
	void *buffer;

	if (!read_metadata(id, &obj))
		return;	/* object no longer exists; nothing to replicate */

	buffer = malloc(obj.meta.size);
	if (buffer == NULL)
		return;

	read_data(&obj, 0, obj.meta.size, buffer);

	pc.obj = id;
	pc.type = obj.meta.type;
	strncpy((char*)pc.name, (const char*)obj.meta.name, 256);
	pc.name[255] = '\0';	/* strncpy() does not NUL-terminate on truncation */

	pw.obj = id;
	pw.offset = 0;
	pw.size = obj.meta.size;

	lock_object(id);

	pthread_mutex_lock(&peer_locks[node]);
	forward_packet(peer_srv_socks[node], OP_CREATE, OS_SLAVE, &pc, sizeof(pc));
	receive_reply(peer_srv_socks[node], NULL, 0);

	forward_data(peer_srv_socks[node], OP_WRITE, OS_SLAVE, &pw, sizeof(pw), buffer, pw.size);
	receive_reply(peer_srv_socks[node], NULL, 0);
	pthread_mutex_unlock(&peer_locks[node]);

	unlock_object(id);

	free(buffer);
}
Example #4
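/* Master-side write: apply the write locally, update the object's
 * recorded size, then propagate the same write to every replica. */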
void reqm_write(int sock, struct write_packet *p, const void *buffer, int unlock) {
	struct object_descriptor obj;
	int wsize, node;

	if (!read_metadata(p->obj, &obj)) {
		if (unlock)
			unlock_object(p->obj);

		send_error(sock, ENOENT);
		return;
	}

	wsize = write_data(&obj, p->offset, p->size, buffer);

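	/* An offset of -1 appears to mean append; otherwise the recorded size
	 * grows only if the write extended past the current end. */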
	if (p->offset != -1)
		obj.meta.size = obj.meta.size > p->offset + p->size ? obj.meta.size : p->offset + p->size;
	else
		obj.meta.size += p->size;
	write_metadata(&obj);

	pthread_mutex_lock(&replicas_lock);
	for (unsigned int i = 0; i < replicas.size(); i++) {
		if (!memcmp(&replicas[i].id, &p->obj, sizeof(oid_t))) {
			node = replicas[i].node;

			pthread_mutex_lock(&peer_locks[node]);
			forward_data(peer_srv_socks[node], OP_WRITE, OS_SLAVE, p, sizeof(*p), buffer, p->size);
			receive_reply(peer_srv_socks[node], NULL, 0);
			pthread_mutex_unlock(&peer_locks[node]);
		}
	}
	pthread_mutex_unlock(&replicas_lock);

	if (unlock)
		unlock_object(p->obj);

	send_reply(sock, &wsize, sizeof(wsize));
}
Example #5
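/* Master-side remove: delete the object locally, drop its entry from the
 * replica table, and tell the node holding the replica to delete its copy. */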
void reqm_remove(int sock, struct remove_packet *p, int unlock) {
	int err, node = -1;

	err = remove_object_mst(p->dir_obj, p->obj, p->type);
	if (err) {
		if (unlock) {
			unlock_object(p->dir_obj);
			unlock_object(p->obj);
		}

		send_error(sock, err);
		return;
	}

	pthread_mutex_lock(&replicas_lock);
	for (unsigned int i = 0; i < replicas.size(); i++) {
		if (!memcmp(&p->obj, &replicas[i].id, sizeof(oid_t))) {
			node = replicas[i].node;
			replicas.erase(replicas.begin() + i);
			break;
		}
	}
	pthread_mutex_unlock(&replicas_lock);

	if (node != -1) {
		pthread_mutex_lock(&peer_locks[node]);
		forward_packet(peer_srv_socks[node], OP_REMOVE, OS_SLAVE, p, sizeof(*p));
		receive_reply(peer_srv_socks[node], NULL, 0);
		pthread_mutex_unlock(&peer_locks[node]);
	}

	if (unlock) {
		unlock_object(p->dir_obj);
		unlock_object(p->obj);
	}

	send_reply(sock, NULL, 0);
}
Example #6
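/* Client remove request: handled locally if this node is the object's
 * master, otherwise forwarded to each peer until one accepts it. */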
void reqc_remove(int sock, struct remove_packet *p) {
	struct object_descriptor obj;
	int err;

	lock_object(p->dir_obj);
	lock_object(p->obj);

	if (read_metadata(p->obj, &obj)) {
		reqm_remove(sock, p, 1);
		return;
	}

	for (unsigned int i = 0; i < peers.size(); i++) {
		pthread_mutex_lock(&peer_locks[i]);
		forward_packet(peer_srv_socks[i], OP_REMOVE, OS_MASTER, p, sizeof(*p));
		err = receive_reply(peer_srv_socks[i], NULL, 0);
		pthread_mutex_unlock(&peer_locks[i]);

		if (err >= 0) {
			unlock_object(p->dir_obj);
			unlock_object(p->obj);

			send_reply(sock, NULL, 0);
			return;
		} else if (err != -ENOENT) {
			unlock_object(p->dir_obj);
			unlock_object(p->obj);

			send_error(sock, -err);
			return;
		}
	}

	unlock_object(p->dir_obj);
	unlock_object(p->obj);

	send_error(sock, ENOENT);
}
address InterpreterGenerator::generate_native_entry(bool synchronized)
{
  const Register handler  = r14;
  const Register function = r15;

  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor,
                             handler, function);

  // We use the same code for synchronized and unsynchronized methods
  if (native_entry)
    return native_entry;

  address start = __ pc();

  // Allocate and initialize our stack frame.
  __ load (Rstate, 0);
  generate_compute_interpreter_state(true);

  // Make sure method is native and not abstract
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset()));
    __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT);
    __ compare (r0, JVM_ACC_NATIVE);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Lock if necessary
  Label not_synchronized_1;
  
  __ bne (CRsync, not_synchronized_1);
  __ lock_object (Rmonitor);
  __ bind (not_synchronized_1);
  
  // Get signature handler
  const Address signature_handler_addr(
    Rmethod, methodOopDesc::signature_handler_offset());

  Label return_to_caller, got_signature_handler;

  __ load (handler, signature_handler_addr);
  __ compare (handler, 0);
  __ bne (got_signature_handler);
  __ call_VM (noreg,
              CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::prepare_native_call),
              Rmethod,
              CALL_VM_NO_EXCEPTION_CHECKS);
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  __ load (handler, signature_handler_addr);
  __ bind (got_signature_handler); 

  // Get the native function entry point
  const Address native_function_addr(
    Rmethod, methodOopDesc::native_function_offset());

  Label got_function;

  __ load (function, native_function_addr);
#ifdef ASSERT
  {
    // InterpreterRuntime::prepare_native_call() sets the mirror
    // handle and native function address first and the signature
    // handler last, so function should always be set here.
    Label ok;
    __ compare (function, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Call signature handler
  __ mtctr (handler);
  __ bctrl ();
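  // The signature handler leaves the address of the result handler in r0;
  // keep it in `handler' for use after the native call returns.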
  __ mr (handler, r0);

  // Pass JNIEnv
  __ la (r3, Address(Rthread, JavaThread::jni_environment_offset()));

  // Pass mirror handle if static
  const Address oop_temp_addr = STATE(_oop_temp);

  Label not_static;

  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r4);
  __ store (r4, oop_temp_addr);
  __ la (r4, oop_temp_addr);
  __ bind (not_static);

  // Set up the Java frame anchor
  __ set_last_Java_frame ();

  // Change the thread state to native
  const Address thread_state_addr(Rthread, JavaThread::thread_state_offset());
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, thread_state_addr);
    __ compare (r0, _thread_in_Java);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif
  __ load (r0, _thread_in_native);
  __ stw (r0, thread_state_addr);

  // Make the call
  __ call (function);
  __ fixup_after_potential_safepoint ();

  // The result will be in r3 (and maybe r4 on 32-bit) or f1.
  // Wherever it is, we need to store it before calling anything.
  const Register r3_save      = r16;
#ifdef PPC32
  const Register r4_save      = r17;
#endif
  const FloatRegister f1_save = f14;

  __ mr (r3_save, r3);
#ifdef PPC32
  __ mr (r4_save, r4);
#endif
  __ fmr (f1_save, f1);

  // Switch thread to "native transition" state before reading the
  // synchronization state.  This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic with respect to garbage collection.
  __ load (r0, _thread_in_native_trans);
  __ stw (r0, thread_state_addr);

  // Ensure the new state is visible to the VM thread.
  if (os::is_MP()) {
    if (UseMembar)
      __ sync ();
    else
      __ serialize_memory (r3, r4);
  }

  // Check for safepoint operation in progress and/or pending
  // suspend requests.  We use a leaf call in order to leave
  // the last_Java_frame setup undisturbed.
  Label block, no_block;

  __ load (r3, (intptr_t) SafepointSynchronize::address_of_state());
  __ lwz (r0, Address(r3, 0));
  __ compare (r0, SafepointSynchronize::_not_synchronized);
  __ bne (block);
  __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ compare (r0, 0);
  __ beq (no_block);
  __ bind (block);
  __ call_VM_leaf (
       CAST_FROM_FN_PTR(address, 
                        JavaThread::check_special_condition_for_native_trans));
  __ fixup_after_potential_safepoint ();
  __ bind (no_block);

  // Change the thread state
  __ load (r0, _thread_in_Java);
  __ stw (r0, thread_state_addr);

  // Reset the frame anchor  
  __ reset_last_Java_frame ();

  // If the result was an OOP then unbox it and store it in the frame
  // (where it will be safe from garbage collection) before we release
  // the handle it might be protected by
  Label non_oop, store_oop;
  
  __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT));
  __ compare (r0, handler);
  __ bne (non_oop);
  __ compare (r3_save, 0);
  __ beq (store_oop);
  __ load (r3_save, Address(r3_save, 0));
  __ bind (store_oop);
  __ store (r3_save, STATE(_oop_temp));
  __ bind (non_oop);

  // Reset handle block
  __ load (r3, Address(Rthread, JavaThread::active_handles_offset()));
  __ load (r0, 0);
  __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes()));

  // If there is an exception we skip the result handler and return.
  // Note that this also skips unlocking, which seems totally wrong,
  // but apparently this is what the asm interpreter does, so we do
  // too.
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  
  // Unlock if necessary
  Label not_synchronized_2;
  
  __ bne (CRsync, not_synchronized_2);
  __ unlock_object (Rmonitor);
  __ bind (not_synchronized_2);

  // Restore saved result and call the result handler
  __ mr (r3, r3_save);
#ifdef PPC32
  __ mr (r4, r4_save);
#endif
  __ fmr (f1, f1_save);
  __ mtctr (handler);
  __ bctrl ();
  
  // Unwind the current activation and return
  __ bind (return_to_caller);

  generate_unwind_interpreter_state();
  __ blr ();

  native_entry = start;
  return start;
}