Example #1
static int stmsmi_write(struct flash_bank *bank, uint8_t *buffer,
	uint32_t offset, uint32_t count)
{
	struct target *target = bank->target;
	struct stmsmi_flash_bank *stmsmi_info = bank->driver_priv;
	uint32_t io_base = stmsmi_info->io_base;
	uint32_t cur_count, page_size, page_offset;
	int sector;
	int retval = ERROR_OK;

	LOG_DEBUG("%s: offset=0x%08" PRIx32 " count=0x%08" PRIx32,
		__FUNCTION__, offset, count);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (offset + count > stmsmi_info->dev->size_in_bytes)
	{
		LOG_WARNING("Write pasts end of flash. Extra data discarded.");
		count = stmsmi_info->dev->size_in_bytes - offset;
	}

	/* Check sector protection */
	for (sector = 0; sector < bank->num_sectors; sector++)
	{
		/* Start offset in or before this sector? */
		/* End offset in or behind this sector? */
		if ( (offset <
				(bank->sectors[sector].offset + bank->sectors[sector].size))
			&& ((offset + count - 1) >= bank->sectors[sector].offset)
			&& bank->sectors[sector].is_protected )
		{
			LOG_ERROR("Flash sector %d protected", sector);
			return ERROR_FAIL;
		}
	}

	page_size = stmsmi_info->dev->pagesize;

	/* unaligned buffer head */
	if (count > 0 && (offset & 3) != 0)
	{
		cur_count = 4 - (offset & 3);
		if (cur_count > count)
			cur_count = count;
		retval = smi_write_buffer(bank, buffer, bank->base + offset,
			cur_count);
		if (retval != ERROR_OK)
			goto err;
		offset += cur_count;
		buffer += cur_count;
		count -= cur_count;
	}

	page_offset = offset % page_size;
	/* central part, aligned words */
	while (count >= 4)
	{
		/* clip block at page boundary */
		if (page_offset + count > page_size)
			cur_count = page_size - page_offset;
		else
			cur_count = count & ~3;

		retval = smi_write_buffer(bank, buffer, bank->base + offset,
			cur_count);
		if (retval != ERROR_OK)
			goto err;

		page_offset = 0;
		buffer += cur_count;
		offset += cur_count;
		count -= cur_count;

		keep_alive();
	}

	/* buffer tail */
	if (count > 0)
		retval = smi_write_buffer(bank, buffer, bank->base + offset, count);

err:
	/* Switch to HW mode before return to prompt */
	SMI_SET_HW_MODE();
	return retval;
}
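A minimal standalone sketch of the head / aligned-middle / tail chunking used by stmsmi_write() above. The values and helper name are hypothetical and there are no OpenOCD dependencies; it only reproduces the offset arithmetic, not the actual SMI writes:

/* chunking_sketch.c - reproduce stmsmi_write()'s chunking with printf instead of writes */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static void emit_chunks(uint32_t offset, uint32_t count, uint32_t page_size)
{
	uint32_t cur_count;

	/* unaligned head up to the next 4-byte boundary */
	if (count > 0 && (offset & 3) != 0) {
		cur_count = 4 - (offset & 3);
		if (cur_count > count)
			cur_count = count;
		printf("head:   offset=0x%08" PRIx32 " len=%" PRIu32 "\n", offset, cur_count);
		offset += cur_count;
		count -= cur_count;
	}

	/* aligned middle, clipped at page boundaries */
	uint32_t page_offset = offset % page_size;
	while (count >= 4) {
		if (page_offset + count > page_size)
			cur_count = page_size - page_offset;
		else
			cur_count = count & ~(uint32_t)3;
		printf("middle: offset=0x%08" PRIx32 " len=%" PRIu32 "\n", offset, cur_count);
		page_offset = 0;
		offset += cur_count;
		count -= cur_count;
	}

	/* unaligned tail */
	if (count > 0)
		printf("tail:   offset=0x%08" PRIx32 " len=%" PRIu32 "\n", offset, count);
}

int main(void)
{
	emit_chunks(0x0000fffeu, 300, 256); /* hypothetical offset, count, page size */
	return 0;
}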
Example #2
static JSObject *
gjs_keep_alive_new(JSContext *context)
{
    KeepAlive *priv;
    bool found;

    /* This function creates an unattached KeepAlive object; following our
     * general strategy, we have a single KeepAlive class with a constructor
     * stored on our single "load global" pseudo-global object, and we create
     * instances with the load global as parent.
     */

    g_assert(context != NULL);

    JSAutoRequest ar(context);

    JS::RootedObject global(context, gjs_get_import_global(context));

    g_assert(global != NULL);

    if (!JS_HasProperty(context, global, gjs_keep_alive_class.name, &found))
        return NULL;

    if (!found) {
        JSObject *prototype;

        gjs_debug(GJS_DEBUG_KEEP_ALIVE,
                  "Initializing keep-alive class in context %p global %p",
                  context, global.get());

        prototype = JS_InitClass(context, global,
                                 /* parent prototype JSObject* for
                                  * prototype; NULL for
                                  * Object.prototype
                                  */
                                 JS::NullPtr(),
                                 &gjs_keep_alive_class,
                                 /* constructor for instances (NULL for
                                  * none - just name the prototype like
                                  * Math - rarely correct)
                                  */
                                 gjs_keep_alive_constructor,
                                 /* number of constructor args */
                                 0,
                                 /* props of prototype */
                                 &gjs_keep_alive_proto_props[0],
                                 /* funcs of prototype */
                                 &gjs_keep_alive_proto_funcs[0],
                                 /* props of constructor, MyConstructor.myprop */
                                 NULL,
                                 /* funcs of constructor, MyConstructor.myfunc() */
                                 NULL);
        if (prototype == NULL)
            g_error("Can't init class %s", gjs_keep_alive_class.name);

        gjs_debug(GJS_DEBUG_KEEP_ALIVE, "Initialized class %s prototype %p",
                  gjs_keep_alive_class.name, prototype);
    }

    gjs_debug(GJS_DEBUG_KEEP_ALIVE,
              "Creating new keep-alive object for context %p global %p",
              context, global.get());

    JS::RootedObject keep_alive(context,
        JS_NewObject(context, &gjs_keep_alive_class, JS::NullPtr(), global));
    if (keep_alive == NULL) {
        gjs_log_exception(context);
        g_error("Failed to create keep_alive object");
    }

    priv = g_slice_new0(KeepAlive);
    priv->children = g_hash_table_new_full(child_hash, child_equal, NULL, child_free);

    g_assert(priv_from_js(context, keep_alive) == NULL);
    JS_SetPrivate(keep_alive, priv);

    gjs_debug_lifecycle(GJS_DEBUG_KEEP_ALIVE,
                        "keep_alive constructor, obj %p priv %p",
                        keep_alive.get(), priv);

    return keep_alive;
}
bool http_connection::prepare_http_request(unsigned fd)
{
	// Connect to backend.
	const char* host;
	unsigned short hostlen;
	unsigned short port;

#if !PROXY
	if ((_M_fd = _M_rule->backends.connect(host, hostlen, port)) < 0) {
		_M_error = http_error::GATEWAY_TIMEOUT;
		return true;
	}
#else
	host = _M_host.data();
	hostlen = _M_host.count() - 1;

	port = _M_port;

	if ((_M_fd = socket_wrapper::connect(_M_host.data(), port)) < 0) {
		_M_error = http_error::GATEWAY_TIMEOUT;
		return true;
	}
#endif

	if (!static_cast<http_server*>(_M_server)->add(_M_fd, selector::WRITE, true)) {
		socket_wrapper::close(_M_fd);
		_M_fd = -1;

		_M_error = http_error::INTERNAL_SERVER_ERROR;
		return true;
	}

	// Get Connection header.
	keep_alive();

	if (_M_rule->handler == rulelist::HTTP_HANDLER) {
		buffer* out = &(static_cast<http_server*>(_M_server)->_M_proxy_connections[_M_fd]._M_out);

		out->reset();

		if (!out->format("%s %.*s HTTP/1.1\r\n", http_method::get_method(_M_method), _M_path.count(), _M_path.data())) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (!_M_headers.add_known_header(http_headers::CONNECTION_HEADER, "close", 5, true)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		_M_headers.remove_known_header(http_headers::KEEP_ALIVE_HEADER);
		_M_headers.remove_known_header(http_headers::PROXY_CONNECTION_HEADER);

		char string[512];
		size_t len;
		if (port == url_parser::HTTP_DEFAULT_PORT) {
			len = snprintf(string, sizeof(string), "%.*s", hostlen, host);
		} else {
			len = snprintf(string, sizeof(string), "%.*s:%u", hostlen, host, port);
		}

		if (!_M_headers.add_known_header(http_headers::HOST_HEADER, string, len, true)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (!_M_headers.serialize(*out)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		static_cast<http_server*>(_M_server)->_M_proxy_connections[_M_fd]._M_fd = fd;
		static_cast<http_server*>(_M_server)->_M_proxy_connections[_M_fd]._M_client = this;

		static_cast<http_server*>(_M_server)->_M_proxy_connections[_M_fd]._M_timestamp = now::_M_time;
	} else {
		buffer* out = &(static_cast<http_server*>(_M_server)->_M_fcgi_connections[_M_fd]._M_out);

		out->reset();

		if (!fastcgi::begin_request(REQUEST_ID, fastcgi::FCGI_RESPONDER, false, *out)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (!add_fcgi_params(fd, out)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (_M_payload_in_memory) {
			if (!fastcgi::stdin_stream(REQUEST_ID, _M_in.data() + _M_request_header_size, _M_request_body_size, *out)) {
				_M_error = http_error::INTERNAL_SERVER_ERROR;
				return true;
			}

			if (!fastcgi::stdin_stream(REQUEST_ID, NULL, 0, *out)) {
				_M_error = http_error::INTERNAL_SERVER_ERROR;
				return true;
			}
		} else {
			if (!fastcgi::stdin_stream(REQUEST_ID, NULL, 0, _M_tmpfile, _M_tmpfilesize)) {
				_M_error = http_error::INTERNAL_SERVER_ERROR;
				return true;
			}
		}

		static_cast<http_server*>(_M_server)->_M_fcgi_connections[_M_fd]._M_fd = fd;
		static_cast<http_server*>(_M_server)->_M_fcgi_connections[_M_fd]._M_client = this;

		static_cast<http_server*>(_M_server)->_M_fcgi_connections[_M_fd]._M_timestamp = now::_M_time;
	}

	static_cast<http_server*>(_M_server)->_M_connection_handlers[_M_fd] = _M_rule->handler;

	return true;
}
bool http_connection::process_request(unsigned fd)
{
	char* data = _M_in.data();
	data[_M_uri + _M_urilen] = 0;

	// Parse URL.
	_M_url.reset();
	url_parser::parse_result parse_result = _M_url.parse(data + _M_uri, _M_urilen, _M_path);
	if (parse_result == url_parser::ERROR_NO_MEMORY) {
		logger::instance().log(logger::LOG_INFO, "[http_connection::process_request] (fd %d) No memory, URL (%.*s).", fd, _M_urilen, data + _M_uri);

		_M_error = http_error::INTERNAL_SERVER_ERROR;
		return true;
	} else if (parse_result == url_parser::PARSE_ERROR) {
		if (_M_path.count() > 0) {
			logger::instance().log(logger::LOG_INFO, "[http_connection::process_request] (fd %d) URL parse error (%.*s).", fd, _M_path.count(), _M_path.data());
		} else {
			logger::instance().log(logger::LOG_INFO, "[http_connection::process_request] (fd %d) URL parse error (%.*s).", fd, _M_urilen, data + _M_uri);
		}

		_M_error = http_error::BAD_REQUEST;
		return true;
	} else if (parse_result == url_parser::FORBIDDEN) {
		logger::instance().log(logger::LOG_INFO, "[http_connection::process_request] (fd %d) Forbidden URL (%.*s).", fd, _M_path.count(), _M_path.data());

		_M_error = http_error::FORBIDDEN;
		return true;
	}

	// If an absolute URL has been received...
	unsigned short hostlen;
	const char* host = _M_url.get_host(hostlen);
	if (host) {
		if ((_M_major_number == 1) && (_M_minor_number == 1)) {
			// Ignore Host header (if present).
			const char* value;
			unsigned short valuelen;
			if (!_M_headers.get_value_known_header(http_headers::HOST_HEADER, value, &valuelen)) {
				_M_error = http_error::BAD_REQUEST;
				return true;
			}
		}

		if (!_M_host.append_nul_terminated_string(host, hostlen)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		_M_port = _M_url.get_port();

#if PROXY
		_M_http_rule.handler = rulelist::HTTP_HANDLER;
		_M_rule = &_M_http_rule;
		return process_non_local_handler(fd);
#endif // PROXY

		if (_M_url.get_port() != static_cast<http_server*>(_M_server)->_M_port) {
			not_found();
			return true;
		}

		_M_vhost = static_cast<http_server*>(_M_server)->_M_vhosts.get_host(host, hostlen);
	} else {
		if (_M_headers.get_value_known_header(http_headers::HOST_HEADER, host, &hostlen)) {
			// If the host includes port number...
			unsigned port;

			const char* semicolon = (const char*) memrchr(host, ':', hostlen);
			if (semicolon) {
				if (number::parse_unsigned(semicolon + 1, (host + hostlen) - (semicolon + 1), port, 1, 65535) != number::PARSE_SUCCEEDED) {
					_M_error = http_error::BAD_REQUEST;
					return true;
				}

				hostlen = semicolon - host;
			} else {
				port = url_parser::HTTP_DEFAULT_PORT;
			}

			if (port != static_cast<http_server*>(_M_server)->_M_port) {
				not_found();
				return true;
			}

			_M_vhost = static_cast<http_server*>(_M_server)->_M_vhosts.get_host(host, hostlen);
		} else {
			if ((_M_major_number == 1) && (_M_minor_number == 1)) {
				_M_error = http_error::BAD_REQUEST;
				return true;
			}

			_M_vhost = static_cast<http_server*>(_M_server)->_M_vhosts.get_default_host();
		}
	}

	if (!_M_vhost) {
		not_found();
		return true;
	}

	unsigned short pathlen;
	const char* urlpath = _M_url.get_path(pathlen);

	unsigned short extensionlen;
	const char* extension = _M_url.get_extension(extensionlen);

	_M_rule = _M_vhost->rules->find(_M_method, urlpath, pathlen, extension, extensionlen);

	if (_M_rule->handler != rulelist::LOCAL_HANDLER) {
		if (_M_rule->handler == rulelist::FCGI_HANDLER) {
			size_t len = _M_vhost->rootlen + pathlen;
			if (len > PATH_MAX) {
				_M_error = http_error::REQUEST_URI_TOO_LONG;
				return true;
			}

			// Save decoded path.
			if (!_M_decoded_path.append(urlpath, pathlen)) {
				_M_error = http_error::INTERNAL_SERVER_ERROR;
				return true;
			}

			unsigned short query_string_len;
			const char* query_string = _M_url.get_query(query_string_len);
			if (query_string_len > 1) {
				// Save query string.
				if (!_M_query_string.append(query_string + 1, query_string_len - 1)) {
					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}
			}
		}

		return process_non_local_handler(fd);
	}

	// Local handler.

	if ((_M_method != http_method::GET) && (_M_method != http_method::HEAD)) {
		_M_error = http_error::NOT_IMPLEMENTED;
		return true;
	}

	char path[PATH_MAX + 1];
	size_t len = _M_vhost->rootlen + pathlen;
	if (len >= sizeof(path)) {
		_M_error = http_error::REQUEST_URI_TOO_LONG;
		return true;
	}

	memcpy(path, _M_vhost->root, _M_vhost->rootlen);
	memcpy(path + _M_vhost->rootlen, urlpath, pathlen);
	path[len] = 0;

	struct stat buf;
	if (stat(path, &buf) < 0) {
		not_found();
		return true;
	}

	bool dirlisting;
	bool index_file;

	// If the URI points to a directory...
	if (S_ISDIR(buf.st_mode)) {
		// If the directory name doesn't end with '/'...
		if (path[len - 1] != '/') {
			moved_permanently();
			return true;
		}

		// Search index file.
		if (static_cast<http_server*>(_M_server)->_M_index_file_finder.search(path, len, &buf)) {
			// File.
			dirlisting = false;
			index_file = true;
		} else {
			// If we don't have directory listing...
			if (!_M_vhost->have_dirlisting) {
				not_found();
				return true;
			} else {
				// Build directory listing.
				if (!_M_vhost->dir_listing->build(urlpath, pathlen, _M_body)) {
					logger::instance().log(logger::LOG_WARNING, "[http_connection::process_request] (fd %d) Couldn't build directory listing for (%s).", fd, path);

					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}

				_M_bodyp = &_M_body;

				dirlisting = true;
				index_file = false;
			}
		}
	} else if ((S_ISREG(buf.st_mode)) || (S_ISLNK(buf.st_mode))) {
		// File.
		dirlisting = false;
		index_file = false;
	} else {
		not_found();
		return true;
	}

	if (!dirlisting) {
		const char* value;
		unsigned short valuelen;
		if (_M_headers.get_value_known_header(http_headers::IF_MODIFIED_SINCE_HEADER, value, &valuelen)) {
			time_t t;
			if ((t = date_parser::parse(value, valuelen, &_M_last_modified)) != (time_t) -1) {
				if (t == buf.st_mtime) {
					not_modified();
					return true;
				} else if (t > buf.st_mtime) {
					gmtime_r(&buf.st_mtime, &_M_last_modified);
					not_modified();

					return true;
				}
			}
		}

		if (_M_method == http_method::GET) {
			if ((_M_headers.get_value_known_header(http_headers::RANGE_HEADER, value, &valuelen)) && (valuelen > 6) && (strncasecmp(value, "bytes=", 6) == 0)) {
				if (!range_parser::parse(value + 6, valuelen - 6, buf.st_size, _M_ranges)) {
					requested_range_not_satisfiable();
					return true;
				}
			}

			if ((_M_fd = file_wrapper::open(path, O_RDONLY)) < 0) {
				logger::instance().log(logger::LOG_WARNING, "[http_connection::process_request] (fd %d) Couldn't open file (%s).", fd, path);

				_M_error = http_error::INTERNAL_SERVER_ERROR;
				return true;
			}
		}
	}

	http_headers* headers = &(static_cast<http_server*>(_M_server)->_M_headers);
	headers->reset();

	if (!headers->add_known_header(http_headers::DATE_HEADER, &now::_M_tm)) {
		_M_error = http_error::INTERNAL_SERVER_ERROR;
		return true;
	}

	if (keep_alive()) {
		if (!headers->add_known_header(http_headers::CONNECTION_HEADER, "Keep-Alive", 10, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	} else {
		if (!headers->add_known_header(http_headers::CONNECTION_HEADER, "close", 5, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	}

	if (!headers->add_known_header(http_headers::SERVER_HEADER, WEBSERVER_NAME, sizeof(WEBSERVER_NAME) - 1, false)) {
		_M_error = http_error::INTERNAL_SERVER_ERROR;
		return true;
	}

	unsigned short status_code;

	// Directory listing?
	if (dirlisting) {
		char num[32];
		int numlen = snprintf(num, sizeof(num), "%lu", _M_body.count());
		if (!headers->add_known_header(http_headers::CONTENT_LENGTH_HEADER, num, numlen, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (!headers->add_known_header(http_headers::CONTENT_TYPE_HEADER, "text/html; charset=UTF-8", 24, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		status_code = 200;
	} else {
		if (!headers->add_known_header(http_headers::ACCEPT_RANGES_HEADER, "bytes", 5, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		if (index_file) {
			const char* end = path + len;
			const char* ptr = end;
			extension = NULL;
			while ((ptr > path) && (*(ptr - 1) != '/')) {
				if (*(ptr - 1) == '.') {
					extension = ptr;
					extensionlen = end - extension;
					break;
				}

				ptr--;
			}
		}

		if ((extension) && (extensionlen > 0)) {
			_M_type = static_cast<http_server*>(_M_server)->_M_mime_types.get_mime_type(extension, extensionlen, _M_typelen);
		} else {
			_M_type = mime_types::DEFAULT_MIME_TYPE;
			_M_typelen = mime_types::DEFAULT_MIME_TYPE_LEN;
		}

		_M_filesize = compute_content_length(buf.st_size);

		char num[32];
		int numlen = snprintf(num, sizeof(num), "%lld", _M_filesize);
		if (!headers->add_known_header(http_headers::CONTENT_LENGTH_HEADER, num, numlen, false)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}

		const range_list::range* range;
		char content_range[128];

		switch (_M_ranges.count()) {
			case 0:
				if (!headers->add_known_header(http_headers::CONTENT_TYPE_HEADER, _M_type, _M_typelen, false)) {
					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}

				status_code = 200;

				break;
			case 1:
				if (!headers->add_known_header(http_headers::CONTENT_TYPE_HEADER, _M_type, _M_typelen, false)) {
					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}

				range = _M_ranges.get(0);

				len = snprintf(content_range, sizeof(content_range), "bytes %lld-%lld/%lld", range->from, range->to, buf.st_size);
				if (!headers->add_known_header(http_headers::CONTENT_RANGE_HEADER, content_range, len, false)) {
					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}

				status_code = 206;

				break;
			default:
				_M_boundary = ++(static_cast<http_server*>(_M_server)->_M_boundary);
				if (!headers->add_content_type_multipart(_M_boundary)) {
					_M_error = http_error::INTERNAL_SERVER_ERROR;
					return true;
				}

				status_code = 206;
		}

		// Add 'Last-Modified' header.

		struct tm timestamp;
		gmtime_r(&buf.st_mtime, &timestamp);

		if (!headers->add_known_header(http_headers::LAST_MODIFIED_HEADER, &timestamp)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	}

	_M_out.reset();

	if (status_code == 200) {
		if (!_M_out.append("HTTP/1.1 200 OK\r\n", 17)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	} else {
		if (!_M_out.append("HTTP/1.1 206 Partial Content\r\n", 30)) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	}

	if (!headers->serialize(_M_out)) {
		_M_error = http_error::INTERNAL_SERVER_ERROR;
		return true;
	}

	_M_response_header_size = _M_out.count();

	// If multipart...
	if (_M_ranges.count() >= 2) {
		if (!build_part_header()) {
			_M_error = http_error::INTERNAL_SERVER_ERROR;
			return true;
		}
	}

	_M_error = http_error::OK;

	if (_M_method == http_method::HEAD) {
		_M_filesize = 0;

		_M_state = SENDING_HEADERS_STATE;
	} else {
		if (dirlisting) {
			_M_filesize = _M_body.count();

			_M_state = SENDING_TWO_BUFFERS_STATE;
		} else {
			socket_wrapper::cork(fd);

			_M_state = SENDING_HEADERS_STATE;
		}
	}

	return true;
}
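The index-file branch above re-derives the file extension by scanning the resolved path backwards until it hits a '.' or a '/'. A standalone sketch of that scan, with a hypothetical helper name and example path:

/* extension_sketch.c - backwards extension scan as used in process_request() */
#include <stdio.h>
#include <string.h>

/* Returns a pointer just past the '.' of the last path component's extension,
 * or NULL if the last component has no extension; *extlen gets its length. */
static const char *find_extension(const char *path, size_t len, size_t *extlen)
{
	const char *end = path + len;
	const char *ptr = end;

	while (ptr > path && *(ptr - 1) != '/') {
		if (*(ptr - 1) == '.') {
			*extlen = (size_t)(end - ptr);
			return ptr;
		}
		ptr--;
	}
	return NULL;
}

int main(void)
{
	const char *path = "/var/www/index.html"; /* hypothetical path */
	size_t extlen = 0;
	const char *ext = find_extension(path, strlen(path), &extlen);

	if (ext)
		printf("extension: %.*s\n", (int)extlen, ext); /* prints: html */
	else
		printf("no extension\n");
	return 0;
}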
Example #5
int main()
{
	tlog_init(TLOG_MODE_STDERR, TLOG_INFO, NULL);

	rcp_connect("10.25.25.223");

	start_event_handler();

	client_register(RCP_USER_LEVEL_LIVE, "", RCP_REGISTRATION_TYPE_NORMAL, RCP_ENCRYPTION_MODE_MD5);

	rcp_coder_list encoders, decoders;
	get_coder_list(RCP_CODER_ENCODER, RCP_MEDIA_TYPE_VIDEO, &encoders, 1);
	TL_DEBUG("***");
	for (int i=0; i<encoders.count; i++)
		TL_DEBUG("%x %x %x %x %x", encoders.coder[i].number, encoders.coder[i].caps, encoders.coder[i].current_cap, encoders.coder[i].param_caps, encoders.coder[i].current_param);
	TL_DEBUG("***");
	get_coder_list(RCP_CODER_DECODER, RCP_MEDIA_TYPE_VIDEO, &decoders, 1);
	TL_DEBUG("***");
	for (int i=0; i<decoders.count; i++)
		TL_DEBUG("%x %x %x %x %x", decoders.coder[i].number, decoders.coder[i].caps, decoders.coder[i].current_cap, decoders.coder[i].param_caps, decoders.coder[i].current_param);
	TL_DEBUG("***");

	rcp_session session;
	memset(&session, 0, sizeof(rcp_session));
	unsigned short tcp_port = stream_connect_tcp(&session);

	rcp_media_descriptor desc = {
			RCP_MEP_TCP, 1, 1, 0, tcp_port, 0, 1, RCP_VIDEO_CODING_H263P, RCP_VIDEO_RESOLUTION_4CIF
	};

	client_connect(&session, RCP_CONNECTION_METHOD_GET, RCP_MEDIA_TYPE_VIDEO, 0, &desc);

	initiate_tcp_stream(&session, &decoders.coder[0]);

	int res = fork();
	if (res == 0)
	{
		while (1)
		{
			sleep(2);
			int n = keep_alive(&session);
			//TL_DEBUG("active connections = %d", n);
			if (n < 0)
				break;
		}
	}


	rtp_merge_desc mdesc;
	rtp_init(RTP_PAYLOAD_TYPE_H263, 1, &mdesc);

	time_t end_time = time(NULL) + 10;
	while (time(NULL) < end_time)
	{
/*
		int num = recv(con.stream_socket, buffer, 1500, 0);

		rtp_push_frame(buffer, num, &mdesc);
*/
		char buff[2000];
		int size = recv(session.stream_socket, buff, 1000, 0);
		fwrite(buff, size, 1, stdout);
/*
		rtp_recv(session.stream_socket, &mdesc);

		if (rtp_pop_frame(&mdesc) == 0)
			fwrite(mdesc.data, mdesc.frame_lenght, 1, stdout);
*/
	}

	stop_event_handler();


	return 0;
}
Example #6
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::YoungGen,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_debug(gc)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
oop ClassLoaderData::keep_alive_object() const {
  assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
  return is_anonymous() ? _klasses->java_mirror() : class_loader();
}
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
                  (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

       if (PrintTenuringDistribution) {
         gclog_or_tty->cr();
         gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                size_policy->calculated_survivor_size_in_bytes(),
                                _tenuring_threshold, MaxTenuringThreshold);
       }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining
      // can change the heap layout. Make sure eden is reshaped if that's the case.
      // Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif


  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
Example #10
// EXPORTED to FA256
int arm920t_soft_reset_halt(struct target *target)
{
	int retval = ERROR_OK;
	struct arm920t_common *arm920t = target_to_arm920(target);
	struct arm7_9_common *arm7_9 = target_to_arm7_9(target);
	struct arm *armv4_5 = &arm7_9->armv4_5_common;
	struct reg *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];

	if ((retval = target_halt(target)) != ERROR_OK)
	{
		return retval;
	}

	long long then = timeval_ms();
	int timeout;
	while (!(timeout = ((timeval_ms()-then) > 1000)))
	{
		if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1)
				== 0)
		{
			embeddedice_read_reg(dbg_stat);
			if ((retval = jtag_execute_queue()) != ERROR_OK)
			{
				return retval;
			}
		} else
		{
			break;
		}
		if (debug_level >= 3)
		{
			/* do not eat all CPU, time out after 1 sec */
			alive_sleep(100);
		} else
		{
			keep_alive();
		}
	}
	if (timeout)
	{
		LOG_ERROR("Failed to halt CPU after 1 sec");
		return ERROR_TARGET_TIMEOUT;
	}

	target->state = TARGET_HALTED;

	/* SVC, ARM state, IRQ and FIQ disabled */
	uint32_t cpsr;

	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
	cpsr &= ~0xff;
	cpsr |= 0xd3;
	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	/* start fetching from 0x0 */
	buf_set_u32(armv4_5->pc->value, 0, 32, 0x0);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;

	arm920t_disable_mmu_caches(target, 1, 1, 1);
	arm920t->armv4_5_mmu.mmu_enabled = 0;
	arm920t->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
	arm920t->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;

	return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
}
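The halt-polling loop above follows a pattern common to OpenOCD drivers: poll a status bit, service the keep-alive machinery (keep_alive()/alive_sleep()) while waiting so the host link is not starved, and give up after one second. A standalone sketch of just that timing skeleton, using hypothetical stand-ins for the OpenOCD helpers:

/* poll_sketch.c - 1-second polling skeleton with keep-alive servicing (POSIX) */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t ms_now(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Hypothetical stand-ins for embeddedice_read_reg()/buf_get_u32() and
 * keep_alive()/alive_sleep(); here the condition simply never becomes true. */
static bool poll_ready(void)        { return false; }
static void service_keepalive(void) { /* keep the debug link alive while waiting */ }

int main(void)
{
	int64_t then = ms_now();
	bool timeout;

	while (!(timeout = (ms_now() - then > 1000))) {
		if (poll_ready())
			break;                 /* target halted */
		service_keepalive();       /* avoid starving the host-side timeout */
	}

	printf(timeout ? "timed out\n" : "halted\n");
	return timeout ? 1 : 0;
}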
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_large_noref,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL, 
    "This must be the youngest gen, and not the only gen");
  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!full_promotion_would_succeed()) {
    gch->set_incremental_collection_will_fail();
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("DefNewGeneration::collect"
                    " contiguous_available: " SIZE_FORMAT " < used: " SIZE_FORMAT,
                    _next_gen->max_contiguous_available(), used());
    }
    return;
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0), 
	 "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
  NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
  COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());
      
  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();
  if (!cp->is_train_policy()) {
    FastScanClosure fsc_with_no_gc_barrier(this, false);
    FastScanClosure fsc_with_gc_barrier(this, true);

    FastEvacuateFollowersClosure evacuate_followers(gch, _level,
                                                    &fsc_with_no_gc_barrier, 
                                                    &fsc_with_gc_barrier);
    
    assert(gch->no_allocs_since_save_marks(0), 
           "save marks have not been newly set.");
    
    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                              // strong roots.
                              false,// not collecting permanent generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &fsc_with_gc_barrier, 
                              &fsc_with_no_gc_barrier);
    
    // "evacuate followers".
    evacuate_followers.do_void();
    
    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  } else { // Train policy
    ScanClosure sc_with_no_gc_barrier(this, false);
    ScanClosure sc_with_gc_barrier(this, true);
    
    EvacuateFollowersClosure evacuate_followers(gch, _level,
                                                &sc_with_no_gc_barrier, 
                                                &sc_with_gc_barrier);
    
    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                              // strong roots.
                              false,// not collecting perm generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &sc_with_gc_barrier, 
                              &sc_with_no_gc_barrier);
    
    // "evacuate followers".
    evacuate_followers.do_void();

    TrainPolicyKeepAliveClosure keep_alive((TrainGeneration*)_next_gen, 
                                           &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  }

  // Swap the survivor spaces: clear eden and the old from-space (their live
  // objects have already been copied out or promoted), then exchange from()
  // and to() so the survivors end up in from-space.
  eden()->clear();
  from()->clear();
  swap_spaces();
  
  assert(to()->is_empty(), "to space should be empty now");

  // Recompute the tenuring threshold from the age table, based on the
  // current survivor space capacity (in words).
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
// This method contains no policy. You should probably
// be calling invoke() instead. 
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return;
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  AdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (PrintHeapAtGC){
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
    Universe::print();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceTime t1("GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();
    
    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
      CardTableExtension::verify_all_young_refs_imprecise();
    }
    
    assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear(););

    reference_processor()->enable_discovery();
    
    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);
    
    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      
      GCTaskQueue* q = GCTaskQueue::create();
      
      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));

      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads-1; j++) {
          q->enqueue(new StealTask(false));
        }
        q->enqueue(new StealTask(true));
      }

      WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
      q->enqueue(fin);

      gc_task_manager()->add_list(q);
      
      fin->wait_for();

      // We have to release the barrier tasks!
      WaitForBarrierGCTask::destroy(fin);
    }

    scavenge_midpoint.update();

    NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
    COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());
    
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    PSEvacuateFollowersClosure evac_followers(promotion_manager);
    
    // Process reference objects discovered during scavenge
    reference_processor()->process_discovered_references(soft_ref_policy, &is_alive,
                                                         &keep_alive, &evac_followers);
    
    // Enqueue reference objects discovered during scavenge.
    notify_ref_lock = reference_processor()->enqueue_discovered_references();
    
    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack()->size() == 0, "Sanity");
    PSPromotionManager::post_scavenge();

    bool scavenge_promotion_failure = promotion_failed();
    if (scavenge_promotion_failure) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're implicitly
    // saying it's mutator time).
    size_policy->minor_collection_end();

    if (!scavenge_promotion_failure) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold
        size_t survived = young_gen->from_space()->used_in_bytes();
        size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStart: collection: %d ",
                         heap->total_collections());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(survived,
                                                                 promoted,
                                                                 _survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        // Calculate optimal free space amounts
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   false  /* full gc*/);
        
        // Resize the old and young generations
        old_gen->resize(size_policy->calculated_old_free_size_in_bytes());
        
        young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                          size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }

      }

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_ONLY(DerivedPointerTable::update_pointers());
    
    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
        
    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();
    
    if (PrintGC) {
      heap->print_heap_change(prev_used);
    }

    heap->update_counters();
  }
Example #13
0
static int msp432_write(struct flash_bank *bank, const uint8_t *buffer,
	uint32_t offset, uint32_t count)
{
	struct target *target = bank->target;
	struct msp432_bank *msp432_bank = bank->driver_priv;
	struct msp432_algo_params algo_params;
	uint32_t size;
	uint32_t data_ready = BUFFER_DATA_READY;
	long long start_ms;
	long long elapsed_ms;

	int retval;

	if (TARGET_HALTED != target->state) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/*
	 * Block attempts to write to read-only sectors of flash
	 * The TLV region in sector 1 of the info flash is always read-only
	 * The BSL region in sectors 2 and 3 of the info flash may be unlocked
	 * The helper algorithm will hang on attempts to write to TLV
	 */
	if (1 == bank->bank_number) {
		/* Set read-only start to TVL sector */
		uint32_t start = 0x1000;
		/* Set read-only end after BSL region if locked */
		uint32_t end = (msp432_bank->unlock_bsl) ? 0x2000 : 0x4000;
		/* Check if request includes anything in read-only sectors */
		if ((offset + count - 1) < start || offset >= end) {
			/* The request includes no bytes in read-only sectors */
			/* Fall out and process the request normally */
		} else {
			/* Send a request for anything before read-only sectors */
			if (offset < start) {
				uint32_t start_count = MIN(start - offset, count);
				retval = msp432_write(bank, buffer, offset, start_count);
				if (ERROR_OK != retval)
					return retval;
			}
			/* Send a request for anything after read-only sectors */
			if ((offset + count - 1) >= end) {
				uint32_t skip = end - offset;
				count -= skip;
				offset += skip;
				buffer += skip;
				return msp432_write(bank, buffer, offset, count);
			} else {
				/* Request is entirely in read-only sectors */
				return ERROR_OK;
			}
		}
	}

	retval = msp432_init(bank);
	if (ERROR_OK != retval)
		return retval;

	/* Initialize algorithm parameters to default values */
	msp432_init_params(&algo_params);

	/* Set up parameters for requested flash write operation */
	buf_set_u32(algo_params.address, 0, 32, bank->base + offset);
	buf_set_u32(algo_params.length, 0, 32, count);

	/* Check if this is the info bank */
	if (1 == bank->bank_number) {
		/* And flag if BSL is unlocked */
		if (msp432_bank->unlock_bsl)
			buf_set_u32(algo_params.unlock_bsl, 0, 32, FLASH_UNLOCK_BSL);
	}

	/* Set up flash helper algorithm to continuous flash mode */
	retval = msp432_exec_cmd(target, &algo_params, FLASH_CONTINUOUS);
	if (ERROR_OK != retval) {
		(void)msp432_quit(bank);
		return retval;
	}

	/* Write requested data, one buffer at a time */
	start_ms = timeval_ms();
	while (count > 0) {

		if (count > ALGO_BUFFER_SIZE)
			size = ALGO_BUFFER_SIZE;
		else
			size = count;

		/* Put next block of data to flash into buffer */
		retval = target_write_buffer(target, ALGO_BUFFER1_ADDR, size, buffer);
		if (ERROR_OK != retval) {
			LOG_ERROR("Unable to write data to target memory");
			(void)msp432_quit(bank);
			return ERROR_FLASH_OPERATION_FAILED;
		}

		/* Signal the flash helper algorithm that data is ready to flash */
		retval = target_write_buffer(target, ALGO_BUFFER1_STATUS_ADDR,
					sizeof(data_ready), (uint8_t *)&data_ready);
		if (ERROR_OK != retval) {
			(void)msp432_quit(bank);
			return ERROR_FLASH_OPERATION_FAILED;
		}

		retval = msp432_wait_inactive(target, 1);
		if (ERROR_OK != retval) {
			(void)msp432_quit(bank);
			return retval;
		}

		count -= size;
		buffer += size;

		elapsed_ms = timeval_ms() - start_ms;
		if (elapsed_ms > 500)
			keep_alive();
	}

	/* Confirm that the flash helper algorithm is finished */
	retval = msp432_wait_return_code(target);
	if (ERROR_OK != retval) {
		(void)msp432_quit(bank);
		return retval;
	}

	retval = msp432_quit(bank);
	if (ERROR_OK != retval)
		return retval;

	return retval;
}
Example #14
0
bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
  bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
      || is_alive_closure->do_object_b(keep_alive_object());

  return alive;
}
Example #15
0
	ImageSocketTCPImpl::ImageSocketTCPImpl(std::string ip_address, int send_port, int receive_port)
		: ImageSocketImpl(ip_address, send_port, receive_port),
		  send_socket(this->send_io_service),
		  receive_socket(this->receive_io_service),
		  acceptor(this->receive_io_service, tcp::endpoint(tcp::v4(), receive_port)) {
		// Allow the listening endpoint to be rebound immediately after a restart.
		asio::socket_base::reuse_address reuse_address(true);
		this->acceptor.set_option(reuse_address);
		// Enable the TCP keep-alive option on the acceptor's listening socket.
		asio::socket_base::keep_alive keep_alive(true);
		this->acceptor.set_option(keep_alive);
	}
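In the constructor above the keep-alive option is applied to the acceptor, i.e. to the listening socket; whether an accepted connection inherits that option is not guaranteed on every platform, so the option is often set on each connected socket as well. Below is a minimal sketch of that per-socket variant using standalone Asio (the header name, the port number 9300, and the enable_keep_alive helper are illustrative assumptions, not part of the original example; older Asio releases use io_service where io_context appears here).

#include <asio.hpp>

using asio::ip::tcp;

// Hypothetical helper: turn on SO_KEEPALIVE for one connected socket.
static void enable_keep_alive(tcp::socket &socket)
{
	asio::socket_base::keep_alive keep_alive(true);
	socket.set_option(keep_alive);
}

int main()
{
	asio::io_context io_context;
	tcp::acceptor acceptor(io_context, tcp::endpoint(tcp::v4(), 9300));

	tcp::socket socket(io_context);
	acceptor.accept(socket);   // blocking accept, kept simple for the sketch
	enable_keep_alive(socket); // apply the option to the accepted socket itself
	return 0;
}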