bool Proc::startCGI(void) { // GNASH_REPORT_FUNCTION; log_unimpl(__PRETTY_FUNCTION__); return false; }
// Show or hide the mouse pointer.
//
// @param show  ignored: pointer visibility cannot be changed yet in
//              the framebuffer GUI.
// @return      hardcoded true until this is implemented.
bool
FBGui::showMouse(bool /*show*/)
{
    log_unimpl(_("This GUI does not yet support a mouse pointer"));

    // Should return true if the pointer was visible before call,
    // otherwise false;
    return true;
}
// Create a function block for AMF Element::Element(bool /* flag */, double /* unknown1 */, double /* unknown2 */, const string &/* methodname */) : _name(0), _type(NOTYPE), _referenceid(0) { // GNASH_REPORT_FUNCTION; log_unimpl(_("Can't create remote function calls yet")); }
bool Proc::stopCGI(void) { // GNASH_REPORT_FUNCTION; log_unimpl(__PRETTY_FUNCTION__); std::lock_guard<std::mutex> lock(_mutex); return false; }
// Stop the CGI process.
//
// Unimplemented stub: logs its own name and reports failure.
//
// NOTE(review): the lock presumably guards Proc's shared state --
// kept exactly as in the original, acquired after the log call.
//
// @return false always.
bool
Proc::stopCGI(void)
{
    // GNASH_REPORT_FUNCTION;
    log_unimpl("%s", __PRETTY_FUNCTION__);

    boost::mutex::scoped_lock lock(_mutex);

    return false;
}
// Build a movie_definition from an opened stream.
//
// The stream's content type is sniffed; images become bitmap movies,
// SWF data becomes a full SWF movie, and anything else (including
// bare FLV) yields a null pointer.
//
// @param in                the opened input stream (must be non-null).
// @param url               the URL the stream was fetched from.
// @param runResources      runtime resources passed to the loaders.
// @param startLoaderThread whether to spawn the background loader.
// @return                  the new definition, or null on failure.
boost::intrusive_ptr<movie_definition>
MovieFactory::makeMovie(std::auto_ptr<IOChannel> in, const std::string& url,
        const RunResources& runResources, bool startLoaderThread)
{
    boost::intrusive_ptr<movie_definition> ret;

    assert(in.get());

    // see if it's a jpeg or an swf
    const FileType type = getFileType(*in);

    // All supported image formats take the bitmap route.
    if (type == GNASH_FILETYPE_JPEG || type == GNASH_FILETYPE_PNG ||
            type == GNASH_FILETYPE_GIF) {

        if (!startLoaderThread) {
            log_unimpl(_("Requested to keep from completely loading "
                         "a movie, but the movie in question is an "
                         "image, for which we don't yet have the "
                         "concept of a 'loading thread'"));
        }
        ret = createBitmapMovie(in, url, runResources, type);
        return ret;
    }

    if (type == GNASH_FILETYPE_SWF) {
        ret = createSWFMovie(in, url, runResources, startLoaderThread);
        return ret;
    }

    if (type == GNASH_FILETYPE_FLV) {
        log_unimpl(_("FLV can't be loaded directly as a movie"));
        return ret;
    }

    log_error(_("unknown file type (%s)"), type);
    return ret;
}
// Construct a shared-memory segment descriptor of the requested size.
//
// Shared memory is not implemented on Haiku yet, so this only
// zero-initialises the bookkeeping members and logs the fact.
SharedMem::SharedMem(size_t size)
    :
    _addr(0),
    _size(size),
    _semid(0),
    _shmid(0),
    _shmkey(0)
{
    log_unimpl(_("%s on Haiku"), __FUNCTION__);
}
as_value camera_activitylevel(const fn_call& fn) { Camera_as* ptr = ensure<ThisIsNative<Camera_as> >(fn); if (!fn.nargs) { log_unimpl("Camera::activityLevel only has default value"); return as_value(ptr->activityLevel()); } IF_VERBOSE_ASCODING_ERRORS( log_aserror(_("Attempt to set activity property of Camera")); );
// ActionScript FileReference constructor.
//
// The constructor takes no supported arguments; anything passed in
// is dumped to the log (once) and thrown away.
as_value
filereference_ctor(const fn_call& fn)
{
    if (!fn.nargs) return as_value();

    std::stringstream argsDump;
    fn.dump_args(argsDump);
    LOG_ONCE(log_unimpl("FileReference(%s): %s", argsDump.str(),
                _("arguments discarded")));

    return as_value();
}
// Setter for Camera.motionLevel.
//
// Stores the requested motion level (default 50) and timeout
// (default 2000) on the native Camera object. Levels outside the
// 0..100 range collapse to 100. The underlying camera support is
// not implemented, which is logged.
as_value
camera_setmotionlevel(const fn_call& fn)
{
    log_unimpl ("Camera::motionLevel can be set, but it's not implemented");
    Camera_as* cam = ensure<ThisIsNative<Camera_as> >(fn);

    const double level = fn.nargs > 0 ? fn.arg(0).to_number() : 50;
    const double timeout = fn.nargs > 1 ? fn.arg(1).to_number() : 2000;

    // Only 0..100 is meaningful; anything else (including NaN)
    // becomes 100.
    const size_t clampedLevel = (level >= 0 && level <= 100) ? level : 100;
    cam->setMotionLevel(clampedLevel, timeout);

    return as_value();
}
// Setter for Camera.quality.
//
// Stores the requested bandwidth (default 16384) and picture quality
// (default 0) on the native Camera object. Quality values outside
// 0..100 collapse to 100. The underlying camera support is not
// implemented, which is logged.
as_value
camera_setquality(const fn_call& fn)
{
    log_unimpl ("Camera::quality can be set, but it's not implemented");
    Camera_as* cam = ensure<ThisIsNative<Camera_as> >(fn);

    const double bandwidth = fn.nargs > 0 ? fn.arg(0).to_number() : 16384;
    const double rawQuality = fn.nargs > 1 ? fn.arg(1).to_number() : 0;

    // Only 0..100 is meaningful; anything else becomes 100.
    const size_t quality = (rawQuality < 0 || rawQuality > 100) ?
        100 : rawQuality;
    cam->setQuality(bandwidth, quality);

    return as_value();
}
size_t Handler::addClient(int fd, Network::protocols_supported_e proto) { // GNASH_REPORT_FUNCTION; boost::mutex::scoped_lock lock(_mutex); log_debug("Adding %d to the client array.", fd); switch (proto) { case Network::NONE: break; case Network::HTTP: { boost::shared_ptr<HTTPServer> http(new HTTPServer); _http[fd] = http; break; } case Network::HTTPS: break; case Network::RTMP: { boost::shared_ptr<RTMPServer> rtmp(new RTMPServer); _rtmp[fd] = rtmp; break; } case Network::RTMPT: case Network::RTMPTS: case Network::RTMPE: case Network::RTMPS: case Network::DTN: default: log_unimpl("Protocol %d for Handler::AddClient()", proto); break; } _clients.push_back(fd); _protocol[fd] = proto; return _clients.size(); }
// Hide the window's menu bar. Not implemented in this GUI; the
// LOG_ONCE wrapper keeps the warning from flooding the logs.
void
Gui::hideMenu()
{
    LOG_ONCE(log_unimpl(_("Menu show/hide not yet supported in this GUI")));
}
// Leave fullscreen mode. Not implemented in this base GUI, so the
// request is only logged.
void
Gui::unsetFullscreen()
{
    log_unimpl(_("Fullscreen not yet supported in this GUI"));
}
// Resize the GUI window to the given dimensions (presumably pixels
// -- TODO confirm against callers). Not implemented here, so both
// arguments are ignored.
void
Gui::resizeWindow(int /*width*/, int /*height*/)
{
    log_unimpl(_("Window resize not yet supported in this GUI"));
}
// Simulate the ioctls used to get information from the framebuffer // driver. Since this is an emulator, we have to set these fields // to a reasonable default. int fakefb_ioctl(int /* fd */, int request, void *data) { // GNASH_REPORT_FUNCTION; switch (request) { case FBIOGET_VSCREENINFO: { struct fb_var_screeninfo *ptr = reinterpret_cast<struct fb_var_screeninfo *>(data); // Note that the fake framebuffer is only used for // debugging and development. // Framebuffer device uses 1536000 bytes of memory at this size #if 0 ptr->xres = 1024; // visible resolution ptr->xres_virtual = 1024; // virtual resolution ptr->yres = 768; // visible resolution ptr->yres_virtual = 768; // virtual resolution // standard PC framebuffer use a 32 bit 8/8/8 framebuffer ptr->bits_per_pixel = 24; ptr->red.offset = 0; ptr->red.length = 8; ptr->green.offset = 16; ptr->green.length = 8; ptr->blue.offset = 0; ptr->blue.length = 6; ptr->transp.offset = 0; ptr->transp.length = 0; #else ptr->xres = 800; // visible resolution ptr->xres_virtual = 1600; // virtual resolution ptr->yres = 480; // visible resolution ptr->yres_virtual = 480; // virtual resolution // Most modile devices use a 16bit 5/6/5 framebuffer ptr->bits_per_pixel = 16; ptr->red.length = 5; ptr->red.offset = 11; ptr->green.length = 6; ptr->green.offset = 5; ptr->blue.length = 5; ptr->blue.offset = 0; ptr->transp.offset = 0; ptr->transp.length = 0; #endif // 8bit framebuffer // ptr->bits_per_pixel = 8; // ptr->red.length = 8; // ptr->red.offset = 0; // ptr->green.length = 8; // ptr->green.offset = 0; // ptr->blue.length = 8; // ptr->blue.offset = 0; // ptr->transp.offset = 0; // ptr->transp.length = 0; ptr->grayscale = 1; // != 0 Graylevels instead of color break; } case FBIOGET_FSCREENINFO: { struct fb_fix_screeninfo *ptr = reinterpret_cast<struct fb_fix_screeninfo *>(data); #if 1 // Most mobile devices use a 16bit 5/6/5 framebuffer ptr->smem_len = 33554432; // size of frame buffer memory ptr->type = 0; // see FB_TYPE_* 
ptr->visual = 2; // see FB_VISUAL_* ptr->xpanstep = 1; // zero if no hardware panning ptr->ypanstep = 1; // zero if no hardware panning ptr->ywrapstep = 0; // zero if no hardware panning ptr->line_length = 1600; // line length ptr->accel = FB_ACCEL_NONE; // Indicate to driver which specific // chip/card we have #else // Android and fbe use a 16bit 5/6/5 framebuffer ptr->smem_len = 307200; // Length of frame buffer mem ptr->type = FB_TYPE_PACKED_PIXELS; // see FB_TYPE_* ptr->visual = FB_VISUAL_PSEUDOCOLOR; // see FB_VISUAL_* ptr->xpanstep = 0; // zero if no hardware panning ptr->ypanstep = 0; // zero if no hardware panning ptr->ywrapstep = 0; // zero if no hardware panning ptr->accel = FB_ACCEL_NONE; // Indicate to driver which specific // chip/card we have #endif break; } case FBIOPUTCMAP: { // Fbe uses this name for the fake framebuffer, so in this // case assume we're using fbe, so write to the known fbe // cmap file. std::string str = FAKEFB; if (str == "/tmp/fbe_buffer") { int fd = open("/tmp/fbe_cmap", O_WRONLY); if (fd) { write(fd, data, sizeof(struct fb_cmap)); close(fd); } else { log_error(_("Couldn't write to the fake cmap!")); return -1; } } else { log_error(_("Couldn't write to the fake cmap, unknown type!")); return -1; } // If we send a SIGUSR1 signal to fbe, it'll reload the // color map. int fd = open("/tmp/fbe.pid", O_RDONLY); char buf[10]; if (fd) { if (read(fd, buf, 10) == 0) { close(fd); return -1; } else { pid_t pid = strtol(buf, 0, NULL); kill(pid, SIGUSR1); log_debug(_("Signaled fbe to reload it's colormap.")); } close(fd); } break; } default: log_unimpl(_("fakefb_ioctl(%d)"), request); break; } return 0; }
// Fetch a stream by name from this connection.
//
// @return an empty auto_ptr: this connection type cannot fetch
//         streams, which is logged with the concrete subclass name.
std::auto_ptr<IOChannel>
ConnectionHandler::getStream(const std::string&)
{
    log_unimpl("%s doesn't support fetching streams", typeName(*this));
    return std::auto_ptr<IOChannel>(0);
}
// Tear down the shared-memory segment. Nothing to release yet:
// Haiku shared-memory support is unimplemented.
SharedMem::~SharedMem()
{
    log_unimpl(_("%s on Haiku"), __FUNCTION__);
}
/// Hit-test a point against a shape's paths (fills and strokes).
///
/// @param paths       the shape's paths.
/// @param lineStyles  stroke styles referenced by each path's m_line index.
/// @param x, y        the query point, in the paths' own coordinate space
///                    (the hairline default of 20 units suggests twips --
///                    TODO confirm).
/// @param wm          world matrix, used to scale non-scaling stroke widths.
/// @return            true if the point lies inside a fill or within half a
///                    stroke's thickness of an outline.
bool
pointTest(const std::vector<Path>& paths,
        const std::vector<LineStyle>& lineStyles, boost::int32_t x,
        boost::int32_t y, const SWFMatrix& wm)
{

    /*
    Principle:
    For the fill of the shape, we project a ray from the test point to the
    left side of the shape counting all crossings. When a line or curve
    segment is crossed we add 1 if the left fill style is set. Regardless of
    the left fill style we subtract 1 from the counter then the right fill
    style is set.
    This is true when the line goes in downward direction. If it goes upward,
    the fill styles are reversed.

    The final counter value reveals if the point is inside the shape (and
    depends on filling rule, see below).
    This method should not depend on subshapes and work for some malformed
    shapes situations:
    - wrong fill side (eg. left side set for a clockwise drawen rectangle)
    - intersecting paths
    */
    point pt(x, y);

    // later we will need non-zero for glyphs... (TODO)
    bool even_odd = true;

    unsigned npaths = paths.size();
    int counter = 0;

    // browse all paths
    for (unsigned pno=0; pno<npaths; pno++)
    {
        const Path& pth = paths[pno];
        unsigned nedges = pth.m_edges.size();

        // Pen state walks the path edge by edge, starting at the
        // path's anchor point.
        float next_pen_x = pth.ap.x;
        float next_pen_y = pth.ap.y;
        float pen_x, pen_y;

        if (pth.empty()) continue;

        // If the path has a line style, check for strokes there
        if (pth.m_line != 0 )
        {
            assert(lineStyles.size() >= pth.m_line);
            const LineStyle& ls = lineStyles[pth.m_line-1];
            double thickness = ls.getThickness();
            if (! thickness )
            {
                // hairline stroke: give it a minimal hit area
                thickness = 20; // at least ONE PIXEL thick.
            }
            else if ((!ls.scaleThicknessVertically()) &&
                    (!ls.scaleThicknessHorizontally()) )
            {
                // non-scaling stroke: apply the world scale so the
                // test matches what is actually rendered
                // TODO: pass the SWFMatrix to withinSquareDistance instead ?
                double xScale = wm.get_x_scale();
                double yScale = wm.get_y_scale();
                thickness *= std::max(xScale, yScale);
            }
            else if (ls.scaleThicknessVertically() !=
                    ls.scaleThicknessHorizontally())
            {
                LOG_ONCE(log_unimpl(_("Collision detection for "
                            "unidirectionally scaled strokes")));
            }

            // Compare squared distances to avoid a sqrt.
            double dist = thickness / 2.0;
            double sqdist = dist * dist;

            if (pth.withinSquareDistance(pt, sqdist))
                return true;
        }

        // browse all edges of the path
        for (unsigned eno=0; eno<nedges; eno++)
        {
            const Edge& edg = pth.m_edges[eno];

            pen_x = next_pen_x;
            pen_y = next_pen_y;
            next_pen_x = edg.ap.x;
            next_pen_y = edg.ap.y;

            float cross1 = 0.0, cross2 = 0.0;
            int dir1 = 0, dir2 = 0; // +1 = downward, -1 = upward
            int crosscount = 0;

            if (edg.straight())
            {
                // ignore horizontal lines
                // TODO: better check for small difference?
                if (edg.ap.y == pen_y)
                {
                    continue;
                }
                // does this line cross the Y coordinate?
                if ( ((pen_y <= y) && (edg.ap.y >= y)) ||
                        ((pen_y >= y) && (edg.ap.y <= y)) )
                {
                    // calculate X crossing
                    cross1 = pen_x + (edg.ap.x - pen_x) * (y - pen_y) /
                        (edg.ap.y - pen_y);

                    if (pen_y > edg.ap.y)
                        dir1 = -1; // upward
                    else
                        dir1 = +1; // downward

                    crosscount = 1;
                }
                else
                {
                    // no crossing found
                    crosscount = 0;
                }
            }
            else
            {
                // ==> curve case
                crosscount = curve_x_crossings<float>(pen_x, pen_y,
                        edg.ap.x, edg.ap.y, edg.cp.x, edg.cp.y, y,
                        cross1, cross2);
                dir1 = pen_y > y ? -1 : +1;
                dir2 = dir1 * (-1); // second crossing always in opposite dir.
            } // curve

            // ==> we have now:
            //  - one (cross1) or two (cross1, cross2) ray crossings
            //    (X coordinate)
            //  - dir1/dir2 tells the direction of the crossing
            //    (+1 = downward, -1 = upward)
            //  - crosscount tells the number of crossings

            // need at least one crossing
            if (crosscount == 0)
            {
                continue;
            }

            // check first crossing
            if (cross1 <= x)
            {
                if (pth.m_fill0 > 0) counter += dir1;
                if (pth.m_fill1 > 0) counter -= dir1;
            }

            // check optional second crossing (only possible with curves)
            if ( (crosscount > 1) && (cross2 <= x) )
            {
                if (pth.m_fill0 > 0) counter += dir2;
                if (pth.m_fill1 > 0) counter -= dir2;
            }

        }// for edge
    } // for path

    // even-odd rule: inside when the ray crossed an odd number of
    // times (the non-zero alternative is the second clause).
    return ( (even_odd && (counter % 2) != 0) ||
             (!even_odd && (counter != 0)) );
}
/// Read the whole variables stream, parsing it in '&'-separated
/// chunks as it arrives, tracking load progress in _bytesLoaded /
/// _bytesTotal, and honouring cancellation requests between reads.
void
LoadVariablesThread::completeLoad()
{
#ifdef DEBUG_LOAD_VARIABLES
    log_debug("completeLoad called");
#endif

    // TODO: how to set _bytesTotal ?
    // this is going to override any previous setting,
    // better do this inside a subclass (in a separate thread)
    _bytesLoaded = 0;
    _bytesTotal = _stream->size();

    std::string toparse; // unparsed tail carried across reads

    const size_t chunkSize = 1024;
    boost::scoped_array<char> buf(new char[chunkSize]);
    unsigned int parsedLines = 0;

    // TODO: use read_string ?
    while ( size_t bytesRead = _stream->read(buf.get(), chunkSize) )
    {
#ifdef DEBUG_LOAD_VARIABLES
        log_debug("Read %u bytes", bytesRead);
#endif

        // The very first chunk (while _bytesLoaded is still 0) may
        // carry a byte-order mark; strip it before accumulating.
        if ( _bytesLoaded )
        {
            std::string chunk(buf.get(), bytesRead);
            toparse += chunk;
        }
        else
        {
            size_t dataSize = bytesRead;
            utf8::TextEncoding encoding;
            char* ptr = utf8::stripBOM(buf.get(), dataSize, encoding);
            if ( encoding != utf8::encUTF8 &&
                    encoding != utf8::encUNSPECIFIED )
            {
                // Only UTF-8 (or unmarked) input is handled so far.
                log_unimpl("%s to utf8 conversion in "
                           "MovieClip.loadVariables "
                           "input parsing",
                           utf8::textEncodingName(encoding));
            }
            std::string chunk(ptr, dataSize);
            toparse += chunk;
        }

#ifdef DEBUG_LOAD_VARIABLES
        log_debug("toparse: %s", toparse);
#endif

        // parse remainder
        // Everything up to the last '&' is a complete run of
        // name=value pairs; keep the partial tail for the next read.
        size_t lastamp = toparse.rfind('&');
        if ( lastamp != std::string::npos )
        {
            std::string parseable = toparse.substr(0, lastamp);
#ifdef DEBUG_LOAD_VARIABLES
            log_debug("parseable: %s", parseable);
#endif
            parse(parseable);
            toparse = toparse.substr(lastamp+1);
#ifdef DEBUG_LOAD_VARIABLES
            log_debug("toparse nextline: %s", toparse);
#endif
            ++parsedLines;
        }

        _bytesLoaded += bytesRead;

        // eof, get out !
        if ( _stream->eof() ) break;

        if ( cancelRequested() )
        {
            log_debug("Cancelling LoadVariables download thread...");
            _stream.reset();
            return;
        }
    }

    // Flush whatever remained after the final '&' (or the whole
    // content if no '&' was ever seen).
    if ( ! toparse.empty() )
    {
        parse(toparse);
    }

    try {
        _stream->go_to_end();
    }
    catch (IOException& ex) {
        log_error("Stream couldn't seek to end: %s", ex.what());
    }

    // Reconcile the advertised size with what was actually read.
    _bytesLoaded = _stream->tell();
    if ( _bytesTotal != _bytesLoaded )
    {
        log_error("Size of 'variables' stream advertised to be %d bytes,"
                " but turned out to be %d bytes.",
                _bytesTotal, _bytesLoaded);
        _bytesTotal = _bytesLoaded;
    }

    _stream.reset(); // we don't need the IOChannel anymore

    //dispatchLoadEvent();
    setCompleted();
}
void RTMP::handlePacket(const RTMPPacket& packet) { const PacketType t = packet.header.packetType; log_debug("Received %s", t); switch (t) { case PACKET_TYPE_CHUNK_SIZE: handleChangeChunkSize(*this, packet); break; case PACKET_TYPE_BYTES_READ: break; case PACKET_TYPE_CONTROL: handleControl(*this, packet); break; case PACKET_TYPE_SERVERBW: handleServerBW(*this, packet); break; case PACKET_TYPE_CLIENTBW: handleClientBW(*this, packet); break; case PACKET_TYPE_AUDIO: if (!m_mediaChannel) m_mediaChannel = packet.header.channel; break; case PACKET_TYPE_VIDEO: if (!m_mediaChannel) m_mediaChannel = packet.header.channel; break; case PACKET_TYPE_FLEX_STREAM_SEND: LOG_ONCE(log_unimpl(_("unsupported packet received"))); break; case PACKET_TYPE_FLEX_SHARED_OBJECT: LOG_ONCE(log_unimpl(_("unsupported packet received"))); break; case PACKET_TYPE_FLEX_MESSAGE: { LOG_ONCE(log_unimpl(_("partially supported packet %s received"))); _messageQueue.push_back(packet.buffer); break; } case PACKET_TYPE_METADATA: handleMetadata(*this, payloadData(packet), payloadSize(packet)); break; case PACKET_TYPE_SHARED_OBJECT: LOG_ONCE(log_unimpl(_("packet %s received"))); break; case PACKET_TYPE_INVOKE: _messageQueue.push_back(packet.buffer); break; case PACKET_TYPE_FLV: _flvQueue.push_back(packet.buffer); break; default: log_error(_("Unknown packet %s received"), t); } }
// List the names of available capture cameras. Unimplemented for
// the FFmpeg media handler, so the output vector is left untouched.
void
MediaHandlerFfmpeg::cameraNames(std::vector<std::string>& /*names*/) const
{
    log_unimpl("FFmpeg: camera names");
}
// List the names of available capture cameras. Unimplemented for
// the Haiku media handler; the output vector is left untouched.
// NOTE(review): QQ looks like a project debug/trace macro -- confirm.
void
MediaHandlerHaiku::cameraNames(std::vector<std::string>& /*names*/) const
{
    QQ(2);
    log_unimpl("Haiku: camera names");
}
// Attach (map) the shared-memory segment. Unimplemented on Haiku.
//
// @return false always, since no segment can be attached yet.
bool
SharedMem::attach()
{
    log_unimpl(_("%s on Haiku"), __FUNCTION__);
    return false;
}
// Release the inter-process lock on the segment. Unimplemented on
// Haiku.
//
// @return false always.
bool
SharedMem::unlock() const
{
    log_unimpl(_("%s on Haiku"), __FUNCTION__);
    return false;
}
// Show or hide the mouse pointer over the window.
//
// Should return whether the pointer was visible before the call;
// unimplemented here, so it always claims the pointer was visible.
bool
Gui::showMouse(bool /* show */)
{
    LOG_ONCE(log_unimpl(_("Mouse show/hide not yet supported in this GUI")));
    return true;
}
// Show or hide the window's menu bar. Not implemented in this GUI;
// LOG_ONCE keeps the warning to a single emission.
void
Gui::showMenu(bool /* show */)
{
    LOG_ONCE(log_unimpl(_("Menu show/hide not yet supported in this GUI")));
}
// Show or hide the menu. The framebuffer GUI has no menu support,
// so the request is only logged and the argument ignored.
void
FBGui::showMenu(bool /*show*/)
{
    log_unimpl(_("This GUI does not yet support menus"));
}
//static void MediaParserGst::cb_pad_added(GstElement* /* element */, GstPad* new_pad, gpointer data) { MediaParserGst* parser = static_cast<MediaParserGst*>(data); GstCaps* caps = gst_pad_get_caps(new_pad); print_caps(caps); GstStructure* str = gst_caps_get_structure (caps, 0); if (!str) { log_error(_("MediaParserGst: couldn't get structure name.")); parser->link_to_fakesink(new_pad); return; } const gchar* caps_name = gst_structure_get_name (str); bool media_type_audio; if (std::equal(caps_name, caps_name+5, "audio")) { media_type_audio = true; } else if (std::equal(caps_name, caps_name+5, "video")) { media_type_audio = false; } else { log_error(_("MediaParserGst: ignoring stream of type %s."), caps_name); parser->link_to_fakesink(new_pad); return; } gboolean parsed = false; gboolean framed = false; gst_structure_get_boolean(str, "parsed", &parsed); gst_structure_get_boolean(str, "framed", &framed); bool already_parsed = parsed || framed; GstPad* final_pad = 0; if (already_parsed) { final_pad = new_pad; } else { // We'll try to find a parser, so that we will eventually receive // timestamped buffers, on which the MediaParser system relies. GstElementFactory* parserfactory = swfdec_gst_get_parser_factory (caps); if (!parserfactory) { log_error(_("MediaParserGst: Failed to find a parser (media: %s)."), caps_name); parser->link_to_fakesink(new_pad); return; } GstElement* parserel = gst_element_factory_create (parserfactory, NULL); gst_object_unref (parserfactory); if (!parserel) { log_error(_("MediaParserGst: Failed to find a parser. 
We'll continue, " "but either audio or video will not work!")); parser->link_to_fakesink(new_pad); return; } gboolean success = gst_bin_add(GST_BIN(parser->_bin), parserel); if (!success) { gst_object_unref(parserel); log_error(_("MediaParserGst: couldn't add parser.")); parser->link_to_fakesink(new_pad); return; } GstPad* sinkpad = gst_element_get_static_pad (parserel, "sink"); assert(sinkpad); GstPadLinkReturn ret = gst_pad_link(new_pad, sinkpad); gst_object_unref(GST_OBJECT(sinkpad)); if (!GST_PAD_LINK_SUCCESSFUL(ret)) { log_error(_("MediaParserGst: couldn't link parser.")); parser->link_to_fakesink(new_pad); return; } final_pad = gst_element_get_static_pad (parserel, "src"); } if (media_type_audio) { parser->_audiosink = swfdec_gst_connect_sinkpad_by_pad (final_pad, caps); if (!parser->_audiosink) { log_error(_("MediaParserGst: couldn't link \"fake\" sink.")); return; } gst_pad_set_chain_function(parser->_audiosink, MediaParserGst::cb_chain_func_audio); g_object_set_data(G_OBJECT (parser->_audiosink), "mediaparser-obj", parser); LOG_ONCE( log_unimpl("MediaParserGst won't set codec, sampleRate, " "sampleSize, stereo and duration in AudioInfo"); ); AudioInfo* audioinfo = new AudioInfo(0, 0, 0, false, 0, CODEC_TYPE_CUSTOM); audioinfo->extra.reset(new ExtraInfoGst(caps)); parser->_audioInfo.reset(audioinfo); log_debug(_("MediaParserGst: Linked audio source (type: %s)"), caps_name); } else {
/// Returns false if the data cannot be written to file. // /// If there is no data, the file is removed and the function returns true. bool SharedObject_as::flush(int space) const { /// This is called on on destruction of the SharedObject, or (allegedly) /// on a call to SharedObject.data, so _data is not guaranteed to exist. // /// The function should never be called from SharedObject.flush() when /// _data is 0. if (!_data) return false; if (space > 0) { log_unimpl("SharedObject.flush() called with a minimum disk space " "argument (%d), which is currently ignored", space); } const std::string& filespec = getFilespec(); if (!mkdirRecursive(filespec)) { log_error("Couldn't create dir for flushing SharedObject %s", filespec); return false; } #ifdef USE_SOL_READONLY log_debug(_("SharedObject %s not flushed (compiled as read-only mode)"), filespec); return false; #endif if (rcfile.getSOLReadOnly()) { log_security("Attempting to write object %s when it's SOL " "Read Only is set! Refusing...", filespec); return false; } // Open file std::ofstream ofs(filespec.c_str(), std::ios::binary); if (!ofs) { log_error("SharedObject::flush(): Failed opening file '%s' in " "binary mode", filespec.c_str()); return false; } // Encode data part. SimpleBuffer buf; if (!encodeData(_name, *_data, buf)) { std::remove(filespec.c_str()); return true; } // Encode header part. SimpleBuffer header; encodeHeader(buf.size(), header); // Write header ofs.write(reinterpret_cast<const char*>(header.data()), header.size()); if (!ofs) { log_error("Error writing SOL header"); return false; } // Write AMF data ofs.write(reinterpret_cast<const char*>(buf.data()), buf.size()); if (!ofs) { log_error("Error writing %d bytes to output file %s", buf.size(), filespec.c_str()); return false; } ofs.close(); log_security("SharedObject '%s' written to filesystem.", filespec); return true; }