// Construct a Video DisplayObject backed by an embedded
// (DefineVideoStream) video stream.
//
// @param object  backing as_object; must be non-null (asserted)
// @param def     the DefineVideoStreamTag defining this stream; must be
//                non-null (asserted)
// @param parent  parent DisplayObject in the display list
//
// On success a VideoDecoder is created for the stream's codec; on any
// failure (_decoder stays null) the object is still valid but will not
// decode embedded frames.
Video::Video(as_object* object, const SWF::DefineVideoStreamTag* def,
        DisplayObject* parent)
    :
    DisplayObject(getRoot(*object), object, parent),
    m_def(def),
    _ns(nullptr),
    _embeddedStream(m_def),
    _lastDecodedVideoFrameNum(-1),  // -1 means "nothing decoded yet"
    _lastDecodedVideoFrame(),
    _smoothing(false)
{
    assert(object);
    assert(def);

    media::MediaHandler* mh = getRunResources(*object).mediaHandler();
    if (!mh) {
        // Without a media handler we cannot build a decoder at all.
        LOG_ONCE(log_error(_("No Media handler registered, "
                "won't be able to decode embedded video")) );
        return;
    }

    media::VideoInfo* info = m_def->getVideoInfo();
    // No codec/format information in the tag: nothing to decode.
    if (!info) return;

    try {
        _decoder = mh->createVideoDecoder(*info);
    }
    catch (const MediaException& e) {
        // Leave _decoder null; getVideoFrame() checks for this.
        log_error(_("Could not create Video Decoder: %s"), e.what());
    }
}
/// A wrapper around a VaapiGlobalContext to ensure it's free'd on destruction. VaapiGlobalContext *VaapiGlobalContext::get() { LOG_ONCE(GNASH_REPORT_FUNCTION); static std::auto_ptr<VaapiGlobalContext> vaapi_global_context; if (!vaapi_global_context.get()) { std::auto_ptr<VaapiDisplay> dpy; /* XXX: this won't work with multiple renders built-in */ try { #if HAVE_VA_VA_GLX_H dpy.reset(new VaapiDisplayGLX()); #else dpy.reset(new VaapiDisplayX11()); #endif if (!dpy.get()) { return NULL; } vaapi_global_context.reset(new VaapiGlobalContext(dpy)); } catch (...) { vaapi_set_is_enabled(false); return NULL; } } return vaapi_global_context.get(); }
/// ActionScript FileReference constructor.
///
/// The constructor takes no meaningful arguments; anything passed by the
/// SWF is discarded, and a single warning is logged so authors can notice.
as_value
filereference_ctor(const fn_call& fn)
{
    // Common case: no arguments, nothing to report.
    if (!fn.nargs) return as_value();

    std::stringstream ss;
    fn.dump_args(ss);
    LOG_ONCE(log_unimpl("FileReference(%s): %s", ss.str(),
                _("arguments discarded")));

    return as_value();
}
// Hit-test the point (x, y) against a shape's paths: returns true when
// the point lies inside a fill or on (near) a stroked outline.
//
// @param paths       the shape's path list (fill sides and line styles)
// @param lineStyles  line style table; Path::m_line is a 1-based index
//                    into this vector
// @param x, y        the test point, in shape (twips) coordinates
// @param wm          world matrix; only its scale factors are used, to
//                    widen the hit area of non-scaling strokes
// @return true on a fill or stroke hit, false otherwise
bool pointTest(const std::vector<Path>& paths,
        const std::vector<LineStyle>& lineStyles, boost::int32_t x,
        boost::int32_t y, const SWFMatrix& wm)
{
    /*
    Principle:
    For the fill of the shape, we project a ray from the test point to the
    left side of the shape counting all crossings. When a line or curve
    segment is crossed we add 1 if the left fill style is set. Regardless of
    the left fill style we subtract 1 from the counter then the right fill
    style is set. This is true when the line goes in downward direction. If
    it goes upward, the fill styles are reversed. The final counter value
    reveals if the point is inside the shape (and depends on filling rule,
    see below). This method should not depend on subshapes and work for some
    malformed shapes situations:
    - wrong fill side (eg. left side set for a clockwise drawen rectangle)
    - intersecting paths
    */
    point pt(x, y);

    // later we will need non-zero for glyphs... (TODO)
    bool even_odd = true;

    unsigned npaths = paths.size();
    int counter = 0;

    // browse all paths
    for (unsigned pno=0; pno<npaths; pno++) {

        const Path& pth = paths[pno];
        unsigned nedges = pth.m_edges.size();

        // Pen starts at the path's anchor point.
        float next_pen_x = pth.ap.x;
        float next_pen_y = pth.ap.y;
        float pen_x, pen_y;

        if (pth.empty()) continue;

        // If the path has a line style, check for strokes there
        if (pth.m_line != 0 ) {
            assert(lineStyles.size() >= pth.m_line);
            const LineStyle& ls = lineStyles[pth.m_line-1];
            double thickness = ls.getThickness();
            if (! thickness ) {
                thickness = 20; // at least ONE PIXEL thick.
            } else if ((!ls.scaleThicknessVertically()) &&
                    (!ls.scaleThicknessHorizontally()) ) {
                // Non-scaling stroke: widen the hit area by the world
                // scale so it matches the rendered thickness.
                // TODO: pass the SWFMatrix to withinSquareDistance instead ?
                double xScale = wm.get_x_scale();
                double yScale = wm.get_y_scale();
                thickness *= std::max(xScale, yScale);
            } else if (ls.scaleThicknessVertically() !=
                    ls.scaleThicknessHorizontally()) {
                LOG_ONCE(log_unimpl(_("Collision detection for "
                                "unidirectionally scaled strokes")));
            }

            // Compare squared distances to avoid a sqrt per edge.
            double dist = thickness / 2.0;
            double sqdist = dist * dist;
            if (pth.withinSquareDistance(pt, sqdist))
                return true;
        }

        // browse all edges of the path
        for (unsigned eno=0; eno<nedges; eno++) {

            const Edge& edg = pth.m_edges[eno];

            // Advance the pen: segment runs pen -> edge anchor.
            pen_x = next_pen_x;
            pen_y = next_pen_y;
            next_pen_x = edg.ap.x;
            next_pen_y = edg.ap.y;

            float cross1 = 0.0, cross2 = 0.0;
            int dir1 = 0, dir2 = 0; // +1 = downward, -1 = upward
            int crosscount = 0;

            if (edg.straight()) {
                // ignore horizontal lines
                // TODO: better check for small difference?
                if (edg.ap.y == pen_y) {
                    continue;
                }
                // does this line cross the Y coordinate?
                if ( ((pen_y <= y) && (edg.ap.y >= y)) ||
                        ((pen_y >= y) && (edg.ap.y <= y)) ) {

                    // calculate X crossing
                    cross1 = pen_x + (edg.ap.x - pen_x) *
                        (y - pen_y) / (edg.ap.y - pen_y);

                    if (pen_y > edg.ap.y)
                        dir1 = -1;  // upward
                    else
                        dir1 = +1;  // downward

                    crosscount = 1;
                } else {
                    // no crossing found
                    crosscount = 0;
                }
            } else {
                // ==> curve case
                // A quadratic curve can cross the scanline at 0, 1 or 2
                // X positions.
                crosscount = curve_x_crossings<float>(pen_x, pen_y,
                        edg.ap.x, edg.ap.y, edg.cp.x, edg.cp.y,
                        y, cross1, cross2);
                dir1 = pen_y > y ? -1 : +1;
                dir2 = dir1 * (-1); // second crossing always in opposite dir.
            } // curve

            // ==> we have now:
            //  - one (cross1) or two (cross1, cross2) ray crossings
            //    (X coordinate)
            //  - dir1/dir2 tells the direction of the crossing
            //    (+1 = downward, -1 = upward)
            //  - crosscount tells the number of crossings

            // need at least one crossing
            if (crosscount == 0) {
                continue;
            }

            // check first crossing (only crossings left of the point count)
            if (cross1 <= x) {
                if (pth.m_fill0 > 0) counter += dir1;
                if (pth.m_fill1 > 0) counter -= dir1;
            }

            // check optional second crossing (only possible with curves)
            if ( (crosscount > 1) && (cross2 <= x) ) {
                if (pth.m_fill0 > 0) counter += dir2;
                if (pth.m_fill1 > 0) counter -= dir2;
            }

        } // for edge
    } // for path

    // even_odd is currently always true: inside iff the crossing count
    // is odd. The non-zero rule branch is kept for future glyph support.
    return ( (even_odd && (counter % 2) != 0) ||
             (!even_odd && (counter != 0)) );
}
/// Show or hide the GUI menu (base-class stub).
///
/// This backend cannot toggle the menu; the request is ignored and a
/// warning is logged once.
void
Gui::showMenu(bool /* show */)
{
    LOG_ONCE(log_unimpl(_("Menu show/hide not yet supported in this GUI")));
}
/// Show or hide the mouse pointer (base-class stub).
///
/// This backend cannot change pointer visibility; a warning is logged
/// once and true is returned unconditionally.
bool
Gui::showMouse(bool /* show */)
{
    LOG_ONCE(log_unimpl(_("Mouse show/hide not yet supported in this GUI")));
    return true;
}
/// Hide the GUI menu (base-class stub).
///
/// This backend cannot toggle the menu; the request is ignored and a
/// warning is logged once.
void
Gui::hideMenu()
{
    LOG_ONCE(log_unimpl(_("Menu show/hide not yet supported in this GUI")));
}
void RTMP::handlePacket(const RTMPPacket& packet) { const PacketType t = packet.header.packetType; log_debug("Received %s", t); switch (t) { case PACKET_TYPE_CHUNK_SIZE: handleChangeChunkSize(*this, packet); break; case PACKET_TYPE_BYTES_READ: break; case PACKET_TYPE_CONTROL: handleControl(*this, packet); break; case PACKET_TYPE_SERVERBW: handleServerBW(*this, packet); break; case PACKET_TYPE_CLIENTBW: handleClientBW(*this, packet); break; case PACKET_TYPE_AUDIO: if (!m_mediaChannel) m_mediaChannel = packet.header.channel; break; case PACKET_TYPE_VIDEO: if (!m_mediaChannel) m_mediaChannel = packet.header.channel; break; case PACKET_TYPE_FLEX_STREAM_SEND: LOG_ONCE(log_unimpl(_("unsupported packet received"))); break; case PACKET_TYPE_FLEX_SHARED_OBJECT: LOG_ONCE(log_unimpl(_("unsupported packet received"))); break; case PACKET_TYPE_FLEX_MESSAGE: { LOG_ONCE(log_unimpl(_("partially supported packet %s received"))); _messageQueue.push_back(packet.buffer); break; } case PACKET_TYPE_METADATA: handleMetadata(*this, payloadData(packet), payloadSize(packet)); break; case PACKET_TYPE_SHARED_OBJECT: LOG_ONCE(log_unimpl(_("packet %s received"))); break; case PACKET_TYPE_INVOKE: _messageQueue.push_back(packet.buffer); break; case PACKET_TYPE_FLV: _flvQueue.push_back(packet.buffer); break; default: log_error(_("Unknown packet %s received"), t); } }
/// Return the image to display for this Video at the current frame.
///
/// Frames come either from an attached NetStream (_ns) or from the
/// embedded VideoFrame tags of the defining stream. The most recently
/// decoded frame is cached in _lastDecodedVideoFrame and returned again
/// when the timeline position (ratio) has not advanced.
///
/// @return the last decoded frame, or null if nothing has been decoded
///         (e.g. no decoder could be constructed).
image::GnashImage*
Video::getVideoFrame()
{
    // If this is a video from a NetStream_as object, retrieve a video
    // frame from there.
    if (_ns) {
        std::unique_ptr<image::GnashImage> tmp = _ns->get_video();
        if (tmp.get()) _lastDecodedVideoFrame = std::move(tmp);
    }

    // If this is a video from a VideoFrame tag, retrieve a video frame
    // from there.
    else if (_embeddedStream) {

        // Don't try to do anything if there is no decoder. If it was
        // never constructed (most likely), we'll return nothing,
        // otherwise the last decoded frame.
        if (!_decoder.get()) {
            LOG_ONCE(log_error(_("No Video info in video definition")));
            return _lastDecodedVideoFrame.get();
        }

        // The current timeline position acts as the frame number.
        const std::uint16_t current_frame = get_ratio();

#ifdef DEBUG_EMBEDDED_VIDEO_DECODING
        log_debug("Video instance %s need display video frame (ratio) %d",
                getTarget(), current_frame);
#endif

        // If current frame is the same then last decoded
        // we don't need to decode more
        if (_lastDecodedVideoFrameNum >= 0 &&
                _lastDecodedVideoFrameNum == current_frame) {
#ifdef DEBUG_EMBEDDED_VIDEO_DECODING
            log_debug("  current frame == _lastDecodedVideoFrameNum (%d)",
                    current_frame);
#endif
            return _lastDecodedVideoFrame.get();
        }

        // TODO: find a better way than using -1 to show that no
        // frames have been decoded yet.
        assert(_lastDecodedVideoFrameNum >= -1);
        std::uint16_t from_frame = _lastDecodedVideoFrameNum + 1;

        // If current frame is smaller then last decoded frame
        // we restart decoding from scratch
        if (current_frame < static_cast<size_t>(_lastDecodedVideoFrameNum)) {
#ifdef DEBUG_EMBEDDED_VIDEO_DECODING
            log_debug("  current frame (%d) < _lastDecodedVideoFrameNum (%d)",
                    current_frame, _lastDecodedVideoFrameNum);
#endif
            from_frame = 0;
        }

        // Reset last decoded video frame number now, so it's correct
        // on early return (ie: nothing more to decode)
        _lastDecodedVideoFrameNum = current_frame;

#ifdef DEBUG_EMBEDDED_VIDEO_DECODING
        log_debug("  decoding embedded frames from %d to %d "
                "for Video object %s", from_frame,
                current_frame, getTarget());
#endif

        // Feed every frame in [from_frame, current_frame] to the decoder.
        const size_t frames = m_def->visitSlice(
                std::bind(std::mem_fn(&media::VideoDecoder::push),
                    _decoder.get(), std::placeholders::_1),
                from_frame, current_frame);

        // Nothing new was pushed: keep showing the cached frame.
        if (!frames) return _lastDecodedVideoFrame.get();

        _lastDecodedVideoFrame = _decoder->pop();
    }

    return _lastDecodedVideoFrame.get();
}
/// Sound.setTransform — not implemented.
///
/// Logs the unimplemented method name once; otherwise a no-op.
void
Sound_as::setTransform()
{
    LOG_ONCE(log_unimpl(__FUNCTION__));
}
/// Sound.setPan — not implemented.
///
/// Logs the unimplemented method name once; otherwise a no-op.
void
Sound_as::setPan()
{
    LOG_ONCE(log_unimpl(__FUNCTION__));
}
/*
 * VThreadBaseSimpleNoID --
 *
 *    Assign a VThreadID to the current (so far unregistered) thread and
 *    register it via VThreadBase_InitWithTLS. IDs of threads that have
 *    exited are reclaimed from the native hash table before a brand-new
 *    ID is allocated.
 */
static void
VThreadBaseSimpleNoID(void)
{
   VThreadID newID;
   Bool reused = FALSE;
   Bool result;
   void *newNative = VThreadBaseGetNative();
   HashTable *ht = VThreadBaseGetNativeHash();
   VThreadBaseData *base;

   /* Require key allocation before TLS read */
   VThreadBaseGetKey();

   /* Before allocating a new ID, try to reclaim any old IDs. */
   for (newID = 0;
        newID < Atomic_Read(&vthreadBaseGlobals.dynamicID);
        newID++) {
      void *newKey = (void *)(uintptr_t)newID;

      /*
       * Windows: an entry is reclaimable if its stored native handle is
       * NULL or its thread is no longer alive. The check is slightly
       * racy, but the race would only cause missing a reclaim, which
       * isn't a problem. The claim itself is made atomic by
       * ReplaceIfEqual (compare-and-swap on the table entry).
       * Posix: thread exit is hooked (via TLS destructor) and sets
       * entries to NULL, so any entry that is NULL is reclaimable.
       */
#ifdef _WIN32
      void *oldNative;
      reused = HashTable_Lookup(ht, newKey, &oldNative) &&
               (oldNative == NULL ||
                !VThreadBaseNativeIsAlive(oldNative)) &&
               HashTable_ReplaceIfEqual(ht, newKey, oldNative, newNative);
#else
      reused = HashTable_ReplaceIfEqual(ht, newKey, NULL, newNative);
#endif
      if (reused) {
         break;
      }
   }

   if (!reused) {
      void *newKey;

      newID = Atomic_FetchAndInc(&vthreadBaseGlobals.dynamicID);
      /*
       * Detect VThreadID overflow (~0 is used as a sentinel).
       * Leave a space of ~10 IDs, since the increment and bounds-check
       * are not atomic.
       */
      ASSERT_NOT_IMPLEMENTED(newID < VTHREAD_INVALID_ID - 10);

      newKey = (void *)(uintptr_t)newID;
      result = HashTable_Insert(ht, newKey, newNative);
      ASSERT_NOT_IMPLEMENTED(result);
   }

   /* ID picked. Now do the important stuff. */
   base = Util_SafeCalloc(1, sizeof *base);
   base->id = newID;
   Str_Sprintf(base->name, sizeof base->name, "vthread-%u", newID);

   result = VThreadBase_InitWithTLS(base);
   ASSERT(result);

   if (vmx86_debug && reused) {
      Log("VThreadBase reused VThreadID %d.\n", newID);
   }

   if (Atomic_Read(&vthreadBaseGlobals.numThreads) > 1) {
      LOG_ONCE(("VThreadBase detected multiple threads.\n"));
   }
}