From 6af08c309e695161c05b89179046a1f518e2e03f Mon Sep 17 00:00:00 2001 From: bossiel Date: Wed, 7 Sep 2011 18:30:46 +0000 Subject: [PATCH] - Add support for H.264 Full HD (1080p) using NVIDIA Cuda - Begin adding support for video jitter buffer (will be used to give feedbacks for packet loss-FEC-) - Move video flipping code to the converter (refactoring) - Fix issue 62, issue 41 and issue 66 - Fix issues (workaround) on VP8 (frame corruption) - Update contribution list --- .../bindings/_common/ProxyConsumer.cxx | 71 +- .../doubango/bindings/_common/ProxyConsumer.h | 3 + .../bindings/_common/ProxyProducer.cxx | 2 +- .../2.0/doubango/bindings/_common/SipStack.i | 28 +- .../bindings/csharp/ProxyVideoConsumer.cs | 10 + .../bindings/csharp/tdav_codec_id_t.cs | 7 +- .../bindings/csharp/tinyWRAPPINVOKE.cs | 6 + .../bindings/csharp/tinyWRAP_wrap.cxx | 28 + .../bindings/csharp/tmedia_chroma_t.cs | 21 +- .../bindings/java/ProxyVideoConsumer.java | 8 + .../java/android/ProxyVideoConsumer.java | 8 + .../java/android/tdav_codec_id_t.java | 7 +- .../bindings/java/android/tinyWRAPJNI.java | 2 + .../bindings/java/android/tinyWRAP_wrap.cxx | 36 + .../java/android/tmedia_chroma_t.java | 21 +- .../bindings/java/tdav_codec_id_t.java | 7 +- .../doubango/bindings/java/tinyWRAPJNI.java | 2 + .../doubango/bindings/java/tinyWRAP_wrap.cxx | 36 + .../bindings/java/tmedia_chroma_t.java | 21 +- .../2.0/doubango/bindings/perl/tinyWRAP.pm | 24 +- .../doubango/bindings/perl/tinyWRAP_wrap.cxx | 126 +- .../2.0/doubango/bindings/python/tinyWRAP.py | 24 +- .../bindings/python/tinyWRAP_wrap.cxx | 84 +- branches/2.0/doubango/contributors.txt | 4 + .../thirdparties/win32/include/cuda/types.h | 241 ++++ branches/2.0/doubango/tinyDAV/droid-makefile | 6 +- .../tinydav/audio/tdav_speex_jitterbuffer.h | 2 +- .../tinydav/codecs/h264/tdav_codec_h264.h | 45 +- .../codecs/h264/tdav_codec_h264_common.h | 160 +++ .../codecs/h264/tdav_codec_h264_cuda.h | 126 ++ .../tinydav/codecs/h264/tdav_codec_h264_rtp.h | 8 +- 
.../tinydav/codecs/vpx/tdav_codec_vp8.h | 1 + .../doubango/tinyDAV/include/tinydav/tdav.h | 7 +- .../tinydav/video/tdav_consumer_video.h | 71 ++ .../tinydav/video/tdav_converter_video.h | 12 +- .../tinydav/video/tdav_video_jitterbuffer.h | 77 ++ .../tinyDAV/src/audio/tdav_consumer_audio.c | 2 +- .../src/audio/tdav_speakup_jitterbuffer.c | 21 +- .../src/audio/tdav_speex_jitterbuffer.c | 10 +- .../tinyDAV/src/codecs/h261/tdav_codec_h261.c | 8 - .../tinyDAV/src/codecs/h263/tdav_codec_h263.c | 13 - .../tinyDAV/src/codecs/h264/tdav_codec_h264.c | 216 ++-- .../src/codecs/h264/tdav_codec_h264_cuda.cxx | 1129 +++++++++++++++++ .../src/codecs/h264/tdav_codec_h264_rtp.c | 27 +- .../src/codecs/mp4ves/tdav_codec_mp4ves.c | 10 +- .../src/codecs/theora/tdav_codec_theora.c | 9 +- .../tinyDAV/src/codecs/vpx/tdav_codec_vp8.c | 150 ++- branches/2.0/doubango/tinyDAV/src/tdav.c | 72 +- .../tinyDAV/src/video/tdav_consumer_video.c | 203 +++ .../tinyDAV/src/video/tdav_converter_video.c | 115 +- .../tinyDAV/src/video/tdav_session_video.c | 47 +- .../src/video/tdav_video_jitterbuffer.c | 296 +++++ .../2.0/doubango/tinyDAV/test/test_sessions.h | 2 +- branches/2.0/doubango/tinyDAV/tinyDAV.vcproj | 38 +- .../include/tinydshow/DSCaptureGraph.h | 1 - .../doubango/tinyDSHOW/src/DSCaptureGraph.cxx | 26 +- .../2.0/doubango/tinyDSHOW/src/DSGrabber.cxx | 18 +- .../doubango/tinyDSHOW/src/DSOutputStream.cxx | 4 +- .../tinyDSHOW/src/plugin/DSConsumer.cxx | 2 +- .../tinyDSHOW/src/plugin/DSProducer.cxx | 7 +- .../include/tinymedia/tmedia_codec.h | 15 +- .../include/tinymedia/tmedia_common.h | 21 +- .../include/tinymedia/tmedia_consumer.h | 8 +- .../include/tinymedia/tmedia_jitterbuffer.h | 19 +- .../include/tinymedia/tmedia_producer.h | 2 +- .../2.0/doubango/tinyMEDIA/src/tmedia_codec.c | 18 +- .../doubango/tinyMEDIA/src/tmedia_common.c | 20 +- .../doubango/tinyMEDIA/src/tmedia_consumer.c | 2 +- .../tinyMEDIA/src/tmedia_jitterbuffer.c | 126 +- .../2.0/doubango/tinyNET/src/tnet_socket.c | 2 +- 
.../tinyNET/src/tnet_transport_win32.c | 2 - .../2.0/doubango/tinyNET/src/tnet_utils.h | 15 +- .../2.0/doubango/tinyRTP/src/trtp_manager.c | 6 +- .../2.0/doubango/tinySAK/src/tsk_common.h | 7 +- .../2.0/doubango/tinySAK/src/tsk_semaphore.c | 11 +- .../2.0/doubango/tinySAK/src/tsk_semaphore.h | 1 + .../include/tinysip/dialogs/tsip_dialog.h | 5 +- .../tinySIP/src/dialogs/tsip_dialog.c | 6 +- .../tinySIP/src/dialogs/tsip_dialog_invite.c | 9 +- .../tinySIP/src/transports/tsip_transport.c | 3 +- 80 files changed, 3446 insertions(+), 620 deletions(-) create mode 100644 branches/2.0/doubango/thirdparties/win32/include/cuda/types.h create mode 100644 branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_common.h create mode 100644 branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_cuda.h create mode 100644 branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_consumer_video.h create mode 100644 branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_video_jitterbuffer.h create mode 100644 branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx create mode 100644 branches/2.0/doubango/tinyDAV/src/video/tdav_consumer_video.c create mode 100644 branches/2.0/doubango/tinyDAV/src/video/tdav_video_jitterbuffer.c diff --git a/branches/2.0/doubango/bindings/_common/ProxyConsumer.cxx b/branches/2.0/doubango/bindings/_common/ProxyConsumer.cxx index 6f9c493a..c64da7b1 100644 --- a/branches/2.0/doubango/bindings/_common/ProxyConsumer.cxx +++ b/branches/2.0/doubango/bindings/_common/ProxyConsumer.cxx @@ -35,6 +35,8 @@ #include "tsk_debug.h" #include "tinydav/audio/tdav_consumer_audio.h" +#include "tinydav/video/tdav_consumer_video.h" + /* ============ Audio Consumer Interface ================= */ @@ -267,7 +269,7 @@ bool ProxyAudioConsumer::setPullBuffer(const void* pPullBufferPtr, unsigned nPul unsigned ProxyAudioConsumer::pull(void* _pOutput/*=tsk_null*/, unsigned _nSize/*=0*/) { - if(m_pWrappedPlugin){ + 
if((m_pWrappedPlugin = (twrap_consumer_proxy_audio_t*)tsk_object_ref(m_pWrappedPlugin))){ void* pOutput; unsigned nSize; if(_pOutput && _nSize){ @@ -290,6 +292,8 @@ unsigned ProxyAudioConsumer::pull(void* _pOutput/*=tsk_null*/, unsigned _nSize/* } tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(m_pWrappedPlugin)); + + m_pWrappedPlugin = (twrap_consumer_proxy_audio_t*)tsk_object_unref(m_pWrappedPlugin); return nRetSize; } return 0; @@ -364,7 +368,7 @@ bool ProxyAudioConsumer::registerPlugin() typedef struct twrap_consumer_proxy_video_s { - TMEDIA_DECLARE_CONSUMER; + TDAV_DECLARE_CONSUMER_VIDEO; uint64_t id; tsk_bool_t started; @@ -386,7 +390,7 @@ int twrap_consumer_proxy_video_prepare(tmedia_consumer_t* self, const tmedia_cod if((videoConsumer = manager->findVideoConsumer(TWRAP_CONSUMER_PROXY_VIDEO(self)->id)) && videoConsumer->getCallback()){ self->video.fps = TMEDIA_CODEC_VIDEO(codec)->in.fps; // in - self->video.in.chroma = tmedia_yuv420p; + self->video.in.chroma = tmedia_chroma_yuv420p; self->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width; self->video.in.height = TMEDIA_CODEC_VIDEO(codec)->in.height; // display (out) @@ -433,14 +437,19 @@ int twrap_consumer_proxy_video_consume(tmedia_consumer_t* self, const void* buff if((manager = ProxyPluginMgr::getInstance())){ const ProxyVideoConsumer* videoConsumer; if((videoConsumer = manager->findVideoConsumer(TWRAP_CONSUMER_PROXY_VIDEO(self)->id)) && videoConsumer->getCallback()){ - if(videoConsumer->hasConsumeBuffer()){ - unsigned nCopiedSize = videoConsumer->copyBuffer(buffer, size); - ret = videoConsumer->getCallback()->bufferCopied(nCopiedSize, size); + if(tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(self))){ + ret = tdav_consumer_video_put(TDAV_CONSUMER_VIDEO(self), buffer, size, proto_hdr); } else{ - ProxyVideoFrame* frame = new ProxyVideoFrame(buffer, size); - ret = videoConsumer->getCallback()->consume(frame); - delete frame, frame = tsk_null; + if(videoConsumer->hasConsumeBuffer()){ + unsigned nCopiedSize = 
videoConsumer->copyBuffer(buffer, size); + ret = videoConsumer->getCallback()->bufferCopied(nCopiedSize, size); + } + else{ + ProxyVideoFrame* frame = new ProxyVideoFrame(buffer, size); + ret = videoConsumer->getCallback()->consume(frame); + delete frame, frame = tsk_null; + } } } else{ @@ -490,7 +499,7 @@ static tsk_object_t* twrap_consumer_proxy_video_ctor(tsk_object_t * self, va_lis twrap_consumer_proxy_video_t *consumer = (twrap_consumer_proxy_video_t *)self; if(consumer){ /* init base */ - tmedia_consumer_init(TMEDIA_CONSUMER(consumer)); + tdav_consumer_video_init(TDAV_CONSUMER_VIDEO(consumer)); /* init self */ /* Add the plugin to the manager */ @@ -516,7 +525,7 @@ static tsk_object_t* twrap_consumer_proxy_video_dtor(tsk_object_t * self) } /* deinit base */ - tmedia_consumer_deinit(TMEDIA_CONSUMER(consumer)); + tdav_consumer_video_deinit(TDAV_CONSUMER_VIDEO(consumer)); /* deinit self */ /* Remove plugin from the manager */ @@ -558,7 +567,7 @@ TINYWRAP_GEXTERN const tmedia_consumer_plugin_def_t *twrap_consumer_proxy_video_ /* ============ ProxyVideoConsumer Class ================= */ -tmedia_chroma_t ProxyVideoConsumer::s_eDefaultChroma = tmedia_rgb565le; +tmedia_chroma_t ProxyVideoConsumer::s_eDefaultChroma = tmedia_chroma_rgb565le; bool ProxyVideoConsumer::s_bAutoResizeDisplay = false; ProxyVideoConsumer::ProxyVideoConsumer(tmedia_chroma_t eChroma, struct twrap_consumer_proxy_video_s* pConsumer) @@ -653,6 +662,44 @@ unsigned ProxyVideoConsumer::copyBuffer(const void* pBuffer, unsigned nSize)cons return nRetsize; } +unsigned ProxyVideoConsumer::pull(void* pOutput, unsigned nSize) +{ + if(pOutput && nSize && (m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_ref(m_pWrappedPlugin))){ + tsk_size_t nRetSize = 0; + + if(!tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin))){ + TSK_DEBUG_ERROR("This consumer doesn't hold any jitter buffer.\n\nTo pull a buffer you must register a callback ('class ProxyVideoConsumerCallback') and listen for 
either 'consume' or 'bufferCopied' functions"); + goto done; + } + + nRetSize = tdav_consumer_video_get(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin), pOutput, nSize); + + tdav_consumer_video_tick(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin)); + +done: + m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_unref(m_pWrappedPlugin); + return nRetSize; + } + return 0; +} + +bool ProxyVideoConsumer::reset() +{ + bool ret = false; + if((m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_ref(m_pWrappedPlugin))){ + if(tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin))){ + ret = (tdav_consumer_video_reset(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin)) == 0); + } + else{ + TSK_DEBUG_ERROR("This consumer doesn't hold any jitter buffer"); + } + m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_unref(m_pWrappedPlugin); + } + + TSK_DEBUG_ERROR("This consumer doesn't wrap any plugin"); + return ret; +} + bool ProxyVideoConsumer::registerPlugin() { /* HACK: Unregister all other video plugins */ diff --git a/branches/2.0/doubango/bindings/_common/ProxyConsumer.h b/branches/2.0/doubango/bindings/_common/ProxyConsumer.h index 86cccfd5..a9a601ab 100644 --- a/branches/2.0/doubango/bindings/_common/ProxyConsumer.h +++ b/branches/2.0/doubango/bindings/_common/ProxyConsumer.h @@ -105,6 +105,7 @@ public: virtual int prepare(int nWidth, int nHeight, int nFps) { return -1; } virtual int consume(const ProxyVideoFrame* frame) { return -1; } + // only called if a buffer is registered using setPullBuffer(). 
Otherwise, consume() will be called virtual int bufferCopied(unsigned nCopiedSize, unsigned nAvailableSize) { return -1; } virtual int start() { return -1; } virtual int pause() { return -1; } @@ -128,6 +129,8 @@ public: bool setAutoResizeDisplay(bool bAutoResizeDisplay); bool getAutoResizeDisplay()const; bool setConsumeBuffer(const void* pConsumeBufferPtr, unsigned nConsumeBufferSize); + unsigned pull(void* pOutput, unsigned nSize); + bool reset(); #if !defined(SWIG) bool hasConsumeBuffer()const { return m_ConsumeBuffer.pConsumeBufferPtr && m_ConsumeBuffer.nConsumeBufferSize; } diff --git a/branches/2.0/doubango/bindings/_common/ProxyProducer.cxx b/branches/2.0/doubango/bindings/_common/ProxyProducer.cxx index 3623fe34..aa472d95 100644 --- a/branches/2.0/doubango/bindings/_common/ProxyProducer.cxx +++ b/branches/2.0/doubango/bindings/_common/ProxyProducer.cxx @@ -426,7 +426,7 @@ TINYWRAP_GEXTERN const tmedia_producer_plugin_def_t *twrap_producer_proxy_video_ /* ============ ProxyVideoProducer Class ================= */ -tmedia_chroma_t ProxyVideoProducer::s_eDefaultChroma = tmedia_nv21; +tmedia_chroma_t ProxyVideoProducer::s_eDefaultChroma = tmedia_chroma_nv21; ProxyVideoProducer::ProxyVideoProducer(tmedia_chroma_t eChroma, struct twrap_producer_proxy_video_s* pProducer) :m_pCallback(tsk_null), m_eChroma(eChroma), m_nRotation(0), m_pWrappedPlugin(pProducer), ProxyPlugin(twrap_proxy_plugin_video_producer) diff --git a/branches/2.0/doubango/bindings/_common/SipStack.i b/branches/2.0/doubango/bindings/_common/SipStack.i index 8c40eb45..39e34bf6 100644 --- a/branches/2.0/doubango/bindings/_common/SipStack.i +++ b/branches/2.0/doubango/bindings/_common/SipStack.i @@ -178,16 +178,17 @@ tsip_invite_event_type_t; // used by tinyWRAP typedef enum tmedia_chroma_e { - tmedia_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. 
WindowsPhone7 - tmedia_bgr24, // used by windows consumer (DirectShow) - - tmedia_rgb32, // used by iOS4 consumer (iPhone and iPod touch) - tmedia_rgb565le, // (used by both android and wince consumers) - tmedia_rgb565be, - tmedia_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4) - tmedia_nv21, // Yuv420 SP (used by android producer) - tmedia_yuv422p, - tmedia_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G) - tmedia_yuv420p, // Default + tmedia_chroma_none=0, + tmedia_chroma_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7 + tmedia_chroma_bgr24, // used by windows consumer (DirectShow) - + tmedia_chroma_rgb32, // used by iOS4 consumer (iPhone and iPod touch) + tmedia_chroma_rgb565le, // (used by both android and wince consumers) + tmedia_chroma_rgb565be, + tmedia_chroma_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4) + tmedia_chroma_nv21, // Yuv420 SP (used by android producer) + tmedia_chroma_yuv422p, + tmedia_chroma_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G) + tmedia_chroma_yuv420p, // Default } tmedia_chroma_t; @@ -255,8 +256,9 @@ typedef enum tdav_codec_id_e tdav_codec_id_h264_bp10 = 0x00010000<<4, tdav_codec_id_h264_bp20 = 0x00010000<<5, tdav_codec_id_h264_bp30 = 0x00010000<<6, - tdav_codec_id_theora = 0x00010000<<7, - tdav_codec_id_mp4ves_es = 0x00010000<<8, - tdav_codec_id_vp8 = 0x00010000<<9, + tdav_codec_id_h264_svc = 0x00010000<<7, + tdav_codec_id_theora = 0x00010000<<8, + tdav_codec_id_mp4ves_es = 0x00010000<<9, + tdav_codec_id_vp8 = 0x00010000<<10, } tdav_codec_id_t; \ No newline at end of file diff --git a/branches/2.0/doubango/bindings/csharp/ProxyVideoConsumer.cs b/branches/2.0/doubango/bindings/csharp/ProxyVideoConsumer.cs index d5b6bc38..1863ee0d 100644 --- a/branches/2.0/doubango/bindings/csharp/ProxyVideoConsumer.cs +++ b/branches/2.0/doubango/bindings/csharp/ProxyVideoConsumer.cs @@ -72,6 +72,16 @@ public class ProxyVideoConsumer : ProxyPlugin { 
return ret; } + public uint pull(byte[] pOutput, uint nSize) { + uint ret = tinyWRAPPINVOKE.ProxyVideoConsumer_pull(swigCPtr, pOutput, nSize); + return ret; + } + + public bool reset() { + bool ret = tinyWRAPPINVOKE.ProxyVideoConsumer_reset(swigCPtr); + return ret; + } + public virtual ulong getMediaSessionId() { ulong ret = tinyWRAPPINVOKE.ProxyVideoConsumer_getMediaSessionId(swigCPtr); return ret; diff --git a/branches/2.0/doubango/bindings/csharp/tdav_codec_id_t.cs b/branches/2.0/doubango/bindings/csharp/tdav_codec_id_t.cs index 6da7b27f..5d68aaa2 100644 --- a/branches/2.0/doubango/bindings/csharp/tdav_codec_id_t.cs +++ b/branches/2.0/doubango/bindings/csharp/tdav_codec_id_t.cs @@ -33,9 +33,10 @@ public enum tdav_codec_id_t { tdav_codec_id_h264_bp10 = 0x00010000 << 4, tdav_codec_id_h264_bp20 = 0x00010000 << 5, tdav_codec_id_h264_bp30 = 0x00010000 << 6, - tdav_codec_id_theora = 0x00010000 << 7, - tdav_codec_id_mp4ves_es = 0x00010000 << 8, - tdav_codec_id_vp8 = 0x00010000 << 9 + tdav_codec_id_h264_svc = 0x00010000 << 7, + tdav_codec_id_theora = 0x00010000 << 8, + tdav_codec_id_mp4ves_es = 0x00010000 << 9, + tdav_codec_id_vp8 = 0x00010000 << 10 } } diff --git a/branches/2.0/doubango/bindings/csharp/tinyWRAPPINVOKE.cs b/branches/2.0/doubango/bindings/csharp/tinyWRAPPINVOKE.cs index 722c5e25..f2fd4055 100644 --- a/branches/2.0/doubango/bindings/csharp/tinyWRAPPINVOKE.cs +++ b/branches/2.0/doubango/bindings/csharp/tinyWRAPPINVOKE.cs @@ -1028,6 +1028,12 @@ class tinyWRAPPINVOKE { [DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_setConsumeBuffer")] public static extern bool ProxyVideoConsumer_setConsumeBuffer(HandleRef jarg1, byte[] jarg2, uint jarg3); + [DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_pull")] + public static extern uint ProxyVideoConsumer_pull(HandleRef jarg1, byte[] jarg2, uint jarg3); + + [DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_reset")] + public static extern bool ProxyVideoConsumer_reset(HandleRef jarg1); 
+ [DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_getMediaSessionId")] public static extern ulong ProxyVideoConsumer_getMediaSessionId(HandleRef jarg1); diff --git a/branches/2.0/doubango/bindings/csharp/tinyWRAP_wrap.cxx b/branches/2.0/doubango/bindings/csharp/tinyWRAP_wrap.cxx index 2994f05f..ecbf1747 100644 --- a/branches/2.0/doubango/bindings/csharp/tinyWRAP_wrap.cxx +++ b/branches/2.0/doubango/bindings/csharp/tinyWRAP_wrap.cxx @@ -4668,6 +4668,34 @@ SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_setConsumeBuffer(v } +SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_pull(void * jarg1, void * jarg2, unsigned int jarg3) { + unsigned int jresult ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *arg2 = (void *) 0 ; + unsigned int arg3 ; + unsigned int result; + + arg1 = (ProxyVideoConsumer *)jarg1; + arg2 = jarg2; + arg3 = (unsigned int)jarg3; + result = (unsigned int)(arg1)->pull(arg2,arg3); + jresult = result; + return jresult; +} + + +SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_reset(void * jarg1) { + unsigned int jresult ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + bool result; + + arg1 = (ProxyVideoConsumer *)jarg1; + result = (bool)(arg1)->reset(); + jresult = result; + return jresult; +} + + SWIGEXPORT unsigned long long SWIGSTDCALL CSharp_ProxyVideoConsumer_getMediaSessionId(void * jarg1) { unsigned long long jresult ; ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; diff --git a/branches/2.0/doubango/bindings/csharp/tmedia_chroma_t.cs b/branches/2.0/doubango/bindings/csharp/tmedia_chroma_t.cs index ebec6bcb..f7427748 100644 --- a/branches/2.0/doubango/bindings/csharp/tmedia_chroma_t.cs +++ b/branches/2.0/doubango/bindings/csharp/tmedia_chroma_t.cs @@ -9,16 +9,17 @@ namespace org.doubango.tinyWRAP { public enum tmedia_chroma_t { - tmedia_rgb24, - tmedia_bgr24, - tmedia_rgb32, - tmedia_rgb565le, - tmedia_rgb565be, - tmedia_nv12, - tmedia_nv21, - tmedia_yuv422p, - 
tmedia_uyvy422, - tmedia_yuv420p + tmedia_chroma_none = 0, + tmedia_chroma_rgb24, + tmedia_chroma_bgr24, + tmedia_chroma_rgb32, + tmedia_chroma_rgb565le, + tmedia_chroma_rgb565be, + tmedia_chroma_nv12, + tmedia_chroma_nv21, + tmedia_chroma_yuv422p, + tmedia_chroma_uyvy422, + tmedia_chroma_yuv420p } } diff --git a/branches/2.0/doubango/bindings/java/ProxyVideoConsumer.java b/branches/2.0/doubango/bindings/java/ProxyVideoConsumer.java index a9e83bd7..8686d61e 100644 --- a/branches/2.0/doubango/bindings/java/ProxyVideoConsumer.java +++ b/branches/2.0/doubango/bindings/java/ProxyVideoConsumer.java @@ -61,6 +61,14 @@ public class ProxyVideoConsumer extends ProxyPlugin { return tinyWRAPJNI.ProxyVideoConsumer_setConsumeBuffer(swigCPtr, this, pConsumeBufferPtr, nConsumeBufferSize); } + public long pull(java.nio.ByteBuffer pOutput, long nSize) { + return tinyWRAPJNI.ProxyVideoConsumer_pull(swigCPtr, this, pOutput, nSize); + } + + public boolean reset() { + return tinyWRAPJNI.ProxyVideoConsumer_reset(swigCPtr, this); + } + public java.math.BigInteger getMediaSessionId() { return tinyWRAPJNI.ProxyVideoConsumer_getMediaSessionId(swigCPtr, this); } diff --git a/branches/2.0/doubango/bindings/java/android/ProxyVideoConsumer.java b/branches/2.0/doubango/bindings/java/android/ProxyVideoConsumer.java index a9e83bd7..8686d61e 100644 --- a/branches/2.0/doubango/bindings/java/android/ProxyVideoConsumer.java +++ b/branches/2.0/doubango/bindings/java/android/ProxyVideoConsumer.java @@ -61,6 +61,14 @@ public class ProxyVideoConsumer extends ProxyPlugin { return tinyWRAPJNI.ProxyVideoConsumer_setConsumeBuffer(swigCPtr, this, pConsumeBufferPtr, nConsumeBufferSize); } + public long pull(java.nio.ByteBuffer pOutput, long nSize) { + return tinyWRAPJNI.ProxyVideoConsumer_pull(swigCPtr, this, pOutput, nSize); + } + + public boolean reset() { + return tinyWRAPJNI.ProxyVideoConsumer_reset(swigCPtr, this); + } + public java.math.BigInteger getMediaSessionId() { return 
tinyWRAPJNI.ProxyVideoConsumer_getMediaSessionId(swigCPtr, this); } diff --git a/branches/2.0/doubango/bindings/java/android/tdav_codec_id_t.java b/branches/2.0/doubango/bindings/java/android/tdav_codec_id_t.java index b6309b2c..3cb12c5b 100644 --- a/branches/2.0/doubango/bindings/java/android/tdav_codec_id_t.java +++ b/branches/2.0/doubango/bindings/java/android/tdav_codec_id_t.java @@ -33,9 +33,10 @@ public enum tdav_codec_id_t { tdav_codec_id_h264_bp10(0x00010000 << 4), tdav_codec_id_h264_bp20(0x00010000 << 5), tdav_codec_id_h264_bp30(0x00010000 << 6), - tdav_codec_id_theora(0x00010000 << 7), - tdav_codec_id_mp4ves_es(0x00010000 << 8), - tdav_codec_id_vp8(0x00010000 << 9); + tdav_codec_id_h264_svc(0x00010000 << 7), + tdav_codec_id_theora(0x00010000 << 8), + tdav_codec_id_mp4ves_es(0x00010000 << 9), + tdav_codec_id_vp8(0x00010000 << 10); public final int swigValue() { return swigValue; diff --git a/branches/2.0/doubango/bindings/java/android/tinyWRAPJNI.java b/branches/2.0/doubango/bindings/java/android/tinyWRAPJNI.java index 3e65e908..ee864632 100644 --- a/branches/2.0/doubango/bindings/java/android/tinyWRAPJNI.java +++ b/branches/2.0/doubango/bindings/java/android/tinyWRAPJNI.java @@ -294,6 +294,8 @@ class tinyWRAPJNI { public final static native boolean ProxyVideoConsumer_setAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_, boolean jarg2); public final static native boolean ProxyVideoConsumer_getAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_); public final static native boolean ProxyVideoConsumer_setConsumeBuffer(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3); + public final static native long ProxyVideoConsumer_pull(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3); + public final static native boolean ProxyVideoConsumer_reset(long jarg1, ProxyVideoConsumer jarg1_); public final static native java.math.BigInteger ProxyVideoConsumer_getMediaSessionId(long jarg1, ProxyVideoConsumer 
jarg1_); public final static native boolean ProxyVideoConsumer_registerPlugin(); public final static native void ProxyVideoConsumer_setDefaultChroma(int jarg1); diff --git a/branches/2.0/doubango/bindings/java/android/tinyWRAP_wrap.cxx b/branches/2.0/doubango/bindings/java/android/tinyWRAP_wrap.cxx index 0b002cd9..446c67f9 100644 --- a/branches/2.0/doubango/bindings/java/android/tinyWRAP_wrap.cxx +++ b/branches/2.0/doubango/bindings/java/android/tinyWRAP_wrap.cxx @@ -6751,6 +6751,42 @@ SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoCon } +SWIGEXPORT jlong JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1pull(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2, jlong jarg3) { + jlong jresult = 0 ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *arg2 = (void *) 0 ; + unsigned int arg3 ; + unsigned int result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(ProxyVideoConsumer **)&jarg1; + + arg2 = jenv->GetDirectBufferAddress(jarg2); + + arg3 = (unsigned int)jarg3; + result = (unsigned int)(arg1)->pull(arg2,arg3); + jresult = (jlong)result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1reset(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jboolean jresult = 0 ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + bool result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(ProxyVideoConsumer **)&jarg1; + result = (bool)(arg1)->reset(); + jresult = (jboolean)result; + return jresult; +} + + SWIGEXPORT jobject JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1getMediaSessionId(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jobject jresult = 0 ; ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; diff --git a/branches/2.0/doubango/bindings/java/android/tmedia_chroma_t.java b/branches/2.0/doubango/bindings/java/android/tmedia_chroma_t.java index 
18fbb979..1ba112a0 100644 --- a/branches/2.0/doubango/bindings/java/android/tmedia_chroma_t.java +++ b/branches/2.0/doubango/bindings/java/android/tmedia_chroma_t.java @@ -9,16 +9,17 @@ package org.doubango.tinyWRAP; public enum tmedia_chroma_t { - tmedia_rgb24, - tmedia_bgr24, - tmedia_rgb32, - tmedia_rgb565le, - tmedia_rgb565be, - tmedia_nv12, - tmedia_nv21, - tmedia_yuv422p, - tmedia_uyvy422, - tmedia_yuv420p; + tmedia_chroma_none(0), + tmedia_chroma_rgb24, + tmedia_chroma_bgr24, + tmedia_chroma_rgb32, + tmedia_chroma_rgb565le, + tmedia_chroma_rgb565be, + tmedia_chroma_nv12, + tmedia_chroma_nv21, + tmedia_chroma_yuv422p, + tmedia_chroma_uyvy422, + tmedia_chroma_yuv420p; public final int swigValue() { return swigValue; diff --git a/branches/2.0/doubango/bindings/java/tdav_codec_id_t.java b/branches/2.0/doubango/bindings/java/tdav_codec_id_t.java index b6309b2c..3cb12c5b 100644 --- a/branches/2.0/doubango/bindings/java/tdav_codec_id_t.java +++ b/branches/2.0/doubango/bindings/java/tdav_codec_id_t.java @@ -33,9 +33,10 @@ public enum tdav_codec_id_t { tdav_codec_id_h264_bp10(0x00010000 << 4), tdav_codec_id_h264_bp20(0x00010000 << 5), tdav_codec_id_h264_bp30(0x00010000 << 6), - tdav_codec_id_theora(0x00010000 << 7), - tdav_codec_id_mp4ves_es(0x00010000 << 8), - tdav_codec_id_vp8(0x00010000 << 9); + tdav_codec_id_h264_svc(0x00010000 << 7), + tdav_codec_id_theora(0x00010000 << 8), + tdav_codec_id_mp4ves_es(0x00010000 << 9), + tdav_codec_id_vp8(0x00010000 << 10); public final int swigValue() { return swigValue; diff --git a/branches/2.0/doubango/bindings/java/tinyWRAPJNI.java b/branches/2.0/doubango/bindings/java/tinyWRAPJNI.java index 3e65e908..ee864632 100644 --- a/branches/2.0/doubango/bindings/java/tinyWRAPJNI.java +++ b/branches/2.0/doubango/bindings/java/tinyWRAPJNI.java @@ -294,6 +294,8 @@ class tinyWRAPJNI { public final static native boolean ProxyVideoConsumer_setAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_, boolean jarg2); public final static native 
boolean ProxyVideoConsumer_getAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_); public final static native boolean ProxyVideoConsumer_setConsumeBuffer(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3); + public final static native long ProxyVideoConsumer_pull(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3); + public final static native boolean ProxyVideoConsumer_reset(long jarg1, ProxyVideoConsumer jarg1_); public final static native java.math.BigInteger ProxyVideoConsumer_getMediaSessionId(long jarg1, ProxyVideoConsumer jarg1_); public final static native boolean ProxyVideoConsumer_registerPlugin(); public final static native void ProxyVideoConsumer_setDefaultChroma(int jarg1); diff --git a/branches/2.0/doubango/bindings/java/tinyWRAP_wrap.cxx b/branches/2.0/doubango/bindings/java/tinyWRAP_wrap.cxx index a274f10a..c83a38eb 100644 --- a/branches/2.0/doubango/bindings/java/tinyWRAP_wrap.cxx +++ b/branches/2.0/doubango/bindings/java/tinyWRAP_wrap.cxx @@ -6751,6 +6751,42 @@ SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoCon } +SWIGEXPORT jlong JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1pull(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2, jlong jarg3) { + jlong jresult = 0 ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *arg2 = (void *) 0 ; + unsigned int arg3 ; + unsigned int result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(ProxyVideoConsumer **)&jarg1; + + arg2 = jenv->GetDirectBufferAddress(jarg2); + + arg3 = (unsigned int)jarg3; + result = (unsigned int)(arg1)->pull(arg2,arg3); + jresult = (jlong)result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1reset(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jboolean jresult = 0 ; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + bool result; + + (void)jenv; + 
(void)jcls; + (void)jarg1_; + arg1 = *(ProxyVideoConsumer **)&jarg1; + result = (bool)(arg1)->reset(); + jresult = (jboolean)result; + return jresult; +} + + SWIGEXPORT jobject JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1getMediaSessionId(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jobject jresult = 0 ; ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; diff --git a/branches/2.0/doubango/bindings/java/tmedia_chroma_t.java b/branches/2.0/doubango/bindings/java/tmedia_chroma_t.java index 18fbb979..1ba112a0 100644 --- a/branches/2.0/doubango/bindings/java/tmedia_chroma_t.java +++ b/branches/2.0/doubango/bindings/java/tmedia_chroma_t.java @@ -9,16 +9,17 @@ package org.doubango.tinyWRAP; public enum tmedia_chroma_t { - tmedia_rgb24, - tmedia_bgr24, - tmedia_rgb32, - tmedia_rgb565le, - tmedia_rgb565be, - tmedia_nv12, - tmedia_nv21, - tmedia_yuv422p, - tmedia_uyvy422, - tmedia_yuv420p; + tmedia_chroma_none(0), + tmedia_chroma_rgb24, + tmedia_chroma_bgr24, + tmedia_chroma_rgb32, + tmedia_chroma_rgb565le, + tmedia_chroma_rgb565be, + tmedia_chroma_nv12, + tmedia_chroma_nv21, + tmedia_chroma_yuv422p, + tmedia_chroma_uyvy422, + tmedia_chroma_yuv420p; public final int swigValue() { return swigValue; diff --git a/branches/2.0/doubango/bindings/perl/tinyWRAP.pm b/branches/2.0/doubango/bindings/perl/tinyWRAP.pm index 78e19c01..f9a9525f 100644 --- a/branches/2.0/doubango/bindings/perl/tinyWRAP.pm +++ b/branches/2.0/doubango/bindings/perl/tinyWRAP.pm @@ -1368,6 +1368,8 @@ sub DESTROY { *setAutoResizeDisplay = *tinyWRAPc::ProxyVideoConsumer_setAutoResizeDisplay; *getAutoResizeDisplay = *tinyWRAPc::ProxyVideoConsumer_getAutoResizeDisplay; *setConsumeBuffer = *tinyWRAPc::ProxyVideoConsumer_setConsumeBuffer; +*pull = *tinyWRAPc::ProxyVideoConsumer_pull; +*reset = *tinyWRAPc::ProxyVideoConsumer_reset; *getMediaSessionId = *tinyWRAPc::ProxyVideoConsumer_getMediaSessionId; *registerPlugin = *tinyWRAPc::ProxyVideoConsumer_registerPlugin; 
*setDefaultChroma = *tinyWRAPc::ProxyVideoConsumer_setDefaultChroma; @@ -2262,16 +2264,17 @@ package tinyWRAP; *tsip_m_local_resume_nok = *tinyWRAPc::tsip_m_local_resume_nok; *tsip_m_remote_hold = *tinyWRAPc::tsip_m_remote_hold; *tsip_m_remote_resume = *tinyWRAPc::tsip_m_remote_resume; -*tmedia_rgb24 = *tinyWRAPc::tmedia_rgb24; -*tmedia_bgr24 = *tinyWRAPc::tmedia_bgr24; -*tmedia_rgb32 = *tinyWRAPc::tmedia_rgb32; -*tmedia_rgb565le = *tinyWRAPc::tmedia_rgb565le; -*tmedia_rgb565be = *tinyWRAPc::tmedia_rgb565be; -*tmedia_nv12 = *tinyWRAPc::tmedia_nv12; -*tmedia_nv21 = *tinyWRAPc::tmedia_nv21; -*tmedia_yuv422p = *tinyWRAPc::tmedia_yuv422p; -*tmedia_uyvy422 = *tinyWRAPc::tmedia_uyvy422; -*tmedia_yuv420p = *tinyWRAPc::tmedia_yuv420p; +*tmedia_chroma_none = *tinyWRAPc::tmedia_chroma_none; +*tmedia_chroma_rgb24 = *tinyWRAPc::tmedia_chroma_rgb24; +*tmedia_chroma_bgr24 = *tinyWRAPc::tmedia_chroma_bgr24; +*tmedia_chroma_rgb32 = *tinyWRAPc::tmedia_chroma_rgb32; +*tmedia_chroma_rgb565le = *tinyWRAPc::tmedia_chroma_rgb565le; +*tmedia_chroma_rgb565be = *tinyWRAPc::tmedia_chroma_rgb565be; +*tmedia_chroma_nv12 = *tinyWRAPc::tmedia_chroma_nv12; +*tmedia_chroma_nv21 = *tinyWRAPc::tmedia_chroma_nv21; +*tmedia_chroma_yuv422p = *tinyWRAPc::tmedia_chroma_yuv422p; +*tmedia_chroma_uyvy422 = *tinyWRAPc::tmedia_chroma_uyvy422; +*tmedia_chroma_yuv420p = *tinyWRAPc::tmedia_chroma_yuv420p; *tmedia_qos_stype_none = *tinyWRAPc::tmedia_qos_stype_none; *tmedia_qos_stype_segmented = *tinyWRAPc::tmedia_qos_stype_segmented; *tmedia_qos_stype_e2e = *tinyWRAPc::tmedia_qos_stype_e2e; @@ -2308,6 +2311,7 @@ package tinyWRAP; *tdav_codec_id_h264_bp10 = *tinyWRAPc::tdav_codec_id_h264_bp10; *tdav_codec_id_h264_bp20 = *tinyWRAPc::tdav_codec_id_h264_bp20; *tdav_codec_id_h264_bp30 = *tinyWRAPc::tdav_codec_id_h264_bp30; +*tdav_codec_id_h264_svc = *tinyWRAPc::tdav_codec_id_h264_svc; *tdav_codec_id_theora = *tinyWRAPc::tdav_codec_id_theora; *tdav_codec_id_mp4ves_es = *tinyWRAPc::tdav_codec_id_mp4ves_es; 
*tdav_codec_id_vp8 = *tinyWRAPc::tdav_codec_id_vp8; diff --git a/branches/2.0/doubango/bindings/perl/tinyWRAP_wrap.cxx b/branches/2.0/doubango/bindings/perl/tinyWRAP_wrap.cxx index 9c349aa2..8229107c 100644 --- a/branches/2.0/doubango/bindings/perl/tinyWRAP_wrap.cxx +++ b/branches/2.0/doubango/bindings/perl/tinyWRAP_wrap.cxx @@ -14029,6 +14029,80 @@ XS(_wrap_ProxyVideoConsumer_setConsumeBuffer) { } +XS(_wrap_ProxyVideoConsumer_pull) { + { + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *arg2 = (void *) 0 ; + unsigned int arg3 ; + void *argp1 = 0 ; + int res1 = 0 ; + int res2 ; + unsigned int val3 ; + int ecode3 = 0 ; + int argvi = 0; + unsigned int result; + dXSARGS; + + if ((items < 3) || (items > 3)) { + SWIG_croak("Usage: ProxyVideoConsumer_pull(self,pOutput,nSize);"); + } + res1 = SWIG_ConvertPtr(ST(0), &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_pull" "', argument " "1"" of type '" "ProxyVideoConsumer *""'"); + } + arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1); + res2 = SWIG_ConvertPtr(ST(1),SWIG_as_voidptrptr(&arg2), 0, 0); + if (!SWIG_IsOK(res2)) { + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "ProxyVideoConsumer_pull" "', argument " "2"" of type '" "void *""'"); + } + ecode3 = SWIG_AsVal_unsigned_SS_int SWIG_PERL_CALL_ARGS_2(ST(2), &val3); + if (!SWIG_IsOK(ecode3)) { + SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "ProxyVideoConsumer_pull" "', argument " "3"" of type '" "unsigned int""'"); + } + arg3 = static_cast< unsigned int >(val3); + result = (unsigned int)(arg1)->pull(arg2,arg3); + ST(argvi) = SWIG_From_unsigned_SS_int SWIG_PERL_CALL_ARGS_1(static_cast< unsigned int >(result)); argvi++ ; + + + + XSRETURN(argvi); + fail: + + + + SWIG_croak_null(); + } +} + + +XS(_wrap_ProxyVideoConsumer_reset) { + { + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + int argvi 
= 0; + bool result; + dXSARGS; + + if ((items < 1) || (items > 1)) { + SWIG_croak("Usage: ProxyVideoConsumer_reset(self);"); + } + res1 = SWIG_ConvertPtr(ST(0), &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_reset" "', argument " "1"" of type '" "ProxyVideoConsumer *""'"); + } + arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1); + result = (bool)(arg1)->reset(); + ST(argvi) = SWIG_From_bool SWIG_PERL_CALL_ARGS_1(static_cast< bool >(result)); argvi++ ; + + XSRETURN(argvi); + fail: + + SWIG_croak_null(); + } +} + + XS(_wrap_ProxyVideoConsumer_getMediaSessionId) { { ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; @@ -21676,6 +21750,8 @@ static swig_command_info swig_commands[] = { {"tinyWRAPc::ProxyVideoConsumer_setAutoResizeDisplay", _wrap_ProxyVideoConsumer_setAutoResizeDisplay}, {"tinyWRAPc::ProxyVideoConsumer_getAutoResizeDisplay", _wrap_ProxyVideoConsumer_getAutoResizeDisplay}, {"tinyWRAPc::ProxyVideoConsumer_setConsumeBuffer", _wrap_ProxyVideoConsumer_setConsumeBuffer}, +{"tinyWRAPc::ProxyVideoConsumer_pull", _wrap_ProxyVideoConsumer_pull}, +{"tinyWRAPc::ProxyVideoConsumer_reset", _wrap_ProxyVideoConsumer_reset}, {"tinyWRAPc::ProxyVideoConsumer_getMediaSessionId", _wrap_ProxyVideoConsumer_getMediaSessionId}, {"tinyWRAPc::ProxyVideoConsumer_registerPlugin", _wrap_ProxyVideoConsumer_registerPlugin}, {"tinyWRAPc::ProxyVideoConsumer_setDefaultChroma", _wrap_ProxyVideoConsumer_setDefaultChroma}, @@ -22515,53 +22591,58 @@ XS(SWIG_init) { SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb24", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb24))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_none", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int 
SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_none))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_bgr24", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_bgr24))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb24", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb24))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb32", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb32))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_bgr24", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_bgr24))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb565le", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb565le))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb32", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb32))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb565be", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb565be))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb565le", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb565le))); 
SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_nv12", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_nv12))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb565be", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb565be))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_nv21", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_nv21))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_nv12", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_nv12))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_yuv422p", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_yuv422p))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_nv21", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_nv21))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_uyvy422", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_uyvy422))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_yuv422p", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_yuv422p))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; 
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { - SV *sv = get_sv((char*) SWIG_prefix "tmedia_yuv420p", TRUE | 0x2 | GV_ADDMULTI); - sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_yuv420p))); + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_uyvy422", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_uyvy422))); + SvREADONLY_on(sv); + } while(0) /*@SWIG@*/; + /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { + SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_yuv420p", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_yuv420p))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { @@ -22744,6 +22825,11 @@ XS(SWIG_init) { sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_h264_bp30))); SvREADONLY_on(sv); } while(0) /*@SWIG@*/; + /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { + SV *sv = get_sv((char*) SWIG_prefix "tdav_codec_id_h264_svc", TRUE | 0x2 | GV_ADDMULTI); + sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_h264_svc))); + SvREADONLY_on(sv); + } while(0) /*@SWIG@*/; /*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do { SV *sv = get_sv((char*) SWIG_prefix "tdav_codec_id_theora", TRUE | 0x2 | GV_ADDMULTI); sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_theora))); diff --git a/branches/2.0/doubango/bindings/python/tinyWRAP.py b/branches/2.0/doubango/bindings/python/tinyWRAP.py index 10d7010e..da633e19 100644 --- a/branches/2.0/doubango/bindings/python/tinyWRAP.py +++ b/branches/2.0/doubango/bindings/python/tinyWRAP.py @@ -899,6 +899,8 @@ class ProxyVideoConsumer(ProxyPlugin): def 
setAutoResizeDisplay(self, *args): return _tinyWRAP.ProxyVideoConsumer_setAutoResizeDisplay(self, *args) def getAutoResizeDisplay(self): return _tinyWRAP.ProxyVideoConsumer_getAutoResizeDisplay(self) def setConsumeBuffer(self, *args): return _tinyWRAP.ProxyVideoConsumer_setConsumeBuffer(self, *args) + def pull(self, *args): return _tinyWRAP.ProxyVideoConsumer_pull(self, *args) + def reset(self): return _tinyWRAP.ProxyVideoConsumer_reset(self) def getMediaSessionId(self): return _tinyWRAP.ProxyVideoConsumer_getMediaSessionId(self) __swig_getmethods__["registerPlugin"] = lambda x: _tinyWRAP.ProxyVideoConsumer_registerPlugin if _newclass:registerPlugin = staticmethod(_tinyWRAP.ProxyVideoConsumer_registerPlugin) @@ -1244,16 +1246,17 @@ tsip_m_local_resume_ok = _tinyWRAP.tsip_m_local_resume_ok tsip_m_local_resume_nok = _tinyWRAP.tsip_m_local_resume_nok tsip_m_remote_hold = _tinyWRAP.tsip_m_remote_hold tsip_m_remote_resume = _tinyWRAP.tsip_m_remote_resume -tmedia_rgb24 = _tinyWRAP.tmedia_rgb24 -tmedia_bgr24 = _tinyWRAP.tmedia_bgr24 -tmedia_rgb32 = _tinyWRAP.tmedia_rgb32 -tmedia_rgb565le = _tinyWRAP.tmedia_rgb565le -tmedia_rgb565be = _tinyWRAP.tmedia_rgb565be -tmedia_nv12 = _tinyWRAP.tmedia_nv12 -tmedia_nv21 = _tinyWRAP.tmedia_nv21 -tmedia_yuv422p = _tinyWRAP.tmedia_yuv422p -tmedia_uyvy422 = _tinyWRAP.tmedia_uyvy422 -tmedia_yuv420p = _tinyWRAP.tmedia_yuv420p +tmedia_chroma_none = _tinyWRAP.tmedia_chroma_none +tmedia_chroma_rgb24 = _tinyWRAP.tmedia_chroma_rgb24 +tmedia_chroma_bgr24 = _tinyWRAP.tmedia_chroma_bgr24 +tmedia_chroma_rgb32 = _tinyWRAP.tmedia_chroma_rgb32 +tmedia_chroma_rgb565le = _tinyWRAP.tmedia_chroma_rgb565le +tmedia_chroma_rgb565be = _tinyWRAP.tmedia_chroma_rgb565be +tmedia_chroma_nv12 = _tinyWRAP.tmedia_chroma_nv12 +tmedia_chroma_nv21 = _tinyWRAP.tmedia_chroma_nv21 +tmedia_chroma_yuv422p = _tinyWRAP.tmedia_chroma_yuv422p +tmedia_chroma_uyvy422 = _tinyWRAP.tmedia_chroma_uyvy422 +tmedia_chroma_yuv420p = _tinyWRAP.tmedia_chroma_yuv420p tmedia_qos_stype_none = 
_tinyWRAP.tmedia_qos_stype_none tmedia_qos_stype_segmented = _tinyWRAP.tmedia_qos_stype_segmented tmedia_qos_stype_e2e = _tinyWRAP.tmedia_qos_stype_e2e @@ -1290,6 +1293,7 @@ tdav_codec_id_h263pp = _tinyWRAP.tdav_codec_id_h263pp tdav_codec_id_h264_bp10 = _tinyWRAP.tdav_codec_id_h264_bp10 tdav_codec_id_h264_bp20 = _tinyWRAP.tdav_codec_id_h264_bp20 tdav_codec_id_h264_bp30 = _tinyWRAP.tdav_codec_id_h264_bp30 +tdav_codec_id_h264_svc = _tinyWRAP.tdav_codec_id_h264_svc tdav_codec_id_theora = _tinyWRAP.tdav_codec_id_theora tdav_codec_id_mp4ves_es = _tinyWRAP.tdav_codec_id_mp4ves_es tdav_codec_id_vp8 = _tinyWRAP.tdav_codec_id_vp8 diff --git a/branches/2.0/doubango/bindings/python/tinyWRAP_wrap.cxx b/branches/2.0/doubango/bindings/python/tinyWRAP_wrap.cxx index a684d881..3c90232e 100644 --- a/branches/2.0/doubango/bindings/python/tinyWRAP_wrap.cxx +++ b/branches/2.0/doubango/bindings/python/tinyWRAP_wrap.cxx @@ -14567,6 +14567,66 @@ fail: } +SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_pull(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *arg2 = (void *) 0 ; + unsigned int arg3 ; + void *argp1 = 0 ; + int res1 = 0 ; + int res2 ; + unsigned int val3 ; + int ecode3 = 0 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + unsigned int result; + + if (!PyArg_ParseTuple(args,(char *)"OOO:ProxyVideoConsumer_pull",&obj0,&obj1,&obj2)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_pull" "', argument " "1"" of type '" "ProxyVideoConsumer *""'"); + } + arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1); + res2 = SWIG_ConvertPtr(obj1,SWIG_as_voidptrptr(&arg2), 0, 0); + if (!SWIG_IsOK(res2)) { + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "ProxyVideoConsumer_pull" "', argument " "2"" of type '" "void *""'"); + } + 
ecode3 = SWIG_AsVal_unsigned_SS_int(obj2, &val3); + if (!SWIG_IsOK(ecode3)) { + SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "ProxyVideoConsumer_pull" "', argument " "3"" of type '" "unsigned int""'"); + } + arg3 = static_cast< unsigned int >(val3); + result = (unsigned int)(arg1)->pull(arg2,arg3); + resultobj = SWIG_From_unsigned_SS_int(static_cast< unsigned int >(result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_reset(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + bool result; + + if (!PyArg_ParseTuple(args,(char *)"O:ProxyVideoConsumer_reset",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_reset" "', argument " "1"" of type '" "ProxyVideoConsumer *""'"); + } + arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1); + result = (bool)(arg1)->reset(); + resultobj = SWIG_From_bool(static_cast< bool >(result)); + return resultobj; +fail: + return NULL; +} + + SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_getMediaSessionId(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ; @@ -21159,6 +21219,8 @@ static PyMethodDef SwigMethods[] = { { (char *)"ProxyVideoConsumer_setAutoResizeDisplay", _wrap_ProxyVideoConsumer_setAutoResizeDisplay, METH_VARARGS, NULL}, { (char *)"ProxyVideoConsumer_getAutoResizeDisplay", _wrap_ProxyVideoConsumer_getAutoResizeDisplay, METH_VARARGS, NULL}, { (char *)"ProxyVideoConsumer_setConsumeBuffer", _wrap_ProxyVideoConsumer_setConsumeBuffer, METH_VARARGS, NULL}, + { (char *)"ProxyVideoConsumer_pull", _wrap_ProxyVideoConsumer_pull, METH_VARARGS, NULL}, + { (char *)"ProxyVideoConsumer_reset", 
_wrap_ProxyVideoConsumer_reset, METH_VARARGS, NULL}, { (char *)"ProxyVideoConsumer_getMediaSessionId", _wrap_ProxyVideoConsumer_getMediaSessionId, METH_VARARGS, NULL}, { (char *)"ProxyVideoConsumer_registerPlugin", _wrap_ProxyVideoConsumer_registerPlugin, METH_VARARGS, NULL}, { (char *)"ProxyVideoConsumer_setDefaultChroma", _wrap_ProxyVideoConsumer_setDefaultChroma, METH_VARARGS, NULL}, @@ -22427,16 +22489,17 @@ SWIG_init(void) { SWIG_Python_SetConstant(d, "tsip_m_local_resume_nok",SWIG_From_int(static_cast< int >(tsip_m_local_resume_nok))); SWIG_Python_SetConstant(d, "tsip_m_remote_hold",SWIG_From_int(static_cast< int >(tsip_m_remote_hold))); SWIG_Python_SetConstant(d, "tsip_m_remote_resume",SWIG_From_int(static_cast< int >(tsip_m_remote_resume))); - SWIG_Python_SetConstant(d, "tmedia_rgb24",SWIG_From_int(static_cast< int >(tmedia_rgb24))); - SWIG_Python_SetConstant(d, "tmedia_bgr24",SWIG_From_int(static_cast< int >(tmedia_bgr24))); - SWIG_Python_SetConstant(d, "tmedia_rgb32",SWIG_From_int(static_cast< int >(tmedia_rgb32))); - SWIG_Python_SetConstant(d, "tmedia_rgb565le",SWIG_From_int(static_cast< int >(tmedia_rgb565le))); - SWIG_Python_SetConstant(d, "tmedia_rgb565be",SWIG_From_int(static_cast< int >(tmedia_rgb565be))); - SWIG_Python_SetConstant(d, "tmedia_nv12",SWIG_From_int(static_cast< int >(tmedia_nv12))); - SWIG_Python_SetConstant(d, "tmedia_nv21",SWIG_From_int(static_cast< int >(tmedia_nv21))); - SWIG_Python_SetConstant(d, "tmedia_yuv422p",SWIG_From_int(static_cast< int >(tmedia_yuv422p))); - SWIG_Python_SetConstant(d, "tmedia_uyvy422",SWIG_From_int(static_cast< int >(tmedia_uyvy422))); - SWIG_Python_SetConstant(d, "tmedia_yuv420p",SWIG_From_int(static_cast< int >(tmedia_yuv420p))); + SWIG_Python_SetConstant(d, "tmedia_chroma_none",SWIG_From_int(static_cast< int >(tmedia_chroma_none))); + SWIG_Python_SetConstant(d, "tmedia_chroma_rgb24",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb24))); + SWIG_Python_SetConstant(d, 
"tmedia_chroma_bgr24",SWIG_From_int(static_cast< int >(tmedia_chroma_bgr24))); + SWIG_Python_SetConstant(d, "tmedia_chroma_rgb32",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb32))); + SWIG_Python_SetConstant(d, "tmedia_chroma_rgb565le",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb565le))); + SWIG_Python_SetConstant(d, "tmedia_chroma_rgb565be",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb565be))); + SWIG_Python_SetConstant(d, "tmedia_chroma_nv12",SWIG_From_int(static_cast< int >(tmedia_chroma_nv12))); + SWIG_Python_SetConstant(d, "tmedia_chroma_nv21",SWIG_From_int(static_cast< int >(tmedia_chroma_nv21))); + SWIG_Python_SetConstant(d, "tmedia_chroma_yuv422p",SWIG_From_int(static_cast< int >(tmedia_chroma_yuv422p))); + SWIG_Python_SetConstant(d, "tmedia_chroma_uyvy422",SWIG_From_int(static_cast< int >(tmedia_chroma_uyvy422))); + SWIG_Python_SetConstant(d, "tmedia_chroma_yuv420p",SWIG_From_int(static_cast< int >(tmedia_chroma_yuv420p))); SWIG_Python_SetConstant(d, "tmedia_qos_stype_none",SWIG_From_int(static_cast< int >(tmedia_qos_stype_none))); SWIG_Python_SetConstant(d, "tmedia_qos_stype_segmented",SWIG_From_int(static_cast< int >(tmedia_qos_stype_segmented))); SWIG_Python_SetConstant(d, "tmedia_qos_stype_e2e",SWIG_From_int(static_cast< int >(tmedia_qos_stype_e2e))); @@ -22473,6 +22536,7 @@ SWIG_init(void) { SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp10",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp10))); SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp20",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp20))); SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp30",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp30))); + SWIG_Python_SetConstant(d, "tdav_codec_id_h264_svc",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_svc))); SWIG_Python_SetConstant(d, "tdav_codec_id_theora",SWIG_From_int(static_cast< int >(tdav_codec_id_theora))); SWIG_Python_SetConstant(d, "tdav_codec_id_mp4ves_es",SWIG_From_int(static_cast< int 
>(tdav_codec_id_mp4ves_es))); SWIG_Python_SetConstant(d, "tdav_codec_id_vp8",SWIG_From_int(static_cast< int >(tdav_codec_id_vp8))); diff --git a/branches/2.0/doubango/contributors.txt b/branches/2.0/doubango/contributors.txt index 1b53a452..f38b9716 100644 --- a/branches/2.0/doubango/contributors.txt +++ b/branches/2.0/doubango/contributors.txt @@ -1,6 +1,10 @@ - "Alberto Panizzo" - "Alex Vishnev" +- "Giacomo Vacca" - "Laurent Etiemble" - "Mamadou Diop" +- "Marco Zoncu" +- "Michael Siddi" +- "Paolo Baire" - "Philippe Verney" - "Rich Hovey" \ No newline at end of file diff --git a/branches/2.0/doubango/thirdparties/win32/include/cuda/types.h b/branches/2.0/doubango/thirdparties/win32/include/cuda/types.h new file mode 100644 index 00000000..a8fc11cd --- /dev/null +++ b/branches/2.0/doubango/thirdparties/win32/include/cuda/types.h @@ -0,0 +1,241 @@ +/* + * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. 
+ * + */ + +#ifndef TYPES_H +#define TYPES_H + +#include "NVEncodeDataTypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct NVEncoderParams +{ + char configFile[256]; + char inputFile[256]; + char outputFile[256]; + int measure_psnr; + int measure_fps; + int force_device; + int iSurfaceFormat; + int iPictureType; + int nDeviceMemPitch; + + int iCodecType; // NVVE_CODEC_TYPE, + int GPU_count; // Choose the specific GPU count + int GPU_devID; // Choose the specific GPU device ID + int iUseDeviceMem; // CUDA with DEVICE_MEMORY_INPUT (for encoding) + int iForcedGPU; // NVVE_FORCE_GPU_SELECTION //F22 + int iOutputSize[2]; // NVVE_OUT_SIZE, + int iInputSize[2]; // NVVE_IN_SIZE, + float fAspectRatio; // + int iAspectRatio[3]; // NVVE_ASPECT_RATIO, + NVVE_FIELD_MODE Fieldmode; // NVVE_FIELD_ENC_MODE, + int iP_Interval; // NVVE_P_INTERVAL, + int iIDR_Period; // NVVE_IDR_PERIOD, + int iDynamicGOP; // NVVE_DYNAMIC_GOP, + NVVE_RateCtrlType RCType; // NVVE_RC_TYPE, + int iAvgBitrate; // NVVE_AVG_BITRATE, + int iPeakBitrate; // NVVE_PEAK_BITRATE, + int iQP_Level_Intra; // NVVE_QP_LEVEL_INTRA, + int iQP_Level_InterP; // NVVE_QP_LEVEL_INTER_P, + int iQP_Level_InterB; // NVVE_QP_LEVEL_INTER_B, + int iFrameRate[2]; // NVVE_FRAME_RATE, + int iDeblockMode; // NVVE_DEBLOCK_MODE, + int iProfileLevel; // NVVE_PROFILE_LEVEL, + int iForceIntra; // NVVE_FORCE_INTRA, + int iForceIDR; // NVVE_FORCE_IDR, + int iClearStat; // NVVE_CLEAR_STAT, + NVVE_DI_MODE DIMode; // NVVE_SET_DEINTERLACE, + NVVE_PRESETS_TARGET Presets; // NVVE_PRESETS, + int iDisableCabac; // NVVE_DISABLE_CABAC, + int iNaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE + int iDisableSPSPPS; // NVVE_DISABLE_SPS_PPS + NVVE_GPUOffloadLevel GPUOffloadLevel; // NVVE_GPU_OFFLOAD_LEVEL + NVVE_GPUOffloadLevel MaxOffloadLevel; // NVVE_GPU_OFFLOAD_LEVEL_MAX + int iSliceCnt; // NVVE_SLICE_COUNT //F19 + int iMultiGPU; // NVVE_MULTI_GPU //F21 + int iDeviceMemInput; // NVVE_DEVICE_MEMORY_INPUT //F23 + +// 
NVVE_STAT_NUM_CODED_FRAMES, +// NVVE_STAT_NUM_RECEIVED_FRAMES, +// NVVE_STAT_BITRATE, +// NVVE_STAT_NUM_BITS_GENERATED, +// NVVE_GET_PTS_DIFF_TIME, +// NVVE_GET_PTS_BASE_TIME, +// NVVE_GET_PTS_CODED_TIME, +// NVVE_GET_PTS_RECEIVED_TIME, +// NVVE_STAT_ELAPSED_TIME, +// NVVE_STAT_QBUF_FULLNESS, +// NVVE_STAT_PERF_FPS, +// NVVE_STAT_PERF_AVG_TIME, +}; + +typedef struct { + char *name; + char *yuv_type; + int bpp; +} _sYUVParams; + +static _sYUVParams sSurfaceFormat[] = +{ + { "UYVY", "4:2:2", 16 }, + { "YUY2", "4:2:2", 16 }, + { "YV12", "4:2:0", 12 }, + { "NV12", "4:2:0", 12 }, + { "IYUV", "4:2:0", 12 }, + { NULL , 0 } +}; + +typedef struct { + char *name; + int params; +} _sNVVEEncodeParams; + +static _sNVVEEncodeParams sNVVE_EncodeParams[] = +{ + { "UNDEFINED", 1 }, + { "NVVE_OUT_SIZE", 2 }, + { "NVVE_ASPECT_RATIO", 3 }, + { "NVVE_FIELD_ENC_MODE", 1 }, + { "NVVE_P_INTERVAL", 1 }, + { "NVVE_IDR_PERIOD", 1 }, + { "NVVE_DYNAMIC_GOP", 1 }, + { "NVVE_RC_TYPE", 1 }, + { "NVVE_AVG_BITRATE", 1 }, + { "NVVE_PEAK_BITRATE", 1 }, + { "NVVE_QP_LEVEL_INTRA", 1 }, + { "NVVE_QP_LEVEL_INTER_P", 1 }, + { "NVVE_QP_LEVEL_INTER_B", 1 }, + { "NVVE_FRAME_RATE", 2 }, + { "NVVE_DEBLOCK_MODE", 1 }, + { "NVVE_PROFILE_LEVEL", 1 }, + { "NVVE_FORCE_INTRA (DS)", 1 }, //DShow only + { "NVVE_FORCE_IDR (DS)", 1 }, //DShow only + { "NVVE_CLEAR_STAT (DS)", 1 }, //DShow only + { "NVVE_SET_DEINTERLACE", 1 }, + { "NVVE_PRESETS", 1 }, + { "NVVE_IN_SIZE", 2 }, + { "NVVE_STAT_NUM_CODED_FRAMES (DS)", 1 }, //DShow only + { "NVVE_STAT_NUM_RECEIVED_FRAMES (DS)", 1 }, //DShow only + { "NVVE_STAT_BITRATE (DS)", 1 }, //DShow only + { "NVVE_STAT_NUM_BITS_GENERATED (DS)", 1 }, //DShow only + { "NVVE_GET_PTS_DIFF_TIME (DS)", 1 }, //DShow only + { "NVVE_GET_PTS_BASE_TIME (DS)", 1 }, //DShow only + { "NVVE_GET_PTS_CODED_TIME (DS)", 1 }, //DShow only + { "NVVE_GET_PTS_RECEIVED_TIME (DS)", 1 }, //DShow only + { "NVVE_STAT_ELAPSED_TIME (DS)", 1 }, //DShow only + { "NVVE_STAT_QBUF_FULLNESS (DS)", 1 }, //DShow only + { 
"NVVE_STAT_PERF_FPS (DS)", 1 }, //DShow only + { "NVVE_STAT_PERF_AVG_TIME (DS)", 1 }, //DShow only + { "NVVE_DISABLE_CABAC", 1 }, + { "NVVE_CONFIGURE_NALU_FRAMING_TYPE", 1 }, + { "NVVE_DISABLE_SPS_PPS", 1 }, + { "NVVE_SLICE_COUNT", 1 }, + { "NVVE_GPU_OFFLOAD_LEVEL", 1 }, + { "NVVE_GPU_OFFLOAD_LEVEL_MAX", 1 }, + { "NVVE_MULTI_GPU", 1 }, + { "NVVE_GET_GPU_COUNT", 1 }, + { "NVVE_GET_GPU_ATTRIBUTES", 1 }, + { "NVVE_FORCE_GPU_SELECTION", 1 }, + { "NVVE_DEVICE_MEMORY_INPUT", 1 }, + { "NVVE_DEVICE_CTX_LOCK", 1 }, + { NULL, 0 } +}; + +static _sNVVEEncodeParams sProfileName[] = { + { "Baseline", 0x42 }, + { "Main" , 0x4d }, + { "High" , 0x64 }, + { NULL , 0 } +}; + +static _sNVVEEncodeParams sProfileLevel[] = { + { "1.0", 0x0a }, + { "1.1", 0x0b }, + { "1.2", 0x0c }, + { "1.3", 0x0d }, + { "2.0", 0x14 }, + { "2.1", 0x15 }, + { "2.2", 0x16 }, + { "3.0", 0x1e }, + { "3.1", 0x1f }, + { "3.2", 0x20 }, + { "4.0", 0x28 }, + { "4.1", 0x29 }, + { "4.2", 0x2a }, + { "5.0", 0x32 }, + { "5.1", 0x33 }, + { "Auto", 0xff }, + { NULL , 0 } +}; + +inline char * sProfileIDX2Char(_sNVVEEncodeParams *pProfile, int ID) +{ + int index; + for (index = 0; pProfile[index].name != NULL; index++) { + if (pProfile[index].params == ID) { + // found the profile; return the string + return pProfile[index].name; + } + } + return NULL; +} + +static char *sVideoEncodePresets[] = { + "PSP ( 320x 240)", + "iPod/iPhone ( 320x 240)", + "AVCHD", + "BluRay", + "HDV_1440", + "ZuneHD", + "FlipCam", + NULL +}; + +static char *sGPUOffloadLevel[] = { + "CPU PEL processing", + "Motion Estimation", + "Full Encode", + NULL +}; + +static char *sPictureType[] = { + "Frame Mode", + "Field Mode (top first)", + "Field Mode (bottom first)", + "Field Mode (picaff) unsupported", + NULL +}; + +static char *sPictureStructure[] = { + "Unknown", + "Top Field", + "Bottom Field", + "Frame Picture", + NULL +}; + +// Rate Control Method (NVVE_RC_TYPE) +static char *sNVVE_RateCtrlType[] = { + "Rate Control CQP", + "Rate Control VBR", 
"Rate Control CBR with QP", + "Rate Control VBR with Min QP", + NULL +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/branches/2.0/doubango/tinyDAV/droid-makefile b/branches/2.0/doubango/tinyDAV/droid-makefile index 84bbbd16..0c73ac6e 100644 --- a/branches/2.0/doubango/tinyDAV/droid-makefile +++ b/branches/2.0/doubango/tinyDAV/droid-makefile @@ -167,9 +167,11 @@ OBJS += src/audio/tdav_consumer_audio.o \ src/audio/tdav_webrtc_denoise.o ### video -OBJS += src/video/tdav_converter_video.o \ +OBJS += src/video/tdav_consumer_video.o \ + src/video/tdav_converter_video.o \ src/video/tdav_runnable_video.o \ - src/video/tdav_session_video.o + src/video/tdav_session_video.o \ + src/video/tdav_video_jitterbuffer.o ### msrp OBJS += src/msrp/tdav_consumer_msrp.o \ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/audio/tdav_speex_jitterbuffer.h b/branches/2.0/doubango/tinyDAV/include/tinydav/audio/tdav_speex_jitterbuffer.h index 1da6f9b9..adb6f023 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/audio/tdav_speex_jitterbuffer.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/audio/tdav_speex_jitterbuffer.h @@ -38,7 +38,7 @@ TDAV_BEGIN_DECLS -/** Speex JitterBufferr*/ +/** Speex JitterBuffer*/ typedef struct tdav_speex_jitterBuffer_s { TMEDIA_DECLARE_JITTER_BUFFER; diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264.h b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264.h index 9bec5acc..c76f5b10 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264.h @@ -21,12 +21,11 @@ */ /**@file tdav_codec_h264.h - * @brief H.264 codec plugin + * @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding * RTP payloader/depayloader follows RFC 3984. 
* * @author Mamadou Diop * - */ #ifndef TINYDAV_CODEC_H264_H #define TINYDAV_CODEC_H264_H @@ -35,42 +34,15 @@ #if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264) -#include "tinymedia/tmedia_codec.h" +#include "tinydav/codecs/h264/tdav_codec_h264_common.h" #include TDAV_BEGIN_DECLS -// Because of FD, declare it here -typedef enum packetization_mode_e{ - Single_NAL_Unit_Mode = 0, /* Single NAL mode (Only nals from 1-23 are allowed) */ - Non_Interleaved_Mode = 1, /* Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed */ - Interleaved_Mode = 2 /* 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.*/ -} -packetization_mode_t; - -typedef enum tdav_codec_h264_profile_e -{ - tdav_codec_h264_bp99, - - tdav_codec_h264_bp10, - tdav_codec_h264_bp20, - tdav_codec_h264_bp30, -} -tdav_codec_h264_profile_t; - typedef struct tdav_codec_h264_s { - TMEDIA_DECLARE_CODEC_VIDEO; - - tdav_codec_h264_profile_t profile; - - packetization_mode_t pack_mode; - - struct{ - uint8_t* ptr; - tsk_size_t size; - } rtp; + TDAV_DECLARE_CODEC_H264_COMMON; // Encoder struct{ @@ -78,7 +50,7 @@ typedef struct tdav_codec_h264_s AVCodecContext* context; AVFrame* picture; void* buffer; - int frame_count; + int64_t frame_count; } encoder; // decoder @@ -89,6 +61,7 @@ typedef struct tdav_codec_h264_s void* accumulator; tsk_size_t accumulator_pos; + tsk_size_t accumulator_size; uint16_t last_seq; } decoder; } @@ -98,6 +71,14 @@ TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp10_plugin_def TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp20_plugin_def_t; TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp30_plugin_def_t; +static inline tsk_bool_t tdav_codec_h264_is_ffmpeg_plugin(const tmedia_codec_plugin_def_t *plugin) +{ + if(plugin && (plugin == tdav_codec_h264_bp10_plugin_def_t || plugin == tdav_codec_h264_bp20_plugin_def_t || plugin == tdav_codec_h264_bp30_plugin_def_t)){ + return tsk_true; + } + return tsk_false; +} 
+ TDAV_END_DECLS #endif /* HAVE_FFMPEG */ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_common.h b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_common.h new file mode 100644 index 00000000..907c9fe5 --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_common.h @@ -0,0 +1,160 @@ +/* +* Copyright (C) 2011 Doubango Telecom . +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. 
+* +*/ +#ifndef TINYDAV_CODEC_H264_COMMON_H +#define TINYDAV_CODEC_H264_COMMON_H + +#include "tinydav_config.h" +#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h" + +#include "tinymedia/tmedia_codec.h" + +#include "tsk_memory.h" +#include "tsk_string.h" +#include "tsk_params.h" + +#include + + +TDAV_BEGIN_DECLS + +#if !defined(H264_MAX_BR) +# define H264_MAX_BR 452 +#endif +#if !defined(H264_MAX_MBPS) +# define H264_MAX_MBPS 11880 +#endif + +#if !defined(H264_PACKETIZATION_MODE) +# define H264_PACKETIZATION_MODE Non_Interleaved_Mode +#endif + +// Because of FD, declare it here +typedef enum packetization_mode_e{ + Single_NAL_Unit_Mode = 0, /* Single NAL mode (Only nals from 1-23 are allowed) */ + Non_Interleaved_Mode = 1, /* Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed */ + Interleaved_Mode = 2 /* 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.*/ +} +packetization_mode_t; + +typedef enum tdav_codec_h264_profile_e +{ + tdav_codec_h264_bp99, + + tdav_codec_h264_bp10, + tdav_codec_h264_bp20, + tdav_codec_h264_bp30, +} +tdav_codec_h264_profile_t; + +typedef struct tdav_codec_h264_common_s +{ + TMEDIA_DECLARE_CODEC_VIDEO; + + tdav_codec_h264_profile_t profile; + + packetization_mode_t pack_mode; + + struct{ + uint8_t* ptr; + tsk_size_t size; + } rtp; +} +tdav_codec_h264_common_t; +#define TDAV_CODEC_H264_COMMON(self) ((tdav_codec_h264_common_t*)(self)) +#define TDAV_DECLARE_CODEC_H264_COMMON tdav_codec_h264_common_t __video__ + +static int tdav_codec_h264_common_init(tdav_codec_h264_common_t * h264) +{ + if(h264){ + + } + return 0; +} + +static int tdav_codec_h264_common_deinit(tdav_codec_h264_common_t * h264) +{ + if(h264){ + tmedia_codec_video_deinit(TMEDIA_CODEC_VIDEO(h264)); + TSK_FREE(h264->rtp.ptr); + h264->rtp.size = 0; + } + return 0; +} + +static tdav_codec_h264_profile_t tdav_codec_h264_common_get_profile(const char* fmtp) +{ + tdav_codec_h264_profile_t profile = tdav_codec_h264_bp99; + tsk_size_t size = 
tsk_strlen(fmtp); + int start, end; + + if((start = tsk_strindexOf(fmtp, size, "profile-level-id")) !=-1){ + tsk_param_t* param; + if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){ + end = size; + } + + if((param = tsk_params_parse_param((fmtp+start), (end-start)))){ + profile_idc_t p_idc; + level_idc_t l_idc; + if(param->value){ + tsk_strtrim_both(¶m->value); + } + + tdav_codec_h264_parse_profile(param->value, &p_idc, tsk_null, &l_idc); + + switch(p_idc){ + case profile_idc_baseline: + switch(l_idc){ + case level_idc_1_0: + case level_idc_1_b: + case level_idc_1_1: + case level_idc_1_2: + case level_idc_1_3: + profile = tdav_codec_h264_bp10; + break; + case level_idc_2_0: + case level_idc_2_1: + case level_idc_2_2: + profile = tdav_codec_h264_bp20; + break; + case level_idc_3_0: + profile = tdav_codec_h264_bp30; + break; + } + break; + case profile_idc_extended: + case profile_idc_main: + case profile_idc_high: + default: + /* Not supported */ + break; + } + + TSK_OBJECT_SAFE_FREE(param); + } + } + return profile; +} + +TDAV_END_DECLS + +#endif /* TINYDAV_CODEC_H264_COMMON_H */ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_cuda.h b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_cuda.h new file mode 100644 index 00000000..22800e0f --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_cuda.h @@ -0,0 +1,126 @@ +/* +* Copyright (C) 2011 Doubango Telecom . +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. 
+* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_codec_h264_cuda.h + * @brief H.264 codec plugin using NVIDIA CUDA for encoding/decoding. + * Env: gpucomputingsdk_4.0.17_win_32, cudatoolkit_4.0.17_win_32 and 280.26-notebook-win7-winvista-32bit-international-whql. + * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoDecoder_Library.pdf + * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoEncoder_Library.pdf + * RTP payloader/depayloader follows RFC 3984. + * + * @author Mamadou Diop + * + */ +#ifndef TINYDAV_CODEC_H264_CUDA_H +#define TINYDAV_CODEC_H264_CUDA_H + +#include "tinydav_config.h" + +#if HAVE_CUDA + +#include "tinydav/codecs/h264/tdav_codec_h264_common.h" + +// I really don't want to use C++ code :( +#if !defined(__cplusplus) +typedef enum NVVE_FrameRate NVVE_FrameRate; +typedef enum NVVE_GPUOffloadLevel NVVE_GPUOffloadLevel; +typedef enum NVVE_ASPECT_RATIO_TYPE NVVE_ASPECT_RATIO_TYPE; +typedef enum NVVE_SurfaceFormat NVVE_SurfaceFormat; +typedef enum NVVE_PicStruct NVVE_PicStruct; +typedef enum NVVE_FIELD_MODE NVVE_FIELD_MODE; +typedef enum NVVE_RateCtrlType NVVE_RateCtrlType; +typedef enum NVVE_DI_MODE NVVE_DI_MODE; +typedef enum NVVE_PRESETS_TARGET NVVE_PRESETS_TARGET; +typedef enum NVVE_DI_MODE NVVE_DI_MODE; + +typedef struct NVEncoderParams NVEncoderParams; +#endif /* __cplusplus */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsk_mutex.h" + +TDAV_BEGIN_DECLS + +typedef struct tdav_codec_h264_cuda_s +{ + TDAV_DECLARE_CODEC_H264_COMMON; + + struct { + NVEncoder context; + NVEncoderParams ctx_params; + 
NVVE_CallbackParams clb_params; + void *buffer; + tsk_size_t buffer_size; + int64_t frame_count; + } encoder; + + struct { + tsk_mutex_handle_t *mutex; + CUvideodecoder context; + CUVIDDECODECREATEINFO info; + CUvideoparser cu_parser; + CUVIDPARSERPARAMS cu_paser_params; + CUdevice cu_device; + IDirect3D9 *dx_d3d; + IDirect3DDevice9 *dx_d3ddevice; + CUcontext cu_context; + void* accumulator; + tsk_size_t accumulator_pos; + tsk_size_t accumulator_size; + void *cu_buffer; + tsk_size_t cu_buffer_size; + tsk_size_t cu_buffer_pitch; + tsk_bool_t cu_buffer_avail; + uint16_t last_seq; + } decoder; +} +tdav_codec_h264_cuda_t; + +TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp10_plugin_def_t; +TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp20_plugin_def_t; +TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp30_plugin_def_t; + +tsk_bool_t tdav_codec_h264_cuda_is_supported(); +static inline tsk_bool_t tdav_codec_h264_is_cuda_plugin(const tmedia_codec_plugin_def_t *plugin) +{ + if(plugin && (plugin == tdav_codec_h264_cuda_bp10_plugin_def_t || plugin == tdav_codec_h264_cuda_bp20_plugin_def_t || plugin == tdav_codec_h264_cuda_bp30_plugin_def_t)){ + return tsk_true; + } + return tsk_false; +} + +TDAV_END_DECLS + +#endif /* HAVE_CUDA */ + +#endif /* TINYDAV_CODEC_H264_CUDA_H */ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_rtp.h b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_rtp.h index c79bc6d5..9257a891 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_rtp.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/h264/tdav_codec_h264_rtp.h @@ -32,8 +32,6 @@ #include "tinydav_config.h" -#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264) - #include "tsk_common.h" TDAV_BEGIN_DECLS @@ -46,7 +44,7 @@ TDAV_BEGIN_DECLS #define H264_START_CODE_PREFIX_SIZE 4 -struct tdav_codec_h264_s; +struct 
tdav_codec_h264_common_s; extern uint8_t H264_START_CODE_PREFIX[4]; @@ -114,10 +112,8 @@ nal_unit_type_t; int tdav_codec_h264_parse_profile(const char* profile_level_id, profile_idc_t *p_idc, profile_iop_t *p_iop, level_idc_t *l_idc); int tdav_codec_h264_get_pay(const void* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size, tsk_bool_t* append_scp); -void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *data, tsk_size_t size, tsk_bool_t marker); +void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker); TDAV_END_DECLS -#endif /* HAVE_FFMPEG */ - #endif /* TINYDAV_CODEC_H264_RTP_H */ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/vpx/tdav_codec_vp8.h b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/vpx/tdav_codec_vp8.h index 7fdb6fa5..95b2dda0 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/vpx/tdav_codec_vp8.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/codecs/vpx/tdav_codec_vp8.h @@ -74,6 +74,7 @@ typedef struct tdav_codec_vp8_s unsigned last_PartID:4; unsigned last_S:1; unsigned last_N:1; + unsigned frame_corrupted; } decoder; } tdav_codec_vp8_t; diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/tdav.h b/branches/2.0/doubango/tinyDAV/include/tinydav/tdav.h index b5e81f6f..a0085475 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/tdav.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/tdav.h @@ -65,9 +65,10 @@ typedef enum tdav_codec_id_e tdav_codec_id_h264_bp10 = 0x00010000<<4, tdav_codec_id_h264_bp20 = 0x00010000<<5, tdav_codec_id_h264_bp30 = 0x00010000<<6, - tdav_codec_id_theora = 0x00010000<<7, - tdav_codec_id_mp4ves_es = 0x00010000<<8, - tdav_codec_id_vp8 = 0x00010000<<9, + tdav_codec_id_h264_svc = 0x00010000<<7, + tdav_codec_id_theora = 0x00010000<<8, + tdav_codec_id_mp4ves_es = 0x00010000<<9, + tdav_codec_id_vp8 = 0x00010000<<10, } tdav_codec_id_t; diff --git 
a/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_consumer_video.h b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_consumer_video.h new file mode 100644 index 00000000..ff3dc1ef --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_consumer_video.h @@ -0,0 +1,71 @@ +/* +* Copyright (C) 2011 Doubango Telecom +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_consumer_video.h + * @brief Base class for all Video consumers. 
+ * + * @author Mamadou Diop + */ +#ifndef TINYDAV_CONSUMER_VIDEO_H +#define TINYDAV_CONSUMER_VIDEO_H + +#include "tinydav_config.h" + +#include "tinymedia/tmedia_consumer.h" + +#include "tsk_safeobj.h" + +TDAV_BEGIN_DECLS + +#define TDAV_CONSUMER_VIDEO(self) ((tdav_consumer_video_t*)(self)) + +typedef struct tdav_consumer_video_s +{ + TMEDIA_DECLARE_CONSUMER; + + struct tmedia_jitterbuffer_s* jitterbuffer; + + TSK_DECLARE_SAFEOBJ; +} +tdav_consumer_video_t; + +TINYDAV_API int tdav_consumer_video_init(tdav_consumer_video_t* self); +TINYDAV_API int tdav_consumer_video_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2); +#define tdav_consumer_video_prepare(self, codec) tmedia_consumer_prepare(TDAV_CONSUMER_VIDEO(self), codec) +#define tdav_consumer_video_start(self) tmedia_consumer_start(TDAV_CONSUMER_VIDEO(self)) +#define tdav_consumer_video_consume(self, buffer, size) tmedia_consumer_consume(TDAV_CONSUMER_VIDEO(self), buffer, size) +#define tdav_consumer_video_pause(self) tmedia_consumer_pause(TDAV_CONSUMER_VIDEO(self)) +#define tdav_consumer_video_stop(self) tmedia_consumer_stop(TDAV_CONSUMER_VIDEO(self)) +#define tdav_consumer_video_has_jb(self) ((self) && (self)->jitterbuffer) +TINYDAV_API int tdav_consumer_video_set(tdav_consumer_video_t* self, const tmedia_param_t* param); +TINYDAV_API int tdav_consumer_video_put(tdav_consumer_video_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr); +TINYDAV_API tsk_size_t tdav_consumer_video_get(tdav_consumer_video_t* self, void* out_data, tsk_size_t out_size); +TINYDAV_API int tdav_consumer_video_tick(tdav_consumer_video_t* self); +TINYDAV_API int tdav_consumer_video_reset(tdav_consumer_video_t* self); +TINYDAV_API int tdav_consumer_video_deinit(tdav_consumer_video_t* self); + +#define TDAV_DECLARE_CONSUMER_VIDEO tdav_consumer_video_t __consumer_video__ + +TDAV_END_DECLS + + +#endif /* TINYDAV_CONSUMER_VIDEO_H */ diff --git 
a/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_converter_video.h b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_converter_video.h index 7d844162..a1912d32 100644 --- a/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_converter_video.h +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_converter_video.h @@ -50,7 +50,8 @@ typedef struct tdav_converter_video_s #if HAVE_FFMPEG || HAVE_SWSSCALE struct SwsContext *context; - enum PixelFormat pixfmt; + enum PixelFormat srcFormat; + enum PixelFormat dstFormat; AVFrame* srcFrame; AVFrame* dstFrame; @@ -69,18 +70,19 @@ typedef struct tdav_converter_video_s tsk_size_t dstWidth; tsk_size_t dstHeight; - tsk_bool_t toYUV420; - + // one shot parameters int rotation; + tsk_bool_t flip; } tdav_converter_video_t; -tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t chroma, tsk_bool_t toYUV420); +tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma); tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void* buffer, void** output, tsk_size_t* output_max_size); -#define tdav_converter_video_init(self, _rotation/*...To be completed with other parameters*/) \ +#define tdav_converter_video_init(self, _rotation, _flip/*...To be completed with other parameters*/) \ if((self)){ \ (self)->rotation = (_rotation); \ + (self)->flip = (_flip); \ } #define tdav_converter_video_flip(frame, height) \ diff --git a/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_video_jitterbuffer.h b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_video_jitterbuffer.h new file mode 100644 index 00000000..af722b67 --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/include/tinydav/video/tdav_video_jitterbuffer.h @@ -0,0 +1,77 @@ +/* +* Copyright (C) 
2011 Doubango Telecom +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_video_jitterbuffer.h + * @brief Video Jitter Buffer + * + * @author Mamadou Diop + */ +#ifndef TINYDAV_VIDEO_JITTERBUFFER_H +#define TINYDAV_VIDEO_JITTERBUFFER_H + +#include "tinydav_config.h" + +#include "tinymedia/tmedia_jitterbuffer.h" + +#include "tsk_buffer.h" +#include "tsk_timer.h" +#include "tsk_list.h" +#include "tsk_safeobj.h" + +TDAV_BEGIN_DECLS + +/** Video JitterBuffer packet */ +typedef struct tdav_video_jitterbuffer_packet_s +{ + TSK_DECLARE_OBJECT; + TSK_DECLARE_SAFEOBJ; + + tsk_bool_t taken; + tsk_buffer_t *data; + int64_t seq_num; +} +tdav_video_jitterbuffer_packet_t; +TINYDAV_GEXTERN const tsk_object_def_t *tdav_video_jitterbuffer_packet_def_t; +typedef tsk_list_t tdav_video_jitterbuffer_packets_L_t; + +/** Video JitterBuffer */ +typedef struct tdav_video_jitterbuffer_s +{ + TMEDIA_DECLARE_JITTER_BUFFER; + TSK_DECLARE_SAFEOBJ; + + uint32_t fps; + uint32_t frame_duration; + uint32_t frame_max_count; + uint32_t frame_curr_index; + uint32_t tail; // in milliseconds + + tdav_video_jitterbuffer_packets_L_t * packets; + tsk_timer_manager_handle_t *timer; +} +tdav_video_jitterbuffer_t; + +const tmedia_jitterbuffer_plugin_def_t *tdav_video_jitterbuffer_plugin_def_t; + +TDAV_END_DECLS + +#endif /* TINYDAV_VIDEO_JITTERBUFFER_H */ diff --git 
a/branches/2.0/doubango/tinyDAV/src/audio/tdav_consumer_audio.c b/branches/2.0/doubango/tinyDAV/src/audio/tdav_consumer_audio.c index dab1bd08..6d8c4fdd 100644 --- a/branches/2.0/doubango/tinyDAV/src/audio/tdav_consumer_audio.c +++ b/branches/2.0/doubango/tinyDAV/src/audio/tdav_consumer_audio.c @@ -74,7 +74,7 @@ int tdav_consumer_audio_init(tdav_consumer_audio_t* self) TMEDIA_CONSUMER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_consumer_gain(), TDAV_AUDIO_GAIN_MAX); /* self:jitterbuffer */ - if(!(self->jitterbuffer = tmedia_jitterbuffer_create())){ + if(!(self->jitterbuffer = tmedia_jitterbuffer_create(tmedia_audio))){ TSK_DEBUG_ERROR("Failed to create jitter buffer"); return -2; } diff --git a/branches/2.0/doubango/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c b/branches/2.0/doubango/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c index 38d37569..7167e614 100644 --- a/branches/2.0/doubango/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c +++ b/branches/2.0/doubango/tinyDAV/src/audio/tdav_speakup_jitterbuffer.c @@ -47,7 +47,13 @@ #define TDAV_SPEAKUP_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_SPEAKUP_10MS)/1000) #define TDAV_SPEAKUP_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->framesize)/1000) -int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate) +static int tdav_speakup_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param) +{ + TSK_DEBUG_ERROR("Not implemented"); + return -2; +} + +static int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate) { tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self; if(!jitterbuffer->jbuffer){ @@ -65,12 +71,12 @@ int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_d return 0; } -int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self) +static int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self) { return 0; } -int 
tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr) +static int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr) { tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self; const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr; @@ -135,7 +141,7 @@ int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_s return 0; } -tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size) +static tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size) { tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self; int jret; @@ -186,7 +192,7 @@ tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_ return (_10ms_count * jitterbuffer->_10ms_size_bytes); } -int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self) +static int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self) { tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self; if(jitterbuffer->jbuffer){ @@ -199,7 +205,7 @@ int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self) } } -int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self) +static int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self) { tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self; if(jitterbuffer->jbuffer){ @@ -254,9 +260,10 @@ static const tsk_object_def_t tdav_speakup_jitterbuffer_def_s = static const tmedia_jitterbuffer_plugin_def_t tdav_speakup_jitterbuffer_plugin_def_s = { &tdav_speakup_jitterbuffer_def_s, - + tmedia_audio, "Audio/video JitterBuffer based on Speex", + tdav_speakup_jitterbuffer_set, tdav_speakup_jitterbuffer_open, tdav_speakup_jitterbuffer_tick, tdav_speakup_jitterbuffer_put, diff --git 
a/branches/2.0/doubango/tinyDAV/src/audio/tdav_speex_jitterbuffer.c b/branches/2.0/doubango/tinyDAV/src/audio/tdav_speex_jitterbuffer.c index 80ed8272..d89af8e8 100644 --- a/branches/2.0/doubango/tinyDAV/src/audio/tdav_speex_jitterbuffer.c +++ b/branches/2.0/doubango/tinyDAV/src/audio/tdav_speex_jitterbuffer.c @@ -34,6 +34,11 @@ #include "tsk_memory.h" #include "tsk_debug.h" +static int tdav_speex_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param) +{ + TSK_DEBUG_ERROR("Not implemented"); + return -2; +} static int tdav_speex_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate) { @@ -184,9 +189,10 @@ static const tsk_object_def_t tdav_speex_jitterbuffer_def_s = static const tmedia_jitterbuffer_plugin_def_t tdav_speex_jitterbuffer_plugin_def_s = { &tdav_speex_jitterbuffer_def_s, + tmedia_audio, + "Audio JitterBuffer based on Speex", - "Audio/video JitterBuffer based on Speex", - + tdav_speex_jitterbuffer_set, tdav_speex_jitterbuffer_open, tdav_speex_jitterbuffer_tick, tdav_speex_jitterbuffer_put, diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/h261/tdav_codec_h261.c b/branches/2.0/doubango/tinyDAV/src/codecs/h261/tdav_codec_h261.c index 639cffdf..8cea9159 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/h261/tdav_codec_h261.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/h261/tdav_codec_h261.c @@ -223,11 +223,6 @@ tsk_size_t tdav_codec_h261_encode(tmedia_codec_t* self, const void* in_data, tsk TSK_DEBUG_ERROR("Invalid size"); return 0; } - - // Flip - if(self->video.flip.encoded){ - tdav_converter_video_flip(h261->encoder.picture, h261->encoder.context->height); - } // Encode data h261->encoder.picture->pts = AV_NOPTS_VALUE; @@ -331,9 +326,6 @@ tsk_size_t tdav_codec_h261_decode(tmedia_codec_t* self, const void* in_data, tsk retsize = xsize; TMEDIA_CODEC_VIDEO(h261)->in.width = h261->decoder.context->width; TMEDIA_CODEC_VIDEO(h261)->in.height = h261->decoder.context->height; - 
if(self->video.flip.decoded){ - tdav_converter_video_flip(h261->decoder.picture, h261->decoder.context->height); - } /* copy picture into a linear buffer */ avpicture_layout((AVPicture *)h261->decoder.picture, h261->decoder.context->pix_fmt, h261->decoder.context->width, h261->decoder.context->height, *out_data, retsize); diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/h263/tdav_codec_h263.c b/branches/2.0/doubango/tinyDAV/src/codecs/h263/tdav_codec_h263.c index 1e5479e4..0e05fefe 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/h263/tdav_codec_h263.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/h263/tdav_codec_h263.c @@ -288,11 +288,6 @@ static tsk_size_t tdav_codec_h263_encode(tmedia_codec_t* self, const void* in_da TSK_DEBUG_ERROR("Invalid size"); return 0; } - - // Flip - if(self->video.flip.encoded){ - tdav_converter_video_flip(h263->encoder.picture, h263->encoder.context->height); - } h263->encoder.picture->pts = AV_NOPTS_VALUE; h263->encoder.picture->quality = h263->encoder.context->global_quality; @@ -442,10 +437,6 @@ static tsk_size_t tdav_codec_h263_decode(tmedia_codec_t* self, const void* in_da retsize = xsize; TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width; TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height; - // flip - if(self->video.flip.decoded){ - tdav_converter_video_flip(h263->decoder.picture, h263->decoder.context->height); - } /* copy picture into a linear buffer */ avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height, *out_data, retsize); @@ -755,10 +746,6 @@ static tsk_size_t tdav_codec_h263p_decode(tmedia_codec_t* self, const void* in_d retsize = xsize; TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width; TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height; - // flip - if(self->video.flip.decoded){ - tdav_converter_video_flip(h263->decoder.picture, 
h263->decoder.context->height); - } /* copy picture into a linear buffer */ avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height, *out_data, retsize); diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264.c b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264.c index 8a838b90..5fe765af 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264.c @@ -21,7 +21,7 @@ */ /**@file tdav_codec_h264.c - * @brief H.264 codec plugin + * @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding * RTP payloader/depayloader follows RFC 3984 * * @author Mamadou Diop @@ -41,13 +41,8 @@ #include "tsk_memory.h" #include "tsk_debug.h" -#define H264_PACKETIZATION_MODE Non_Interleaved_Mode -#define H264_MAX_BR 452 -#define H264_MAX_MBPS 11880 - -int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t profile); -int tdav_codec_h264_deinit(tdav_codec_h264_t* self); -tdav_codec_h264_profile_t tdav_codec_h264_get_profile(const char* fmtp); +static int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t profile); +static int tdav_codec_h264_deinit(tdav_codec_h264_t* self); static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t* pdata, tsk_size_t size); @@ -55,7 +50,7 @@ static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t* #define tdav_codec_h264_fmtp_set tsk_null /* FIXME: should be removed from all plugins (useless) */ -int tdav_codec_h264_open(tmedia_codec_t* self) +static int tdav_codec_h264_open(tmedia_codec_t* self) { int ret; int size; @@ -109,7 +104,7 @@ int tdav_codec_h264_open(tmedia_codec_t* self) h264->encoder.context->b_frame_strategy = 1; h264->encoder.context->chromaoffset = 0; - switch(h264->profile){ + switch(TDAV_CODEC_H264_COMMON(h264)->profile){ case 
tdav_codec_h264_bp10: default: h264->encoder.context->profile = FF_PROFILE_H264_BASELINE; @@ -174,12 +169,6 @@ int tdav_codec_h264_open(tmedia_codec_t* self) } avcodec_get_frame_defaults(h264->decoder.picture); - size = avpicture_get_size(PIX_FMT_YUV420P, h264->decoder.context->width, h264->decoder.context->height); - if(!(h264->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){ - TSK_DEBUG_ERROR("Failed to allocate decoder buffer"); - return -2; - } - // Open decoder if((ret = avcodec_open(h264->decoder.context, h264->decoder.codec)) < 0){ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(h264)->plugin->desc); @@ -189,7 +178,7 @@ int tdav_codec_h264_open(tmedia_codec_t* self) return 0; } -int tdav_codec_h264_close(tmedia_codec_t* self) +static int tdav_codec_h264_close(tmedia_codec_t* self) { tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self; @@ -224,15 +213,13 @@ int tdav_codec_h264_close(tmedia_codec_t* self) if(h264->decoder.picture){ av_free(h264->decoder.picture); } - if(h264->decoder.accumulator){ - TSK_FREE(h264->decoder.accumulator); - h264->decoder.accumulator_pos = 0; - } + TSK_FREE(h264->decoder.accumulator); + h264->decoder.accumulator_pos = 0; return 0; } -tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size) +static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size) { int ret = 0; int size; @@ -256,11 +243,6 @@ tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk TSK_DEBUG_ERROR("Invalid size"); return 0; } - - // Flip - if(self->video.flip.encoded){ - tdav_converter_video_flip(h264->encoder.picture, h264->encoder.context->height); - } // send keyframe for: // - the first frame @@ -291,7 +273,7 @@ tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk return 0; } -tsk_size_t 
tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr) +static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr) { tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self; const trtp_rtp_header_t* rtp_hdr = proto_hdr; @@ -300,14 +282,16 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk tsk_size_t pay_size = 0; int ret; tsk_bool_t append_scp; - tsk_size_t xsize, retsize = 0; + tsk_size_t retsize = 0, size_to_copy = 0; + static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3; + static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX); int got_picture_ptr; if(!h264 || !in_data || !in_size || !out_data || !h264->decoder.context){ TSK_DEBUG_ERROR("Invalid parameter"); return 0; } - + //TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num); /* Packet lost? */ @@ -341,22 +325,43 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk TSK_DEBUG_ERROR("Depayloader failed to get H.264 content"); return 0; } - xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height); + //append_scp = tsk_true; + size_to_copy = pay_size + (append_scp ? 
start_code_prefix_size : 0); - if((int)(h264->decoder.accumulator_pos + pay_size) <= xsize){ - if(append_scp){ - memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, sizeof(H264_START_CODE_PREFIX)); - h264->decoder.accumulator_pos += sizeof(H264_START_CODE_PREFIX); - } - - memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size); - h264->decoder.accumulator_pos += pay_size; - } - else{ - TSK_DEBUG_WARN("Buffer overflow"); - h264->decoder.accumulator_pos = 0; - return 0; - } + // start-accumulator + if(!h264->decoder.accumulator){ + if(size_to_copy > xmax_size){ + TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size); + return 0; + } + if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){ + TSK_DEBUG_ERROR("Failed to allocated new buffer"); + return 0; + } + h264->decoder.accumulator_size = size_to_copy; + } + if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){ + TSK_DEBUG_ERROR("BufferOverflow"); + h264->decoder.accumulator_pos = 0; + return 0; + } + if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){ + if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){ + TSK_DEBUG_ERROR("Failed to reallocated new buffer"); + h264->decoder.accumulator_pos = 0; + h264->decoder.accumulator_size = 0; + return 0; + } + h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy); + } + + if(append_scp){ + memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size); + h264->decoder.accumulator_pos += start_code_prefix_size; + } + memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size); + h264->decoder.accumulator_pos += pay_size; + // end-accumulator if(rtp_hdr->marker){ AVPacket packet; @@ 
-372,10 +377,10 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk TSK_DEBUG_ERROR("=============Failed to decode the buffer"); } else if(got_picture_ptr){ - if(self->video.flip.decoded){ - tdav_converter_video_flip(h264->decoder.picture, h264->decoder.context->height); - } + tsk_size_t xsize; + /* fill out */ + xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height); if(*out_max_sizeprofile)){ + if(((profile = tdav_codec_h264_common_get_profile(fmtp)) != tdav_codec_h264_bp99) && (profile != TDAV_CODEC_H264_COMMON(h264)->profile)){ TSK_DEBUG_INFO("Profile not matching"); return tsk_false; } @@ -437,7 +442,7 @@ tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* f /* === packetization-mode ===*/ if((val_int = tsk_params_get_param_value_as_int(params, "packetization-mode")) != -1){ if((packetization_mode_t)val_int == Single_NAL_Unit_Mode || (packetization_mode_t)val_int == Non_Interleaved_Mode){ - h264->pack_mode = (packetization_mode_t)val_int; + TDAV_CODEC_H264_COMMON(h264)->pack_mode = (packetization_mode_t)val_int; } else{ TSK_DEBUG_INFO("packetization-mode not matching"); @@ -475,10 +480,10 @@ tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* f // sx >>= 1; // sy >>= 1; //} - TMEDIA_CODEC_VIDEO(h264)->out.width = sx&(~1), TMEDIA_CODEC_VIDEO(h264)->in.height = sy&(~1); + TMEDIA_CODEC_VIDEO(h264)->out.width = sx&(~1), TMEDIA_CODEC_VIDEO(h264)->out.height = sy&(~1); } else{ - TMEDIA_CODEC_VIDEO(h264)->out.width = std_w, TMEDIA_CODEC_VIDEO(h264)->in.height = std_h; + TMEDIA_CODEC_VIDEO(h264)->out.width = std_w, TMEDIA_CODEC_VIDEO(h264)->out.height = std_h; } } } @@ -488,7 +493,7 @@ bail: return ret; } -char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self) +static char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self) { tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self; char* fmtp = tsk_null; @@ -498,7 
+503,7 @@ char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self) return tsk_null; } - switch(h264->profile){ + switch(TDAV_CODEC_H264_COMMON(h264)->profile){ case tdav_codec_h264_bp10: fmtp = tsk_strdup("profile-level-id=42e00a"); break; @@ -512,7 +517,7 @@ char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self) if(fmtp){ tsk_strcat_2(&fmtp, "; packetization-mode=%d; max-br=%d; max-mbps=%d", - h264->pack_mode, TMEDIA_CODEC_VIDEO(h264)->in.max_br/1000, TMEDIA_CODEC_VIDEO(h264)->in.max_mbps/1000); + TDAV_CODEC_H264_COMMON(h264)->pack_mode, TMEDIA_CODEC_VIDEO(h264)->in.max_br/1000, TMEDIA_CODEC_VIDEO(h264)->in.max_mbps/1000); } return fmtp; @@ -540,7 +545,7 @@ static tsk_object_t* tdav_codec_h264_bp10_dtor(tsk_object_t * self) tdav_codec_h264_t *h264 = self; if(h264){ /* deinit base */ - tmedia_codec_video_deinit(self); + tdav_codec_h264_common_deinit(self); /* deinit self */ tdav_codec_h264_deinit(h264); @@ -604,7 +609,7 @@ static tsk_object_t* tdav_codec_h264_bp20_dtor(tsk_object_t * self) tdav_codec_h264_t *h264 = self; if(h264){ /* deinit base */ - tmedia_codec_video_deinit(self); + tdav_codec_h264_common_deinit(self); /* deinit self */ tdav_codec_h264_deinit(h264); @@ -668,7 +673,7 @@ static tsk_object_t* tdav_codec_h264_bp30_dtor(tsk_object_t * self) tdav_codec_h264_t *h264 = self; if(h264){ /* deinit base */ - tmedia_codec_video_deinit(self); + tdav_codec_h264_common_deinit(self); /* deinit self */ tdav_codec_h264_deinit(h264); @@ -757,9 +762,14 @@ int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t prof TSK_DEBUG_ERROR("Invalid parameter"); return -1; } + + if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){ + TSK_DEBUG_ERROR("tdav_codec_h264_common_init() faile with error code=%d", ret); + return ret; + } - self->pack_mode = H264_PACKETIZATION_MODE; - self->profile = profile; + TDAV_CODEC_H264_COMMON(self)->pack_mode = H264_PACKETIZATION_MODE; + TDAV_CODEC_H264_COMMON(self)->profile = profile; 
TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000; TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000; @@ -793,102 +803,34 @@ int tdav_codec_h264_deinit(tdav_codec_h264_t* self) self->decoder.codec = tsk_null; // FFMpeg resources are destroyed by close() - - - TSK_FREE(self->rtp.ptr); - self->rtp.size = 0; return 0; } -tdav_codec_h264_profile_t tdav_codec_h264_get_profile(const char* fmtp) -{ - tdav_codec_h264_profile_t profile = tdav_codec_h264_bp99; - tsk_size_t size = tsk_strlen(fmtp); - int start, end; - - if((start = tsk_strindexOf(fmtp, size, "profile-level-id")) !=-1){ - tsk_param_t* param; - if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){ - end = size; - } - - if((param = tsk_params_parse_param((fmtp+start), (end-start)))){ - profile_idc_t p_idc; - level_idc_t l_idc; - if(param->value){ - tsk_strtrim_both(¶m->value); - } - - tdav_codec_h264_parse_profile(param->value, &p_idc, tsk_null, &l_idc); - - switch(p_idc){ - case profile_idc_baseline: - switch(l_idc){ - case level_idc_1_0: - case level_idc_1_b: - case level_idc_1_1: - case level_idc_1_2: - case level_idc_1_3: - profile = tdav_codec_h264_bp10; - break; - case level_idc_2_0: - case level_idc_2_1: - case level_idc_2_2: - profile = tdav_codec_h264_bp20; - break; - case level_idc_3_0: - profile = tdav_codec_h264_bp30; - break; - } - break; - case profile_idc_extended: - case profile_idc_main: - case profile_idc_high: - default: - /* Not supported */ - break; - } - - TSK_OBJECT_SAFE_FREE(param); - } - } - return profile; -} - static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t* pdata, tsk_size_t size) { - register uint32_t i; - uint32_t last_scp, prev_scp; - static uint32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 ..but */ + register int32_t i; + int32_t last_scp, prev_scp; + static int32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* 
we know it's equal to 4 ..but */ if(!pdata || !size){ return; } last_scp = 0, prev_scp = 0; -/* -#if 1 - if(size < H264_RTP_PAYLOAD_SIZE){ - goto last; - } -#else - goto last; -#endif -*/ - for(i = size_of_scp; i<(size - size_of_scp); i++){ + for(i = size_of_scp; i<(int32_t)(size - size_of_scp); i++){ if(pdata[i] == H264_START_CODE_PREFIX[0] && pdata[i+1] == H264_START_CODE_PREFIX[1] && pdata[i+2] == H264_START_CODE_PREFIX[2] && pdata[i+3] == H264_START_CODE_PREFIX[3]){ /* Found Start Code Prefix */ prev_scp = last_scp; if((i - last_scp) >= H264_RTP_PAYLOAD_SIZE || 1){ - tdav_codec_h264_rtp_callback((tdav_codec_h264_t*) h264, pdata + prev_scp, + tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + prev_scp, (i - prev_scp), (prev_scp == size)); } last_scp = i; } } -//last: - if(last_scp < size){ - tdav_codec_h264_rtp_callback((tdav_codec_h264_t*) h264, pdata + last_scp, + + if(last_scp < (int32_t)size){ + tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + last_scp, (size - last_scp), tsk_true); } } diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx new file mode 100644 index 00000000..f35db610 --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_cuda.cxx @@ -0,0 +1,1129 @@ +/* +* Copyright (C) 2011 Doubango Telecom . +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_codec_h264_cuda.c + * @brief H.264 codec plugin using NVIDIA CUDA for encoding/decoding + * Env: gpucomputingsdk_4.0.17_win_32, cudatoolkit_4.0.17_win_32 and 280.26-notebook-win7-winvista-32bit-international-whql. + * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoDecoder_Library.pdf + * http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoEncoder_Library.pdf + * + * RTP payloader/depayloader follows RFC 3984. + * + * @author Mamadou Diop + * + */ +#include "tinydav/codecs/h264/tdav_codec_h264_cuda.h" + +#if HAVE_CUDA + +#include "tinyrtp/rtp/trtp_rtp_packet.h" + +#if defined(_MSC_VER) +# pragma comment(lib, "nvcuvenc.lib") +# pragma comment(lib, "nvcuvid.lib") +# pragma comment(lib, "cuda.lib") + +# pragma comment(lib, "d3d9.lib") +# pragma comment(lib, "d3dx9.lib") +#endif + +#include "tsk_memory.h" +#include "tsk_debug.h" + +#include +#include + +#define tdav_codec_h264_cuda_fmtp_set tsk_null + +#if !defined(CUDA_MAX_FRM_CNT) +# define CUDA_MAX_FRM_CNT 10 +#endif + +#include "tsk_semaphore.h" +tsk_semaphore_handle_t *sem = tsk_null; + +#define InitH264DecoderInfo(_self) \ + memset(&_self->decoder.info, 0, sizeof(_self->decoder.info)); \ + _self->decoder.info.ulCreationFlags = cudaVideoCreate_PreferCUDA; \ + _self->decoder.info.CodecType = cudaVideoCodec_H264; \ + _self->decoder.info.ulWidth = TMEDIA_CODEC_VIDEO(_self)->in.width; \ + _self->decoder.info.ulTargetWidth = TMEDIA_CODEC_VIDEO(_self)->in.width; \ + _self->decoder.info.ulHeight = TMEDIA_CODEC_VIDEO(_self)->in.height; \ + _self->decoder.info.ulTargetHeight = TMEDIA_CODEC_VIDEO(_self)->in.height; \ + _self->decoder.info.ulNumDecodeSurfaces = CUDA_MAX_FRM_CNT; \ + _self->decoder.info.ulNumOutputSurfaces = 1; \ + _self->decoder.info.ChromaFormat = cudaVideoChromaFormat_420; \ + _self->decoder.info.OutputFormat = 
cudaVideoSurfaceFormat_NV12; \ + _self->decoder.info.DeinterlaceMode = cudaVideoDeinterlaceMode_Adaptive; + +static int CUDAAPI _NVCallback_HandleVideoSequence(void *pvUserData, CUVIDEOFORMAT *pFormat); +static int CUDAAPI _NVCallback_HandlePictureDecode(void *pvUserData, CUVIDPICPARAMS *pPicParams); +static int CUDAAPI _NVCallback_HandlePictureDisplay(void *pvUserData, CUVIDPARSERDISPINFO *pPicParams); +static unsigned char* CUDAAPI _NVCallback_HandleAcquireBitStream(int *pBufferSize, void *pUserdata); +static void CUDAAPI _NVCallback_HandleReleaseBitStream(int nBytesInBuffer, unsigned char *cb,void *pUserdata); +static void CUDAAPI _NVCallback_HandleOnBeginFrame(const NVVE_BeginFrameInfo *pbfi, void *pUserdata); +static void CUDAAPI _NVCallback_HandleOnEndFrame(const NVVE_EndFrameInfo *pefi, void *pUserdata); + +static inline void _tdav_codec_h264_cuda_encap(const tdav_codec_h264_cuda_t* h264, const uint8_t* pdata, tsk_size_t size); +static inline tsk_size_t _tdav_codec_h264_cuda_pict_layout(tdav_codec_h264_cuda_t* self, void**output, tsk_size_t *output_size); + +static int tdav_codec_h264_cuda_open(tmedia_codec_t* self) +{ + int ret = 0, i; + int bestGPU = 0, gpuPerf = 0, adapterCount; + static int low_latency = 1; + HRESULT hr; + CUresult cuResult; + D3DPRESENT_PARAMETERS d3dpp; + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + + if(!h264){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + // + // encoder + // + memset(&h264->encoder.clb_params, 0, sizeof(h264->encoder.clb_params)); + memset(&h264->encoder.ctx_params, 0, sizeof(h264->encoder.ctx_params)); + + h264->encoder.ctx_params.iInputSize[0] = TMEDIA_CODEC_VIDEO(h264)->out.width; + h264->encoder.ctx_params.iInputSize[1] = TMEDIA_CODEC_VIDEO(h264)->out.height; + h264->encoder.ctx_params.iOutputSize[0] = TMEDIA_CODEC_VIDEO(h264)->out.width; + h264->encoder.ctx_params.iOutputSize[1] = TMEDIA_CODEC_VIDEO(h264)->out.height; + h264->encoder.ctx_params.GPUOffloadLevel= 
NVVE_GPU_OFFLOAD_DEFAULT; + h264->encoder.ctx_params.iSurfaceFormat = (int)IYUV; + h264->encoder.ctx_params.iPictureType = (int)FRAME_PICTURE; + h264->encoder.ctx_params.Fieldmode = MODE_FRAME; + h264->encoder.ctx_params.Presets = (NVVE_PRESETS_TARGET)-1;//Should be iPod, Zune ... + h264->encoder.ctx_params.iP_Interval = 1; + h264->encoder.ctx_params.iAspectRatio[0] = 4; + h264->encoder.ctx_params.iAspectRatio[1] = 3; + h264->encoder.ctx_params.iAspectRatio[2] = 0; + h264->encoder.ctx_params.iIDR_Period = TMEDIA_CODEC_VIDEO(h264)->out.fps * 3; + h264->encoder.ctx_params.iUseDeviceMem = 0; + h264->encoder.ctx_params.iDynamicGOP = 0; + h264->encoder.ctx_params.RCType = RC_VBR; + h264->encoder.ctx_params.iAvgBitrate = 400000; + h264->encoder.ctx_params.iPeakBitrate = 800000; + h264->encoder.ctx_params.iQP_Level_Intra = 25; + h264->encoder.ctx_params.iQP_Level_InterP = 28; + h264->encoder.ctx_params.iQP_Level_InterB = 31; + h264->encoder.ctx_params.iFrameRate[0] = TMEDIA_CODEC_VIDEO(h264)->out.fps * 1000; + h264->encoder.ctx_params.iFrameRate[1] = 1000; + h264->encoder.ctx_params.iDeblockMode = 1; + h264->encoder.ctx_params.iForceIntra = 0; + h264->encoder.ctx_params.iForceIDR = 0; + h264->encoder.ctx_params.iClearStat = 0; + h264->encoder.ctx_params.DIMode = DI_MEDIAN; + h264->encoder.ctx_params.iDisableSPSPPS = 1; // Do not include SPS/PPS frames + h264->encoder.ctx_params.iNaluFramingType = 0; // StartCodes + h264->encoder.ctx_params.iMultiGPU = 1; + switch(TDAV_CODEC_H264_COMMON(h264)->profile){ + case tdav_codec_h264_bp10: + h264->encoder.ctx_params.iDisableCabac = 1; + h264->encoder.ctx_params.iProfileLevel = 0xff42; + break; + case tdav_codec_h264_bp20: + h264->encoder.ctx_params.iDisableCabac = 1; + h264->encoder.ctx_params.iProfileLevel = 0xff42; + break; + case tdav_codec_h264_bp30: + h264->encoder.ctx_params.iDisableCabac = 1; + h264->encoder.ctx_params.iProfileLevel = 0xff42; + break; + default: + break; + } + + hr = NVCreateEncoder(&h264->encoder.context); 
+ if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVCreateEncoder failed with error code = %08x", hr); + return -2; + } + + hr = NVSetCodec(h264->encoder.context, NV_CODEC_TYPE_H264); + if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVSetCodec failed with error code = %08x", hr); + return -2; + } + + hr = NVSetDefaultParam(h264->encoder.context); + if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVSetDefaultParam() failed with error code = %08x", hr); + return -2; + } + + hr = NVGetParamValue(h264->encoder.context, NVVE_GET_GPU_COUNT, &h264->encoder.ctx_params.GPU_count); + if(SUCCEEDED(hr)){ + int temp = 0, deviceCount; + for (deviceCount=0; deviceCount < h264->encoder.ctx_params.GPU_count; deviceCount++) { + NVVE_GPUAttributes GPUAttributes = {0}; + + GPUAttributes.iGpuOrdinal = deviceCount; + hr = NVGetParamValue(h264->encoder.context, NVVE_GET_GPU_ATTRIBUTES, &GPUAttributes); + if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVGetParamValue(NVVE_GET_GPU_ATTRIBUTES) failed with error code = %08x", hr); + continue; + } + + temp = GPUAttributes.iClockRate * GPUAttributes.iMultiProcessorCount; + temp = temp * _ConvertSMVer2Cores(GPUAttributes.iMajor, GPUAttributes.iMinor); + + if(temp > gpuPerf){ + gpuPerf = temp; + bestGPU = deviceCount; + } + } + } + else{ + TSK_DEBUG_ERROR("NVGetParamValue(NVVE_GET_GPU_COUNT) failed with error code = %08x", hr); + return -2; + } + + h264->encoder.ctx_params.iForcedGPU = bestGPU; + hr = NVSetParamValue(h264->encoder.context, NVVE_FORCE_GPU_SELECTION, &h264->encoder.ctx_params.iForcedGPU); + if(FAILED(hr)){ + TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_GPU_SELECTION) failed with error code = %08x", hr); + } + + hr = NVSetParamValue(h264->encoder.context, NVVE_DEVICE_MEMORY_INPUT, &(h264->encoder.ctx_params.iUseDeviceMem)); + if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr); + return -2; + } + + h264->encoder.buffer_size = (h264->encoder.ctx_params.iOutputSize[1] * h264->encoder.ctx_params.iOutputSize[0] * 3) >> 4; + 
if(!h264->encoder.buffer && !(h264->encoder.buffer = tsk_realloc(h264->encoder.buffer, h264->encoder.buffer_size))){ + TSK_DEBUG_ERROR("Failed to allocate buffer with size=%u", h264->encoder.buffer_size); + h264->encoder.buffer_size = 0; + return -2; + } + + hr = NVSetParamValue(h264->encoder.context,NVVE_OUT_SIZE, &(h264->encoder.ctx_params.iOutputSize)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_IN_SIZE, &(h264->encoder.ctx_params.iInputSize)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_IN_SIZE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_MULTI_GPU, &(h264->encoder.ctx_params.iMultiGPU)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_MULTI_GPU) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_ASPECT_RATIO, &(h264->encoder.ctx_params.iAspectRatio));if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_ASPECT_RATIO) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_FIELD_ENC_MODE, &(h264->encoder.ctx_params.Fieldmode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FIELD_ENC_MODE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_P_INTERVAL, &(h264->encoder.ctx_params.iP_Interval)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_P_INTERVAL) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_IDR_PERIOD, &(h264->encoder.ctx_params.iIDR_Period)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_IDR_PERIOD) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_DYNAMIC_GOP, &(h264->encoder.ctx_params.iDynamicGOP)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DYNAMIC_GOP) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_RC_TYPE, 
&(h264->encoder.ctx_params.RCType)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_RC_TYPE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_AVG_BITRATE, &(h264->encoder.ctx_params.iAvgBitrate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_AVG_BITRATE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_PEAK_BITRATE, &(h264->encoder.ctx_params.iPeakBitrate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PEAK_BITRATE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTRA, &(h264->encoder.ctx_params.iQP_Level_Intra)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_OUT_SIZE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTER_P,&(h264->encoder.ctx_params.iQP_Level_InterP)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_QP_LEVEL_INTER_P) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_QP_LEVEL_INTER_B,&(h264->encoder.ctx_params.iQP_Level_InterB)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_QP_LEVEL_INTER_B) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_FRAME_RATE, &(h264->encoder.ctx_params.iFrameRate)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FRAME_RATE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_DEBLOCK_MODE, &(h264->encoder.ctx_params.iDeblockMode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DEBLOCK_MODE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_PROFILE_LEVEL, &(h264->encoder.ctx_params.iProfileLevel)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PROFILE_LEVEL) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_FORCE_INTRA, &(h264->encoder.ctx_params.iForceIntra)); if (hr!=S_OK) { 
TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_INTRA) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_FORCE_IDR, &(h264->encoder.ctx_params.iForceIDR)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_FORCE_IDR) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_CLEAR_STAT, &(h264->encoder.ctx_params.iClearStat)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_CLEAR_STAT) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_SET_DEINTERLACE,&(h264->encoder.ctx_params.DIMode)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_SET_DEINTERLACE) failed with error code = %08x", hr); } + if (h264->encoder.ctx_params.Presets != -1) { + hr = NVSetParamValue(h264->encoder.context,NVVE_PRESETS, &(h264->encoder.ctx_params.Presets)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_PRESETS) failed with error code = %08x", hr); } + } + hr = NVSetParamValue(h264->encoder.context,NVVE_DISABLE_CABAC, &(h264->encoder.ctx_params.iDisableCabac)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DISABLE_CABAC) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_CONFIGURE_NALU_FRAMING_TYPE, &(h264->encoder.ctx_params.iNaluFramingType)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_CONFIGURE_NALU_FRAMING_TYPE) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_DISABLE_SPS_PPS,&(h264->encoder.ctx_params.iDisableSPSPPS)); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_DISABLE_SPS_PPS) failed with error code = %08x", hr); } + hr = NVSetParamValue(h264->encoder.context,NVVE_LOW_LATENCY,&low_latency); if (hr!=S_OK) { TSK_DEBUG_WARN("NVSetParamValue(NVVE_LOW_LATENCY) failed with error code = %08x", hr); } + + h264->encoder.clb_params.pfnacquirebitstream = _NVCallback_HandleAcquireBitStream; + h264->encoder.clb_params.pfnonbeginframe = _NVCallback_HandleOnBeginFrame; + 
h264->encoder.clb_params.pfnonendframe = _NVCallback_HandleOnEndFrame; + h264->encoder.clb_params.pfnreleasebitstream = _NVCallback_HandleReleaseBitStream; + NVRegisterCB(h264->encoder.context, h264->encoder.clb_params, h264); + + + hr = NVCreateHWEncoder(h264->encoder.context); + if(FAILED(hr)){ + TSK_DEBUG_ERROR("NVCreateHWEncoder failed with error code = %08x", hr); + return -2; + } + + + + // + // decoder + // + if((cuResult = cuInit(0)) != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuInit(0) failed with error code = %d", (int)cuResult); + return -3; + } + + InitH264DecoderInfo(h264); + + h264->decoder.cu_device = cutilDrvGetMaxGflopsGraphicsDeviceId(); + +#if _DEBUG || DEBUG + { + int major, minor; + size_t totalGlobalMem; + char deviceName[256]; + cuDeviceComputeCapability(&major, &minor, h264->decoder.cu_device); + cuDeviceGetName(deviceName, sizeof(deviceName), h264->decoder.cu_device); + TSK_DEBUG_INFO("Using GPU Device %d: %s has SM %d.%d compute capability", h264->decoder.cu_device, deviceName, major, minor); + + cutilDrvSafeCallNoSync(cuDeviceTotalMem(&totalGlobalMem, h264->decoder.cu_device) ); + TSK_DEBUG_INFO("Total amount of global memory in GPU device: %4.4f MB", (float)totalGlobalMem/(1024*1024)); + } +#endif + + // create Direct3D instance + h264->decoder.dx_d3d = Direct3DCreate9(D3D_SDK_VERSION); + if(!h264->decoder.dx_d3d){ + TSK_DEBUG_ERROR("Direct3DCreate9 failed"); + return -3; + } + adapterCount = h264->decoder.dx_d3d->GetAdapterCount(); + for(i=0; idecoder.info.ulTargetWidth; + d3dpp.BackBufferHeight = h264->decoder.info.ulTargetHeight; + d3dpp.BackBufferCount = 1; + d3dpp.SwapEffect = D3DSWAPEFFECT_COPY; + d3dpp.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE; + d3dpp.Flags = D3DPRESENTFLAG_VIDEO;//D3DPRESENTFLAG_LOCKABLE_BACKBUFFER; + hr = h264->decoder.dx_d3d->CreateDevice(i, + D3DDEVTYPE_HAL, + GetDesktopWindow(), + D3DCREATE_FPU_PRESERVE | D3DCREATE_MULTITHREADED | D3DCREATE_HARDWARE_VERTEXPROCESSING, + &d3dpp, + 
&h264->decoder.dx_d3ddevice); + if(hr == S_OK){ + cuResult = cuD3D9CtxCreate(&h264->decoder.cu_context, &h264->decoder.cu_device, 0, h264->decoder.dx_d3ddevice); + if(cuResult == CUDA_SUCCESS){ + break; + } + if(h264->decoder.dx_d3ddevice){ + h264->decoder.dx_d3ddevice->Release(); + h264->decoder.dx_d3ddevice = NULL; + } + } + } + + memset(&h264->decoder.cu_paser_params, 0, sizeof(h264->decoder.cu_paser_params)); + h264->decoder.cu_paser_params.CodecType = cudaVideoCodec_H264; + h264->decoder.cu_paser_params.ulMaxNumDecodeSurfaces = CUDA_MAX_FRM_CNT; + h264->decoder.cu_paser_params.pUserData = h264; + h264->decoder.cu_paser_params.pfnSequenceCallback = _NVCallback_HandleVideoSequence; + h264->decoder.cu_paser_params.pfnDecodePicture = _NVCallback_HandlePictureDecode; + h264->decoder.cu_paser_params.pfnDisplayPicture = _NVCallback_HandlePictureDisplay; + cuResult = cuvidCreateVideoParser(&h264->decoder.cu_parser, &h264->decoder.cu_paser_params); + if(cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuvidCreateVideoParser(0) failed with error code = %d", (int)cuResult); + return -3; + } + + cuResult = cuvidCreateDecoder(&h264->decoder.context, &h264->decoder.info); + if(CUDA_SUCCESS != cuResult){ + TSK_DEBUG_ERROR("cuvidCreateDecoder failed with error code=%d", (int)cuResult); + return -3; + } + + return ret; +} + +static int tdav_codec_h264_cuda_close(tmedia_codec_t* self) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + + if(!h264){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + if(h264->encoder.context){ + NVDestroyEncoder(h264->encoder.context); + h264->encoder.context = NULL; + } + return 0; +} + +static tsk_size_t tdav_codec_h264_cuda_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size) +{ + NVVE_EncodeFrameParams efparams; + int ret = 0; + unsigned long flags = 0; + HRESULT hr; + + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + + if(!self || !in_data || 
!in_size || !out_data){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0; + } + + if(!self->opened){ + TSK_DEBUG_ERROR("Codec not opened"); + return 0; + } + + if((h264->encoder.ctx_params.iOutputSize[1] * h264->encoder.ctx_params.iOutputSize[0] * 3)>>1 != in_size){ + /* guard */ + TSK_DEBUG_ERROR("Invalid size"); + return 0; + } + + //return 0; + + efparams.Width = h264->encoder.ctx_params.iOutputSize[0]; + efparams.Height = h264->encoder.ctx_params.iOutputSize[1]; + efparams.Pitch = (h264->encoder.ctx_params.nDeviceMemPitch ? h264->encoder.ctx_params.nDeviceMemPitch : h264->encoder.ctx_params.iOutputSize[0]); + efparams.PictureStruc = (NVVE_PicStruct)h264->encoder.ctx_params.iPictureType; + efparams.SurfFmt = (NVVE_SurfaceFormat)h264->encoder.ctx_params.iSurfaceFormat; + efparams.progressiveFrame = (h264->encoder.ctx_params.iSurfaceFormat == 3) ? 1 : 0; + efparams.repeatFirstField = 0; + efparams.topfieldfirst = (h264->encoder.ctx_params.iSurfaceFormat == 1) ? 1 : 0; + efparams.picBuf = (unsigned char *)in_data; + efparams.bLast = 0; + + // send keyframe for: + // - the first frame + // - every second within the first 4seconds + // - every 7 seconds after the first 4seconds + if(h264->encoder.frame_count++ == 0 + || + ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) ) + || + ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 ) + ) + { + flags |= 0x04; // FORCE IDR + if(h264->encoder.ctx_params.iDisableSPSPPS){ + unsigned char SPSPPSBuff[1024]; + int SPSPPSBuffSize = sizeof(SPSPPSBuff); + hr = NVGetSPSPPS(h264->encoder.context, SPSPPSBuff, SPSPPSBuffSize, &SPSPPSBuffSize); + if(SUCCEEDED(hr)){ + int size = 0; + while(size < SPSPPSBuffSize - 2){ + int16_t next_size = ((int16_t)SPSPPSBuff[size])<<1 | ((int16_t)SPSPPSBuff[size + 1]); + _tdav_codec_h264_cuda_encap(h264, &SPSPPSBuff[size + 2], next_size); + size+=next_size + 2; + } + } + else{ + 
TSK_DEBUG_ERROR("NVGetSPSPPS failed with error code = %08x", hr) + } + } + } + + hr = NVEncodeFrame(h264->encoder.context, &efparams, flags, NULL); + + return 0; +} + +static tsk_size_t tdav_codec_h264_cuda_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr; + const uint8_t* pay_ptr = tsk_null; + tsk_size_t pay_size = 0, retsize = 0, size_to_copy = 0; + int ret = 0; + tsk_bool_t append_scp = tsk_false; + static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3; + static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX); + + // Packet lost? + if(h264->decoder.last_seq != (rtp_hdr->seq_num - 1) && h264->decoder.last_seq){ + if(h264->decoder.last_seq == rtp_hdr->seq_num){ + // Could happen on some stupid emulators + TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num); + return 0; + } + TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num); + } + h264->decoder.last_seq = rtp_hdr->seq_num; + + /* 5.3. NAL Unit Octet Usage + +---------------+ + |0|1|2|3|4|5|6|7| + +-+-+-+-+-+-+-+-+ + |F|NRI| Type | + +---------------+ + */ + if(*((uint8_t*)in_data) >> 7){ + TSK_DEBUG_WARN("F=1"); + /* reset accumulator */ + h264->decoder.accumulator = 0; + goto bail; + } + + // get payload + if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp)) || !pay_ptr || !pay_size){ + TSK_DEBUG_ERROR("Depayloader failed to get H.264 content"); + goto bail; + } + //append_scp = tsk_true; + size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0); + + // start-accumulator + if(!h264->decoder.accumulator){ + if(size_to_copy > xmax_size){ + TSK_DEBUG_ERROR("%u too big to contain valid encoded data. 
xmax_size=%u", size_to_copy, xmax_size); + goto bail; + } + if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){ + TSK_DEBUG_ERROR("Failed to allocated new buffer"); + goto bail; + } + h264->decoder.accumulator_size = size_to_copy; + } + if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){ + TSK_DEBUG_ERROR("BufferOverflow"); + h264->decoder.accumulator_pos = 0; + goto bail; + } + if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){ + if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){ + TSK_DEBUG_ERROR("Failed to reallocated new buffer"); + h264->decoder.accumulator_pos = 0; + h264->decoder.accumulator_size = 0; + goto bail; + } + h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy); + } + + if(append_scp){ + memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size); + h264->decoder.accumulator_pos += start_code_prefix_size; + } + memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size); + h264->decoder.accumulator_pos += pay_size; + // end-accumulator + + if(rtp_hdr->marker){ + CUVIDSOURCEDATAPACKET pkt; + CUresult cuResult; + pkt.flags = 0; + pkt.payload_size = (unsigned long) h264->decoder.accumulator_pos; + pkt.payload = (unsigned char *)h264->decoder.accumulator; + pkt.timestamp = 0; + + // reset accumulator + h264->decoder.accumulator_pos = 0; + cuResult = cuvidParseVideoData(h264->decoder.cu_parser, &pkt); + if(cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuvidParseVideoData() failed with error code = %d", (int)cuResult); + goto bail; + } + + if(h264->decoder.cu_buffer_avail){ + h264->decoder.cu_buffer_avail = tsk_false; + if((retsize = _tdav_codec_h264_cuda_pict_layout(h264, out_data, out_max_size)) == 0){ + TSK_DEBUG_ERROR("_tdav_codec_h264_cuda_pict_layout failed"); + goto bail; + } 
+ } + } + +bail: + return retsize; +} + +static tsk_bool_t tdav_codec_h264_cuda_fmtp_match(const tmedia_codec_t* codec, const char* fmtp) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)codec; + tdav_codec_h264_profile_t profile; + + if(!h264){ + TSK_DEBUG_ERROR("Invalid parameter"); + return tsk_false; + } + + /* Check whether the profile match (If the profile is missing, then we consider that it's ok) */ + if(((profile = tdav_codec_h264_common_get_profile(fmtp)) != tdav_codec_h264_bp99) && (profile != TDAV_CODEC_H264_COMMON(h264)->profile)){ + TSK_DEBUG_INFO("Profile not matching"); + return tsk_false; + } + + TMEDIA_CODEC_VIDEO(h264)->in.width = 800, TMEDIA_CODEC_VIDEO(h264)->in.height = 640; + TMEDIA_CODEC_VIDEO(h264)->out.width = 800, TMEDIA_CODEC_VIDEO(h264)->out.height = 640; + //TMEDIA_CODEC_VIDEO(h264)->in.width = 352, TMEDIA_CODEC_VIDEO(h264)->in.height = 288; + //TMEDIA_CODEC_VIDEO(h264)->out.width = 352, TMEDIA_CODEC_VIDEO(h264)->out.height = 288; + + return tsk_true; +} + +static char* tdav_codec_h264_cuda_fmtp_get(const tmedia_codec_t* self) +{ + char* fmtp = tsk_null; + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + + switch(TDAV_CODEC_H264_COMMON(h264)->profile){ + case tdav_codec_h264_bp10: + fmtp = tsk_strdup("profile-level-id=42e00a"); + break; + case tdav_codec_h264_bp20: + fmtp = tsk_strdup("profile-level-id=42e014"); + break; + case tdav_codec_h264_bp30: + fmtp = tsk_strdup("profile-level-id=42e01e"); + break; + } + + //1080p(1920 x 1080), 720p(1280 x 720), SVGA(800 x 600), VGA(640 x 480), 4CIF(704 x 576), CIF(352 x 288), QCIF(176 x 144), SQCIF(128 x 96) + return fmtp; +} + +tsk_bool_t tdav_codec_h264_cuda_is_supported() +{ + static tsk_bool_t __already_checked = tsk_false; + static tsk_bool_t __is_supported = tsk_false; + if(!__already_checked){ + HRESULT hr; + __already_checked = tsk_true; + hr = NVGetHWEncodeCaps(); + if(SUCCEEDED(hr)){ + NVEncoder encoder; + hr = NVCreateEncoder(&encoder); + if(SUCCEEDED(hr)){ 
+ hr = NVIsSupportedCodec(encoder, NV_CODEC_TYPE_H264); + __is_supported = SUCCEEDED(hr); + } + else{ + TSK_DEBUG_ERROR("NVCreateEncoder() failed with error code = %08x", hr); + } + if(encoder){ + NVDestroyEncoder(encoder); + encoder = NULL; + } + } + } + return __is_supported; +} + +static int tdav_codec_h264_cuda_init(tdav_codec_h264_cuda_t* self, tdav_codec_h264_profile_t profile) +{ + int ret = 0; + + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){ + TSK_DEBUG_ERROR("tdav_codec_h264_common_init() faile with error code=%d", ret); + return ret; + } + + if(!self->decoder.mutex && !(self->decoder.mutex = tsk_mutex_create_2(tsk_false))){ + TSK_DEBUG_ERROR("Failed to create mutex"); + return -2; + } + + sem = tsk_semaphore_create_2(1); + + TDAV_CODEC_H264_COMMON(self)->pack_mode = H264_PACKETIZATION_MODE; + TDAV_CODEC_H264_COMMON(self)->profile = profile; + TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000; + TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000; + + // At this time self->plugin is Null + TMEDIA_CODEC_VIDEO(self)->in.width = TMEDIA_CODEC_VIDEO(self)->out.width = 176; + TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 144; + TMEDIA_CODEC_VIDEO(self)->in.fps = TMEDIA_CODEC_VIDEO(self)->out.fps = 15; + TMEDIA_CODEC_VIDEO(self)->in.chroma = tmedia_chroma_yuv420p;// no choice + + return 0; +} + +static int tdav_codec_h264_cuda_deinit(tdav_codec_h264_cuda_t* self) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)self; + + if(!h264){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + if(h264->encoder.context){ + NVDestroyEncoder(h264->encoder.context); + h264->encoder.context = NULL; + } + TSK_FREE(h264->encoder.buffer); + h264->encoder.buffer_size = 0; + + if(h264->decoder.context){ + 
cuvidDestroyDecoder(h264->decoder.context); + h264->decoder.context = NULL; + } + if(h264->decoder.cu_context){ + cuCtxDestroy(h264->decoder.cu_context); + h264->decoder.cu_context = NULL; + } + if (h264->decoder.dx_d3ddevice){ + h264->decoder.dx_d3ddevice->Release(); + h264->decoder.dx_d3ddevice = NULL; + } + if (h264->decoder.dx_d3d){ + h264->decoder.dx_d3d->Release(); + h264->decoder.dx_d3d = NULL; + } + if(h264->decoder.cu_parser){ + cuvidDestroyVideoParser(h264->decoder.cu_parser); + h264->decoder.cu_parser = NULL; + } + if(h264->decoder.cu_buffer){ + cuMemFreeHost(h264->decoder.cu_buffer); + h264->decoder.cu_buffer = NULL; + } + h264->decoder.cu_buffer_size = 0; + if(self->decoder.mutex){ + tsk_mutex_destroy(&self->decoder.mutex); + } + + TSK_FREE(h264->decoder.accumulator); + h264->decoder.accumulator_pos = 0; + h264->decoder.accumulator_size = 0; + + return 0; +} + +static inline void _tdav_codec_h264_cuda_encap(const tdav_codec_h264_cuda_t* h264, const uint8_t* pdata, tsk_size_t size) +{ + register int32_t i; + int32_t last_scp, prev_scp; + static int32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 ..but */ + + if(!pdata || !size){ + return; + } + + last_scp = 0, prev_scp = 0; + + for(i = size_of_scp; i<(int32_t)(size - size_of_scp); i++){ + if(pdata[i] == H264_START_CODE_PREFIX[0] && pdata[i+1] == H264_START_CODE_PREFIX[1] && pdata[i+2] == H264_START_CODE_PREFIX[2] && pdata[i+3] == H264_START_CODE_PREFIX[3]){ /* Found Start Code Prefix */ + prev_scp = last_scp; + if((i - last_scp) >= H264_RTP_PAYLOAD_SIZE || 1){ + tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + prev_scp, + (i - prev_scp), (prev_scp == size)); + } + last_scp = i; + } + } + if(last_scp < (int32_t)size){ + tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + last_scp, + (size - last_scp), tsk_true); + } +} + +static inline tsk_size_t _tdav_codec_h264_cuda_pict_layout(tdav_codec_h264_cuda_t* self, void**output, tsk_size_t 
*output_size) +{ + if(self && self->decoder.cu_buffer && self->decoder.cu_buffer_size){ + unsigned int w = TMEDIA_CODEC_VIDEO(self)->in.width; + unsigned int h = TMEDIA_CODEC_VIDEO(self)->in.height; + unsigned int pitch = self->decoder.cu_buffer_pitch; + tsk_size_t xsize = (w * h * 3) >> 1; + // resize if too short + if(*output_sizedecoder.cu_buffer; + register unsigned char *iyuv = (unsigned char *)*output, *i = iyuv, *j; + // copy luma + for (y=0; ycoded_width != TMEDIA_CODEC_VIDEO(h264)->in.width || pFormat->coded_height != TMEDIA_CODEC_VIDEO(h264)->in.height){ + tsk_mutex_lock(h264->decoder.mutex); + + TMEDIA_CODEC_VIDEO(h264)->in.width = pFormat->coded_width; + TMEDIA_CODEC_VIDEO(h264)->in.height = pFormat->coded_height; + + InitH264DecoderInfo(h264); + CUresult cuResult; + if(h264->decoder.context){ + cuResult = cuvidDestroyDecoder(h264->decoder.context); + if(CUDA_SUCCESS != cuResult){ + TSK_DEBUG_ERROR("cuvidDestroyDecoder failed with error code=%d", (int)cuResult); + ret = 0; + } + h264->decoder.context = NULL; + } + cuResult = cuvidCreateDecoder(&h264->decoder.context, &h264->decoder.info); + if(CUDA_SUCCESS != cuResult){ + TSK_DEBUG_ERROR("cuvidCreateDecoder failed with error code=%d", (int)cuResult); + ret = 0; + } + + tsk_mutex_unlock(h264->decoder.mutex); + } + + + + return ret;//success +} + +static int CUDAAPI _NVCallback_HandlePictureDecode(void *pvUserData, CUVIDPICPARAMS *pPicParams) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pvUserData; + if(!h264 || !pPicParams){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0;//error + } + + tsk_mutex_lock(h264->decoder.mutex); + CUresult cuResult = cuvidDecodePicture(h264->decoder.context, pPicParams); + tsk_mutex_unlock(h264->decoder.mutex); + + if(cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuvidDecodePicture failed with error code= %d", cuResult); + return 0;//error + } + + return 1;//success +} + +static int CUDAAPI _NVCallback_HandlePictureDisplay(void *pvUserData, 
CUVIDPARSERDISPINFO *pPicParams) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pvUserData; + CUVIDPROCPARAMS vpp; + CUdeviceptr devPtr; + CUresult cuResult; + tsk_size_t nv12_size; + tsk_bool_t mapped = tsk_false; + int ret = 1;//success + + if(!h264 || !pPicParams){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0;//error + } +tsk_semaphore_decrement(sem); + cuResult = cuCtxPushCurrent(h264->decoder.cu_context); + if(cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuCtxPushCurrent failed with error code = %d", (int)cuResult); + ret = 0;//error + goto bail; + } + + memset(&vpp, 0, sizeof(vpp)); + vpp.progressive_frame = pPicParams->progressive_frame; + vpp.top_field_first = pPicParams->top_field_first; + cuResult = cuvidMapVideoFrame(h264->decoder.context, pPicParams->picture_index, &devPtr, &h264->decoder.cu_buffer_pitch, &vpp); + + if(cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuvidMapVideoFrame failed with error code = %d", (int)cuResult); + ret = 0;//error + goto bail; + } + mapped = tsk_true; + nv12_size = h264->decoder.cu_buffer_pitch * (h264->decoder.info.ulTargetHeight + h264->decoder.info.ulTargetHeight/2); // 12bpp + //nv12_size = (w * h * 3) >> 1; + if ((!h264->decoder.cu_buffer) || (nv12_size > h264->decoder.cu_buffer_size)){ + h264->decoder.cu_buffer_size = 0; + if (h264->decoder.cu_buffer){ + cuResult = cuMemFreeHost(h264->decoder.cu_buffer); + h264->decoder.cu_buffer = NULL; + } + cuResult = cuMemAllocHost((void**)&h264->decoder.cu_buffer, nv12_size); + if (cuResult != CUDA_SUCCESS){ + TSK_DEBUG_ERROR("cuMemAllocHost failed to allocate %d bytes (error code=%d)", nv12_size, (int)cuResult); + h264->decoder.cu_buffer = 0; + h264->decoder.cu_buffer_size = 0; + ret = 0;//error + } + else{ + h264->decoder.cu_buffer_size = nv12_size; + } + } + if(h264->decoder.cu_buffer){ + cuResult = cuMemcpyDtoH(h264->decoder.cu_buffer, devPtr, nv12_size); + } + +bail: + if(mapped){ + cuResult = cuvidUnmapVideoFrame(h264->decoder.context, devPtr); + } + 
cuResult = cuCtxPopCurrent(NULL); +tsk_semaphore_increment(sem); + h264->decoder.cu_buffer_avail = (ret == 1); + return ret; +} + +static unsigned char* CUDAAPI _NVCallback_HandleAcquireBitStream(int *pBufferSize, void *pUserdata) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pUserdata; + if(!h264 || !pBufferSize){ + TSK_DEBUG_ERROR("Invalid parameter"); + return tsk_null; + } + + *pBufferSize = (int)h264->encoder.buffer_size; + return (unsigned char*)h264->encoder.buffer; +} + +static void CUDAAPI _NVCallback_HandleReleaseBitStream(int nBytesInBuffer, unsigned char *cb,void *pUserdata) +{ + tdav_codec_h264_cuda_t* h264 = (tdav_codec_h264_cuda_t*)pUserdata; + if(!h264 || !cb || !nBytesInBuffer){ + TSK_DEBUG_ERROR("Invalid parameter"); + return; + } + _tdav_codec_h264_cuda_encap(h264, cb, (tsk_size_t)nBytesInBuffer); + + return; +} + +static void CUDAAPI _NVCallback_HandleOnBeginFrame(const NVVE_BeginFrameInfo *pbfi, void *pUserdata) +{ + return; +} + +static void CUDAAPI _NVCallback_HandleOnEndFrame(const NVVE_EndFrameInfo *pefi, void *pUserdata) +{ + return; +} + +/* ============ H.264 Base Profile 1.0 Plugin interface ================= */ + +/* constructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp10_ctor(tsk_object_t * self, va_list * app) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* init base: called by tmedia_codec_create() */ + /* init self */ + tdav_codec_h264_cuda_init(h264, tdav_codec_h264_bp10); + } + return self; +} +/* destructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp10_dtor(tsk_object_t * self) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* deinit base */ + tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self)); + /* deinit self */ + tdav_codec_h264_cuda_deinit(h264); + } + + return self; +} +/* object definition */ +static const tsk_object_def_t tdav_codec_h264_cuda_bp10_def_s = +{ + sizeof(tdav_codec_h264_cuda_t), + 
tdav_codec_h264_cuda_bp10_ctor, + tdav_codec_h264_cuda_bp10_dtor, + tmedia_codec_cmp, +}; +/* plugin definition*/ +static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp10_plugin_def_s = +{ + &tdav_codec_h264_cuda_bp10_def_s, + + tmedia_video, + "H264", + "H264 Base Profile 1.0 using CUDA", + TMEDIA_CODEC_FORMAT_H264_BP10, + tsk_true, + 90000, // rate + + /* audio */ + { 0 }, + + /* video */ + {176, 144, 15}, + + tdav_codec_h264_cuda_open, + tdav_codec_h264_cuda_close, + tdav_codec_h264_cuda_encode, + tdav_codec_h264_cuda_decode, + tdav_codec_h264_cuda_fmtp_match, + tdav_codec_h264_cuda_fmtp_get, + tdav_codec_h264_cuda_fmtp_set +}; +extern const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp10_plugin_def_t = &tdav_codec_h264_cuda_bp10_plugin_def_s; + + +/* ============ H.264 Base Profile 2.0 Plugin interface ================= */ + +/* constructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp20_ctor(tsk_object_t * self, va_list * app) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* init base: called by tmedia_codec_create() */ + /* init self */ + tdav_codec_h264_cuda_init(h264, tdav_codec_h264_bp20); + } + return self; +} +/* destructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp20_dtor(tsk_object_t * self) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* deinit base */ + tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self)); + /* deinit self */ + tdav_codec_h264_cuda_deinit(h264); + + } + + return self; +} +/* object definition */ +static const tsk_object_def_t tdav_codec_h264_cuda_bp20_def_s = +{ + sizeof(tdav_codec_h264_cuda_t), + tdav_codec_h264_cuda_bp20_ctor, + tdav_codec_h264_cuda_bp20_dtor, + tmedia_codec_cmp, +}; +/* plugin definition*/ +static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp20_plugin_def_s = +{ + &tdav_codec_h264_cuda_bp20_def_s, + + tmedia_video, + "H264", + "H264 Base Profile 2.0 using CUDA", + TMEDIA_CODEC_FORMAT_H264_BP20, + 
tsk_true, + 90000, // rate + + /* audio */ + { 0 }, + + /* video */ + {352, 288, 15}, + + tdav_codec_h264_cuda_open, + tdav_codec_h264_cuda_close, + tdav_codec_h264_cuda_encode, + tdav_codec_h264_cuda_decode, + tdav_codec_h264_cuda_fmtp_match, + tdav_codec_h264_cuda_fmtp_get, + tdav_codec_h264_cuda_fmtp_set +}; +extern const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp20_plugin_def_t = &tdav_codec_h264_cuda_bp20_plugin_def_s; + + +/* ============ H.264 Base Profile 3.0 Plugin interface ================= */ + +/* constructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp30_ctor(tsk_object_t * self, va_list * app) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* init base: called by tmedia_codec_create() */ + /* init self */ + tdav_codec_h264_cuda_init(h264, tdav_codec_h264_bp30); + } + return self; +} +/* destructor */ +static tsk_object_t* tdav_codec_h264_cuda_bp30_dtor(tsk_object_t * self) +{ + tdav_codec_h264_cuda_t *h264 = (tdav_codec_h264_cuda_t *)self; + if(h264){ + /* deinit base */ + tdav_codec_h264_common_deinit(TDAV_CODEC_H264_COMMON(self)); + /* deinit self */ + tdav_codec_h264_cuda_deinit(h264); + + } + + return self; +} +/* object definition */ +static const tsk_object_def_t tdav_codec_h264_cuda_bp30_def_s = +{ + sizeof(tdav_codec_h264_cuda_t), + tdav_codec_h264_cuda_bp30_ctor, + tdav_codec_h264_cuda_bp30_dtor, + tmedia_codec_cmp, +}; +/* plugin definition*/ +static const tmedia_codec_plugin_def_t tdav_codec_h264_cuda_bp30_plugin_def_s = +{ + &tdav_codec_h264_cuda_bp30_def_s, + + tmedia_video, + "H264", + "H264 Base Profile 3.0 using CUDA", + TMEDIA_CODEC_FORMAT_H264_BP30, + tsk_true, + 90000, // rate + + /* audio */ + { 0 }, + + /* video */ + {352, 288, 15}, + + tdav_codec_h264_cuda_open, + tdav_codec_h264_cuda_close, + tdav_codec_h264_cuda_encode, + tdav_codec_h264_cuda_decode, + tdav_codec_h264_cuda_fmtp_match, + tdav_codec_h264_cuda_fmtp_get, + tdav_codec_h264_cuda_fmtp_set +}; +extern const 
tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp30_plugin_def_t = &tdav_codec_h264_cuda_bp30_plugin_def_s; + + +#endif /* HAVE_CUDA */ diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c index 3534b5e1..1285c8f0 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/h264/tdav_codec_h264_rtp.c @@ -29,9 +29,7 @@ */ #include "tinydav/codecs/h264/tdav_codec_h264_rtp.h" -#include "tinydav/codecs/h264/tdav_codec_h264.h" - -#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264) +#include "tinydav/codecs/h264/tdav_codec_h264_common.h" #include "tinymedia/tmedia_codec.h" @@ -164,11 +162,10 @@ int tdav_codec_h264_get_pay(const void* in_data, tsk_size_t in_size, const void* case stap_b: case mtap16: case mtap24: + case fu_b: break; case fu_a: return tdav_codec_h264_get_fua_pay(pdata, in_size, out_data, out_size, append_scp); - case fu_b: - return -1; default: /* NAL unit (1-23) */ return tdav_codec_h264_get_nalunit_pay(pdata, in_size, out_data, out_size); } @@ -278,12 +275,11 @@ int tdav_codec_h264_get_nalunit_pay(const uint8_t* in_data, tsk_size_t in_size, return 0; } -#if TDAV_UNDER_WINDOWS -# include "tsk_thread.h" -#endif -void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *data, tsk_size_t size, tsk_bool_t marker) +void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker) { - uint8_t* pdata = (uint8_t*)data; + uint8_t* pdata = (uint8_t*)data; + + //TSK_DEBUG_INFO("%x %x %x %x -- %u", pdata[0], pdata[1], pdata[2], pdata[3], size); if(size>4 && pdata[0] == H264_START_CODE_PREFIX[0] && pdata[1] == H264_START_CODE_PREFIX[1]){ if(pdata[2] == H264_START_CODE_PREFIX[3]){ @@ -304,10 +300,6 @@ void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *da } } else if(size > 
H264_NAL_UNIT_TYPE_HEADER_SIZE){ -#if TDAV_UNDER_WINDOWS - tsk_bool_t burst = ((size/H264_RTP_PAYLOAD_SIZE) > 5); - int count = 0; -#endif /* Should be Fragmented as FUA */ uint8_t fua_hdr[H264_FUA_HEADER_SIZE]; /* "FU indicator" and "FU header" - 2bytes */ fua_hdr[0] = pdata[0] & 0x60/* F=0 */, fua_hdr[0] |= fu_a; @@ -343,14 +335,7 @@ void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *da // send data if(TMEDIA_CODEC_VIDEO(self)->callback){ TMEDIA_CODEC_VIDEO(self)->callback(TMEDIA_CODEC_VIDEO(self)->callback_data, self->rtp.ptr, (packet_size + H264_FUA_HEADER_SIZE), (3003* (30/TMEDIA_CODEC_VIDEO(self)->out.fps)), (size == 0)); -#if TDAV_UNDER_WINDOWS// FIXME: WinSock problem: Why do we get packet lost (burst case only)? - if(burst && (++count % 2 == 0)){ - tsk_thread_sleep(1); // 1 millisecond - } -#endif } } } } - -#endif /* HAVE_FFMPEG */ \ No newline at end of file diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c b/branches/2.0/doubango/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c index 2f86d2a9..d6926c50 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/mp4ves/tdav_codec_mp4ves.c @@ -290,11 +290,6 @@ tsk_size_t tdav_codec_mp4ves_encode(tmedia_codec_t* self, const void* in_data, t TSK_DEBUG_ERROR("Invalid size"); return 0; } - - // Flip - if(self->video.flip.encoded){ - tdav_converter_video_flip(mp4v->encoder.picture, mp4v->encoder.context->height); - } mp4v->encoder.picture->pts = AV_NOPTS_VALUE; mp4v->encoder.picture->quality = mp4v->encoder.context->global_quality; @@ -369,10 +364,7 @@ tsk_size_t tdav_codec_mp4ves_decode(tmedia_codec_t* self, const void* in_data, t retsize = xsize; TMEDIA_CODEC_VIDEO(mp4v)->in.width = mp4v->decoder.context->width; TMEDIA_CODEC_VIDEO(mp4v)->in.height = mp4v->decoder.context->height; - // flip - if(self->video.flip.decoded){ - tdav_converter_video_flip(mp4v->decoder.picture, 
mp4v->decoder.context->height); - } + /* copy picture into a linear buffer */ avpicture_layout((AVPicture *)mp4v->decoder.picture, mp4v->decoder.context->pix_fmt, mp4v->decoder.context->width, mp4v->decoder.context->height, *out_data, retsize); diff --git a/branches/2.0/doubango/tinyDAV/src/codecs/theora/tdav_codec_theora.c b/branches/2.0/doubango/tinyDAV/src/codecs/theora/tdav_codec_theora.c index 1ef8aefd..40ffe6e8 100644 --- a/branches/2.0/doubango/tinyDAV/src/codecs/theora/tdav_codec_theora.c +++ b/branches/2.0/doubango/tinyDAV/src/codecs/theora/tdav_codec_theora.c @@ -261,11 +261,6 @@ tsk_size_t tdav_codec_theora_encode(tmedia_codec_t* self, const void* in_data, t return 0; } - // Flip - if(self->video.flip.encoded){ - tdav_converter_video_flip(theora->encoder.picture, theora->encoder.context->height); - } - // Encode data //theora->encoder.picture->pts = tsk_time_now(); theora->encoder.picture->pts = AV_NOPTS_VALUE; @@ -365,9 +360,7 @@ tsk_size_t tdav_codec_theora_decode(tmedia_codec_t* self, const void* in_data, t retsize = xsize; TMEDIA_CODEC_VIDEO(theora)->in.width = theora->decoder.context->width; TMEDIA_CODEC_VIDEO(theora)->in.height = theora->decoder.context->height; - if(self->video.flip.decoded){ - tdav_converter_video_flip(theora->decoder.picture, theora->decoder.context->height); - } + /* allocate buffer */ if(*out_max_size +#endif + #include "tinyrtp/rtp/trtp_rtp_packet.h" #include "tsk_memory.h" @@ -39,9 +43,15 @@ #include "tsk_debug.h" #define TDAV_VP8_PAY_DESC_SIZE 1 /* |X|R|N|S|PartID| */ -#define TDAV_SYSTEM_CORES_COUNT 1 -#define TDAV_VP8_GOP_SIZE_IN_SECONDS 2 -#define TDAV_VP8_RTP_PAYLOAD_MAX_SIZE 950 +#define TDAV_SYSTEM_CORES_COUNT 0 +#define TDAV_VP8_GOP_SIZE_IN_SECONDS 5 +#define TDAV_VP8_RTP_PAYLOAD_MAX_SIZE 1050 +#if !defined(TDAV_VP8_MAX_BANDWIDTH_KB) +# define TDAV_VP8_MAX_BANDWIDTH_KB 6000 +#endif +#if !defined(TDAV_VP8_MIN_BANDWIDTH_KB) +# define TDAV_VP8_MIN_BANDWIDTH_KB 100 +#endif #define vp8_interface_enc (vpx_codec_vp8_cx()) 
#define vp8_interface_dec (vpx_codec_vp8_dx()) @@ -57,9 +67,12 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self) vpx_codec_enc_cfg_t enc_cfg; vpx_codec_dec_cfg_t dec_cfg; + vpx_codec_caps_t dec_caps; vpx_enc_frame_flags_t enc_flags; vpx_codec_flags_t dec_flags = 0; vpx_codec_err_t vpx_ret; + static vp8_postproc_cfg_t __pp = { VP8_DEBLOCK | VP8_DEMACROBLOCK, 4, 0}; + if(!vp8){ TSK_DEBUG_ERROR("Invalid parameter"); @@ -79,15 +92,27 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self) enc_cfg.g_timebase.den = TMEDIA_CODEC_VIDEO(vp8)->out.fps; enc_cfg.rc_target_bitrate = TMEDIA_CODEC_VIDEO(vp8)->out.width * TMEDIA_CODEC_VIDEO(vp8)->out.height * enc_cfg.rc_target_bitrate / enc_cfg.g_w / enc_cfg.g_h; + enc_cfg.rc_target_bitrate = TSK_CLAMP(TDAV_VP8_MIN_BANDWIDTH_KB, enc_cfg.rc_target_bitrate>>=0, TDAV_VP8_MAX_BANDWIDTH_KB); + enc_cfg.rc_end_usage = VPX_CBR; enc_cfg.g_w = TMEDIA_CODEC_VIDEO(vp8)->out.width; enc_cfg.g_h = TMEDIA_CODEC_VIDEO(vp8)->out.height; + enc_cfg.kf_mode = VPX_KF_DISABLED; enc_cfg.g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT; enc_cfg.g_lag_in_frames = 0; +#if TDAV_UNDER_WINDOWS + { + SYSTEM_INFO SystemInfo; + GetSystemInfo(&SystemInfo); + enc_cfg.g_threads = SystemInfo.dwNumberOfProcessors; + } +#else enc_cfg.g_threads = TDAV_SYSTEM_CORES_COUNT; +#endif enc_cfg.g_pass = VPX_RC_ONE_PASS; - enc_cfg.rc_min_quantizer = 4; - enc_cfg.rc_max_quantizer = 56; + enc_cfg.rc_min_quantizer = TSK_CLAMP(enc_cfg.rc_min_quantizer, 10, enc_cfg.rc_max_quantizer); + enc_cfg.rc_max_quantizer = TSK_CLAMP(enc_cfg.rc_min_quantizer, 51, enc_cfg.rc_max_quantizer); enc_cfg.rc_resize_allowed = 0; + //enc_cfg.g_profile = 1; enc_flags = 0; //VPX_EFLAG_XXX @@ -99,20 +124,44 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self) vp8->encoder.gop_size = TDAV_VP8_GOP_SIZE_IN_SECONDS * TMEDIA_CODEC_VIDEO(vp8)->out.fps; vp8->encoder.initialized = tsk_true; + //vpx_codec_control(&vp8->encoder.context, VP8E_SET_CPUUSED, 0); + //vpx_codec_control(&vp8->encoder.context, 
VP8E_SET_SHARPNESS, 7); + //vpx_codec_control(&vp8->encoder.context, VP8E_SET_ENABLEAUTOALTREF, 1); + + + // // Decoder // dec_cfg.w = TMEDIA_CODEC_VIDEO(vp8)->out.width; dec_cfg.h = TMEDIA_CODEC_VIDEO(vp8)->out.height; +#if TDAV_UNDER_WINDOWS + { + SYSTEM_INFO SystemInfo; + GetSystemInfo(&SystemInfo); + dec_cfg.threads = SystemInfo.dwNumberOfProcessors; + } +#else dec_cfg.threads = TDAV_SYSTEM_CORES_COUNT; +#endif + + dec_caps = vpx_codec_get_caps(&vpx_codec_vp8_dx_algo); + if(dec_caps & VPX_CODEC_CAP_POSTPROC){ + dec_flags |= VPX_CODEC_USE_POSTPROC; + } + if(dec_caps & VPX_CODEC_CAP_ERROR_CONCEALMENT){ + dec_flags |= VPX_CODEC_USE_ERROR_CONCEALMENT; + } + if((vpx_ret = vpx_codec_dec_init(&vp8->decoder.context, vp8_interface_dec, &dec_cfg, dec_flags)) != VPX_CODEC_OK){ TSK_DEBUG_ERROR("vpx_codec_dec_init failed with error =%s", vpx_codec_err_to_string(vpx_ret)); return -4; } - vp8->decoder.initialized = tsk_true; - - + if((vpx_ret = vpx_codec_control(&vp8->decoder.context, VP8_SET_POSTPROC, &__pp))){ + TSK_DEBUG_WARN("vpx_codec_dec_init failed with error =%s", vpx_codec_err_to_string(vpx_ret)); + } + vp8->decoder.initialized = tsk_true; return 0; } @@ -162,10 +211,11 @@ static tsk_size_t tdav_codec_vp8_encode(tmedia_codec_t* self, const void* in_dat return 0; } - // flip - if(self->video.flip.encoded){ +#if !HAVE_FFMPEG// convert flip use FFmpeg + if(TMEDIA_CODEC_VIDEO(self)->out.flip){ vpx_img_flip(&image); } +#endif // encode data ++vp8->encoder.pts; @@ -208,8 +258,9 @@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self; const trtp_rtp_header_t* rtp_hdr = proto_hdr; const uint8_t* pdata = in_data; - tsk_size_t xmax_size, ret = 0; + tsk_size_t ret = 0; uint8_t X, R, N, S, PartID; // |X|R|N|S|PartID| + static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3; if(!self || !in_data || in_size<1 || !out_data || !vp8->decoder.initialized){ TSK_DEBUG_ERROR("Invalid parameter"); @@ -254,11 +305,10 
@@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat goto bail; } TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num); + vp8->decoder.frame_corrupted = tsk_true; } vp8->decoder.last_seq = rtp_hdr->seq_num; - xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1; - // start-accumulator if(!vp8->decoder.accumulator){ if(in_size > xmax_size){ @@ -295,36 +345,55 @@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat // (vp8->decoder.last_PartID == 0 && vp8->decoder.last_S && S) => previous was "first decodable" and current is new one if(rtp_hdr->marker /*|| (vp8->decoder.last_PartID == 0 && vp8->decoder.last_S)*/){ vpx_image_t *img; - vpx_codec_iter_t iter = tsk_null; - vpx_codec_err_t vpx_ret = vpx_codec_decode(&vp8->decoder.context, vp8->decoder.accumulator, vp8->decoder.accumulator_pos, tsk_null, 0); + vpx_codec_iter_t iter = tsk_null; + vpx_codec_err_t vpx_ret; + tsk_size_t pay_size = vp8->decoder.accumulator_pos; + // in all cases: reset accumulator vp8->decoder.accumulator_pos = 0; + + // libvpx will crash very ofen when the frame is corrupted => for now we decided not to decode such frame + // according to the latest release there is a function to check if the frame + // is corrupted or not => To be checked + if(vp8->decoder.frame_corrupted){ + vp8->decoder.frame_corrupted = tsk_false; + goto bail; + } + + vpx_ret = vpx_codec_decode(&vp8->decoder.context, vp8->decoder.accumulator, pay_size, tsk_null, 0); + if(vpx_ret != VPX_CODEC_OK){ TSK_DEBUG_ERROR("vpx_codec_decode failed with error =%s", vpx_codec_err_to_string(vpx_ret)); goto bail; } - // update sizes - TMEDIA_CODEC_VIDEO(vp8)->in.width = vp8->decoder.context.config.dec->w; - TMEDIA_CODEC_VIDEO(vp8)->in.height = vp8->decoder.context.config.dec->h; - xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1; - // allocate destination buffer - if(*out_max_size 
decoder.accumulator_pos = 0; - *out_max_size = 0; - goto bail; - } - *out_max_size = xmax_size; - } + // copy decoded data ret = 0; while((img = vpx_codec_get_frame(&vp8->decoder.context, &iter))){ unsigned int plane, y; - // flip - if(self->video.flip.decoded){ + + // update sizes + TMEDIA_CODEC_VIDEO(vp8)->in.width = img->d_w; + TMEDIA_CODEC_VIDEO(vp8)->in.height = img->d_h; + xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1; + // allocate destination buffer + if(*out_max_size decoder.accumulator_pos = 0; + *out_max_size = 0; + goto bail; + } + *out_max_size = xmax_size; + } + +#if !HAVE_FFMPEG// convert flip use FFmpeg + if(TMEDIA_CODEC_VIDEO(vp8)->in.flip){ vpx_img_flip(img); } +#endif + + // layout picture for(plane=0; plane < 3; plane++) { unsigned char *buf =img->planes[plane]; for(y=0; yd_h >> (plane ? 1 : 0); y++) { @@ -358,11 +427,11 @@ static tsk_bool_t tdav_codec_vp8_fmtp_match(const tmedia_codec_t* codec, const c TSK_DEBUG_ERROR("Failed to match fmtp=%s", fmtp); return tsk_false; } - + TMEDIA_CODEC_VIDEO(codec)->in.width = TMEDIA_CODEC_VIDEO(codec)->out.width = width; TMEDIA_CODEC_VIDEO(codec)->in.height = TMEDIA_CODEC_VIDEO(codec)->out.height = height; TMEDIA_CODEC_VIDEO(codec)->in.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps = fps; - + return tsk_true; } @@ -491,17 +560,30 @@ static void tdav_codec_vp8_encap(tdav_codec_vp8_t* self, const vpx_codec_cx_pkt_ // first partition (contains modes and motion vectors) part_ID = 0; // The first VP8 partition(containing modes and motion vectors) MUST be labeled with PartID = 0 - part_start = tsk_true; part_size = (frame_ptr[2] << 16) | (frame_ptr[1] << 8) | frame_ptr[0]; part_size = (part_size >> 5) & 0x7FFFF; if(part_size > pkt_size){ TSK_DEBUG_ERROR("part_size is > pkt_size(%u,%u)", part_size, pkt_size); return; } + + part_start = tsk_true; + +#if 0 // The first partition could be as big as 10kb for HD 720p video frames => we have to split it 
tdav_codec_vp8_rtp_callback(self, &frame_ptr[index], part_size, part_ID, part_start, non_ref, (index + part_size)==pkt_size); index += part_size; +#else + // first,first,....partitions (or fragment if part_size > TDAV_VP8_RTP_PAYLOAD_MAX_SIZE) + while(index TDAV_VP8_RTP_PAYLOAD_MAX_SIZE) + part_start = tsk_true; while(index #endif + +static inline tsk_bool_t _tdav_codec_is_supported(tdav_codec_id_t codec, const tmedia_codec_plugin_def_t* plugin); + int tdav_init() { int ret = 0; @@ -181,6 +188,13 @@ int tdav_init() #if HAVE_LIBVPX tmedia_codec_plugin_register(tdav_codec_vp8_plugin_def_t); #endif +#if HAVE_CUDA + if(tdav_codec_h264_cuda_is_supported()){ + tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp10_plugin_def_t); + tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp20_plugin_def_t); + tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp30_plugin_def_t); + } +#endif #if HAVE_FFMPEG tmedia_codec_plugin_register(tdav_codec_mp4ves_plugin_def_t); # if !defined(HAVE_H264) || HAVE_H264 @@ -253,6 +267,9 @@ int tdav_init() #else tmedia_jitterbuffer_plugin_register(tdav_speakup_jitterbuffer_plugin_def_t); #endif +#if TELEPRESENCE + tmedia_jitterbuffer_plugin_register(tdav_video_jitterbuffer_plugin_def_t); +#endif return ret; } @@ -292,8 +309,14 @@ static tdav_codec_decl_t __codecs[] = { { tdav_codec_id_vp8, &tdav_codec_vp8_plugin_def_t }, #endif +#if HAVE_CUDA + // tdav_codec_h264_cuda_is_supported() will be used to check availability at runtime + { tdav_codec_id_h264_bp30, &tdav_codec_h264_cuda_bp30_plugin_def_t }, + { tdav_codec_id_h264_bp20, &tdav_codec_h264_cuda_bp20_plugin_def_t }, + { tdav_codec_id_h264_bp10, &tdav_codec_h264_cuda_bp10_plugin_def_t }, +#endif #if HAVE_FFMPEG -# if !defined(HAVE_H264) || HAVE_H264 +# if (!defined(HAVE_H264) || HAVE_H264) || HAVE_CUDA { tdav_codec_id_h264_bp30, &tdav_codec_h264_bp30_plugin_def_t }, { tdav_codec_id_h264_bp20, &tdav_codec_h264_bp20_plugin_def_t }, { tdav_codec_id_h264_bp10, &tdav_codec_h264_bp10_plugin_def_t }, 
@@ -343,17 +366,19 @@ void tdav_set_codecs(tdav_codec_id_t codecs) { int i; - for(i=0; i +* +* Contact: Mamadou Diop +* +* This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_consumer_video.c + * @brief Base class for all Video consumers. + * + * @author Mamadou Diop + */ +#include "tinydav/video/tdav_consumer_video.h" +#include "tinymedia/tmedia_jitterbuffer.h" +#include "tinyrtp/rtp/trtp_rtp_header.h" + +#include "tsk_debug.h" + +#define TDAV_VIDEO_DEFAULT_WIDTH 176 +#define TDAV_VIDEO_DEFAULT_HEIGHT 144 +#define TDAV_VIDEO_DEFAULT_FPS 15 +#define TDAV_VIDEO_DEFAULT_AUTORESIZE tsk_true + +/** Initialize video consumer */ +int tdav_consumer_video_init(tdav_consumer_video_t* self) +{ + int ret; + + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + /* base */ + if((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){ + return ret; + } + + /* self (should be update by prepare() by using the codec's info)*/ + TMEDIA_CONSUMER(self)->video.fps = TDAV_VIDEO_DEFAULT_FPS; + TMEDIA_CONSUMER(self)->video.display.width = TDAV_VIDEO_DEFAULT_WIDTH; + TMEDIA_CONSUMER(self)->video.display.height = TDAV_VIDEO_DEFAULT_HEIGHT; + TMEDIA_CONSUMER(self)->video.display.auto_resize = TDAV_VIDEO_DEFAULT_AUTORESIZE; + + /* self:jitterbuffer */ + if(!self->jitterbuffer && !(self->jitterbuffer = tmedia_jitterbuffer_create(tmedia_video))){ + TSK_DEBUG_WARN("Failed to 
video create jitter buffer"); + } + if(self->jitterbuffer){ + tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(self->jitterbuffer)); + } + + tsk_safeobj_init(self); + + return 0; +} + +/** +* Generic function to compare two consumers. +* @param consumer1 The first consumer to compare. +* @param consumer2 The second consumer to compare. +* @retval Returns an integral value indicating the relationship between the two consumers: +* <0 : @a consumer1 less than @a consumer2.
+* 0 : @a consumer1 identical to @a consumer2.
+* >0 : @a consumer1 greater than @a consumer2.
+*/ +int tdav_consumer_video_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2) +{ + return (TDAV_CONSUMER_VIDEO(consumer1) - TDAV_CONSUMER_VIDEO(consumer2)); +} + +int tdav_consumer_video_set(tdav_consumer_video_t* self, const tmedia_param_t* param) +{ + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + return 0; +} + +int tdav_consumer_video_put(tdav_consumer_video_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr) +{ + const trtp_rtp_header_t* rtp_hdr = TRTP_RTP_HEADER(proto_hdr); + int ret; + + if(!self || !data || !self->jitterbuffer || !rtp_hdr){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + tsk_safeobj_lock(self); + + if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){ + uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps); + uint32_t rate = 90000;//FIXME + if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate))){ + TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret); + tsk_safeobj_unlock(self); + return ret; + } + } + ret = tmedia_jitterbuffer_put(TMEDIA_JITTER_BUFFER(self->jitterbuffer), (void*)data, data_size, proto_hdr); + + tsk_safeobj_unlock(self); + + return ret; +} + +/* get data drom the jitter buffer (consumers should always have ptime of 20ms) */ +tsk_size_t tdav_consumer_video_get(tdav_consumer_video_t* self, void* out_data, tsk_size_t out_size) +{ + tsk_size_t ret_size = 0; + if(!self && self->jitterbuffer){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0; + } + + tsk_safeobj_lock(self); + + if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){ + int ret; + uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps); + uint32_t rate = 90000;//FIXME + if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate))){ + TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret); + tsk_safeobj_unlock(self); + return 0; + } + } + ret_size = 
tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size); + + tsk_safeobj_unlock(self); + + + + + return ret_size; +} + +int tdav_consumer_video_tick(tdav_consumer_video_t* self) +{ + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0; + } + return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer)); +} + +/** Reset jitterbuffer */ +int tdav_consumer_video_reset(tdav_consumer_video_t* self){ + int ret; + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + tsk_safeobj_lock(self); + ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer)); + tsk_safeobj_unlock(self); + + return ret; +} + +/* tsk_safeobj_lock(self); */ +/* tsk_safeobj_unlock(self); */ + +/** DeInitialize video consumer */ +int tdav_consumer_video_deinit(tdav_consumer_video_t* self) +{ + int ret; + + if(!self){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + /* base */ + if((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){ + /* return ret; */ + } + + /* self */ + TSK_OBJECT_SAFE_FREE(self->jitterbuffer); + + tsk_safeobj_deinit(self); + + return 0; +} \ No newline at end of file diff --git a/branches/2.0/doubango/tinyDAV/src/video/tdav_converter_video.c b/branches/2.0/doubango/tinyDAV/src/video/tdav_converter_video.c index 9343994c..6dc8c4b9 100644 --- a/branches/2.0/doubango/tinyDAV/src/video/tdav_converter_video.c +++ b/branches/2.0/doubango/tinyDAV/src/video/tdav_converter_video.c @@ -44,50 +44,51 @@ } \ } +static inline enum PixelFormat _tdav_converter_video_get_pixfmt(tmedia_chroma_t chroma) +{ + switch(chroma){ + case tmedia_chroma_rgb24: + return PIX_FMT_RGB24; + case tmedia_chroma_bgr24: + return PIX_FMT_BGR24; + case tmedia_chroma_rgb32: + return PIX_FMT_RGB32; + case tmedia_chroma_rgb565le: + return PIX_FMT_RGB565LE; + case tmedia_chroma_rgb565be: + return PIX_FMT_RGB565BE; + case tmedia_chroma_nv21: + return PIX_FMT_NV21; + case tmedia_chroma_nv12: + return PIX_FMT_NV12; + case 
tmedia_chroma_yuv422p: + return PIX_FMT_YUV422P; + case tmedia_chroma_uyvy422: + return PIX_FMT_UYVY422; + case tmedia_chroma_yuv420p: + return PIX_FMT_YUV420P; + default: + TSK_DEBUG_ERROR("Invalid chroma %d", (int)chroma); + return PIX_FMT_NONE; + } +} -tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t chroma, tsk_bool_t toYUV420) +tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma) { #if HAVE_FFMPEG || HAVE_SWSSCALE tdav_converter_video_t* converter; - enum PixelFormat pixfmt; + enum PixelFormat srcPixfmt, dstPixfmt; TSK_DEBUG_INFO("Creating new Video Converter src=(%dx%d) dst=(%dx%d)", srcWidth, srcHeight, dstWidth, dstHeight); - switch(chroma){ - case tmedia_rgb24: - pixfmt = PIX_FMT_RGB24; - break; - case tmedia_bgr24: - pixfmt = PIX_FMT_BGR24; - break; - case tmedia_rgb32: - pixfmt = PIX_FMT_RGB32; - break; - case tmedia_rgb565le: - pixfmt = PIX_FMT_RGB565LE; - break; - case tmedia_rgb565be: - pixfmt = PIX_FMT_RGB565BE; - break; - case tmedia_nv21: - pixfmt = PIX_FMT_NV21; - break; - case tmedia_nv12: - pixfmt = PIX_FMT_NV12; - break; - case tmedia_yuv422p: - pixfmt = PIX_FMT_YUV422P; - break; - case tmedia_uyvy422: - pixfmt = PIX_FMT_UYVY422; - break; - case tmedia_yuv420p: - pixfmt = PIX_FMT_YUV420P; - break; - default: - TSK_DEBUG_ERROR("Invalid chroma"); - return tsk_null; + if((srcPixfmt = _tdav_converter_video_get_pixfmt(srcChroma)) == PIX_FMT_NONE){ + TSK_DEBUG_ERROR("Invalid source chroma"); + return tsk_null; + } + if((dstPixfmt = _tdav_converter_video_get_pixfmt(dstChroma)) == PIX_FMT_NONE){ + TSK_DEBUG_ERROR("Invalid destination chroma"); + return tsk_null; } if(!(converter = tsk_object_new(tdav_converter_video_def_t))){ @@ -96,8 +97,8 @@ tdav_converter_video_t* tdav_converter_video_create(tsk_size_t 
srcWidth, tsk_siz } // Set values - converter->toYUV420 = toYUV420; - converter->pixfmt = pixfmt; + converter->srcFormat = srcPixfmt; + converter->dstFormat = dstPixfmt; converter->srcWidth = srcWidth ? srcWidth : dstWidth; converter->srcHeight = srcHeight ? srcHeight : dstHeight; converter->dstWidth = dstWidth ? dstWidth : srcWidth; @@ -113,17 +114,12 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void { #if HAVE_FFMPEG || HAVE_SWSSCALE int ret, size; - enum PixelFormat srcFormat, dstFormat; if(!self || !buffer || !output){ TSK_DEBUG_ERROR("Invalid parameter"); return 0; } - /* Formats */ - srcFormat = self->toYUV420 ? self->pixfmt : PIX_FMT_YUV420P; - dstFormat = self->toYUV420 ? PIX_FMT_YUV420P : self->pixfmt; - /* Pictures */ if(!self->srcFrame){ if(!(self->srcFrame = avcodec_alloc_frame())){ @@ -138,7 +134,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void } } - size = avpicture_get_size(dstFormat, self->dstWidth, self->dstHeight); + size = avpicture_get_size(self->dstFormat, self->dstWidth, self->dstHeight); if((int)*output_max_size srcFrame, (uint8_t*)buffer, srcFormat, self->srcWidth, self->srcHeight); + ret = avpicture_fill((AVPicture *)self->srcFrame, (uint8_t*)buffer, self->srcFormat, self->srcWidth, self->srcHeight); /* Wrap the destination buffer */ - ret = avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, dstFormat, self->dstWidth, self->dstHeight); + ret = avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, self->dstWidth, self->dstHeight); /* === performs conversion === */ /* Context */ if(!self->context){ self->context = sws_getContext( - self->srcWidth, self->srcHeight, srcFormat, - self->dstWidth, self->dstHeight, dstFormat, + self->srcWidth, self->srcHeight, self->srcFormat, + self->dstWidth, self->dstHeight, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL); if(!self->context){ @@ -166,6 +162,11 @@ tsk_size_t 
tdav_converter_video_convert(tdav_converter_video_t* self, const void return 0; } } + // flip + if(self->flip){ + tdav_converter_video_flip(self->srcFrame, self->srcHeight); + } + // chroma conversion ret = sws_scale(self->context, (const uint8_t* const*)self->srcFrame->data, self->srcFrame->linesize, 0, self->srcHeight, self->dstFrame->data, self->dstFrame->linesize); @@ -175,7 +176,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void } // Rotation - if(self->rotation && (PIX_FMT_YUV420P == dstFormat) && self->rotation==90/*FIXME: For now only 90° rotation is supported */){ + if(self->rotation && (PIX_FMT_YUV420P == self->dstFormat) && self->rotation==90/*FIXME: For now only 90° rotation is supported */){ // because we rotated 90 width = original height, height = original width int w = self->dstHeight; int h = self->dstWidth; @@ -189,7 +190,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void // allocate rotation temporary buffer if(!self->rot.buffer){ - int buff_size = avpicture_get_size(dstFormat, w, h); + int buff_size = avpicture_get_size(self->dstFormat, w, h); if (!(self->rot.buffer = (uint8_t *)av_malloc(buff_size))){ TSK_DEBUG_ERROR("failed to allocate new buffer for the frame"); TSK_FREE(*output); @@ -198,7 +199,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void } //wrap - avpicture_fill((AVPicture *)self->rot.frame, self->rot.buffer, dstFormat, w, h); + avpicture_fill((AVPicture *)self->rot.frame, self->rot.buffer, self->dstFormat, w, h); // rotate rotate90(self->dstWidth, self->dstHeight, self->dstFrame->data[0], self->rot.frame->data[0]); rotate90(self->dstWidth/2, self->dstHeight/2, self->dstFrame->data[1], self->rot.frame->data[1]); @@ -220,14 +221,14 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void top_band = (r_h-self->dstHeight)/3; if(!self->rot.context){ - if(!(self->rot.context = sws_getContext(w, h, dstFormat, 
r_w, r_h, dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){ + if(!(self->rot.context = sws_getContext(w, h, self->dstFormat, r_w, r_h, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){ TSK_DEBUG_ERROR("Failed to create context"); TSK_FREE(*output); return 0; } } - r_size = avpicture_get_size(dstFormat, r_w, r_h); + r_size = avpicture_get_size(self->dstFormat, r_w, r_h); if((int)*output_max_size dstFrame, (uint8_t*)*output, dstFormat, r_w, r_h); + avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, r_w, r_h); // pad sws_scale(self->rot.context, (const uint8_t* const*)self->rot.frame->data, self->rot.frame->linesize, @@ -249,15 +250,15 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void self->dstFrame->data[1] = self->dstFrame->data[1] + ((top_band >> y_shift) * self->dstFrame->linesize[1]) + (left_band >> x_shift); self->dstFrame->data[2] = self->dstFrame->data[2] + ((top_band >> y_shift) * self->dstFrame->linesize[2]) + (left_band >> x_shift); - avpicture_layout((const AVPicture*)self->dstFrame, dstFormat, self->dstWidth, self->dstHeight, *output, *output_max_size); + avpicture_layout((const AVPicture*)self->dstFrame, self->dstFormat, self->dstWidth, self->dstHeight, *output, *output_max_size); } -#else // Crash +#else // Context if(!self->rot.context){ - if(!(self->rot.context = sws_getContext(w, h, dstFormat, h, w, dstFormat, SWS_BICUBIC, NULL, NULL, NULL))){ + if(!(self->rot.context = sws_getContext(w, h,self->dstFormat, h, w, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){ TSK_DEBUG_ERROR("Failed to create context"); TSK_FREE(*output); return 0; diff --git a/branches/2.0/doubango/tinyDAV/src/video/tdav_session_video.c b/branches/2.0/doubango/tinyDAV/src/video/tdav_session_video.c index d986dfe5..3c92b4b0 100644 --- a/branches/2.0/doubango/tinyDAV/src/video/tdav_session_video.c +++ b/branches/2.0/doubango/tinyDAV/src/video/tdav_session_video.c @@ -25,7 +25,6 @@ * * @author Mamadou Diop 
* - */ #include "tinydav/video/tdav_session_video.h" @@ -81,16 +80,17 @@ static int tdav_session_video_rtp_cb(const void* callback_data, const struct trt } // Convert decoded data to the consumer chroma and size -#define CONSUMER_INSIZE_CHANGED ((session->consumer->video.in.width * session->consumer->video.in.height * 3)/2 != out_size)// we have good reasons not to use 1.5f -#define CONSUMER_DISPLAY_NEED_RESIZE (session->consumer->video.in.width != session->consumer->video.display.width || session->consumer->video.in.height != session->consumer->video.display.height) -#define CONSUMER_DECODED_HAS_DIFF_SIZE (session->consumer->video.display.width != TMEDIA_CODEC_VIDEO(codec)->in.width || session->consumer->video.display.height != TMEDIA_CODEC_VIDEO(codec)->in.height) -#define CONSUMER_DISPLAY_NEED_CHROMACHANGE (session->consumer->video.display.chroma != tmedia_yuv420p) +#define CONSUMER_INSIZE_MISMATCH ((session->consumer->video.in.width * session->consumer->video.in.height * 3)>>1 != out_size)// we have good reasons not to use 1.5f +#define CONSUMER_IN_N_DISPLAY_MISMATCH (session->consumer->video.in.width != session->consumer->video.display.width || session->consumer->video.in.height != session->consumer->video.display.height) +#define CONSUMER_DISPLAY_N_CODEC_MISMATCH (session->consumer->video.display.width != TMEDIA_CODEC_VIDEO(codec)->in.width || session->consumer->video.display.height != TMEDIA_CODEC_VIDEO(codec)->in.height) +#define CONSUMER_DISPLAY_N_CONVERTER_MISMATCH ( (session->conv.fromYUV420 && session->conv.fromYUV420->dstWidth != session->consumer->video.display.width) || (session->conv.fromYUV420 && session->conv.fromYUV420->dstHeight != session->consumer->video.display.height) ) +#define CONSUMER_CHROMA_MISMATCH (session->consumer->video.display.chroma != TMEDIA_CODEC_VIDEO(codec)->in.chroma) +#define DECODED_NEED_FLIP (TMEDIA_CODEC_VIDEO(codec)->in.flip) - if((CONSUMER_DISPLAY_NEED_CHROMACHANGE || CONSUMER_DECODED_HAS_DIFF_SIZE || 
CONSUMER_DISPLAY_NEED_RESIZE || CONSUMER_INSIZE_CHANGED)){ - tsk_size_t _output_size; + if((CONSUMER_CHROMA_MISMATCH || CONSUMER_DISPLAY_N_CODEC_MISMATCH || CONSUMER_IN_N_DISPLAY_MISMATCH || CONSUMER_INSIZE_MISMATCH || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH || DECODED_NEED_FLIP)){ // Create video converter if not already done - if(!session->conv.fromYUV420 || CONSUMER_DECODED_HAS_DIFF_SIZE || CONSUMER_INSIZE_CHANGED){ + if(!session->conv.fromYUV420 || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH || CONSUMER_INSIZE_MISMATCH){ TSK_OBJECT_SAFE_FREE(session->conv.fromYUV420); // update in (set by the codec) session->consumer->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;//decoded width @@ -102,22 +102,27 @@ static int tdav_session_video_rtp_cb(const void* callback_data, const struct trt session->consumer->video.display.height = session->consumer->video.in.height; } // create converter - if(!(session->conv.fromYUV420 = tdav_converter_video_create(TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, session->consumer->video.display.width, session->consumer->video.display.height, - session->consumer->video.display.chroma, tsk_false))){ + if(!(session->conv.fromYUV420 = tdav_converter_video_create(TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, TMEDIA_CODEC_VIDEO(codec)->in.chroma, session->consumer->video.display.width, session->consumer->video.display.height, + session->consumer->video.display.chroma))){ TSK_DEBUG_ERROR("Failed to create video converter"); ret = -3; goto bail; } } + } + + if(session->conv.fromYUV420){ + // update one-shot parameters + tdav_converter_video_init(session->conv.fromYUV420, 0/*rotation*/, TMEDIA_CODEC_VIDEO(codec)->in.flip); // convert data to the consumer's chroma - _output_size = tdav_converter_video_convert(session->conv.fromYUV420, session->decoder.buffer, &session->decoder.conv_buffer, &session->decoder.conv_buffer_size); - if(!_output_size || !session->decoder.conv_buffer){ + out_size = 
tdav_converter_video_convert(session->conv.fromYUV420, session->decoder.buffer, &session->decoder.conv_buffer, &session->decoder.conv_buffer_size); + if(!out_size || !session->decoder.conv_buffer){ TSK_DEBUG_ERROR("Failed to convert YUV420 buffer to consumer's chroma"); ret = -4; goto bail; } - tmedia_consumer_consume(session->consumer, session->decoder.conv_buffer, _output_size, packet->header); + tmedia_consumer_consume(session->consumer, session->decoder.conv_buffer, out_size, packet->header); if(!session->decoder.conv_buffer){ /* taken by the consumer */ session->decoder.conv_buffer_size = 0; @@ -189,8 +194,9 @@ static int tdav_session_video_producer_enc_cb(const void* callback_data, const v #define PRODUCER_SIZE_CHANGED (session->conv.producerWidth != session->producer->video.width) || (session->conv.producerHeight != session->producer->video.height) \ || (session->conv.xProducerSize != size) +#define ENCODED_NEED_FLIP TMEDIA_CODEC_VIDEO(codec)->out.flip // Video codecs only accept YUV420P buffers ==> do conversion if needed or producer doesn't have the right size - if((session->producer->video.chroma != tmedia_yuv420p) || PRODUCER_SIZE_CHANGED){ + if((session->producer->video.chroma != TMEDIA_CODEC_VIDEO(codec)->out.chroma) || PRODUCER_SIZE_CHANGED || ENCODED_NEED_FLIP){ // Create video converter if not already done or producer size has changed if(!session->conv.toYUV420 || PRODUCER_SIZE_CHANGED){ TSK_OBJECT_SAFE_FREE(session->conv.toYUV420); @@ -198,15 +204,18 @@ static int tdav_session_video_producer_enc_cb(const void* callback_data, const v session->conv.producerHeight = session->producer->video.height; session->conv.xProducerSize = size; - if(!(session->conv.toYUV420 = tdav_converter_video_create(session->producer->video.width, session->producer->video.height, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height, - session->producer->video.chroma, tsk_true))){ + if(!(session->conv.toYUV420 = 
tdav_converter_video_create(session->producer->video.width, session->producer->video.height, session->producer->video.chroma, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height, + TMEDIA_CODEC_VIDEO(codec)->out.chroma))){ TSK_DEBUG_ERROR("Failed to create video converter"); ret = -5; goto bail; } } + } + + if(session->conv.toYUV420){ // update one-shot parameters - tdav_converter_video_init(session->conv.toYUV420, session->producer->video.rotation); + tdav_converter_video_init(session->conv.toYUV420, session->producer->video.rotation, TMEDIA_CODEC_VIDEO(codec)->out.flip); // convert data to yuv420p yuv420p_size = tdav_converter_video_convert(session->conv.toYUV420, buffer, &session->encoder.conv_buffer, &session->encoder.conv_buffer_size); if(!yuv420p_size || !session->encoder.conv_buffer){ @@ -267,7 +276,7 @@ int tmedia_session_video_set(tmedia_session_t* self, const tmedia_param_t* param tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value); tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs); tsk_list_foreach(item, codecs){ - ((tmedia_codec_t*)item->data)->video.flip.decoded = flip; + TMEDIA_CODEC_VIDEO(item->data)->in.flip = flip; } tsk_object_unref(codecs); } @@ -285,7 +294,7 @@ int tmedia_session_video_set(tmedia_session_t* self, const tmedia_param_t* param tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value); tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs); tsk_list_foreach(item, codecs){ - ((tmedia_codec_t*)item->data)->video.flip.encoded = flip; + TMEDIA_CODEC_VIDEO(item->data)->out.flip = flip; } tsk_object_unref(codecs); } diff --git a/branches/2.0/doubango/tinyDAV/src/video/tdav_video_jitterbuffer.c b/branches/2.0/doubango/tinyDAV/src/video/tdav_video_jitterbuffer.c new file mode 100644 index 00000000..4e53c6e9 --- /dev/null +++ b/branches/2.0/doubango/tinyDAV/src/video/tdav_video_jitterbuffer.c @@ -0,0 +1,296 @@ +/* +* Copyright (C) 2011 Doubango Telecom +* +* Contact: Mamadou Diop +* +* 
This file is part of Open Source Doubango Framework. +* +* DOUBANGO is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* DOUBANGO is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with DOUBANGO. +* +*/ + +/**@file tdav_video_jitterbuffer.c + * @brief Video Jitter Buffer + * + * @author Mamadou Diop + */ +#include "tinydav/video/tdav_video_jitterbuffer.h" + +#include "tinyrtp/rtp/trtp_rtp_header.h" + +#include "tsk_debug.h" + +#define TDAV_VIDEO_JB_TAIL 1000 // in milliseconds + +// Internal functions +static tdav_video_jitterbuffer_packet_t* _tdav_video_jitterbuffer_packet_create(void* data, tsk_size_t data_size); + + + +static int tdav_video_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param) +{ + TSK_DEBUG_ERROR("Not implemented"); + return -2; +} + +static int tdav_video_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + + if(!jb->packets && !(jb->packets = tsk_list_create())){ + TSK_DEBUG_ERROR("Failed to create list"); + return -2; + } + if(!jb->timer && !(jb->timer = tsk_timer_manager_create())){ + TSK_DEBUG_ERROR("Failed to create list"); + return -3; + } + + jb->frame_duration = frame_duration; + jb->frame_max_count = (jb->tail / jb->frame_duration); + + return 0; +} + +static int tdav_video_jitterbuffer_tick(tmedia_jitterbuffer_t* self) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + + + // do nothing + // only useful for audio + + return 0; +} + +static int 
tdav_video_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + const trtp_rtp_header_t* rtp_hdr; + register uint32_t i = 0; + tsk_bool_t item_found = tsk_false; + const tsk_list_item_t *item; + tdav_video_jitterbuffer_packet_t *jb_packet = tsk_null; + int ret = 0; + + if(!data || !data_size || !proto_hdr){ + TSK_DEBUG_ERROR("Invalid parameter"); + return -1; + } + + rtp_hdr = TRTP_RTP_HEADER(proto_hdr); + + tsk_safeobj_lock(jb); + + tsk_list_foreach(item, jb->packets){ + if(i++ == jb->frame_curr_index){ + if((jb_packet = (tdav_video_jitterbuffer_packet_t*)item->data)){ + if((ret = tsk_buffer_copy(jb_packet->data, 0, data, data_size))){ + TSK_DEBUG_ERROR("Failed to copy buffer"); + goto done; + } + } + else{ + TSK_DEBUG_ERROR("Item contains null data"); + goto done; + } + // update values + jb_packet->taken = tsk_false; + jb_packet->seq_num = rtp_hdr->seq_num; + item_found = tsk_true; + } + } + + if(!item_found && !jb_packet){ + if((jb_packet = _tdav_video_jitterbuffer_packet_create(data, data_size))){ + // update values (use constructor) + jb_packet->seq_num = rtp_hdr->seq_num; + + tsk_list_push_ascending_data(jb->packets, (void**)&jb_packet); + TSK_OBJECT_SAFE_FREE(jb_packet); + } + else{ + TSK_DEBUG_ERROR("Failed to create jb packet"); + ret = -2; + goto done; + } + } + +done: + if(ret == 0){ + jb->frame_curr_index = (++jb->frame_curr_index % jb->frame_max_count); + } + tsk_safeobj_unlock(jb); + + return ret; +} + +static tsk_size_t tdav_video_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + const tsk_list_item_t *item; + tdav_video_jitterbuffer_packet_t *jb_packet = tsk_null; + tsk_size_t ret_size = 0; + + if(!out_data || !out_size){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0; + } + + tsk_safeobj_lock(jb); + + 
tsk_list_foreach(item, jb->packets){ + if((jb_packet = (tdav_video_jitterbuffer_packet_t*)item->data) && !jb_packet->taken){ + ret_size = TSK_MIN(TSK_BUFFER_SIZE(jb_packet->data), out_size); + memcpy(out_data, TSK_BUFFER_DATA(jb_packet->data), ret_size); + jb_packet->taken = tsk_true; + break; + } + } + + tsk_safeobj_unlock(jb); + + return ret_size; +} + +static int tdav_video_jitterbuffer_reset(tmedia_jitterbuffer_t* self) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + + + return 0; +} + +static int tdav_video_jitterbuffer_close(tmedia_jitterbuffer_t* self) +{ + tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self; + + + return 0; +} + + +// +// Speex jitterbuffer Packet Object definition +// +static tdav_video_jitterbuffer_packet_t* _tdav_video_jitterbuffer_packet_create(void* data, tsk_size_t data_size) +{ + tdav_video_jitterbuffer_packet_t *jb_packet; + if((jb_packet = tsk_object_new(tdav_video_jitterbuffer_packet_def_t))){ + jb_packet->data = tsk_buffer_create(data, data_size); + } + return jb_packet; +} + +/* constructor */ +static tsk_object_t* tdav_video_jitterbuffer_packet_ctor(tsk_object_t * self, va_list * app) +{ + tdav_video_jitterbuffer_packet_t *jb_packet = self; + if(jb_packet){ + TSK_OBJECT_SAFE_FREE(jb_packet->data); + tsk_safeobj_init(jb_packet); + } + return self; +} + +/* destructor */ +static tsk_object_t* tdav_video_jitterbuffer_packet_dtor(tsk_object_t * self) +{ + tdav_video_jitterbuffer_packet_t *jb_packet = self; + if(jb_packet){ + tsk_safeobj_deinit(jb_packet); + } + + return self; +} +/* comparator */ +static int tdav_video_jitterbuffer_packet_cmp(const tsk_object_t *_p1, const tsk_object_t *_p2) +{ + const tdav_video_jitterbuffer_packet_t *p1 = _p1; + const tdav_video_jitterbuffer_packet_t *p2 = _p2; + + if(p1 && p2){ + return (int)(p1->seq_num - p2->seq_num); + } + else if(!p1 && !p2) return 0; + else return -1; +} +/* object definition */ +static const tsk_object_def_t 
tdav_video_jitterbuffer_packet_def_s = +{ + sizeof(tdav_video_jitterbuffer_packet_t), + tdav_video_jitterbuffer_packet_ctor, + tdav_video_jitterbuffer_packet_dtor, + tdav_video_jitterbuffer_packet_cmp, +}; +const tsk_object_def_t *tdav_video_jitterbuffer_packet_def_t = &tdav_video_jitterbuffer_packet_def_s; + +// +// Speex jitterbuffer Plugin definition +// + +/* constructor */ +static tsk_object_t* tdav_video_jitterbuffer_ctor(tsk_object_t * self, va_list * app) +{ + tdav_video_jitterbuffer_t *jb = self; + if(jb){ + /* init base */ + tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(jb)); + /* init self */ + tsk_safeobj_init(jb); + jb->tail = TDAV_VIDEO_JB_TAIL; + } + return self; +} +/* destructor */ +static tsk_object_t* tdav_video_jitterbuffer_dtor(tsk_object_t * self) +{ + tdav_video_jitterbuffer_t *jb = self; + if(jb){ + /* deinit base */ + tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jb)); + /* deinit self */ + TSK_OBJECT_SAFE_FREE(jb->packets); + if(jb->timer){ + tsk_timer_manager_destroy(&jb->timer); + } + tsk_safeobj_deinit(jb); + } + + return self; +} +/* object definition */ +static const tsk_object_def_t tdav_video_jitterbuffer_def_s = +{ + sizeof(tdav_video_jitterbuffer_t), + tdav_video_jitterbuffer_ctor, + tdav_video_jitterbuffer_dtor, + tsk_null, +}; +/* plugin definition*/ +static const tmedia_jitterbuffer_plugin_def_t tdav_video_jitterbuffer_plugin_def_s = +{ + &tdav_video_jitterbuffer_def_s, + tmedia_video, + "Native Video JitterBuffer", + + tdav_video_jitterbuffer_set, + tdav_video_jitterbuffer_open, + tdav_video_jitterbuffer_tick, + tdav_video_jitterbuffer_put, + tdav_video_jitterbuffer_get, + tdav_video_jitterbuffer_reset, + tdav_video_jitterbuffer_close, +}; +const tmedia_jitterbuffer_plugin_def_t *tdav_video_jitterbuffer_plugin_def_t = &tdav_video_jitterbuffer_plugin_def_s; diff --git a/branches/2.0/doubango/tinyDAV/test/test_sessions.h b/branches/2.0/doubango/tinyDAV/test/test_sessions.h index d87e6af1..e4740ae6 100644 --- 
a/branches/2.0/doubango/tinyDAV/test/test_sessions.h +++ b/branches/2.0/doubango/tinyDAV/test/test_sessions.h @@ -54,7 +54,7 @@ "a=rtpmap:115 BV16/8000\r\n" \ "a=fmtp:102 octet-align=0; mode-set=0,1,2,3,4,5,6,7; mode-change-period=1; mode-change-capability=2; mode-change-neighbor=0\r\n" \ "a=fmtp:103 octet-align=1; mode-set=0,1,2,3,4,5,6,7; mode-change-period=1; mode-change-capability=2; mode-change-neighbor=0\r\n" \ - "m=video 6060 RTP/AVP 125 111 98 121 31 126 34 32\r\n" \ + "m=video 6060 RTP/AVP 125 98 111 121 31 126 34 32\r\n" \ "i=Video line\r\n" \ "b=A-YZ:92\r\n" \ "b=B-YZ:256\r\n" \ diff --git a/branches/2.0/doubango/tinyDAV/tinyDAV.vcproj b/branches/2.0/doubango/tinyDAV/tinyDAV.vcproj index c3ff4366..6fcfc9cc 100644 --- a/branches/2.0/doubango/tinyDAV/tinyDAV.vcproj +++ b/branches/2.0/doubango/tinyDAV/tinyDAV.vcproj @@ -41,8 +41,8 @@ + + + + @@ -450,6 +458,10 @@ + + @@ -462,6 +474,10 @@ RelativePath=".\include\tinydav\video\tdav_session_video.h" > + + @@ -573,6 +589,10 @@ RelativePath=".\src\codecs\h264\tdav_codec_h264.c" > + + @@ -758,6 +778,10 @@ + + @@ -770,6 +794,10 @@ RelativePath=".\src\video\tdav_session_video.c" > + + diff --git a/branches/2.0/doubango/tinyDSHOW/include/tinydshow/DSCaptureGraph.h b/branches/2.0/doubango/tinyDSHOW/include/tinydshow/DSCaptureGraph.h index 51e61796..e8593cd5 100644 --- a/branches/2.0/doubango/tinyDSHOW/include/tinydshow/DSCaptureGraph.h +++ b/branches/2.0/doubango/tinyDSHOW/include/tinydshow/DSCaptureGraph.h @@ -83,7 +83,6 @@ private: #ifdef _WIN32_WCE IBaseFilter *colorConvertor565; //http://msdn.microsoft.com/en-us/library/aa926076.aspx #else - IBaseFilter *decompressorFilter; DSFrameRateFilter *frameRateFilter; #endif diff --git a/branches/2.0/doubango/tinyDSHOW/src/DSCaptureGraph.cxx b/branches/2.0/doubango/tinyDSHOW/src/DSCaptureGraph.cxx index 478dc460..065f1436 100644 --- a/branches/2.0/doubango/tinyDSHOW/src/DSCaptureGraph.cxx +++ b/branches/2.0/doubango/tinyDSHOW/src/DSCaptureGraph.cxx @@ -48,7 +48,6 @@ 
DSCaptureGraph::DSCaptureGraph(ISampleGrabberCB* callback, HRESULT *hr) this->colorConvertor565 = NULL; #else this->frameRateFilter = NULL; - this->decompressorFilter = NULL; #endif this->nullRendererFilter = NULL; @@ -75,7 +74,6 @@ DSCaptureGraph::~DSCaptureGraph() #ifdef _WIN32_WCE SAFE_RELEASE(this->colorConvertor565); #else - SAFE_RELEASE(this->decompressorFilter); #endif SAFE_RELEASE(this->nullRendererFilter); @@ -212,8 +210,9 @@ HRESULT DSCaptureGraph::connect() hr = ConnectFilters(this->graphBuilder, this->colorConvertor565, this->sampleGrabberFilter) ; if(FAILED(hr))return hr; hr = ConnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr))return hr; #else - hr = ConnectFilters(this->graphBuilder, this->sourceFilter, this->decompressorFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; } - hr = ConnectFilters(this->graphBuilder, this->decompressorFilter, this->frameRateFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; } + // No convertor needed + // AVI Decompressor Filter is automatically by the Filter Graph Manager when needed + hr = ConnectFilters(this->graphBuilder, this->sourceFilter, this->frameRateFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; } hr = ConnectFilters(this->graphBuilder, this->frameRateFilter, this->sampleGrabberFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; } hr = ConnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; } #endif @@ -255,8 +254,7 @@ HRESULT DSCaptureGraph::disconnect() hr = DisconnectFilters(this->graphBuilder, this->colorConvertor565, this->sampleGrabberFilter); if(FAILED(hr))return hr; hr = DisconnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr))return hr; #else - hr = DisconnectFilters(this->graphBuilder, 
this->sourceFilter, this->decompressorFilter); - hr = DisconnectFilters(this->graphBuilder, this->decompressorFilter, this->frameRateFilter); + hr = DisconnectFilters(this->graphBuilder, this->sourceFilter, this->frameRateFilter); hr = DisconnectFilters(this->graphBuilder, this->frameRateFilter, this->sampleGrabberFilter); hr = DisconnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); #endif @@ -375,10 +373,6 @@ HRESULT DSCaptureGraph::createCaptureGraph() hr = COCREATE(CLSID_SampleGrabber, IID_IBaseFilter, this->sampleGrabberFilter); if(FAILED(hr)) return hr; - // Create the AVI decoder filter - hr = COCREATE(CLSID_AVIDec, IID_IBaseFilter, this->decompressorFilter); - if(FAILED(hr)) return hr; - // Create tdshow filter LPUNKNOWN pUnk = NULL; this->frameRateFilter = new DSFrameRateFilter(FILTER_FRAMERATE, pUnk, &hr); @@ -400,22 +394,10 @@ HRESULT DSCaptureGraph::createCaptureGraph() hr = this->graphBuilder->AddFilter(this->frameRateFilter, FILTER_FRAMERATE); if(FAILED(hr)) return hr; - // Add AVIDec to the graph - hr = this->graphBuilder->AddFilter(this->decompressorFilter, FILTER_AVI_DECOMPRESSOR); - if(FAILED(hr)) return hr; - // Find media control hr = QUERY(this->graphBuilder, IID_IMediaControl, this->mediaController); if(FAILED(hr)) return hr; - // Disable timing - /*IMediaFilter *mediaFilterController; - hr = QUERY(this->graphBuilder, IID_IMediaFilter, mediaFilterController); - if(FAILED(hr)) return hr; - - mediaFilterController->SetSyncSource(NULL); - SAFE_RELEASE(mediaFilterController);*/ - // Create the sample grabber hr = QUERY(this->sampleGrabberFilter, IID_ISampleGrabber, this->grabberController); if(FAILED(hr)) return hr; diff --git a/branches/2.0/doubango/tinyDSHOW/src/DSGrabber.cxx b/branches/2.0/doubango/tinyDSHOW/src/DSGrabber.cxx index cfae87c0..967bf145 100644 --- a/branches/2.0/doubango/tinyDSHOW/src/DSGrabber.cxx +++ b/branches/2.0/doubango/tinyDSHOW/src/DSGrabber.cxx @@ -50,8 +50,8 @@ 
DSGrabber::DSGrabber(HRESULT *hr) // Init the bitmap info header with default values memset(&(this->bitmapInfo), 0, sizeof(BITMAPINFOHEADER)); this->bitmapInfo.biSize = sizeof(BITMAPINFOHEADER); - this->bitmapInfo.biWidth = 176; - this->bitmapInfo.biHeight = 144; + this->bitmapInfo.biWidth = 352; + this->bitmapInfo.biHeight = 288; this->bitmapInfo.biPlanes = 1; this->bitmapInfo.biBitCount = 24; this->bitmapInfo.biCompression = 0; @@ -89,7 +89,9 @@ void DSGrabber::start() if (!this->graph->isRunning()){ first_buffer = true; - this->preview->start(); + if(this->preview){ + this->preview->start(); + } this->graph->connect(); this->graph->start(); } @@ -97,7 +99,9 @@ void DSGrabber::start() void DSGrabber::stop() { if (this->graph->isRunning()){ - this->preview->stop(); + if(this->preview){ + this->preview->stop(); + } this->graph->stop(); this->graph->disconnect(); } @@ -140,8 +144,10 @@ bool DSGrabber::setCaptureParameters(int w, int h, int f) // Setup source filter in the graph HRESULT hr = this->graph->setParameters(fmt, this->fps); // Set preview parameters - this->preview->setFps(this->fps); - this->preview->setSize(this->width, this->height); + if(this->preview){ + this->preview->setFps(this->fps); + this->preview->setSize(this->width, this->height); + } tsk_mutex_unlock(this->mutex_buffer); diff --git a/branches/2.0/doubango/tinyDSHOW/src/DSOutputStream.cxx b/branches/2.0/doubango/tinyDSHOW/src/DSOutputStream.cxx index 0e6ca5af..6dbab403 100644 --- a/branches/2.0/doubango/tinyDSHOW/src/DSOutputStream.cxx +++ b/branches/2.0/doubango/tinyDSHOW/src/DSOutputStream.cxx @@ -50,8 +50,8 @@ DSOutputStream::DSOutputStream(HRESULT *phr, DSOutputFilter *pParent, LPCWSTR pP this->frameLength = (1000)/DEFAULT_FPS; this->fps = DEFAULT_FPS; - this->width = 176; - this->height = 144; + this->width = 352; + this->height = 288; this->overlay = false; diff --git a/branches/2.0/doubango/tinyDSHOW/src/plugin/DSConsumer.cxx b/branches/2.0/doubango/tinyDSHOW/src/plugin/DSConsumer.cxx 
index 3000ac29..259cc1ca 100644 --- a/branches/2.0/doubango/tinyDSHOW/src/plugin/DSConsumer.cxx +++ b/branches/2.0/doubango/tinyDSHOW/src/plugin/DSConsumer.cxx @@ -214,7 +214,7 @@ static tsk_object_t* tdshow_consumer_ctor(tsk_object_t * self, va_list * app) /* init base */ tmedia_consumer_init(TMEDIA_CONSUMER(consumer)); - TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_bgr24; // RGB24 on x86 (little endians) stored as BGR24 + TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24 /* init self */ TMEDIA_CONSUMER(consumer)->video.fps = 15; diff --git a/branches/2.0/doubango/tinyDSHOW/src/plugin/DSProducer.cxx b/branches/2.0/doubango/tinyDSHOW/src/plugin/DSProducer.cxx index 754133b1..bfb4d048 100644 --- a/branches/2.0/doubango/tinyDSHOW/src/plugin/DSProducer.cxx +++ b/branches/2.0/doubango/tinyDSHOW/src/plugin/DSProducer.cxx @@ -88,7 +88,6 @@ int tdshow_producer_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec } TMEDIA_PRODUCER(producer)->video.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps; - // FIXME TMEDIA_PRODUCER(producer)->video.width = TMEDIA_CODEC_VIDEO(codec)->out.width; TMEDIA_PRODUCER(producer)->video.height = TMEDIA_CODEC_VIDEO(codec)->out.height; @@ -207,11 +206,11 @@ static tsk_object_t* tdshow_producer_ctor(tsk_object_t * self, va_list * app) /* init base */ tmedia_producer_init(TMEDIA_PRODUCER(producer)); - TMEDIA_PRODUCER(producer)->video.chroma = tmedia_bgr24; // RGB24 on x86 (little endians) stored as BGR24 + TMEDIA_PRODUCER(producer)->video.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24 /* init self with default values*/ TMEDIA_PRODUCER(producer)->video.fps = 15; - TMEDIA_PRODUCER(producer)->video.width = 176; - TMEDIA_PRODUCER(producer)->video.height = 144; + TMEDIA_PRODUCER(producer)->video.width = 352; + TMEDIA_PRODUCER(producer)->video.height = 288; if(IsMainThread()){ producer->grabber = new DSGrabber(&hr); diff --git 
a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_codec.h b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_codec.h index 5391a4f2..3826adca 100644 --- a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_codec.h +++ b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_codec.h @@ -147,17 +147,6 @@ typedef struct tmedia_codec_s //! the negociated format (only useful for codecs with dyn. payload type) char* neg_format; - struct { - unsigned __FIXME__:1; - } audio; - - struct { - struct { - unsigned encoded:1; - unsigned decoded:1; - } flip; - } video; - //! plugin used to create the codec const struct tmedia_codec_plugin_def_s* plugin; } @@ -272,6 +261,8 @@ typedef struct tmedia_codec_video_s unsigned fps; unsigned max_br; unsigned max_mbps; + tmedia_chroma_t chroma; + tsk_bool_t flip; }in;// decoded struct{ unsigned width; @@ -279,6 +270,8 @@ typedef struct tmedia_codec_video_s unsigned fps; unsigned max_br; unsigned max_mbps; + tmedia_chroma_t chroma; + tsk_bool_t flip; }out;// encoded diff --git a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_common.h b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_common.h index 75606b25..0fcb02d0 100644 --- a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_common.h +++ b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_common.h @@ -89,16 +89,17 @@ tmedia_video_size_t; // used by tinyWRAP typedef enum tmedia_chroma_e { - tmedia_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. 
WindowsPhone7 - tmedia_bgr24, // used by windows consumer (DirectShow) - - tmedia_rgb32, // used by iOS4 consumer (iPhone and iPod touch) - tmedia_rgb565le, // (used by both android and wince consumers) - tmedia_rgb565be, - tmedia_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4) - tmedia_nv21, // Yuv420 SP (used by android producer) - tmedia_yuv422p, - tmedia_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G) - tmedia_yuv420p, // Default + tmedia_chroma_none=0, + tmedia_chroma_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7 + tmedia_chroma_bgr24, // used by windows consumer (DirectShow) - + tmedia_chroma_rgb32, // used by iOS4 consumer (iPhone and iPod touch) + tmedia_chroma_rgb565le, // (used by both android and wince consumers) + tmedia_chroma_rgb565be, + tmedia_chroma_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4) + tmedia_chroma_nv21, // Yuv420 SP (used by android producer) + tmedia_chroma_yuv422p, + tmedia_chroma_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G) + tmedia_chroma_yuv420p, // Default } tmedia_chroma_t; diff --git a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_consumer.h b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_consumer.h index f62d1ffb..d072ff71 100644 --- a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_consumer.h +++ b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_consumer.h @@ -44,13 +44,17 @@ TMEDIA_BEGIN_DECLS #define TMEDIA_CONSUMER_PTIME_DEFAULT 20 /**Max number of plugins (consumer types) we can create */ -#define TMED_CONSUMER_MAX_PLUGINS 0x0F +#if !defined(TMED_CONSUMER_MAX_PLUGINS) +# define TMED_CONSUMER_MAX_PLUGINS 0x0F +#endif /** cast any pointer to @ref tmedia_consumer_t* object */ #define TMEDIA_CONSUMER(self) ((tmedia_consumer_t*)(self)) /** Default Video chroma */ -#define TMEDIA_CONSUMER_CHROMA_DEFAULT tmedia_yuv420p +#if !defined(TMEDIA_CONSUMER_CHROMA_DEFAULT) +# define 
TMEDIA_CONSUMER_CHROMA_DEFAULT tmedia_chroma_yuv420p +#endif /** Base object for all Consumers */ typedef struct tmedia_consumer_s diff --git a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_jitterbuffer.h b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_jitterbuffer.h index 1cda1361..f954f8a2 100644 --- a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_jitterbuffer.h +++ b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_jitterbuffer.h @@ -21,7 +21,7 @@ */ /**@file tmedia_jitterbuffer.h - * @brief JitterBuffer Plugin + * @brief Audio/Video JitterBuffer Plugin * * @author Mamadou Diop */ @@ -30,6 +30,10 @@ #include "tinymedia_config.h" +#include "tinymedia/tmedia_params.h" +#include "tmedia_common.h" + + #include "tsk_object.h" TMEDIA_BEGIN_DECLS @@ -37,6 +41,11 @@ TMEDIA_BEGIN_DECLS /** cast any pointer to @ref tmedia_jitterbuffer_t* object */ #define TMEDIA_JITTER_BUFFER(self) ((tmedia_jitterbuffer_t*)(self)) +/**Max number of plugins (jb types) we can create */ +#if !defined(TMED_JITTER_BUFFER_MAX_PLUGINS) +# define TMED_JITTER_BUFFER_MAX_PLUGINS 0x0F +#endif + /** Base object for all JitterBuffers */ typedef struct tmedia_jitterbuffer_s { @@ -56,9 +65,13 @@ typedef struct tmedia_jitterbuffer_plugin_def_s //! object definition used to create an instance of the jitterbufferr const tsk_object_def_t* objdef; + //! the type of the jitter buffer + tmedia_type_t type; + //! 
full description (usefull for debugging) const char* desc; + int (*set) (tmedia_jitterbuffer_t* , const tmedia_param_t*); int (* open) (tmedia_jitterbuffer_t*, uint32_t frame_duration, uint32_t rate); int (* tick) (tmedia_jitterbuffer_t*); int (* put) (tmedia_jitterbuffer_t*, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr); @@ -69,6 +82,7 @@ typedef struct tmedia_jitterbuffer_plugin_def_s tmedia_jitterbuffer_plugin_def_t; TINYMEDIA_API int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self); +TINYMEDIA_API int tmedia_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param); TINYMEDIA_API int tmedia_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate); TINYMEDIA_API int tmedia_jitterbuffer_tick(tmedia_jitterbuffer_t* self); TINYMEDIA_API int tmedia_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr); @@ -79,7 +93,8 @@ TINYMEDIA_API int tmedia_jitterbuffer_deinit(tmedia_jitterbuffer_t* self); TINYMEDIA_API int tmedia_jitterbuffer_plugin_register(const tmedia_jitterbuffer_plugin_def_t* plugin); TINYMEDIA_API int tmedia_jitterbuffer_plugin_unregister(); -TINYMEDIA_API tmedia_jitterbuffer_t* tmedia_jitterbuffer_create(); +TINYMEDIA_API int tmedia_jitter_buffer_plugin_unregister_by_type(tmedia_type_t type); +TINYMEDIA_API tmedia_jitterbuffer_t* tmedia_jitterbuffer_create(tmedia_type_t type); TMEDIA_END_DECLS diff --git a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_producer.h b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_producer.h index d6f993b1..1d284987 100644 --- a/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_producer.h +++ b/branches/2.0/doubango/tinyMEDIA/include/tinymedia/tmedia_producer.h @@ -52,7 +52,7 @@ typedef int (*tmedia_producer_enc_cb_f)(const void* callback_data, const void* b typedef int (*tmedia_producer_raw_cb_f)(const void* callback_data, const void* buffer, tsk_size_t size, uint32_t 
duration, tsk_bool_t marker); /** Default Video chroma */ -#define TMEDIA_PRODUCER_CHROMA_DEFAULT tmedia_yuv420p +#define TMEDIA_PRODUCER_CHROMA_DEFAULT tmedia_chroma_yuv420p /** Base object for all Producers */ typedef struct tmedia_producer_s diff --git a/branches/2.0/doubango/tinyMEDIA/src/tmedia_codec.c b/branches/2.0/doubango/tinyMEDIA/src/tmedia_codec.c index f14764f1..782c8418 100644 --- a/branches/2.0/doubango/tinyMEDIA/src/tmedia_codec.c +++ b/branches/2.0/doubango/tinyMEDIA/src/tmedia_codec.c @@ -82,16 +82,14 @@ int tmedia_codec_init(tmedia_codec_t* self, tmedia_type_t type, const char* name // Video flipping: For backward compatibility we have to initialize the default values // according to the CFLAGS: 'FLIP_ENCODED_PICT' and 'FLIP_DECODED_PICT'. At any time you // can update thse values (e.g. when the device switch from landscape to portrait) using video_session->set(); + if(type & tmedia_video){ #if FLIP_ENCODED_PICT - self->video.flip.encoded = tsk_true; -#else - self->video.flip.encoded = tsk_false; + TMEDIA_CODEC_VIDEO(self)->out.flip = tsk_true; #endif #if FLIP_DECODED_PICT - self->video.flip.decoded = tsk_true; -#else - self->video.flip.decoded = tsk_false; + TMEDIA_CODEC_VIDEO(self)->in.flip = tsk_true; #endif + } return 0; } @@ -301,9 +299,11 @@ tmedia_codec_t* tmedia_codec_create(const char* format) { /* Video codec */ tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(codec); tmedia_codec_video_init(TMEDIA_CODEC(video), plugin->name, plugin->desc, plugin->format); - video->in.width = video->out.width = plugin->video.width; - video->in.height = video->out.height = plugin->video.height; - video->in.fps = video->out.fps = plugin->video.fps; + if(!video->in.width)video->in.width = video->out.width = plugin->video.width; + if(!video->in.height)video->in.height = video->out.height = plugin->video.height; + if(!video->in.fps)video->in.fps = video->out.fps = plugin->video.fps; + if(video->in.chroma==tmedia_chroma_none)video->in.chroma = 
tmedia_chroma_yuv420p; + if(video->out.chroma==tmedia_chroma_none)video->out.chroma = tmedia_chroma_yuv420p; break; } case tmedia_msrp: diff --git a/branches/2.0/doubango/tinyMEDIA/src/tmedia_common.c b/branches/2.0/doubango/tinyMEDIA/src/tmedia_common.c index 0bd967d1..ccafc1c7 100644 --- a/branches/2.0/doubango/tinyMEDIA/src/tmedia_common.c +++ b/branches/2.0/doubango/tinyMEDIA/src/tmedia_common.c @@ -222,27 +222,27 @@ const tmedia_video_size_t* tmedia_get_video_size(tmedia_chroma_t chroma, tsk_siz tsk_size_t i; switch(chroma) { - case tmedia_rgb24: - case tmedia_bgr24: + case tmedia_chroma_rgb24: + case tmedia_chroma_bgr24: factor = 3.f; break; - case tmedia_rgb565le: - case tmedia_rgb565be: + case tmedia_chroma_rgb565le: + case tmedia_chroma_rgb565be: factor = 2.f; break; - case tmedia_rgb32: + case tmedia_chroma_rgb32: factor = 4.f; break; - case tmedia_nv21: - case tmedia_nv12: - case tmedia_yuv420p: + case tmedia_chroma_nv21: + case tmedia_chroma_nv12: + case tmedia_chroma_yuv420p: factor = 1.5f; break; - case tmedia_yuv422p: - case tmedia_uyvy422: + case tmedia_chroma_yuv422p: + case tmedia_chroma_uyvy422: factor = 2.f; break; } diff --git a/branches/2.0/doubango/tinyMEDIA/src/tmedia_consumer.c b/branches/2.0/doubango/tinyMEDIA/src/tmedia_consumer.c index d6007c8b..3af124a5 100644 --- a/branches/2.0/doubango/tinyMEDIA/src/tmedia_consumer.c +++ b/branches/2.0/doubango/tinyMEDIA/src/tmedia_consumer.c @@ -35,7 +35,7 @@ */ /* pointer to all registered consumers */ -const tmedia_consumer_plugin_def_t* __tmedia_consumer_plugins[TMED_CONSUMER_MAX_PLUGINS] = {0}; +static const tmedia_consumer_plugin_def_t* __tmedia_consumer_plugins[TMED_CONSUMER_MAX_PLUGINS] = {0}; /**@ingroup tmedia_consumer_group * Initialize the consumer. 
diff --git a/branches/2.0/doubango/tinyMEDIA/src/tmedia_jitterbuffer.c b/branches/2.0/doubango/tinyMEDIA/src/tmedia_jitterbuffer.c index 8d264bad..ceb7642d 100644 --- a/branches/2.0/doubango/tinyMEDIA/src/tmedia_jitterbuffer.c +++ b/branches/2.0/doubango/tinyMEDIA/src/tmedia_jitterbuffer.c @@ -21,7 +21,7 @@ */ /**@file tmedia_jitterbuffer.c - * @brief JitterBuffer plugin + * @brief Audio/Video JitterBuffer plugin * * @author Mamadou Diop */ @@ -29,7 +29,8 @@ #include "tsk_debug.h" -static const tmedia_jitterbuffer_plugin_def_t* __tmedia_jitterbuffer_plugin = tsk_null; +/* pointer to all registered jitter_buffers */ +static const tmedia_jitterbuffer_plugin_def_t* __tmedia_jitterbuffer_plugins[TMED_JITTER_BUFFER_MAX_PLUGINS] = {0}; int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self) { @@ -40,6 +41,15 @@ int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self) return 0; } +int tmedia_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param) +{ + if(!self || !self->plugin || !param){ + TSK_DEBUG_ERROR("Invalid parameter"); + return 0; + } + return self->plugin->set ? 
self->plugin->set(self, param) : 0; +} + int tmedia_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate) { int ret; @@ -164,30 +174,108 @@ int tmedia_jitterbuffer_deinit(tmedia_jitterbuffer_t* self) return 0; } +tmedia_jitterbuffer_t* tmedia_jitterbuffer_create(tmedia_type_t type) +{ + tmedia_jitterbuffer_t* jitter_buffer = tsk_null; + const tmedia_jitterbuffer_plugin_def_t* plugin; + tsk_size_t i = 0; + + while((i < TMED_JITTER_BUFFER_MAX_PLUGINS) && (plugin = __tmedia_jitterbuffer_plugins[i++])){ + if(plugin->objdef && plugin->type == type){ + if((jitter_buffer = tsk_object_new(plugin->objdef))){ + /* initialize the newly created jitter_buffer */ + jitter_buffer->plugin = plugin; + break; + } + } + } + + return jitter_buffer; +} + int tmedia_jitterbuffer_plugin_register(const tmedia_jitterbuffer_plugin_def_t* plugin) { + tsk_size_t i; if(!plugin){ TSK_DEBUG_ERROR("Invalid parameter"); return -1; } - __tmedia_jitterbuffer_plugin = plugin; - return 0; -} -int tmedia_jitterbuffer_plugin_unregister() -{ - __tmedia_jitterbuffer_plugin = tsk_null; - return 0; -} - -tmedia_jitterbuffer_t* tmedia_jitterbuffer_create() -{ - tmedia_jitterbuffer_t* jitterbuffer = tsk_null; - - if(__tmedia_jitterbuffer_plugin){ - if((jitterbuffer = tsk_object_new(__tmedia_jitterbuffer_plugin->objdef))){ - jitterbuffer->plugin = __tmedia_jitterbuffer_plugin; + /* add or replace the plugin */ + for(i = 0; itype & type) == __tmedia_jitterbuffer_plugins[i]->type){ + __tmedia_jitterbuffer_plugins[i] = tsk_null; + found = tsk_true; + break; + } + } + + /* compact */ + if(found){ + for(; i<(TMED_JITTER_BUFFER_MAX_PLUGINS - 1); i++){ + if(__tmedia_jitterbuffer_plugins[i+1]){ + __tmedia_jitterbuffer_plugins[i] = __tmedia_jitterbuffer_plugins[i+1]; + } + else{ + break; + } + } + __tmedia_jitterbuffer_plugins[i] = tsk_null; + } + return (found ? 
0 : -2); } diff --git a/branches/2.0/doubango/tinyNET/src/tnet_socket.c b/branches/2.0/doubango/tinyNET/src/tnet_socket.c index df062b99..70158787 100644 --- a/branches/2.0/doubango/tinyNET/src/tnet_socket.c +++ b/branches/2.0/doubango/tinyNET/src/tnet_socket.c @@ -216,7 +216,7 @@ static tsk_object_t* tnet_socket_ctor(tsk_object_t * self, va_list * app) /* Find our address. */ for(ptr = result; ptr; ptr = ptr->ai_next){ - sock->fd = socket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol); + sock->fd = tnet_soccket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol); if(ptr->ai_family != AF_INET6 && ptr->ai_family != AF_INET){ continue; } diff --git a/branches/2.0/doubango/tinyNET/src/tnet_transport_win32.c b/branches/2.0/doubango/tinyNET/src/tnet_transport_win32.c index 5b6fb0e0..6703c30a 100644 --- a/branches/2.0/doubango/tinyNET/src/tnet_transport_win32.c +++ b/branches/2.0/doubango/tinyNET/src/tnet_transport_win32.c @@ -516,8 +516,6 @@ void *tnet_transport_mainthread(void *param) transport_socket_t* active_socket; int index; - SetPriorityClass(GetCurrentThread(), REALTIME_PRIORITY_CLASS); - TSK_DEBUG_INFO("Starting [%s] server with IP {%s} on port {%d}...", transport->description, transport->master->ip, transport->master->port); while(TSK_RUNNABLE(transport)->running || TSK_RUNNABLE(transport)->started) diff --git a/branches/2.0/doubango/tinyNET/src/tnet_utils.h b/branches/2.0/doubango/tinyNET/src/tnet_utils.h index 776d6044..89a1d52c 100644 --- a/branches/2.0/doubango/tinyNET/src/tnet_utils.h +++ b/branches/2.0/doubango/tinyNET/src/tnet_utils.h @@ -114,6 +114,14 @@ TINYNET_API int tnet_get_peerip_n_port(tnet_fd_t localFD, tnet_ip_t *ip, tnet_po # define tnet_get_sockaddr_size(psockaddr) ((psockaddr)->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6): ((psockaddr)->sa_family == AF_INET ? 
sizeof(struct sockaddr_in) : sizeof(*(psockaddr)))) #endif +#if TNET_UNDER_WINDOWS +# define tnet_ioctlt ioctlsocket /* FIXME: use WSAIoctl */ +# define tnet_soccket(family, type, protocol) WSASocket((family), (type), (protocol), NULL, 0, WSA_FLAG_OVERLAPPED) +#else +# define tnet_ioctlt ioctl +# define tnet_soccket(family, type, protocol) socket((family), (type), (protocol)) +#endif + TINYNET_API int tnet_getnameinfo(const struct sockaddr *sa, socklen_t salen, char* node, socklen_t nodelen, char* service, socklen_t servicelen, int flags); TINYNET_API int tnet_gethostname(tnet_host_t* result); @@ -156,13 +164,6 @@ TINYNET_API int tnet_sockfd_close(tnet_fd_t *fd); } -#if TSK_UNDER_WINDOWS -# define tnet_ioctlt ioctlsocket /* FIXME: use WSAIoctl */ -#else -# define tnet_ioctlt ioctl -#endif - - tnet_interface_t* tnet_interface_create(const char* description, const void* mac_address, tsk_size_t mac_address_length); tnet_address_t* tnet_address_create(const char* ip); diff --git a/branches/2.0/doubango/tinyRTP/src/trtp_manager.c b/branches/2.0/doubango/tinyRTP/src/trtp_manager.c index 34cd1ad8..f0bf5689 100644 --- a/branches/2.0/doubango/tinyRTP/src/trtp_manager.c +++ b/branches/2.0/doubango/tinyRTP/src/trtp_manager.c @@ -35,8 +35,8 @@ #include "tsk_debug.h" #define TINY_RCVBUF (256/2/*Will be doubled and min on linux is 256*/) /* tiny buffer used to disable receiving */ -#define BIG_RCVBUF 64000 -#define BIG_SNDBUF 64000 +#define BIG_RCVBUF (64 * 1024) +#define BIG_SNDBUF (64 * 1024) // TODO: Add support for outbound DTMF (http://www.ietf.org/rfc/rfc2833.txt) @@ -317,7 +317,7 @@ int trtp_manager_start(trtp_manager_t* self) /* Flush buffers and re-enable sockets */ { - char buff[1024]; + char buff[2048]; // re-enable sockets _trtp_manager_enable_sockets(self); diff --git a/branches/2.0/doubango/tinySAK/src/tsk_common.h b/branches/2.0/doubango/tinySAK/src/tsk_common.h index e917967d..14a6c9aa 100644 --- a/branches/2.0/doubango/tinySAK/src/tsk_common.h +++ 
b/branches/2.0/doubango/tinySAK/src/tsk_common.h @@ -43,9 +43,10 @@ typedef int tsk_boolean_t; #define tsk_true 1 #define tsk_false 0 -#define TSK_MIN(a,b) (((a) < (b)) ? (a) : (b)) -#define TSK_MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define TSK_ABS(a) (((a)< 0) ? -(a) : (a)) +#define TSK_MIN(a,b) (((a) < (b)) ? (a) : (b)) +#define TSK_MAX(a,b) (((a) > (b)) ? (a) : (b)) +#define TSK_ABS(a) (((a)< 0) ? -(a) : (a)) +#define TSK_CLAMP(nMin, nVal, nMax) ((nVal) > (nMax)) ? (nMax) : (((nVal) < (nMin)) ? (nMin) : (nVal)) // used to avoid doing *((uint32_t*)ptr) which don't respect memory alignment on // some embedded (ARM,?...) platforms diff --git a/branches/2.0/doubango/tinySAK/src/tsk_semaphore.c b/branches/2.0/doubango/tinySAK/src/tsk_semaphore.c index 107c04bd..6cd90ff9 100644 --- a/branches/2.0/doubango/tinySAK/src/tsk_semaphore.c +++ b/branches/2.0/doubango/tinySAK/src/tsk_semaphore.c @@ -89,22 +89,27 @@ * @sa @ref tsk_semaphore_destroy */ tsk_semaphore_handle_t* tsk_semaphore_create() +{ + return tsk_semaphore_create_2(0); +} + +tsk_semaphore_handle_t* tsk_semaphore_create_2(int initial_val) { SEMAPHORE_T handle = 0; #if TSK_UNDER_WINDOWS - handle = CreateSemaphore(NULL, 0, 0x7FFFFFFF, NULL); + handle = CreateSemaphore(NULL, initial_val, 0x7FFFFFFF, NULL); #else handle = tsk_calloc(1, sizeof(SEMAPHORE_S)); #if TSK_USE_NAMED_SEM named_sem_t * nsem = (named_sem_t*)handle; tsk_sprintf(&(nsem->name), "/sem-%d", sem_count++); - if((nsem->sem = sem_open(nsem->name, O_CREAT /*| O_EXCL*/, S_IRUSR | S_IWUSR, 0)) == SEM_FAILED) + if((nsem->sem = sem_open(nsem->name, O_CREAT /*| O_EXCL*/, S_IRUSR | S_IWUSR, initial_val)) == SEM_FAILED) { TSK_FREE(nsem->name); #else - if(sem_init((SEMAPHORE_T)handle, 0, 0)) + if(sem_init((SEMAPHORE_T)handle, 0, initial_val)) { #endif TSK_FREE(handle); diff --git a/branches/2.0/doubango/tinySAK/src/tsk_semaphore.h b/branches/2.0/doubango/tinySAK/src/tsk_semaphore.h index 8d2ce5e8..f07247a6 100644 --- 
a/branches/2.0/doubango/tinySAK/src/tsk_semaphore.h +++ b/branches/2.0/doubango/tinySAK/src/tsk_semaphore.h @@ -37,6 +37,7 @@ TSK_BEGIN_DECLS typedef void tsk_semaphore_handle_t; TINYSAK_API tsk_semaphore_handle_t* tsk_semaphore_create(); +TINYSAK_API tsk_semaphore_handle_t* tsk_semaphore_create_2(int initial_val); TINYSAK_API int tsk_semaphore_increment(tsk_semaphore_handle_t* handle); TINYSAK_API int tsk_semaphore_decrement(tsk_semaphore_handle_t* handle); TINYSAK_API void tsk_semaphore_destroy(tsk_semaphore_handle_t** handle); diff --git a/branches/2.0/doubango/tinySIP/include/tinysip/dialogs/tsip_dialog.h b/branches/2.0/doubango/tinySIP/include/tinysip/dialogs/tsip_dialog.h index 7d526b47..b21abd93 100644 --- a/branches/2.0/doubango/tinySIP/include/tinysip/dialogs/tsip_dialog.h +++ b/branches/2.0/doubango/tinySIP/include/tinysip/dialogs/tsip_dialog.h @@ -138,7 +138,8 @@ typedef struct tsip_dialog_s tsip_uri_t* uri_remote; tsip_uri_t* uri_remote_target; - + struct sockaddr_storage remote_addr; // Only valid for Dgram + uint32_t cseq_value; char* cseq_method; @@ -162,7 +163,7 @@ typedef tsk_list_t tsip_dialogs_L_t; tsip_request_t *tsip_dialog_request_new(const tsip_dialog_t *self, const char* method); int tsip_dialog_request_send(const tsip_dialog_t *self, tsip_request_t* request); -tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request); +tsip_response_t *tsip_dialog_response_new(tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request); int tsip_dialog_response_send(const tsip_dialog_t *self, tsip_response_t* response); int tsip_dialog_apply_action(tsip_message_t* message, const tsip_action_t* action); diff --git a/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog.c b/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog.c index 42c9de56..ffb8c9dd 100644 --- a/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog.c +++ 
b/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog.c @@ -391,6 +391,8 @@ tsip_request_t *tsip_dialog_request_new(const tsip_dialog_t *self, const char* m request->sigcomp_id = tsk_strdup(self->ss->sigcomp_id); } + /* Remote Address: Used if "Server mode" otherwise Proxy-CSCF will be used */ + request->remote_addr = self->remote_addr; TSK_OBJECT_SAFE_FREE(request_uri); TSK_OBJECT_SAFE_FREE(from_uri); @@ -441,7 +443,7 @@ int tsip_dialog_request_send(const tsip_dialog_t *self, tsip_request_t* request) return ret; } -tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request) +tsip_response_t *tsip_dialog_response_new(tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request) { /* Reponse is created as per RFC 3261 subclause 8.2.6 and (headers+tags) are copied * as per subclause 8.2.6.2. @@ -480,6 +482,8 @@ tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short statu * it's up to the transport layer to copy it to these headers */ response->sigcomp_id = tsk_strdup(self->ss->sigcomp_id); } + /* Remote Addr: used to send requests if "Server Mode" otherwise Proxy-CSCF address will be used */ + self->remote_addr = request->remote_addr; } return response; } diff --git a/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog_invite.c b/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog_invite.c index c3ddbd3d..00400408 100644 --- a/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog_invite.c +++ b/branches/2.0/doubango/tinySIP/src/dialogs/tsip_dialog_invite.c @@ -698,12 +698,19 @@ int x0000_Any_2_Any_X_i2xxINVITEorUPDATE(va_list *app) int x0000_Any_2_Trying_X_oBYE(va_list *app) { tsip_dialog_invite_t *self = va_arg(*app, tsip_dialog_invite_t *); + int ret; /* Alert the user */ TSIP_DIALOG_SIGNAL(self, tsip_event_code_dialog_terminating, "Terminating dialog"); /* send BYE */ - return send_BYE(self); + if((ret = send_BYE(self)) == 0){ + // stop 
session manager + if(self->msession_mgr && self->msession_mgr->started){ + tmedia_session_mgr_stop(self->msession_mgr); + } + } + return ret; } /* Any -> (iBYE) -> Terminated */ diff --git a/branches/2.0/doubango/tinySIP/src/transports/tsip_transport.c b/branches/2.0/doubango/tinySIP/src/transports/tsip_transport.c index 5ba78dac..054cd765 100644 --- a/branches/2.0/doubango/tinySIP/src/transports/tsip_transport.c +++ b/branches/2.0/doubango/tinySIP/src/transports/tsip_transport.c @@ -296,10 +296,11 @@ tsk_size_t tsip_transport_send(const tsip_transport_t* self, const char *branch, } } else{ - if(self->stack->network.mode_server && TSIP_MESSAGE_IS_RESPONSE(msg)){ // In server mode we will never send request. At least for now ;) + if(self->stack->network.mode_server){ ret = tsip_transport_send_raw(self, (const struct sockaddr*)&msg->remote_addr, buffer->data, buffer->size); } else{ + // always send to the Proxy-CSCF ret = tsip_transport_send_raw(self, tsk_null/* Use P-CSCF addr */, buffer->data, buffer->size); }