- Add support for H.264 Full HD (1080p) using NVIDIA CUDA

- Begin adding support for a video jitter buffer (will be used to provide feedback for packet loss — FEC)
- Move video flipping code to the converter (refactoring)
- Fix issue 62, issue 41 and issue 66
- Fix issues (workaround) on VP8 (frame corruption)
- Update contribution list
This commit is contained in:
bossiel 2011-09-07 18:30:46 +00:00
parent 4ac9b38614
commit 6af08c309e
80 changed files with 3446 additions and 620 deletions

View File

@ -35,6 +35,8 @@
#include "tsk_debug.h"
#include "tinydav/audio/tdav_consumer_audio.h"
#include "tinydav/video/tdav_consumer_video.h"
/* ============ Audio Consumer Interface ================= */
@ -267,7 +269,7 @@ bool ProxyAudioConsumer::setPullBuffer(const void* pPullBufferPtr, unsigned nPul
unsigned ProxyAudioConsumer::pull(void* _pOutput/*=tsk_null*/, unsigned _nSize/*=0*/)
{
if(m_pWrappedPlugin){
if((m_pWrappedPlugin = (twrap_consumer_proxy_audio_t*)tsk_object_ref(m_pWrappedPlugin))){
void* pOutput;
unsigned nSize;
if(_pOutput && _nSize){
@ -290,6 +292,8 @@ unsigned ProxyAudioConsumer::pull(void* _pOutput/*=tsk_null*/, unsigned _nSize/*
}
tdav_consumer_audio_tick(TDAV_CONSUMER_AUDIO(m_pWrappedPlugin));
m_pWrappedPlugin = (twrap_consumer_proxy_audio_t*)tsk_object_unref(m_pWrappedPlugin);
return nRetSize;
}
return 0;
@ -364,7 +368,7 @@ bool ProxyAudioConsumer::registerPlugin()
typedef struct twrap_consumer_proxy_video_s
{
TMEDIA_DECLARE_CONSUMER;
TDAV_DECLARE_CONSUMER_VIDEO;
uint64_t id;
tsk_bool_t started;
@ -386,7 +390,7 @@ int twrap_consumer_proxy_video_prepare(tmedia_consumer_t* self, const tmedia_cod
if((videoConsumer = manager->findVideoConsumer(TWRAP_CONSUMER_PROXY_VIDEO(self)->id)) && videoConsumer->getCallback()){
self->video.fps = TMEDIA_CODEC_VIDEO(codec)->in.fps;
// in
self->video.in.chroma = tmedia_yuv420p;
self->video.in.chroma = tmedia_chroma_yuv420p;
self->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;
self->video.in.height = TMEDIA_CODEC_VIDEO(codec)->in.height;
// display (out)
@ -433,14 +437,19 @@ int twrap_consumer_proxy_video_consume(tmedia_consumer_t* self, const void* buff
if((manager = ProxyPluginMgr::getInstance())){
const ProxyVideoConsumer* videoConsumer;
if((videoConsumer = manager->findVideoConsumer(TWRAP_CONSUMER_PROXY_VIDEO(self)->id)) && videoConsumer->getCallback()){
if(videoConsumer->hasConsumeBuffer()){
unsigned nCopiedSize = videoConsumer->copyBuffer(buffer, size);
ret = videoConsumer->getCallback()->bufferCopied(nCopiedSize, size);
if(tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(self))){
ret = tdav_consumer_video_put(TDAV_CONSUMER_VIDEO(self), buffer, size, proto_hdr);
}
else{
ProxyVideoFrame* frame = new ProxyVideoFrame(buffer, size);
ret = videoConsumer->getCallback()->consume(frame);
delete frame, frame = tsk_null;
if(videoConsumer->hasConsumeBuffer()){
unsigned nCopiedSize = videoConsumer->copyBuffer(buffer, size);
ret = videoConsumer->getCallback()->bufferCopied(nCopiedSize, size);
}
else{
ProxyVideoFrame* frame = new ProxyVideoFrame(buffer, size);
ret = videoConsumer->getCallback()->consume(frame);
delete frame, frame = tsk_null;
}
}
}
else{
@ -490,7 +499,7 @@ static tsk_object_t* twrap_consumer_proxy_video_ctor(tsk_object_t * self, va_lis
twrap_consumer_proxy_video_t *consumer = (twrap_consumer_proxy_video_t *)self;
if(consumer){
/* init base */
tmedia_consumer_init(TMEDIA_CONSUMER(consumer));
tdav_consumer_video_init(TDAV_CONSUMER_VIDEO(consumer));
/* init self */
/* Add the plugin to the manager */
@ -516,7 +525,7 @@ static tsk_object_t* twrap_consumer_proxy_video_dtor(tsk_object_t * self)
}
/* deinit base */
tmedia_consumer_deinit(TMEDIA_CONSUMER(consumer));
tdav_consumer_video_deinit(TDAV_CONSUMER_VIDEO(consumer));
/* deinit self */
/* Remove plugin from the manager */
@ -558,7 +567,7 @@ TINYWRAP_GEXTERN const tmedia_consumer_plugin_def_t *twrap_consumer_proxy_video_
/* ============ ProxyVideoConsumer Class ================= */
tmedia_chroma_t ProxyVideoConsumer::s_eDefaultChroma = tmedia_rgb565le;
tmedia_chroma_t ProxyVideoConsumer::s_eDefaultChroma = tmedia_chroma_rgb565le;
bool ProxyVideoConsumer::s_bAutoResizeDisplay = false;
ProxyVideoConsumer::ProxyVideoConsumer(tmedia_chroma_t eChroma, struct twrap_consumer_proxy_video_s* pConsumer)
@ -653,6 +662,44 @@ unsigned ProxyVideoConsumer::copyBuffer(const void* pBuffer, unsigned nSize)cons
return nRetsize;
}
// Pulls one decoded video frame from the wrapped plugin's jitter buffer into
// pOutput (at most nSize bytes). Returns the number of bytes written, or 0 on
// failure (null/zero arguments, no wrapped plugin, or no jitter buffer).
unsigned ProxyVideoConsumer::pull(void* pOutput, unsigned nSize)
{
// Take a reference on the wrapped plugin so it cannot be destroyed while in use.
if(pOutput && nSize && (m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_ref(m_pWrappedPlugin))){
tsk_size_t nRetSize = 0;
// pull() is only meaningful when the consumer owns a jitter buffer; otherwise
// frames are delivered through the 'consume'/'bufferCopied' callbacks.
if(!tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin))){
TSK_DEBUG_ERROR("This consumer doesn't hold any jitter buffer.\n\nTo pull a buffer you must register a callback ('class ProxyVideoConsumerCallback') and listen for either 'consume' or 'bufferCopied' functions");
goto done;
}
nRetSize = tdav_consumer_video_get(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin), pOutput, nSize);
// Let the jitter buffer update its internal timing after each get.
tdav_consumer_video_tick(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin));
done:
// Release the reference taken above; the goto target guarantees it runs on all paths.
m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_unref(m_pWrappedPlugin);
return nRetSize;
}
return 0;
}
bool ProxyVideoConsumer::reset()
{
bool ret = false;
if((m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_ref(m_pWrappedPlugin))){
if(tdav_consumer_video_has_jb(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin))){
ret = (tdav_consumer_video_reset(TDAV_CONSUMER_VIDEO(m_pWrappedPlugin)) == 0);
}
else{
TSK_DEBUG_ERROR("This consumer doesn't hold any jitter buffer");
}
m_pWrappedPlugin = (twrap_consumer_proxy_video_t*)tsk_object_unref(m_pWrappedPlugin);
}
TSK_DEBUG_ERROR("This consumer doesn't wrap any plugin");
return ret;
}
bool ProxyVideoConsumer::registerPlugin()
{
/* HACK: Unregister all other video plugins */

View File

@ -105,6 +105,7 @@ public:
virtual int prepare(int nWidth, int nHeight, int nFps) { return -1; }
virtual int consume(const ProxyVideoFrame* frame) { return -1; }
// only called if a buffer is registered using setPullBuffer(). Otherwise, consume() will be called
virtual int bufferCopied(unsigned nCopiedSize, unsigned nAvailableSize) { return -1; }
virtual int start() { return -1; }
virtual int pause() { return -1; }
@ -128,6 +129,8 @@ public:
bool setAutoResizeDisplay(bool bAutoResizeDisplay);
bool getAutoResizeDisplay()const;
bool setConsumeBuffer(const void* pConsumeBufferPtr, unsigned nConsumeBufferSize);
unsigned pull(void* pOutput, unsigned nSize);
bool reset();
#if !defined(SWIG)
bool hasConsumeBuffer()const { return m_ConsumeBuffer.pConsumeBufferPtr && m_ConsumeBuffer.nConsumeBufferSize; }

View File

@ -426,7 +426,7 @@ TINYWRAP_GEXTERN const tmedia_producer_plugin_def_t *twrap_producer_proxy_video_
/* ============ ProxyVideoProducer Class ================= */
tmedia_chroma_t ProxyVideoProducer::s_eDefaultChroma = tmedia_nv21;
tmedia_chroma_t ProxyVideoProducer::s_eDefaultChroma = tmedia_chroma_nv21;
ProxyVideoProducer::ProxyVideoProducer(tmedia_chroma_t eChroma, struct twrap_producer_proxy_video_s* pProducer)
:m_pCallback(tsk_null), m_eChroma(eChroma), m_nRotation(0), m_pWrappedPlugin(pProducer), ProxyPlugin(twrap_proxy_plugin_video_producer)

View File

@ -178,16 +178,17 @@ tsip_invite_event_type_t;
// used by tinyWRAP
typedef enum tmedia_chroma_e
{
tmedia_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7
tmedia_bgr24, // used by windows consumer (DirectShow) -
tmedia_rgb32, // used by iOS4 consumer (iPhone and iPod touch)
tmedia_rgb565le, // (used by both android and wince consumers)
tmedia_rgb565be,
tmedia_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4)
tmedia_nv21, // Yuv420 SP (used by android producer)
tmedia_yuv422p,
tmedia_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G)
tmedia_yuv420p, // Default
tmedia_chroma_none=0,
tmedia_chroma_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7
tmedia_chroma_bgr24, // used by windows consumer (DirectShow) -
tmedia_chroma_rgb32, // used by iOS4 consumer (iPhone and iPod touch)
tmedia_chroma_rgb565le, // (used by both android and wince consumers)
tmedia_chroma_rgb565be,
tmedia_chroma_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4)
tmedia_chroma_nv21, // Yuv420 SP (used by android producer)
tmedia_chroma_yuv422p,
tmedia_chroma_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G)
tmedia_chroma_yuv420p, // Default
}
tmedia_chroma_t;
@ -255,8 +256,9 @@ typedef enum tdav_codec_id_e
tdav_codec_id_h264_bp10 = 0x00010000<<4,
tdav_codec_id_h264_bp20 = 0x00010000<<5,
tdav_codec_id_h264_bp30 = 0x00010000<<6,
tdav_codec_id_theora = 0x00010000<<7,
tdav_codec_id_mp4ves_es = 0x00010000<<8,
tdav_codec_id_vp8 = 0x00010000<<9,
tdav_codec_id_h264_svc = 0x00010000<<7,
tdav_codec_id_theora = 0x00010000<<8,
tdav_codec_id_mp4ves_es = 0x00010000<<9,
tdav_codec_id_vp8 = 0x00010000<<10,
}
tdav_codec_id_t;

View File

@ -72,6 +72,16 @@ public class ProxyVideoConsumer : ProxyPlugin {
return ret;
}
public uint pull(byte[] pOutput, uint nSize) {
uint ret = tinyWRAPPINVOKE.ProxyVideoConsumer_pull(swigCPtr, pOutput, nSize);
return ret;
}
public bool reset() {
bool ret = tinyWRAPPINVOKE.ProxyVideoConsumer_reset(swigCPtr);
return ret;
}
public virtual ulong getMediaSessionId() {
ulong ret = tinyWRAPPINVOKE.ProxyVideoConsumer_getMediaSessionId(swigCPtr);
return ret;

View File

@ -33,9 +33,10 @@ public enum tdav_codec_id_t {
tdav_codec_id_h264_bp10 = 0x00010000 << 4,
tdav_codec_id_h264_bp20 = 0x00010000 << 5,
tdav_codec_id_h264_bp30 = 0x00010000 << 6,
tdav_codec_id_theora = 0x00010000 << 7,
tdav_codec_id_mp4ves_es = 0x00010000 << 8,
tdav_codec_id_vp8 = 0x00010000 << 9
tdav_codec_id_h264_svc = 0x00010000 << 7,
tdav_codec_id_theora = 0x00010000 << 8,
tdav_codec_id_mp4ves_es = 0x00010000 << 9,
tdav_codec_id_vp8 = 0x00010000 << 10
}
}

View File

@ -1028,6 +1028,12 @@ class tinyWRAPPINVOKE {
[DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_setConsumeBuffer")]
public static extern bool ProxyVideoConsumer_setConsumeBuffer(HandleRef jarg1, byte[] jarg2, uint jarg3);
[DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_pull")]
public static extern uint ProxyVideoConsumer_pull(HandleRef jarg1, byte[] jarg2, uint jarg3);
[DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_reset")]
public static extern bool ProxyVideoConsumer_reset(HandleRef jarg1);
[DllImport("tinyWRAP", EntryPoint="CSharp_ProxyVideoConsumer_getMediaSessionId")]
public static extern ulong ProxyVideoConsumer_getMediaSessionId(HandleRef jarg1);

View File

@ -4668,6 +4668,34 @@ SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_setConsumeBuffer(v
}
// P/Invoke bridge (SWIG-generated shape): unwraps the raw handle and forwards
// the output buffer and size to ProxyVideoConsumer::pull().
SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_pull(void * jarg1, void * jarg2, unsigned int jarg3) {
  ProxyVideoConsumer *consumer = (ProxyVideoConsumer *)jarg1;
  // NOTE(review): no NULL check on the handle — a zero handle from managed code
  // would crash here; the SWIG convention assumes a live wrapped object.
  unsigned int nPulled = (unsigned int)consumer->pull(jarg2, (unsigned int)jarg3);
  return nPulled;
}
// P/Invoke bridge (SWIG-generated shape): forwards to ProxyVideoConsumer::reset()
// and widens the bool result to the unsigned int the managed side expects.
SWIGEXPORT unsigned int SWIGSTDCALL CSharp_ProxyVideoConsumer_reset(void * jarg1) {
  ProxyVideoConsumer *consumer = (ProxyVideoConsumer *)jarg1;
  return consumer->reset() ? 1 : 0;
}
SWIGEXPORT unsigned long long SWIGSTDCALL CSharp_ProxyVideoConsumer_getMediaSessionId(void * jarg1) {
unsigned long long jresult ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;

View File

@ -9,16 +9,17 @@
namespace org.doubango.tinyWRAP {
public enum tmedia_chroma_t {
tmedia_rgb24,
tmedia_bgr24,
tmedia_rgb32,
tmedia_rgb565le,
tmedia_rgb565be,
tmedia_nv12,
tmedia_nv21,
tmedia_yuv422p,
tmedia_uyvy422,
tmedia_yuv420p
tmedia_chroma_none = 0,
tmedia_chroma_rgb24,
tmedia_chroma_bgr24,
tmedia_chroma_rgb32,
tmedia_chroma_rgb565le,
tmedia_chroma_rgb565be,
tmedia_chroma_nv12,
tmedia_chroma_nv21,
tmedia_chroma_yuv422p,
tmedia_chroma_uyvy422,
tmedia_chroma_yuv420p
}
}

View File

@ -61,6 +61,14 @@ public class ProxyVideoConsumer extends ProxyPlugin {
return tinyWRAPJNI.ProxyVideoConsumer_setConsumeBuffer(swigCPtr, this, pConsumeBufferPtr, nConsumeBufferSize);
}
public long pull(java.nio.ByteBuffer pOutput, long nSize) {
return tinyWRAPJNI.ProxyVideoConsumer_pull(swigCPtr, this, pOutput, nSize);
}
public boolean reset() {
return tinyWRAPJNI.ProxyVideoConsumer_reset(swigCPtr, this);
}
public java.math.BigInteger getMediaSessionId() {
return tinyWRAPJNI.ProxyVideoConsumer_getMediaSessionId(swigCPtr, this);
}

View File

@ -61,6 +61,14 @@ public class ProxyVideoConsumer extends ProxyPlugin {
return tinyWRAPJNI.ProxyVideoConsumer_setConsumeBuffer(swigCPtr, this, pConsumeBufferPtr, nConsumeBufferSize);
}
public long pull(java.nio.ByteBuffer pOutput, long nSize) {
return tinyWRAPJNI.ProxyVideoConsumer_pull(swigCPtr, this, pOutput, nSize);
}
public boolean reset() {
return tinyWRAPJNI.ProxyVideoConsumer_reset(swigCPtr, this);
}
public java.math.BigInteger getMediaSessionId() {
return tinyWRAPJNI.ProxyVideoConsumer_getMediaSessionId(swigCPtr, this);
}

View File

@ -33,9 +33,10 @@ public enum tdav_codec_id_t {
tdav_codec_id_h264_bp10(0x00010000 << 4),
tdav_codec_id_h264_bp20(0x00010000 << 5),
tdav_codec_id_h264_bp30(0x00010000 << 6),
tdav_codec_id_theora(0x00010000 << 7),
tdav_codec_id_mp4ves_es(0x00010000 << 8),
tdav_codec_id_vp8(0x00010000 << 9);
tdav_codec_id_h264_svc(0x00010000 << 7),
tdav_codec_id_theora(0x00010000 << 8),
tdav_codec_id_mp4ves_es(0x00010000 << 9),
tdav_codec_id_vp8(0x00010000 << 10);
public final int swigValue() {
return swigValue;

View File

@ -294,6 +294,8 @@ class tinyWRAPJNI {
public final static native boolean ProxyVideoConsumer_setAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_, boolean jarg2);
public final static native boolean ProxyVideoConsumer_getAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_);
public final static native boolean ProxyVideoConsumer_setConsumeBuffer(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3);
public final static native long ProxyVideoConsumer_pull(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3);
public final static native boolean ProxyVideoConsumer_reset(long jarg1, ProxyVideoConsumer jarg1_);
public final static native java.math.BigInteger ProxyVideoConsumer_getMediaSessionId(long jarg1, ProxyVideoConsumer jarg1_);
public final static native boolean ProxyVideoConsumer_registerPlugin();
public final static native void ProxyVideoConsumer_setDefaultChroma(int jarg1);

View File

@ -6751,6 +6751,42 @@ SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoCon
}
// JNI bridge for ProxyVideoConsumer.pull() (SWIG-generated).
// NOTE(review): the C prototype declares jarg2 as jbyteArray, but the Java side
// passes a java.nio.ByteBuffer and GetDirectBufferAddress expects a direct
// Buffer jobject — this relies on a SWIG typemap; confirm it stays consistent.
SWIGEXPORT jlong JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1pull(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2, jlong jarg3) {
jlong jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *arg2 = (void *) 0 ;
unsigned int arg3 ;
unsigned int result;
(void)jenv;
(void)jcls;
(void)jarg1_;
// The Java 'long' handle stores the native pointer; reinterpret it back.
arg1 = *(ProxyVideoConsumer **)&jarg1;
// Direct-buffer address of the caller-supplied output buffer (NULL if not direct).
arg2 = jenv->GetDirectBufferAddress(jarg2);
arg3 = (unsigned int)jarg3;
result = (unsigned int)(arg1)->pull(arg2,arg3);
jresult = (jlong)result;
return jresult;
}
// JNI bridge for ProxyVideoConsumer.reset() (SWIG-generated).
SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1reset(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
jboolean jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
bool result;
(void)jenv;
(void)jcls;
(void)jarg1_;
// The Java 'long' handle stores the native pointer; reinterpret it back.
// NOTE(review): no NULL check — a 0 handle from Java would crash here.
arg1 = *(ProxyVideoConsumer **)&jarg1;
result = (bool)(arg1)->reset();
jresult = (jboolean)result;
return jresult;
}
SWIGEXPORT jobject JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1getMediaSessionId(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
jobject jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;

View File

@ -9,16 +9,17 @@
package org.doubango.tinyWRAP;
public enum tmedia_chroma_t {
tmedia_rgb24,
tmedia_bgr24,
tmedia_rgb32,
tmedia_rgb565le,
tmedia_rgb565be,
tmedia_nv12,
tmedia_nv21,
tmedia_yuv422p,
tmedia_uyvy422,
tmedia_yuv420p;
tmedia_chroma_none(0),
tmedia_chroma_rgb24,
tmedia_chroma_bgr24,
tmedia_chroma_rgb32,
tmedia_chroma_rgb565le,
tmedia_chroma_rgb565be,
tmedia_chroma_nv12,
tmedia_chroma_nv21,
tmedia_chroma_yuv422p,
tmedia_chroma_uyvy422,
tmedia_chroma_yuv420p;
public final int swigValue() {
return swigValue;

View File

@ -33,9 +33,10 @@ public enum tdav_codec_id_t {
tdav_codec_id_h264_bp10(0x00010000 << 4),
tdav_codec_id_h264_bp20(0x00010000 << 5),
tdav_codec_id_h264_bp30(0x00010000 << 6),
tdav_codec_id_theora(0x00010000 << 7),
tdav_codec_id_mp4ves_es(0x00010000 << 8),
tdav_codec_id_vp8(0x00010000 << 9);
tdav_codec_id_h264_svc(0x00010000 << 7),
tdav_codec_id_theora(0x00010000 << 8),
tdav_codec_id_mp4ves_es(0x00010000 << 9),
tdav_codec_id_vp8(0x00010000 << 10);
public final int swigValue() {
return swigValue;

View File

@ -294,6 +294,8 @@ class tinyWRAPJNI {
public final static native boolean ProxyVideoConsumer_setAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_, boolean jarg2);
public final static native boolean ProxyVideoConsumer_getAutoResizeDisplay(long jarg1, ProxyVideoConsumer jarg1_);
public final static native boolean ProxyVideoConsumer_setConsumeBuffer(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3);
public final static native long ProxyVideoConsumer_pull(long jarg1, ProxyVideoConsumer jarg1_, java.nio.ByteBuffer jarg2, long jarg3);
public final static native boolean ProxyVideoConsumer_reset(long jarg1, ProxyVideoConsumer jarg1_);
public final static native java.math.BigInteger ProxyVideoConsumer_getMediaSessionId(long jarg1, ProxyVideoConsumer jarg1_);
public final static native boolean ProxyVideoConsumer_registerPlugin();
public final static native void ProxyVideoConsumer_setDefaultChroma(int jarg1);

View File

@ -6751,6 +6751,42 @@ SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoCon
}
// JNI bridge for ProxyVideoConsumer.pull() (SWIG-generated).
// NOTE(review): the C prototype declares jarg2 as jbyteArray, but the Java side
// passes a java.nio.ByteBuffer and GetDirectBufferAddress expects a direct
// Buffer jobject — this relies on a SWIG typemap; confirm it stays consistent.
SWIGEXPORT jlong JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1pull(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2, jlong jarg3) {
jlong jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *arg2 = (void *) 0 ;
unsigned int arg3 ;
unsigned int result;
(void)jenv;
(void)jcls;
(void)jarg1_;
// The Java 'long' handle stores the native pointer; reinterpret it back.
arg1 = *(ProxyVideoConsumer **)&jarg1;
// Direct-buffer address of the caller-supplied output buffer (NULL if not direct).
arg2 = jenv->GetDirectBufferAddress(jarg2);
arg3 = (unsigned int)jarg3;
result = (unsigned int)(arg1)->pull(arg2,arg3);
jresult = (jlong)result;
return jresult;
}
// JNI bridge for ProxyVideoConsumer.reset() (SWIG-generated).
SWIGEXPORT jboolean JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1reset(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
jboolean jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
bool result;
(void)jenv;
(void)jcls;
(void)jarg1_;
// The Java 'long' handle stores the native pointer; reinterpret it back.
// NOTE(review): no NULL check — a 0 handle from Java would crash here.
arg1 = *(ProxyVideoConsumer **)&jarg1;
result = (bool)(arg1)->reset();
jresult = (jboolean)result;
return jresult;
}
SWIGEXPORT jobject JNICALL Java_org_doubango_tinyWRAP_tinyWRAPJNI_ProxyVideoConsumer_1getMediaSessionId(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
jobject jresult = 0 ;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;

View File

@ -9,16 +9,17 @@
package org.doubango.tinyWRAP;
public enum tmedia_chroma_t {
tmedia_rgb24,
tmedia_bgr24,
tmedia_rgb32,
tmedia_rgb565le,
tmedia_rgb565be,
tmedia_nv12,
tmedia_nv21,
tmedia_yuv422p,
tmedia_uyvy422,
tmedia_yuv420p;
tmedia_chroma_none(0),
tmedia_chroma_rgb24,
tmedia_chroma_bgr24,
tmedia_chroma_rgb32,
tmedia_chroma_rgb565le,
tmedia_chroma_rgb565be,
tmedia_chroma_nv12,
tmedia_chroma_nv21,
tmedia_chroma_yuv422p,
tmedia_chroma_uyvy422,
tmedia_chroma_yuv420p;
public final int swigValue() {
return swigValue;

View File

@ -1368,6 +1368,8 @@ sub DESTROY {
*setAutoResizeDisplay = *tinyWRAPc::ProxyVideoConsumer_setAutoResizeDisplay;
*getAutoResizeDisplay = *tinyWRAPc::ProxyVideoConsumer_getAutoResizeDisplay;
*setConsumeBuffer = *tinyWRAPc::ProxyVideoConsumer_setConsumeBuffer;
*pull = *tinyWRAPc::ProxyVideoConsumer_pull;
*reset = *tinyWRAPc::ProxyVideoConsumer_reset;
*getMediaSessionId = *tinyWRAPc::ProxyVideoConsumer_getMediaSessionId;
*registerPlugin = *tinyWRAPc::ProxyVideoConsumer_registerPlugin;
*setDefaultChroma = *tinyWRAPc::ProxyVideoConsumer_setDefaultChroma;
@ -2262,16 +2264,17 @@ package tinyWRAP;
*tsip_m_local_resume_nok = *tinyWRAPc::tsip_m_local_resume_nok;
*tsip_m_remote_hold = *tinyWRAPc::tsip_m_remote_hold;
*tsip_m_remote_resume = *tinyWRAPc::tsip_m_remote_resume;
*tmedia_rgb24 = *tinyWRAPc::tmedia_rgb24;
*tmedia_bgr24 = *tinyWRAPc::tmedia_bgr24;
*tmedia_rgb32 = *tinyWRAPc::tmedia_rgb32;
*tmedia_rgb565le = *tinyWRAPc::tmedia_rgb565le;
*tmedia_rgb565be = *tinyWRAPc::tmedia_rgb565be;
*tmedia_nv12 = *tinyWRAPc::tmedia_nv12;
*tmedia_nv21 = *tinyWRAPc::tmedia_nv21;
*tmedia_yuv422p = *tinyWRAPc::tmedia_yuv422p;
*tmedia_uyvy422 = *tinyWRAPc::tmedia_uyvy422;
*tmedia_yuv420p = *tinyWRAPc::tmedia_yuv420p;
*tmedia_chroma_none = *tinyWRAPc::tmedia_chroma_none;
*tmedia_chroma_rgb24 = *tinyWRAPc::tmedia_chroma_rgb24;
*tmedia_chroma_bgr24 = *tinyWRAPc::tmedia_chroma_bgr24;
*tmedia_chroma_rgb32 = *tinyWRAPc::tmedia_chroma_rgb32;
*tmedia_chroma_rgb565le = *tinyWRAPc::tmedia_chroma_rgb565le;
*tmedia_chroma_rgb565be = *tinyWRAPc::tmedia_chroma_rgb565be;
*tmedia_chroma_nv12 = *tinyWRAPc::tmedia_chroma_nv12;
*tmedia_chroma_nv21 = *tinyWRAPc::tmedia_chroma_nv21;
*tmedia_chroma_yuv422p = *tinyWRAPc::tmedia_chroma_yuv422p;
*tmedia_chroma_uyvy422 = *tinyWRAPc::tmedia_chroma_uyvy422;
*tmedia_chroma_yuv420p = *tinyWRAPc::tmedia_chroma_yuv420p;
*tmedia_qos_stype_none = *tinyWRAPc::tmedia_qos_stype_none;
*tmedia_qos_stype_segmented = *tinyWRAPc::tmedia_qos_stype_segmented;
*tmedia_qos_stype_e2e = *tinyWRAPc::tmedia_qos_stype_e2e;
@ -2308,6 +2311,7 @@ package tinyWRAP;
*tdav_codec_id_h264_bp10 = *tinyWRAPc::tdav_codec_id_h264_bp10;
*tdav_codec_id_h264_bp20 = *tinyWRAPc::tdav_codec_id_h264_bp20;
*tdav_codec_id_h264_bp30 = *tinyWRAPc::tdav_codec_id_h264_bp30;
*tdav_codec_id_h264_svc = *tinyWRAPc::tdav_codec_id_h264_svc;
*tdav_codec_id_theora = *tinyWRAPc::tdav_codec_id_theora;
*tdav_codec_id_mp4ves_es = *tinyWRAPc::tdav_codec_id_mp4ves_es;
*tdav_codec_id_vp8 = *tinyWRAPc::tdav_codec_id_vp8;

View File

@ -14029,6 +14029,80 @@ XS(_wrap_ProxyVideoConsumer_setConsumeBuffer) {
}
// Perl XS bridge for ProxyVideoConsumer::pull() (SWIG-generated).
// Usage from Perl: ProxyVideoConsumer_pull($self, $pOutput, $nSize).
XS(_wrap_ProxyVideoConsumer_pull) {
{
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *arg2 = (void *) 0 ;
unsigned int arg3 ;
void *argp1 = 0 ;
int res1 = 0 ;
int res2 ;
unsigned int val3 ;
int ecode3 = 0 ;
int argvi = 0;
unsigned int result;
dXSARGS;
// Exactly three arguments are required (self, output buffer, size).
if ((items < 3) || (items > 3)) {
SWIG_croak("Usage: ProxyVideoConsumer_pull(self,pOutput,nSize);");
}
// Argument 1: unwrap the blessed Perl object to the native consumer pointer.
res1 = SWIG_ConvertPtr(ST(0), &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 );
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_pull" "', argument " "1"" of type '" "ProxyVideoConsumer *""'");
}
arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1);
// Argument 2: untyped output buffer pointer (void*).
res2 = SWIG_ConvertPtr(ST(1),SWIG_as_voidptrptr(&arg2), 0, 0);
if (!SWIG_IsOK(res2)) {
SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "ProxyVideoConsumer_pull" "', argument " "2"" of type '" "void *""'");
}
// Argument 3: buffer size as unsigned int.
ecode3 = SWIG_AsVal_unsigned_SS_int SWIG_PERL_CALL_ARGS_2(ST(2), &val3);
if (!SWIG_IsOK(ecode3)) {
SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "ProxyVideoConsumer_pull" "', argument " "3"" of type '" "unsigned int""'");
}
arg3 = static_cast< unsigned int >(val3);
result = (unsigned int)(arg1)->pull(arg2,arg3);
// Push the pulled-byte count back onto the Perl stack and return.
ST(argvi) = SWIG_From_unsigned_SS_int SWIG_PERL_CALL_ARGS_1(static_cast< unsigned int >(result)); argvi++ ;
XSRETURN(argvi);
fail:
SWIG_croak_null();
}
}
// Perl XS bridge for ProxyVideoConsumer::reset() (SWIG-generated).
// Usage from Perl: ProxyVideoConsumer_reset($self).
XS(_wrap_ProxyVideoConsumer_reset) {
{
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
int argvi = 0;
bool result;
dXSARGS;
// Exactly one argument is required (self).
if ((items < 1) || (items > 1)) {
SWIG_croak("Usage: ProxyVideoConsumer_reset(self);");
}
// Unwrap the blessed Perl object to the native consumer pointer.
res1 = SWIG_ConvertPtr(ST(0), &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 );
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_reset" "', argument " "1"" of type '" "ProxyVideoConsumer *""'");
}
arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1);
result = (bool)(arg1)->reset();
// Push the boolean result back onto the Perl stack and return.
ST(argvi) = SWIG_From_bool SWIG_PERL_CALL_ARGS_1(static_cast< bool >(result)); argvi++ ;
XSRETURN(argvi);
fail:
SWIG_croak_null();
}
}
XS(_wrap_ProxyVideoConsumer_getMediaSessionId) {
{
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
@ -21676,6 +21750,8 @@ static swig_command_info swig_commands[] = {
{"tinyWRAPc::ProxyVideoConsumer_setAutoResizeDisplay", _wrap_ProxyVideoConsumer_setAutoResizeDisplay},
{"tinyWRAPc::ProxyVideoConsumer_getAutoResizeDisplay", _wrap_ProxyVideoConsumer_getAutoResizeDisplay},
{"tinyWRAPc::ProxyVideoConsumer_setConsumeBuffer", _wrap_ProxyVideoConsumer_setConsumeBuffer},
{"tinyWRAPc::ProxyVideoConsumer_pull", _wrap_ProxyVideoConsumer_pull},
{"tinyWRAPc::ProxyVideoConsumer_reset", _wrap_ProxyVideoConsumer_reset},
{"tinyWRAPc::ProxyVideoConsumer_getMediaSessionId", _wrap_ProxyVideoConsumer_getMediaSessionId},
{"tinyWRAPc::ProxyVideoConsumer_registerPlugin", _wrap_ProxyVideoConsumer_registerPlugin},
{"tinyWRAPc::ProxyVideoConsumer_setDefaultChroma", _wrap_ProxyVideoConsumer_setDefaultChroma},
@ -22515,53 +22591,58 @@ XS(SWIG_init) {
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb24", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb24)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_none", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_none)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_bgr24", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_bgr24)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb24", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb24)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb32", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb32)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_bgr24", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_bgr24)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb565le", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb565le)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb32", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb32)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_rgb565be", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_rgb565be)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb565le", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb565le)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_nv12", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_nv12)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_rgb565be", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_rgb565be)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_nv21", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_nv21)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_nv12", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_nv12)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_yuv422p", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_yuv422p)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_nv21", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_nv21)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_uyvy422", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_uyvy422)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_yuv422p", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_yuv422p)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_yuv420p", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_yuv420p)));
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_uyvy422", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_uyvy422)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tmedia_chroma_yuv420p", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tmedia_chroma_yuv420p)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
@ -22744,6 +22825,11 @@ XS(SWIG_init) {
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_h264_bp30)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tdav_codec_id_h264_svc", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_h264_svc)));
SvREADONLY_on(sv);
} while(0) /*@SWIG@*/;
/*@SWIG:/usr/local/share/swig/1.3.39/perl5/perltypemaps.swg,65,%set_constant@*/ do {
SV *sv = get_sv((char*) SWIG_prefix "tdav_codec_id_theora", TRUE | 0x2 | GV_ADDMULTI);
sv_setsv(sv, SWIG_From_int SWIG_PERL_CALL_ARGS_1(static_cast< int >(tdav_codec_id_theora)));

View File

@ -899,6 +899,8 @@ class ProxyVideoConsumer(ProxyPlugin):
def setAutoResizeDisplay(self, *args): return _tinyWRAP.ProxyVideoConsumer_setAutoResizeDisplay(self, *args)
def getAutoResizeDisplay(self): return _tinyWRAP.ProxyVideoConsumer_getAutoResizeDisplay(self)
def setConsumeBuffer(self, *args): return _tinyWRAP.ProxyVideoConsumer_setConsumeBuffer(self, *args)
# Pulls one decoded video frame from the native jitter buffer (SWIG-generated binding).
def pull(self, *args): return _tinyWRAP.ProxyVideoConsumer_pull(self, *args)
# Resets the native consumer's jitter buffer (SWIG-generated binding).
def reset(self): return _tinyWRAP.ProxyVideoConsumer_reset(self)
def getMediaSessionId(self): return _tinyWRAP.ProxyVideoConsumer_getMediaSessionId(self)
__swig_getmethods__["registerPlugin"] = lambda x: _tinyWRAP.ProxyVideoConsumer_registerPlugin
if _newclass:registerPlugin = staticmethod(_tinyWRAP.ProxyVideoConsumer_registerPlugin)
@ -1244,16 +1246,17 @@ tsip_m_local_resume_ok = _tinyWRAP.tsip_m_local_resume_ok
tsip_m_local_resume_nok = _tinyWRAP.tsip_m_local_resume_nok
tsip_m_remote_hold = _tinyWRAP.tsip_m_remote_hold
tsip_m_remote_resume = _tinyWRAP.tsip_m_remote_resume
tmedia_rgb24 = _tinyWRAP.tmedia_rgb24
tmedia_bgr24 = _tinyWRAP.tmedia_bgr24
tmedia_rgb32 = _tinyWRAP.tmedia_rgb32
tmedia_rgb565le = _tinyWRAP.tmedia_rgb565le
tmedia_rgb565be = _tinyWRAP.tmedia_rgb565be
tmedia_nv12 = _tinyWRAP.tmedia_nv12
tmedia_nv21 = _tinyWRAP.tmedia_nv21
tmedia_yuv422p = _tinyWRAP.tmedia_yuv422p
tmedia_uyvy422 = _tinyWRAP.tmedia_uyvy422
tmedia_yuv420p = _tinyWRAP.tmedia_yuv420p
tmedia_chroma_none = _tinyWRAP.tmedia_chroma_none
tmedia_chroma_rgb24 = _tinyWRAP.tmedia_chroma_rgb24
tmedia_chroma_bgr24 = _tinyWRAP.tmedia_chroma_bgr24
tmedia_chroma_rgb32 = _tinyWRAP.tmedia_chroma_rgb32
tmedia_chroma_rgb565le = _tinyWRAP.tmedia_chroma_rgb565le
tmedia_chroma_rgb565be = _tinyWRAP.tmedia_chroma_rgb565be
tmedia_chroma_nv12 = _tinyWRAP.tmedia_chroma_nv12
tmedia_chroma_nv21 = _tinyWRAP.tmedia_chroma_nv21
tmedia_chroma_yuv422p = _tinyWRAP.tmedia_chroma_yuv422p
tmedia_chroma_uyvy422 = _tinyWRAP.tmedia_chroma_uyvy422
tmedia_chroma_yuv420p = _tinyWRAP.tmedia_chroma_yuv420p
tmedia_qos_stype_none = _tinyWRAP.tmedia_qos_stype_none
tmedia_qos_stype_segmented = _tinyWRAP.tmedia_qos_stype_segmented
tmedia_qos_stype_e2e = _tinyWRAP.tmedia_qos_stype_e2e
@ -1290,6 +1293,7 @@ tdav_codec_id_h263pp = _tinyWRAP.tdav_codec_id_h263pp
tdav_codec_id_h264_bp10 = _tinyWRAP.tdav_codec_id_h264_bp10
tdav_codec_id_h264_bp20 = _tinyWRAP.tdav_codec_id_h264_bp20
tdav_codec_id_h264_bp30 = _tinyWRAP.tdav_codec_id_h264_bp30
tdav_codec_id_h264_svc = _tinyWRAP.tdav_codec_id_h264_svc
tdav_codec_id_theora = _tinyWRAP.tdav_codec_id_theora
tdav_codec_id_mp4ves_es = _tinyWRAP.tdav_codec_id_mp4ves_es
tdav_codec_id_vp8 = _tinyWRAP.tdav_codec_id_vp8

View File

@ -14567,6 +14567,66 @@ fail:
}
// SWIG-generated Python wrapper for ProxyVideoConsumer::pull(void*, unsigned).
// Marshals (self, buffer, size) from the Python argument tuple, forwards the
// call to the C++ object, and returns the unsigned result as a Python int.
// NOTE(review): generated code — do not hand-edit; regenerate with SWIG instead.
SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_pull(PyObject *SWIGUNUSEDPARM(self), PyObject *args) {
PyObject *resultobj = 0;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *arg2 = (void *) 0 ;
unsigned int arg3 ;
void *argp1 = 0 ;
int res1 = 0 ;
int res2 ;
unsigned int val3 ;
int ecode3 = 0 ;
PyObject * obj0 = 0 ;
PyObject * obj1 = 0 ;
PyObject * obj2 = 0 ;
unsigned int result;
// "OOO" => exactly three generic PyObject arguments expected.
if (!PyArg_ParseTuple(args,(char *)"OOO:ProxyVideoConsumer_pull",&obj0,&obj1,&obj2)) SWIG_fail;
res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 );
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_pull" "', argument " "1"" of type '" "ProxyVideoConsumer *""'");
}
arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1);
// Untyped void* destination buffer supplied by the caller (e.g. via setPullBuffer).
res2 = SWIG_ConvertPtr(obj1,SWIG_as_voidptrptr(&arg2), 0, 0);
if (!SWIG_IsOK(res2)) {
SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "ProxyVideoConsumer_pull" "', argument " "2"" of type '" "void *""'");
}
ecode3 = SWIG_AsVal_unsigned_SS_int(obj2, &val3);
if (!SWIG_IsOK(ecode3)) {
SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "ProxyVideoConsumer_pull" "', argument " "3"" of type '" "unsigned int""'");
}
arg3 = static_cast< unsigned int >(val3);
result = (unsigned int)(arg1)->pull(arg2,arg3);
resultobj = SWIG_From_unsigned_SS_int(static_cast< unsigned int >(result));
return resultobj;
fail:
return NULL;
}
// SWIG-generated Python wrapper for ProxyVideoConsumer::reset().
// Takes only the bound instance and returns the bool result as a Python bool.
// NOTE(review): generated code — do not hand-edit; regenerate with SWIG instead.
SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_reset(PyObject *SWIGUNUSEDPARM(self), PyObject *args) {
PyObject *resultobj = 0;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
PyObject * obj0 = 0 ;
bool result;
if (!PyArg_ParseTuple(args,(char *)"O:ProxyVideoConsumer_reset",&obj0)) SWIG_fail;
res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_ProxyVideoConsumer, 0 | 0 );
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "ProxyVideoConsumer_reset" "', argument " "1"" of type '" "ProxyVideoConsumer *""'");
}
arg1 = reinterpret_cast< ProxyVideoConsumer * >(argp1);
result = (bool)(arg1)->reset();
resultobj = SWIG_From_bool(static_cast< bool >(result));
return resultobj;
fail:
return NULL;
}
SWIGINTERN PyObject *_wrap_ProxyVideoConsumer_getMediaSessionId(PyObject *SWIGUNUSEDPARM(self), PyObject *args) {
PyObject *resultobj = 0;
ProxyVideoConsumer *arg1 = (ProxyVideoConsumer *) 0 ;
@ -21159,6 +21219,8 @@ static PyMethodDef SwigMethods[] = {
{ (char *)"ProxyVideoConsumer_setAutoResizeDisplay", _wrap_ProxyVideoConsumer_setAutoResizeDisplay, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_getAutoResizeDisplay", _wrap_ProxyVideoConsumer_getAutoResizeDisplay, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_setConsumeBuffer", _wrap_ProxyVideoConsumer_setConsumeBuffer, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_pull", _wrap_ProxyVideoConsumer_pull, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_reset", _wrap_ProxyVideoConsumer_reset, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_getMediaSessionId", _wrap_ProxyVideoConsumer_getMediaSessionId, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_registerPlugin", _wrap_ProxyVideoConsumer_registerPlugin, METH_VARARGS, NULL},
{ (char *)"ProxyVideoConsumer_setDefaultChroma", _wrap_ProxyVideoConsumer_setDefaultChroma, METH_VARARGS, NULL},
@ -22427,16 +22489,17 @@ SWIG_init(void) {
SWIG_Python_SetConstant(d, "tsip_m_local_resume_nok",SWIG_From_int(static_cast< int >(tsip_m_local_resume_nok)));
SWIG_Python_SetConstant(d, "tsip_m_remote_hold",SWIG_From_int(static_cast< int >(tsip_m_remote_hold)));
SWIG_Python_SetConstant(d, "tsip_m_remote_resume",SWIG_From_int(static_cast< int >(tsip_m_remote_resume)));
SWIG_Python_SetConstant(d, "tmedia_rgb24",SWIG_From_int(static_cast< int >(tmedia_rgb24)));
SWIG_Python_SetConstant(d, "tmedia_bgr24",SWIG_From_int(static_cast< int >(tmedia_bgr24)));
SWIG_Python_SetConstant(d, "tmedia_rgb32",SWIG_From_int(static_cast< int >(tmedia_rgb32)));
SWIG_Python_SetConstant(d, "tmedia_rgb565le",SWIG_From_int(static_cast< int >(tmedia_rgb565le)));
SWIG_Python_SetConstant(d, "tmedia_rgb565be",SWIG_From_int(static_cast< int >(tmedia_rgb565be)));
SWIG_Python_SetConstant(d, "tmedia_nv12",SWIG_From_int(static_cast< int >(tmedia_nv12)));
SWIG_Python_SetConstant(d, "tmedia_nv21",SWIG_From_int(static_cast< int >(tmedia_nv21)));
SWIG_Python_SetConstant(d, "tmedia_yuv422p",SWIG_From_int(static_cast< int >(tmedia_yuv422p)));
SWIG_Python_SetConstant(d, "tmedia_uyvy422",SWIG_From_int(static_cast< int >(tmedia_uyvy422)));
SWIG_Python_SetConstant(d, "tmedia_yuv420p",SWIG_From_int(static_cast< int >(tmedia_yuv420p)));
SWIG_Python_SetConstant(d, "tmedia_chroma_none",SWIG_From_int(static_cast< int >(tmedia_chroma_none)));
SWIG_Python_SetConstant(d, "tmedia_chroma_rgb24",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb24)));
SWIG_Python_SetConstant(d, "tmedia_chroma_bgr24",SWIG_From_int(static_cast< int >(tmedia_chroma_bgr24)));
SWIG_Python_SetConstant(d, "tmedia_chroma_rgb32",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb32)));
SWIG_Python_SetConstant(d, "tmedia_chroma_rgb565le",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb565le)));
SWIG_Python_SetConstant(d, "tmedia_chroma_rgb565be",SWIG_From_int(static_cast< int >(tmedia_chroma_rgb565be)));
SWIG_Python_SetConstant(d, "tmedia_chroma_nv12",SWIG_From_int(static_cast< int >(tmedia_chroma_nv12)));
SWIG_Python_SetConstant(d, "tmedia_chroma_nv21",SWIG_From_int(static_cast< int >(tmedia_chroma_nv21)));
SWIG_Python_SetConstant(d, "tmedia_chroma_yuv422p",SWIG_From_int(static_cast< int >(tmedia_chroma_yuv422p)));
SWIG_Python_SetConstant(d, "tmedia_chroma_uyvy422",SWIG_From_int(static_cast< int >(tmedia_chroma_uyvy422)));
SWIG_Python_SetConstant(d, "tmedia_chroma_yuv420p",SWIG_From_int(static_cast< int >(tmedia_chroma_yuv420p)));
SWIG_Python_SetConstant(d, "tmedia_qos_stype_none",SWIG_From_int(static_cast< int >(tmedia_qos_stype_none)));
SWIG_Python_SetConstant(d, "tmedia_qos_stype_segmented",SWIG_From_int(static_cast< int >(tmedia_qos_stype_segmented)));
SWIG_Python_SetConstant(d, "tmedia_qos_stype_e2e",SWIG_From_int(static_cast< int >(tmedia_qos_stype_e2e)));
@ -22473,6 +22536,7 @@ SWIG_init(void) {
SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp10",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp10)));
SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp20",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp20)));
SWIG_Python_SetConstant(d, "tdav_codec_id_h264_bp30",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_bp30)));
SWIG_Python_SetConstant(d, "tdav_codec_id_h264_svc",SWIG_From_int(static_cast< int >(tdav_codec_id_h264_svc)));
SWIG_Python_SetConstant(d, "tdav_codec_id_theora",SWIG_From_int(static_cast< int >(tdav_codec_id_theora)));
SWIG_Python_SetConstant(d, "tdav_codec_id_mp4ves_es",SWIG_From_int(static_cast< int >(tdav_codec_id_mp4ves_es)));
SWIG_Python_SetConstant(d, "tdav_codec_id_vp8",SWIG_From_int(static_cast< int >(tdav_codec_id_vp8)));

View File

@ -1,6 +1,10 @@
- "Alberto Panizzo" <alberto(AT)amarulasolutions(dot)com>
- "Alex Vishnev" <alex9134(AT)gmai(dot)com>
- "Giacomo Vacca" <Tiscali>
- "Laurent Etiemble" <laurent(dot)etiemble(AT)gmail(dot)com>
- "Mamadou Diop" <diopmamadou(AT)doubango(dot)org>
- "Marco Zoncu" <Tiscali>
- "Michael Siddi" <Tiscali>
- "Paolo Baire" <Tiscali>
- "Philippe Verney" <verney(dot)philippe(AT)gmail(dot)com>
- "Rich Hovey" <rich(AT)hovey(dot)org>

View File

@ -0,0 +1,241 @@
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef TYPES_H
#define TYPES_H
#include "NVEncodeDataTypes.h"
#ifdef __cplusplus
extern "C" {
#endif
// Aggregated configuration for the NVIDIA CUDA video encoder (NVVE API).
// Most fields map onto NVVE_* attributes (see the trailing comments); the
// //F<n> tags refer to the feature numbering used by the CUDA Video Encoder
// Library documentation.
struct NVEncoderParams
{
char configFile[256]; // encoder configuration file path
char inputFile[256];  // raw input file (offline encoding) — presumably YUV; confirm
char outputFile[256]; // encoded output file path
int measure_psnr; // non-zero => measure PSNR while encoding
int measure_fps;  // non-zero => measure encoding frame rate
int force_device; // non-zero => force a specific CUDA device
int iSurfaceFormat; // index into sSurfaceFormat (UYVY/YUY2/YV12/NV12/IYUV)
int iPictureType;   // index into sPictureType (frame/field encoding)
int nDeviceMemPitch; // pitch (bytes) for device-memory input surfaces
int iCodecType; // NVVE_CODEC_TYPE,
int GPU_count; // Choose the specific GPU count
int GPU_devID; // Choose the specific GPU device ID
int iUseDeviceMem; // CUDA with DEVICE_MEMORY_INPUT (for encoding)
int iForcedGPU; // NVVE_FORCE_GPU_SELECTION //F22
int iOutputSize[2]; // NVVE_OUT_SIZE,
int iInputSize[2]; // NVVE_IN_SIZE,
float fAspectRatio; //
int iAspectRatio[3]; // NVVE_ASPECT_RATIO,
NVVE_FIELD_MODE Fieldmode; // NVVE_FIELD_ENC_MODE,
int iP_Interval; // NVVE_P_INTERVAL,
int iIDR_Period; // NVVE_IDR_PERIOD,
int iDynamicGOP; // NVVE_DYNAMIC_GOP,
NVVE_RateCtrlType RCType; // NVVE_RC_TYPE,
int iAvgBitrate; // NVVE_AVG_BITRATE,
int iPeakBitrate; // NVVE_PEAK_BITRATE,
int iQP_Level_Intra; // NVVE_QP_LEVEL_INTRA,
int iQP_Level_InterP; // NVVE_QP_LEVEL_INTER_P,
int iQP_Level_InterB; // NVVE_QP_LEVEL_INTER_B,
int iFrameRate[2]; // NVVE_FRAME_RATE,
int iDeblockMode; // NVVE_DEBLOCK_MODE,
int iProfileLevel; // NVVE_PROFILE_LEVEL,
int iForceIntra; // NVVE_FORCE_INTRA,
int iForceIDR; // NVVE_FORCE_IDR,
int iClearStat; // NVVE_CLEAR_STAT,
NVVE_DI_MODE DIMode; // NVVE_SET_DEINTERLACE,
NVVE_PRESETS_TARGET Presets; // NVVE_PRESETS,
int iDisableCabac; // NVVE_DISABLE_CABAC,
int iNaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE
int iDisableSPSPPS; // NVVE_DISABLE_SPS_PPS
NVVE_GPUOffloadLevel GPUOffloadLevel; // NVVE_GPU_OFFLOAD_LEVEL
NVVE_GPUOffloadLevel MaxOffloadLevel; // NVVE_GPU_OFFLOAD_LEVEL_MAX
int iSliceCnt; // NVVE_SLICE_COUNT //F19
int iMultiGPU; // NVVE_MULTI_GPU //F21
int iDeviceMemInput; // NVVE_DEVICE_MEMORY_INPUT //F23
// Query-only NVVE attributes (DirectShow statistics) with no struct field:
// NVVE_STAT_NUM_CODED_FRAMES,
// NVVE_STAT_NUM_RECEIVED_FRAMES,
// NVVE_STAT_BITRATE,
// NVVE_STAT_NUM_BITS_GENERATED,
// NVVE_GET_PTS_DIFF_TIME,
// NVVE_GET_PTS_BASE_TIME,
// NVVE_GET_PTS_CODED_TIME,
// NVVE_GET_PTS_RECEIVED_TIME,
// NVVE_STAT_ELAPSED_TIME,
// NVVE_STAT_QBUF_FULLNESS,
// NVVE_STAT_PERF_FPS,
// NVVE_STAT_PERF_AVG_TIME,
};
// Descriptor mapping an NVVE input surface format to a display name, its
// chroma subsampling and its bits-per-pixel.
// FIX: members are 'const char*' — they only ever point at string literals,
// which must not be reachable through a mutable 'char*' (ill-formed in C++11+,
// deprecated in C).
typedef struct {
    const char *name;     // FourCC-style format name (e.g. "NV12")
    const char *yuv_type; // chroma subsampling: "4:2:2" or "4:2:0"
    int bpp;              // bits per pixel
} _sYUVParams;

// Table indexed by NVEncoderParams::iSurfaceFormat.
static _sYUVParams sSurfaceFormat[] =
{
    { "UYVY", "4:2:2", 16 },
    { "YUY2", "4:2:2", 16 },
    { "YV12", "4:2:0", 12 },
    { "NV12", "4:2:0", 12 },
    { "IYUV", "4:2:0", 12 },
    // FIX: terminator previously initialized only two of the three fields
    // ({ NULL, 0 }); make all three explicit for clarity.
    { NULL,   NULL,    0  }
};
// Generic (name, value-count) pair describing one NVVE encoder attribute.
typedef struct {
char *name; // attribute name as printed/parsed in config files and logs
int params; // number of integer values the attribute carries (1..3)
} _sNVVEEncodeParams;
// One entry per NVVE_* attribute, in enum order; "(DS)" entries exist only in
// the DirectShow encoder interface. NULL-name sentinel terminates the table.
static _sNVVEEncodeParams sNVVE_EncodeParams[] =
{
{ "UNDEFINED", 1 },
{ "NVVE_OUT_SIZE", 2 },
{ "NVVE_ASPECT_RATIO", 3 },
{ "NVVE_FIELD_ENC_MODE", 1 },
{ "NVVE_P_INTERVAL", 1 },
{ "NVVE_IDR_PERIOD", 1 },
{ "NVVE_DYNAMIC_GOP", 1 },
{ "NVVE_RC_TYPE", 1 },
{ "NVVE_AVG_BITRATE", 1 },
{ "NVVE_PEAK_BITRATE", 1 },
{ "NVVE_QP_LEVEL_INTRA", 1 },
{ "NVVE_QP_LEVEL_INTER_P", 1 },
{ "NVVE_QP_LEVEL_INTER_B", 1 },
{ "NVVE_FRAME_RATE", 2 },
{ "NVVE_DEBLOCK_MODE", 1 },
{ "NVVE_PROFILE_LEVEL", 1 },
{ "NVVE_FORCE_INTRA (DS)", 1 }, //DShow only
{ "NVVE_FORCE_IDR (DS)", 1 }, //DShow only
{ "NVVE_CLEAR_STAT (DS)", 1 }, //DShow only
{ "NVVE_SET_DEINTERLACE", 1 },
{ "NVVE_PRESETS", 1 },
{ "NVVE_IN_SIZE", 2 },
{ "NVVE_STAT_NUM_CODED_FRAMES (DS)", 1 }, //DShow only
{ "NVVE_STAT_NUM_RECEIVED_FRAMES (DS)", 1 }, //DShow only
{ "NVVE_STAT_BITRATE (DS)", 1 }, //DShow only
{ "NVVE_STAT_NUM_BITS_GENERATED (DS)", 1 }, //DShow only
{ "NVVE_GET_PTS_DIFF_TIME (DS)", 1 }, //DShow only
{ "NVVE_GET_PTS_BASE_TIME (DS)", 1 }, //DShow only
{ "NVVE_GET_PTS_CODED_TIME (DS)", 1 }, //DShow only
{ "NVVE_GET_PTS_RECEIVED_TIME (DS)", 1 }, //DShow only
{ "NVVE_STAT_ELAPSED_TIME (DS)", 1 }, //DShow only
{ "NVVE_STAT_QBUF_FULLNESS (DS)", 1 }, //DShow only
{ "NVVE_STAT_PERF_FPS (DS)", 1 }, //DShow only
{ "NVVE_STAT_PERF_AVG_TIME (DS)", 1 }, //DShow only
{ "NVVE_DISABLE_CABAC", 1 },
{ "NVVE_CONFIGURE_NALU_FRAMING_TYPE", 1 },
{ "NVVE_DISABLE_SPS_PPS", 1 },
{ "NVVE_SLICE_COUNT", 1 },
{ "NVVE_GPU_OFFLOAD_LEVEL", 1 },
{ "NVVE_GPU_OFFLOAD_LEVEL_MAX", 1 },
{ "NVVE_MULTI_GPU", 1 },
{ "NVVE_GET_GPU_COUNT", 1 },
{ "NVVE_GET_GPU_ATTRIBUTES", 1 },
{ "NVVE_FORCE_GPU_SELECTION", 1 },
{ "NVVE_DEVICE_MEMORY_INPUT", 1 },
{ "NVVE_DEVICE_CTX_LOCK", 1 },
{ NULL, 0 }
};
// H.264 profile_idc values (ITU-T H.264 Annex A) mapped to display names;
// resolve with sProfileIDX2Char().
static _sNVVEEncodeParams sProfileName[] = {
{ "Baseline", 0x42 }, // profile_idc 66
{ "Main" , 0x4d }, // profile_idc 77
{ "High" , 0x64 }, // profile_idc 100
{ NULL , 0 }
};
// H.264 level_idc values (ITU-T H.264 Annex A): level X.Y <=> level_idc == X*10 + Y.
// Resolve with sProfileIDX2Char().
static _sNVVEEncodeParams sProfileLevel[] = {
{ "1.0", 0x0a },
{ "1.1", 0x0b },
{ "1.2", 0x0c },
{ "1.3", 0x0d },
{ "2.0", 0x14 },
{ "2.1", 0x15 },
{ "2.2", 0x16 },
{ "3.0", 0x1e },
{ "3.1", 0x1f },
{ "3.2", 0x20 },
{ "4.0", 0x28 },
{ "4.1", 0x29 },
{ "4.2", 0x2a }, // FIX: was 0x29 (copy/paste of 4.1); level 4.2 => 42 == 0x2a
{ "5.0", 0x32 },
{ "5.1", 0x33 },
{ "Auto", 0xff },
{ NULL , 0 }
};
// Returns the display name for the table entry whose 'params' value equals
// 'ID', or NULL when no entry matches. 'pProfile' must be a NULL-name
// terminated table (e.g. sProfileName or sProfileLevel).
inline char * sProfileIDX2Char(_sNVVEEncodeParams *pProfile, int ID)
{
    _sNVVEEncodeParams *entry = pProfile;
    while (entry->name != NULL) {
        if (entry->params == ID) {
            return entry->name; // first match wins
        }
        ++entry;
    }
    return NULL; // ID not present in the table
}
// Display names for NVVE_PRESETS targets, in enum order (NULL-terminated).
static char *sVideoEncodePresets[] = {
"PSP ( 320x 240)",
"iPod/iPhone ( 320x 240)",
"AVCHD",
"BluRay",
"HDV_1440",
"ZuneHD",
"FlipCam",
NULL
};
// Display names for NVVE_GPU_OFFLOAD_LEVEL values (NULL-terminated).
static char *sGPUOffloadLevel[] = {
"CPU PEL processing",
"Motion Estimation",
"Full Encode",
NULL
};
// Display names indexed by NVEncoderParams::iPictureType (NULL-terminated).
static char *sPictureType[] = {
"Frame Mode",
"Field Mode (top first)",
"Field Mode (bottom first)",
"Field Mode (picaff) unsupported",
NULL
};
// Display names for decoded picture structure values (NULL-terminated).
static char *sPictureStructure[] = {
"Unknown",
"Top Field",
"Bottom Field",
"Frame Picture",
NULL
};
// Rate Control Method (NVVE_RC_TYPE)
static char *sNVVE_RateCtrlType[] = {
"Rate Control CQP",
"Rate Control VBR",
"Rate Control CBR with QP",
"Rate Control VBR with Min QP",
NULL
};
#ifdef __cplusplus
}
#endif
#endif

View File

@ -167,9 +167,11 @@ OBJS += src/audio/tdav_consumer_audio.o \
src/audio/tdav_webrtc_denoise.o
### video
OBJS += src/video/tdav_converter_video.o \
OBJS += src/video/tdav_consumer_video.o \
src/video/tdav_converter_video.o \
src/video/tdav_runnable_video.o \
src/video/tdav_session_video.o
src/video/tdav_session_video.o \
src/video/tdav_video_jitterbuffer.o
### msrp
OBJS += src/msrp/tdav_consumer_msrp.o \

View File

@ -38,7 +38,7 @@
TDAV_BEGIN_DECLS
/** Speex JitterBufferr*/
/** Speex JitterBuffer*/
typedef struct tdav_speex_jitterBuffer_s
{
TMEDIA_DECLARE_JITTER_BUFFER;

View File

@ -21,12 +21,11 @@
*/
/**@file tdav_codec_h264.h
* @brief H.264 codec plugin
* @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding
* RTP payloader/depayloader follows RFC 3984.
*
* @author Mamadou Diop <diopmamadou(at)doubango.org>
*
*/
#ifndef TINYDAV_CODEC_H264_H
#define TINYDAV_CODEC_H264_H
@ -35,42 +34,15 @@
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
#include "tinymedia/tmedia_codec.h"
#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
#include <libavcodec/avcodec.h>
TDAV_BEGIN_DECLS
// Because of FD, declare it here
typedef enum packetization_mode_e{
Single_NAL_Unit_Mode = 0, /* Single NAL mode (Only nals from 1-23 are allowed) */
Non_Interleaved_Mode = 1, /* Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed */
Interleaved_Mode = 2 /* 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.*/
}
packetization_mode_t;
typedef enum tdav_codec_h264_profile_e
{
tdav_codec_h264_bp99,
tdav_codec_h264_bp10,
tdav_codec_h264_bp20,
tdav_codec_h264_bp30,
}
tdav_codec_h264_profile_t;
typedef struct tdav_codec_h264_s
{
TMEDIA_DECLARE_CODEC_VIDEO;
tdav_codec_h264_profile_t profile;
packetization_mode_t pack_mode;
struct{
uint8_t* ptr;
tsk_size_t size;
} rtp;
TDAV_DECLARE_CODEC_H264_COMMON;
// Encoder
struct{
@ -78,7 +50,7 @@ typedef struct tdav_codec_h264_s
AVCodecContext* context;
AVFrame* picture;
void* buffer;
int frame_count;
int64_t frame_count;
} encoder;
// decoder
@ -89,6 +61,7 @@ typedef struct tdav_codec_h264_s
void* accumulator;
tsk_size_t accumulator_pos;
tsk_size_t accumulator_size;
uint16_t last_seq;
} decoder;
}
@ -98,6 +71,14 @@ TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp10_plugin_def
TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp20_plugin_def_t;
TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_bp30_plugin_def_t;
// Tells whether 'plugin' is one of the three FFmpeg/x264 H.264 baseline
// plugin definitions exported by this header. Safe to call with tsk_null.
static inline tsk_bool_t tdav_codec_h264_is_ffmpeg_plugin(const tmedia_codec_plugin_def_t *plugin)
{
	if(!plugin){
		return tsk_false;
	}
	return (plugin == tdav_codec_h264_bp10_plugin_def_t
			|| plugin == tdav_codec_h264_bp20_plugin_def_t
			|| plugin == tdav_codec_h264_bp30_plugin_def_t)
		? tsk_true : tsk_false;
}
TDAV_END_DECLS
#endif /* HAVE_FFMPEG */

View File

@ -0,0 +1,160 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>.
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
#ifndef TINYDAV_CODEC_H264_COMMON_H
#define TINYDAV_CODEC_H264_COMMON_H
#include "tinydav_config.h"
#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h"
#include "tinymedia/tmedia_codec.h"
#include "tsk_memory.h"
#include "tsk_string.h"
#include "tsk_params.h"
#include <string.h>
TDAV_BEGIN_DECLS
#if !defined(H264_MAX_BR)
# define H264_MAX_BR 452
#endif
#if !defined(H264_MAX_MBPS)
# define H264_MAX_MBPS 11880
#endif
#if !defined(H264_PACKETIZATION_MODE)
# define H264_PACKETIZATION_MODE Non_Interleaved_Mode
#endif
// Declared here (rather than in the payload header) because of forward
// declaration ("FD") constraints between the codec and RTP headers.
// RFC 3984/6184 H.264 RTP packetization modes.
typedef enum packetization_mode_e{
Single_NAL_Unit_Mode = 0, /* Single NAL mode (Only nals from 1-23 are allowed) */
Non_Interleaved_Mode = 1, /* Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed */
Interleaved_Mode = 2 /* 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.*/
}
packetization_mode_t;
// Internal baseline-profile buckets; see tdav_codec_h264_common_get_profile()
// for the profile-level-id => bucket mapping.
typedef enum tdav_codec_h264_profile_e
{
tdav_codec_h264_bp99, // unknown/unsupported profile
tdav_codec_h264_bp10, // baseline, levels 1.0-1.3
tdav_codec_h264_bp20, // baseline, levels 2.0-2.2
tdav_codec_h264_bp30, // baseline, level 3.0
}
tdav_codec_h264_profile_t;
// State shared by all H.264 codec implementations (FFmpeg/x264 and CUDA).
typedef struct tdav_codec_h264_common_s
{
TMEDIA_DECLARE_CODEC_VIDEO; // base video codec; must stay first so casts work
tdav_codec_h264_profile_t profile; // negotiated profile bucket
packetization_mode_t pack_mode; // negotiated RTP packetization mode
struct{
uint8_t* ptr; // scratch buffer used while packetizing outgoing NALUs
tsk_size_t size; // allocated size of 'ptr' (freed in _common_deinit)
} rtp;
}
tdav_codec_h264_common_t;
#define TDAV_CODEC_H264_COMMON(self) ((tdav_codec_h264_common_t*)(self))
#define TDAV_DECLARE_CODEC_H264_COMMON tdav_codec_h264_common_t __video__
// Initializes the shared H.264 state. Currently a no-op kept for symmetry
// with tdav_codec_h264_common_deinit() and for future common-state setup.
// Always returns 0.
static int tdav_codec_h264_common_init(tdav_codec_h264_common_t * h264)
{
	(void)(h264);
	return 0;
}
// Releases the shared H.264 state: the base video-codec resources and the
// RTP packetization buffer. Null 'h264' is tolerated (no-op). Returns 0.
static int tdav_codec_h264_common_deinit(tdav_codec_h264_common_t * h264)
{
	if(!h264){
		return 0;
	}
	tmedia_codec_video_deinit(TMEDIA_CODEC_VIDEO(h264));
	TSK_FREE(h264->rtp.ptr);
	h264->rtp.size = 0;
	return 0;
}
// Extracts the H.264 profile from an SDP fmtp attribute value by parsing the
// "profile-level-id" parameter (RFC 6184), e.g.
// "profile-level-id=42e00a; packetization-mode=1".
// Only the baseline profile (levels <= 3.0) is mapped; everything else —
// including a missing or unparsable parameter — yields tdav_codec_h264_bp99.
static tdav_codec_h264_profile_t tdav_codec_h264_common_get_profile(const char* fmtp)
{
tdav_codec_h264_profile_t profile = tdav_codec_h264_bp99;
tsk_size_t size = tsk_strlen(fmtp);
int start, end;
if((start = tsk_strindexOf(fmtp, size, "profile-level-id")) != -1){
tsk_param_t* param;
if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){
end = (int)size; // parameter runs to the end of the attribute
}
else{
// FIX: tsk_strindexOf() searched from (fmtp+start), so the returned
// offset is relative to that position; make it absolute so that the
// (end-start) length below is correct even when start > 0.
end += start;
}
if((param = tsk_params_parse_param((fmtp+start), (end-start)))){
profile_idc_t p_idc;
level_idc_t l_idc;
if(param->value){
tsk_strtrim_both(&param->value);
}
tdav_codec_h264_parse_profile(param->value, &p_idc, tsk_null, &l_idc);
switch(p_idc){
case profile_idc_baseline:
// Bucket the level into the coarse bp10/bp20/bp30 sub-profiles.
switch(l_idc){
case level_idc_1_0:
case level_idc_1_b:
case level_idc_1_1:
case level_idc_1_2:
case level_idc_1_3:
profile = tdav_codec_h264_bp10;
break;
case level_idc_2_0:
case level_idc_2_1:
case level_idc_2_2:
profile = tdav_codec_h264_bp20;
break;
case level_idc_3_0:
profile = tdav_codec_h264_bp30;
break;
default:
/* levels above 3.0 are not supported: keep bp99 */
break;
}
break;
case profile_idc_extended:
case profile_idc_main:
case profile_idc_high:
default:
/* Not supported */
break;
}
TSK_OBJECT_SAFE_FREE(param);
}
}
return profile;
}
TDAV_END_DECLS
#endif /* TINYDAV_CODEC_H264_COMMON_H */

View File

@ -0,0 +1,126 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>.
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
/**@file tdav_codec_h264_cuda.h
* @brief H.264 codec plugin using NVIDIA CUDA for encoding/decoding.
* Env: gpucomputingsdk_4.0.17_win_32, cudatoolkit_4.0.17_win_32 and 280.26-notebook-win7-winvista-32bit-international-whql.
* http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoDecoder_Library.pdf
* http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_VideoEncoder_Library.pdf
* RTP payloader/depayloader follows RFC 3984.
*
* @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
*/
#ifndef TINYDAV_CODEC_H264_CUDA_H
#define TINYDAV_CODEC_H264_CUDA_H
#include "tinydav_config.h"
#if HAVE_CUDA
#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
// I really don't want to use C++ code :(
#if !defined(__cplusplus)
typedef enum NVVE_FrameRate NVVE_FrameRate;
typedef enum NVVE_GPUOffloadLevel NVVE_GPUOffloadLevel;
typedef enum NVVE_ASPECT_RATIO_TYPE NVVE_ASPECT_RATIO_TYPE;
typedef enum NVVE_SurfaceFormat NVVE_SurfaceFormat;
typedef enum NVVE_PicStruct NVVE_PicStruct;
typedef enum NVVE_FIELD_MODE NVVE_FIELD_MODE;
typedef enum NVVE_RateCtrlType NVVE_RateCtrlType;
typedef enum NVVE_DI_MODE NVVE_DI_MODE;
typedef enum NVVE_PRESETS_TARGET NVVE_PRESETS_TARGET;
typedef enum NVVE_DI_MODE NVVE_DI_MODE;
typedef struct NVEncoderParams NVEncoderParams;
#endif /* __cplusplus */
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <unknwn.h>
#include <nvcuvid.h>
#include <cuviddec.h>
#include <NVEncoderAPI.h>
#include <NVEncodeDataTypes.h>
#include <d3d9.h>
#include <cudad3d9.h>
#include <cuda/types.h>
#include "tsk_mutex.h"
TDAV_BEGIN_DECLS
// H.264 codec context backed by the NVIDIA CUDA encoder (NVVE) and decoder
// (CUVID) libraries, with a Direct3D9 device for GPU interop.
typedef struct tdav_codec_h264_cuda_s
{
TDAV_DECLARE_CODEC_H264_COMMON; // shared H.264 state; must stay first so casts work
struct {
NVEncoder context; // NVVE encoder handle
NVEncoderParams ctx_params; // encoder configuration (see cuda/types.h)
NVVE_CallbackParams clb_params; // callbacks invoked with encoded bitstream
void *buffer; // output bitstream buffer
tsk_size_t buffer_size; // allocated size of 'buffer'
int64_t frame_count; // number of frames submitted so far
} encoder;
struct {
tsk_mutex_handle_t *mutex; // serializes decoder access across threads
CUvideodecoder context; // CUVID decoder handle
CUVIDDECODECREATEINFO info; // decoder creation parameters
CUvideoparser cu_parser; // CUVID bitstream parser
CUVIDPARSERPARAMS cu_paser_params; // parser creation params (sic: "paser")
CUdevice cu_device; // CUDA device ordinal in use
IDirect3D9 *dx_d3d; // D3D9 factory used for CUDA/D3D interop
IDirect3DDevice9 *dx_d3ddevice; // D3D9 device bound to the CUDA context
CUcontext cu_context; // CUDA context
void* accumulator; // reassembly buffer for incoming RTP NALUs
tsk_size_t accumulator_pos; // current write offset in 'accumulator'
tsk_size_t accumulator_size; // allocated size of 'accumulator'
void *cu_buffer; // host copy of the decoded frame
tsk_size_t cu_buffer_size; // allocated size of 'cu_buffer'
tsk_size_t cu_buffer_pitch; // row pitch (bytes) of the decoded surface
tsk_bool_t cu_buffer_avail; // tsk_true when 'cu_buffer' holds a fresh frame
uint16_t last_seq; // last RTP sequence number seen (loss detection)
} decoder;
}
tdav_codec_h264_cuda_t;
TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp10_plugin_def_t;
TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp20_plugin_def_t;
TINYDAV_GEXTERN const tmedia_codec_plugin_def_t *tdav_codec_h264_cuda_bp30_plugin_def_t;
tsk_bool_t tdav_codec_h264_cuda_is_supported();
// Tells whether 'plugin' is one of the three CUDA H.264 baseline plugin
// definitions exported by this header. Safe to call with tsk_null.
static inline tsk_bool_t tdav_codec_h264_is_cuda_plugin(const tmedia_codec_plugin_def_t *plugin)
{
	if(!plugin){
		return tsk_false;
	}
	return (plugin == tdav_codec_h264_cuda_bp10_plugin_def_t
			|| plugin == tdav_codec_h264_cuda_bp20_plugin_def_t
			|| plugin == tdav_codec_h264_cuda_bp30_plugin_def_t)
		? tsk_true : tsk_false;
}
TDAV_END_DECLS
#endif /* HAVE_CUDA */
#endif /* TINYDAV_CODEC_H264_CUDA_H */

View File

@ -32,8 +32,6 @@
#include "tinydav_config.h"
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
#include "tsk_common.h"
TDAV_BEGIN_DECLS
@ -46,7 +44,7 @@ TDAV_BEGIN_DECLS
#define H264_START_CODE_PREFIX_SIZE 4
struct tdav_codec_h264_s;
struct tdav_codec_h264_common_s;
extern uint8_t H264_START_CODE_PREFIX[4];
@ -114,10 +112,8 @@ nal_unit_type_t;
int tdav_codec_h264_parse_profile(const char* profile_level_id, profile_idc_t *p_idc, profile_iop_t *p_iop, level_idc_t *l_idc);
int tdav_codec_h264_get_pay(const void* in_data, tsk_size_t in_size, const void** out_data, tsk_size_t *out_size, tsk_bool_t* append_scp);
void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *data, tsk_size_t size, tsk_bool_t marker);
void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker);
TDAV_END_DECLS
#endif /* HAVE_FFMPEG */
#endif /* TINYDAV_CODEC_H264_RTP_H */

View File

@ -74,6 +74,7 @@ typedef struct tdav_codec_vp8_s
unsigned last_PartID:4;
unsigned last_S:1;
unsigned last_N:1;
unsigned frame_corrupted;
} decoder;
}
tdav_codec_vp8_t;

View File

@ -65,9 +65,10 @@ typedef enum tdav_codec_id_e
tdav_codec_id_h264_bp10 = 0x00010000<<4,
tdav_codec_id_h264_bp20 = 0x00010000<<5,
tdav_codec_id_h264_bp30 = 0x00010000<<6,
tdav_codec_id_theora = 0x00010000<<7,
tdav_codec_id_mp4ves_es = 0x00010000<<8,
tdav_codec_id_vp8 = 0x00010000<<9,
tdav_codec_id_h264_svc = 0x00010000<<7,
tdav_codec_id_theora = 0x00010000<<8,
tdav_codec_id_mp4ves_es = 0x00010000<<9,
tdav_codec_id_vp8 = 0x00010000<<10,
}
tdav_codec_id_t;

View File

@ -0,0 +1,71 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
/**@file tdav_consumer_video.h
* @brief Base class for all Video consumers.
*
* @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*/
#ifndef TINYDAV_CONSUMER_VIDEO_H
#define TINYDAV_CONSUMER_VIDEO_H
#include "tinydav_config.h"
#include "tinymedia/tmedia_consumer.h"
#include "tsk_safeobj.h"
TDAV_BEGIN_DECLS
#define TDAV_CONSUMER_VIDEO(self) ((tdav_consumer_video_t*)(self))
/** Base type for all video consumers: extends the generic tmedia consumer
 * with an optional video jitter buffer and a lock. */
typedef struct tdav_consumer_video_s
{
TMEDIA_DECLARE_CONSUMER; /* base consumer; first so TDAV_CONSUMER_VIDEO() casts work */
struct tmedia_jitterbuffer_s* jitterbuffer; /* may be tsk_null — test with tdav_consumer_video_has_jb() */
TSK_DECLARE_SAFEOBJ; /* protects concurrent put()/get() access */
}
tdav_consumer_video_t;
TINYDAV_API int tdav_consumer_video_init(tdav_consumer_video_t* self);
TINYDAV_API int tdav_consumer_video_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2);
#define tdav_consumer_video_prepare(self, codec) tmedia_consumer_prepare(TDAV_CONSUMER_VIDEO(self), codec)
#define tdav_consumer_video_start(self) tmedia_consumer_start(TDAV_CONSUMER_VIDEO(self))
#define tdav_consumer_video_consume(self, buffer, size) tmedia_consumer_consume(TDAV_CONSUMER_VIDEO(self), buffer, size)
#define tdav_consumer_video_pause(self) tmedia_consumer_pause(TDAV_CONSUMER_VIDEO(self))
#define tdav_consumer_video_stop(self) tmedia_consumer_stop(TDAV_CONSUMER_VIDEO(self))
#define tdav_consumer_video_has_jb(self) ((self) && (self)->jitterbuffer)
TINYDAV_API int tdav_consumer_video_set(tdav_consumer_video_t* self, const tmedia_param_t* param);
TINYDAV_API int tdav_consumer_video_put(tdav_consumer_video_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr);
TINYDAV_API tsk_size_t tdav_consumer_video_get(tdav_consumer_video_t* self, void* out_data, tsk_size_t out_size);
TINYDAV_API int tdav_consumer_video_tick(tdav_consumer_video_t* self);
TINYDAV_API int tdav_consumer_video_reset(tdav_consumer_video_t* self);
TINYDAV_API int tdav_consumer_video_deinit(tdav_consumer_video_t* self);
#define TDAV_DECLARE_CONSUMER_VIDEO tdav_consumer_video_t __consumer_video__
TDAV_END_DECLS
#endif /* TINYDAV_CONSUMER_VIDEO_H */

View File

@ -50,7 +50,8 @@ typedef struct tdav_converter_video_s
#if HAVE_FFMPEG || HAVE_SWSSCALE
struct SwsContext *context;
enum PixelFormat pixfmt;
enum PixelFormat srcFormat;
enum PixelFormat dstFormat;
AVFrame* srcFrame;
AVFrame* dstFrame;
@ -69,18 +70,19 @@ typedef struct tdav_converter_video_s
tsk_size_t dstWidth;
tsk_size_t dstHeight;
tsk_bool_t toYUV420;
// one shot parameters
int rotation;
tsk_bool_t flip;
}
tdav_converter_video_t;
tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t chroma, tsk_bool_t toYUV420);
tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma);
tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void* buffer, void** output, tsk_size_t* output_max_size);
#define tdav_converter_video_init(self, _rotation/*...To be completed with other parameters*/) \
#define tdav_converter_video_init(self, _rotation, _flip/*...To be completed with other parameters*/) \
if((self)){ \
(self)->rotation = (_rotation); \
(self)->flip = (_flip); \
}
#define tdav_converter_video_flip(frame, height) \

View File

@ -0,0 +1,77 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
/**@file tdav_video_jitterbuffer.h
* @brief Video Jitter Buffer
*
* @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*/
#ifndef TINYDAV_VIDEO_JITTERBUFFER_H
#define TINYDAV_VIDEO_JITTERBUFFER_H
#include "tinydav_config.h"
#include "tinymedia/tmedia_jitterbuffer.h"
#include "tsk_buffer.h"
#include "tsk_timer.h"
#include "tsk_list.h"
#include "tsk_safeobj.h"
TDAV_BEGIN_DECLS
/** Video JitterBuffer packet */
typedef struct tdav_video_jitterbuffer_packet_s
{
TSK_DECLARE_OBJECT;
TSK_DECLARE_SAFEOBJ;
tsk_bool_t taken;
tsk_buffer_t *data;
int64_t seq_num;
}
tdav_video_jitterbuffer_packet_t;
TINYDAV_GEXTERN const tsk_object_def_t *tdav_video_jitterbuffer_packet_def_t;
typedef tsk_list_t tdav_video_jitterbuffer_packets_L_t;
/** Video JitterBuffer */
typedef struct tdav_video_jitterbuffer_s
{
TMEDIA_DECLARE_JITTER_BUFFER;
TSK_DECLARE_SAFEOBJ;
uint32_t fps;
uint32_t frame_duration;
uint32_t frame_max_count;
uint32_t frame_curr_index;
uint32_t tail; // in milliseconds
tdav_video_jitterbuffer_packets_L_t * packets;
tsk_timer_manager_handle_t *timer;
}
tdav_video_jitterbuffer_t;
const tmedia_jitterbuffer_plugin_def_t *tdav_video_jitterbuffer_plugin_def_t;
TDAV_END_DECLS
#endif /* TINYDAV_VIDEO_JITTERBUFFER_H */

View File

@ -74,7 +74,7 @@ int tdav_consumer_audio_init(tdav_consumer_audio_t* self)
TMEDIA_CONSUMER(self)->audio.gain = TSK_MIN(tmedia_defaults_get_audio_consumer_gain(), TDAV_AUDIO_GAIN_MAX);
/* self:jitterbuffer */
if(!(self->jitterbuffer = tmedia_jitterbuffer_create())){
if(!(self->jitterbuffer = tmedia_jitterbuffer_create(tmedia_audio))){
TSK_DEBUG_ERROR("Failed to create jitter buffer");
return -2;
}

View File

@ -47,7 +47,13 @@
#define TDAV_SPEAKUP_10MS_FRAME_SIZE(self) (((self)->rate * TDAV_SPEAKUP_10MS)/1000)
#define TDAV_SPEAKUP_PTIME_FRAME_SIZE(self) (((self)->rate * (self)->framesize)/1000)
int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate)
static int tdav_speakup_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
{
TSK_DEBUG_ERROR("Not implemented");
return -2;
}
static int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate)
{
tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
if(!jitterbuffer->jbuffer){
@ -65,12 +71,12 @@ int tdav_speakup_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_d
return 0;
}
int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
static int tdav_speakup_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
{
return 0;
}
int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
static int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
{
tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
@ -135,7 +141,7 @@ int tdav_speakup_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_s
return 0;
}
tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
static tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
{
tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
int jret;
@ -186,7 +192,7 @@ tsk_size_t tdav_speakup_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_
return (_10ms_count * jitterbuffer->_10ms_size_bytes);
}
int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
static int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
{
tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
if(jitterbuffer->jbuffer){
@ -199,7 +205,7 @@ int tdav_speakup_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
}
}
int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self)
static int tdav_speakup_jitterbuffer_close(tmedia_jitterbuffer_t* self)
{
tdav_speakup_jitterbuffer_t *jitterbuffer = (tdav_speakup_jitterbuffer_t *)self;
if(jitterbuffer->jbuffer){
@ -254,9 +260,10 @@ static const tsk_object_def_t tdav_speakup_jitterbuffer_def_s =
static const tmedia_jitterbuffer_plugin_def_t tdav_speakup_jitterbuffer_plugin_def_s =
{
&tdav_speakup_jitterbuffer_def_s,
tmedia_audio,
"Audio/video JitterBuffer based on Speex",
tdav_speakup_jitterbuffer_set,
tdav_speakup_jitterbuffer_open,
tdav_speakup_jitterbuffer_tick,
tdav_speakup_jitterbuffer_put,

View File

@ -34,6 +34,11 @@
#include "tsk_memory.h"
#include "tsk_debug.h"
static int tdav_speex_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
{
TSK_DEBUG_ERROR("Not implemented");
return -2;
}
static int tdav_speex_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate)
{
@ -184,9 +189,10 @@ static const tsk_object_def_t tdav_speex_jitterbuffer_def_s =
static const tmedia_jitterbuffer_plugin_def_t tdav_speex_jitterbuffer_plugin_def_s =
{
&tdav_speex_jitterbuffer_def_s,
tmedia_audio,
"Audio JitterBuffer based on Speex",
"Audio/video JitterBuffer based on Speex",
tdav_speex_jitterbuffer_set,
tdav_speex_jitterbuffer_open,
tdav_speex_jitterbuffer_tick,
tdav_speex_jitterbuffer_put,

View File

@ -223,11 +223,6 @@ tsk_size_t tdav_codec_h261_encode(tmedia_codec_t* self, const void* in_data, tsk
TSK_DEBUG_ERROR("Invalid size");
return 0;
}
// Flip
if(self->video.flip.encoded){
tdav_converter_video_flip(h261->encoder.picture, h261->encoder.context->height);
}
// Encode data
h261->encoder.picture->pts = AV_NOPTS_VALUE;
@ -331,9 +326,6 @@ tsk_size_t tdav_codec_h261_decode(tmedia_codec_t* self, const void* in_data, tsk
retsize = xsize;
TMEDIA_CODEC_VIDEO(h261)->in.width = h261->decoder.context->width;
TMEDIA_CODEC_VIDEO(h261)->in.height = h261->decoder.context->height;
if(self->video.flip.decoded){
tdav_converter_video_flip(h261->decoder.picture, h261->decoder.context->height);
}
/* copy picture into a linear buffer */
avpicture_layout((AVPicture *)h261->decoder.picture, h261->decoder.context->pix_fmt, h261->decoder.context->width, h261->decoder.context->height,
*out_data, retsize);

View File

@ -288,11 +288,6 @@ static tsk_size_t tdav_codec_h263_encode(tmedia_codec_t* self, const void* in_da
TSK_DEBUG_ERROR("Invalid size");
return 0;
}
// Flip
if(self->video.flip.encoded){
tdav_converter_video_flip(h263->encoder.picture, h263->encoder.context->height);
}
h263->encoder.picture->pts = AV_NOPTS_VALUE;
h263->encoder.picture->quality = h263->encoder.context->global_quality;
@ -442,10 +437,6 @@ static tsk_size_t tdav_codec_h263_decode(tmedia_codec_t* self, const void* in_da
retsize = xsize;
TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
// flip
if(self->video.flip.decoded){
tdav_converter_video_flip(h263->decoder.picture, h263->decoder.context->height);
}
/* copy picture into a linear buffer */
avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height,
*out_data, retsize);
@ -755,10 +746,6 @@ static tsk_size_t tdav_codec_h263p_decode(tmedia_codec_t* self, const void* in_d
retsize = xsize;
TMEDIA_CODEC_VIDEO(h263)->in.width = h263->decoder.context->width;
TMEDIA_CODEC_VIDEO(h263)->in.height = h263->decoder.context->height;
// flip
if(self->video.flip.decoded){
tdav_converter_video_flip(h263->decoder.picture, h263->decoder.context->height);
}
/* copy picture into a linear buffer */
avpicture_layout((AVPicture *)h263->decoder.picture, h263->decoder.context->pix_fmt, h263->decoder.context->width, h263->decoder.context->height,
*out_data, retsize);

View File

@ -21,7 +21,7 @@
*/
/**@file tdav_codec_h264.c
* @brief H.264 codec plugin
* @brief H.264 codec plugin using FFmpeg for decoding and x264 for encoding
* RTP payloader/depayloader follows RFC 3984
*
* @author Mamadou Diop <diopmamadou(at)doubango.org>
@ -41,13 +41,8 @@
#include "tsk_memory.h"
#include "tsk_debug.h"
#define H264_PACKETIZATION_MODE Non_Interleaved_Mode
#define H264_MAX_BR 452
#define H264_MAX_MBPS 11880
int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t profile);
int tdav_codec_h264_deinit(tdav_codec_h264_t* self);
tdav_codec_h264_profile_t tdav_codec_h264_get_profile(const char* fmtp);
static int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t profile);
static int tdav_codec_h264_deinit(tdav_codec_h264_t* self);
static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t* pdata, tsk_size_t size);
@ -55,7 +50,7 @@ static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t*
#define tdav_codec_h264_fmtp_set tsk_null /* FIXME: should be removed from all plugins (useless) */
int tdav_codec_h264_open(tmedia_codec_t* self)
static int tdav_codec_h264_open(tmedia_codec_t* self)
{
int ret;
int size;
@ -109,7 +104,7 @@ int tdav_codec_h264_open(tmedia_codec_t* self)
h264->encoder.context->b_frame_strategy = 1;
h264->encoder.context->chromaoffset = 0;
switch(h264->profile){
switch(TDAV_CODEC_H264_COMMON(h264)->profile){
case tdav_codec_h264_bp10:
default:
h264->encoder.context->profile = FF_PROFILE_H264_BASELINE;
@ -174,12 +169,6 @@ int tdav_codec_h264_open(tmedia_codec_t* self)
}
avcodec_get_frame_defaults(h264->decoder.picture);
size = avpicture_get_size(PIX_FMT_YUV420P, h264->decoder.context->width, h264->decoder.context->height);
if(!(h264->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
return -2;
}
// Open decoder
if((ret = avcodec_open(h264->decoder.context, h264->decoder.codec)) < 0){
TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
@ -189,7 +178,7 @@ int tdav_codec_h264_open(tmedia_codec_t* self)
return 0;
}
int tdav_codec_h264_close(tmedia_codec_t* self)
static int tdav_codec_h264_close(tmedia_codec_t* self)
{
tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
@ -224,15 +213,13 @@ int tdav_codec_h264_close(tmedia_codec_t* self)
if(h264->decoder.picture){
av_free(h264->decoder.picture);
}
if(h264->decoder.accumulator){
TSK_FREE(h264->decoder.accumulator);
h264->decoder.accumulator_pos = 0;
}
TSK_FREE(h264->decoder.accumulator);
h264->decoder.accumulator_pos = 0;
return 0;
}
tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
int ret = 0;
int size;
@ -256,11 +243,6 @@ tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk
TSK_DEBUG_ERROR("Invalid size");
return 0;
}
// Flip
if(self->video.flip.encoded){
tdav_converter_video_flip(h264->encoder.picture, h264->encoder.context->height);
}
// send keyframe for:
// - the first frame
@ -291,7 +273,7 @@ tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk
return 0;
}
tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
const trtp_rtp_header_t* rtp_hdr = proto_hdr;
@ -300,14 +282,16 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk
tsk_size_t pay_size = 0;
int ret;
tsk_bool_t append_scp;
tsk_size_t xsize, retsize = 0;
tsk_size_t retsize = 0, size_to_copy = 0;
static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3;
static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
int got_picture_ptr;
if(!h264 || !in_data || !in_size || !out_data || !h264->decoder.context){
TSK_DEBUG_ERROR("Invalid parameter");
return 0;
}
//TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num);
/* Packet lost? */
@ -341,22 +325,43 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk
TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
return 0;
}
xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height);
//append_scp = tsk_true;
size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
if((int)(h264->decoder.accumulator_pos + pay_size) <= xsize){
if(append_scp){
memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, sizeof(H264_START_CODE_PREFIX));
h264->decoder.accumulator_pos += sizeof(H264_START_CODE_PREFIX);
}
memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
h264->decoder.accumulator_pos += pay_size;
}
else{
TSK_DEBUG_WARN("Buffer overflow");
h264->decoder.accumulator_pos = 0;
return 0;
}
// start-accumulator
if(!h264->decoder.accumulator){
if(size_to_copy > xmax_size){
TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
return 0;
}
if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){
TSK_DEBUG_ERROR("Failed to allocated new buffer");
return 0;
}
h264->decoder.accumulator_size = size_to_copy;
}
if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){
TSK_DEBUG_ERROR("BufferOverflow");
h264->decoder.accumulator_pos = 0;
return 0;
}
if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){
if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){
TSK_DEBUG_ERROR("Failed to reallocated new buffer");
h264->decoder.accumulator_pos = 0;
h264->decoder.accumulator_size = 0;
return 0;
}
h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
}
if(append_scp){
memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
h264->decoder.accumulator_pos += start_code_prefix_size;
}
memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
h264->decoder.accumulator_pos += pay_size;
// end-accumulator
if(rtp_hdr->marker){
AVPacket packet;
@ -372,10 +377,10 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk
TSK_DEBUG_ERROR("=============Failed to decode the buffer");
}
else if(got_picture_ptr){
if(self->video.flip.decoded){
tdav_converter_video_flip(h264->decoder.picture, h264->decoder.context->height);
}
tsk_size_t xsize;
/* fill out */
xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height);
if(*out_max_size<xsize){
if((*out_data = tsk_realloc(*out_data, (xsize + FF_INPUT_BUFFER_PADDING_SIZE)))){
*out_max_size = xsize;
@ -397,7 +402,7 @@ tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk
return retsize;
}
tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* fmtp)
static tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* fmtp)
{
tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)codec;
tsk_params_L_t* params = tsk_null;
@ -414,7 +419,7 @@ tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* f
TSK_DEBUG_INFO("Trying to match [%s]", fmtp);
/* Check whether the profile match (If the profile is missing, then we consider that it's ok) */
if(((profile = tdav_codec_h264_get_profile(fmtp)) != tdav_codec_h264_bp99) && (profile != h264->profile)){
if(((profile = tdav_codec_h264_common_get_profile(fmtp)) != tdav_codec_h264_bp99) && (profile != TDAV_CODEC_H264_COMMON(h264)->profile)){
TSK_DEBUG_INFO("Profile not matching");
return tsk_false;
}
@ -437,7 +442,7 @@ tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* f
/* === packetization-mode ===*/
if((val_int = tsk_params_get_param_value_as_int(params, "packetization-mode")) != -1){
if((packetization_mode_t)val_int == Single_NAL_Unit_Mode || (packetization_mode_t)val_int == Non_Interleaved_Mode){
h264->pack_mode = (packetization_mode_t)val_int;
TDAV_CODEC_H264_COMMON(h264)->pack_mode = (packetization_mode_t)val_int;
}
else{
TSK_DEBUG_INFO("packetization-mode not matching");
@ -475,10 +480,10 @@ tsk_bool_t tdav_codec_h264_fmtp_match(const tmedia_codec_t* codec, const char* f
// sx >>= 1;
// sy >>= 1;
//}
TMEDIA_CODEC_VIDEO(h264)->out.width = sx&(~1), TMEDIA_CODEC_VIDEO(h264)->in.height = sy&(~1);
TMEDIA_CODEC_VIDEO(h264)->out.width = sx&(~1), TMEDIA_CODEC_VIDEO(h264)->out.height = sy&(~1);
}
else{
TMEDIA_CODEC_VIDEO(h264)->out.width = std_w, TMEDIA_CODEC_VIDEO(h264)->in.height = std_h;
TMEDIA_CODEC_VIDEO(h264)->out.width = std_w, TMEDIA_CODEC_VIDEO(h264)->out.height = std_h;
}
}
}
@ -488,7 +493,7 @@ bail:
return ret;
}
char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self)
static char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self)
{
tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
char* fmtp = tsk_null;
@ -498,7 +503,7 @@ char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self)
return tsk_null;
}
switch(h264->profile){
switch(TDAV_CODEC_H264_COMMON(h264)->profile){
case tdav_codec_h264_bp10:
fmtp = tsk_strdup("profile-level-id=42e00a");
break;
@ -512,7 +517,7 @@ char* tdav_codec_h264_fmtp_get(const tmedia_codec_t* self)
if(fmtp){
tsk_strcat_2(&fmtp, "; packetization-mode=%d; max-br=%d; max-mbps=%d",
h264->pack_mode, TMEDIA_CODEC_VIDEO(h264)->in.max_br/1000, TMEDIA_CODEC_VIDEO(h264)->in.max_mbps/1000);
TDAV_CODEC_H264_COMMON(h264)->pack_mode, TMEDIA_CODEC_VIDEO(h264)->in.max_br/1000, TMEDIA_CODEC_VIDEO(h264)->in.max_mbps/1000);
}
return fmtp;
@ -540,7 +545,7 @@ static tsk_object_t* tdav_codec_h264_bp10_dtor(tsk_object_t * self)
tdav_codec_h264_t *h264 = self;
if(h264){
/* deinit base */
tmedia_codec_video_deinit(self);
tdav_codec_h264_common_deinit(self);
/* deinit self */
tdav_codec_h264_deinit(h264);
@ -604,7 +609,7 @@ static tsk_object_t* tdav_codec_h264_bp20_dtor(tsk_object_t * self)
tdav_codec_h264_t *h264 = self;
if(h264){
/* deinit base */
tmedia_codec_video_deinit(self);
tdav_codec_h264_common_deinit(self);
/* deinit self */
tdav_codec_h264_deinit(h264);
@ -668,7 +673,7 @@ static tsk_object_t* tdav_codec_h264_bp30_dtor(tsk_object_t * self)
tdav_codec_h264_t *h264 = self;
if(h264){
/* deinit base */
tmedia_codec_video_deinit(self);
tdav_codec_h264_common_deinit(self);
/* deinit self */
tdav_codec_h264_deinit(h264);
@ -757,9 +762,14 @@ int tdav_codec_h264_init(tdav_codec_h264_t* self, tdav_codec_h264_profile_t prof
TSK_DEBUG_ERROR("Invalid parameter");
return -1;
}
if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){
TSK_DEBUG_ERROR("tdav_codec_h264_common_init() faile with error code=%d", ret);
return ret;
}
self->pack_mode = H264_PACKETIZATION_MODE;
self->profile = profile;
TDAV_CODEC_H264_COMMON(self)->pack_mode = H264_PACKETIZATION_MODE;
TDAV_CODEC_H264_COMMON(self)->profile = profile;
TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000;
TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000;
@ -793,102 +803,34 @@ int tdav_codec_h264_deinit(tdav_codec_h264_t* self)
self->decoder.codec = tsk_null;
// FFMpeg resources are destroyed by close()
TSK_FREE(self->rtp.ptr);
self->rtp.size = 0;
return 0;
}
tdav_codec_h264_profile_t tdav_codec_h264_get_profile(const char* fmtp)
{
tdav_codec_h264_profile_t profile = tdav_codec_h264_bp99;
tsk_size_t size = tsk_strlen(fmtp);
int start, end;
if((start = tsk_strindexOf(fmtp, size, "profile-level-id")) !=-1){
tsk_param_t* param;
if((end = tsk_strindexOf((fmtp+start), (size-start), ";")) == -1){
end = size;
}
if((param = tsk_params_parse_param((fmtp+start), (end-start)))){
profile_idc_t p_idc;
level_idc_t l_idc;
if(param->value){
tsk_strtrim_both(&param->value);
}
tdav_codec_h264_parse_profile(param->value, &p_idc, tsk_null, &l_idc);
switch(p_idc){
case profile_idc_baseline:
switch(l_idc){
case level_idc_1_0:
case level_idc_1_b:
case level_idc_1_1:
case level_idc_1_2:
case level_idc_1_3:
profile = tdav_codec_h264_bp10;
break;
case level_idc_2_0:
case level_idc_2_1:
case level_idc_2_2:
profile = tdav_codec_h264_bp20;
break;
case level_idc_3_0:
profile = tdav_codec_h264_bp30;
break;
}
break;
case profile_idc_extended:
case profile_idc_main:
case profile_idc_high:
default:
/* Not supported */
break;
}
TSK_OBJECT_SAFE_FREE(param);
}
}
return profile;
}
static void tdav_codec_h264_encap(const tdav_codec_h264_t* h264, const uint8_t* pdata, tsk_size_t size)
{
register uint32_t i;
uint32_t last_scp, prev_scp;
static uint32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 ..but */
register int32_t i;
int32_t last_scp, prev_scp;
static int32_t size_of_scp = sizeof(H264_START_CODE_PREFIX); /* we know it's equal to 4 ..but */
if(!pdata || !size){
return;
}
last_scp = 0, prev_scp = 0;
/*
#if 1
if(size < H264_RTP_PAYLOAD_SIZE){
goto last;
}
#else
goto last;
#endif
*/
for(i = size_of_scp; i<(size - size_of_scp); i++){
for(i = size_of_scp; i<(int32_t)(size - size_of_scp); i++){
if(pdata[i] == H264_START_CODE_PREFIX[0] && pdata[i+1] == H264_START_CODE_PREFIX[1] && pdata[i+2] == H264_START_CODE_PREFIX[2] && pdata[i+3] == H264_START_CODE_PREFIX[3]){ /* Found Start Code Prefix */
prev_scp = last_scp;
if((i - last_scp) >= H264_RTP_PAYLOAD_SIZE || 1){
tdav_codec_h264_rtp_callback((tdav_codec_h264_t*) h264, pdata + prev_scp,
tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + prev_scp,
(i - prev_scp), (prev_scp == size));
}
last_scp = i;
}
}
//last:
if(last_scp < size){
tdav_codec_h264_rtp_callback((tdav_codec_h264_t*) h264, pdata + last_scp,
if(last_scp < (int32_t)size){
tdav_codec_h264_rtp_callback(TDAV_CODEC_H264_COMMON(h264), pdata + last_scp,
(size - last_scp), tsk_true);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -29,9 +29,7 @@
*/
#include "tinydav/codecs/h264/tdav_codec_h264_rtp.h"
#include "tinydav/codecs/h264/tdav_codec_h264.h"
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
#include "tinydav/codecs/h264/tdav_codec_h264_common.h"
#include "tinymedia/tmedia_codec.h"
@ -164,11 +162,10 @@ int tdav_codec_h264_get_pay(const void* in_data, tsk_size_t in_size, const void*
case stap_b:
case mtap16:
case mtap24:
case fu_b:
break;
case fu_a:
return tdav_codec_h264_get_fua_pay(pdata, in_size, out_data, out_size, append_scp);
case fu_b:
return -1;
default: /* NAL unit (1-23) */
return tdav_codec_h264_get_nalunit_pay(pdata, in_size, out_data, out_size);
}
@ -278,12 +275,11 @@ int tdav_codec_h264_get_nalunit_pay(const uint8_t* in_data, tsk_size_t in_size,
return 0;
}
#if TDAV_UNDER_WINDOWS
# include "tsk_thread.h"
#endif
void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *data, tsk_size_t size, tsk_bool_t marker)
void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker)
{
uint8_t* pdata = (uint8_t*)data;
uint8_t* pdata = (uint8_t*)data;
//TSK_DEBUG_INFO("%x %x %x %x -- %u", pdata[0], pdata[1], pdata[2], pdata[3], size);
if(size>4 && pdata[0] == H264_START_CODE_PREFIX[0] && pdata[1] == H264_START_CODE_PREFIX[1]){
if(pdata[2] == H264_START_CODE_PREFIX[3]){
@ -304,10 +300,6 @@ void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *da
}
}
else if(size > H264_NAL_UNIT_TYPE_HEADER_SIZE){
#if TDAV_UNDER_WINDOWS
tsk_bool_t burst = ((size/H264_RTP_PAYLOAD_SIZE) > 5);
int count = 0;
#endif
/* Should be Fragmented as FUA */
uint8_t fua_hdr[H264_FUA_HEADER_SIZE]; /* "FU indicator" and "FU header" - 2bytes */
fua_hdr[0] = pdata[0] & 0x60/* F=0 */, fua_hdr[0] |= fu_a;
@ -343,14 +335,7 @@ void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_s *self, const void *da
// send data
if(TMEDIA_CODEC_VIDEO(self)->callback){
TMEDIA_CODEC_VIDEO(self)->callback(TMEDIA_CODEC_VIDEO(self)->callback_data, self->rtp.ptr, (packet_size + H264_FUA_HEADER_SIZE), (3003* (30/TMEDIA_CODEC_VIDEO(self)->out.fps)), (size == 0));
#if TDAV_UNDER_WINDOWS// FIXME: WinSock problem: Why do we get packet lost (burst case only)?
if(burst && (++count % 2 == 0)){
tsk_thread_sleep(1); // 1 millisecond
}
#endif
}
}
}
}
#endif /* HAVE_FFMPEG */

View File

@ -290,11 +290,6 @@ tsk_size_t tdav_codec_mp4ves_encode(tmedia_codec_t* self, const void* in_data, t
TSK_DEBUG_ERROR("Invalid size");
return 0;
}
// Flip
if(self->video.flip.encoded){
tdav_converter_video_flip(mp4v->encoder.picture, mp4v->encoder.context->height);
}
mp4v->encoder.picture->pts = AV_NOPTS_VALUE;
mp4v->encoder.picture->quality = mp4v->encoder.context->global_quality;
@ -369,10 +364,7 @@ tsk_size_t tdav_codec_mp4ves_decode(tmedia_codec_t* self, const void* in_data, t
retsize = xsize;
TMEDIA_CODEC_VIDEO(mp4v)->in.width = mp4v->decoder.context->width;
TMEDIA_CODEC_VIDEO(mp4v)->in.height = mp4v->decoder.context->height;
// flip
if(self->video.flip.decoded){
tdav_converter_video_flip(mp4v->decoder.picture, mp4v->decoder.context->height);
}
/* copy picture into a linear buffer */
avpicture_layout((AVPicture *)mp4v->decoder.picture, mp4v->decoder.context->pix_fmt, mp4v->decoder.context->width, mp4v->decoder.context->height,
*out_data, retsize);

View File

@ -261,11 +261,6 @@ tsk_size_t tdav_codec_theora_encode(tmedia_codec_t* self, const void* in_data, t
return 0;
}
// Flip
if(self->video.flip.encoded){
tdav_converter_video_flip(theora->encoder.picture, theora->encoder.context->height);
}
// Encode data
//theora->encoder.picture->pts = tsk_time_now();
theora->encoder.picture->pts = AV_NOPTS_VALUE;
@ -365,9 +360,7 @@ tsk_size_t tdav_codec_theora_decode(tmedia_codec_t* self, const void* in_data, t
retsize = xsize;
TMEDIA_CODEC_VIDEO(theora)->in.width = theora->decoder.context->width;
TMEDIA_CODEC_VIDEO(theora)->in.height = theora->decoder.context->height;
if(self->video.flip.decoded){
tdav_converter_video_flip(theora->decoder.picture, theora->decoder.context->height);
}
/* allocate buffer */
if(*out_max_size <xsize){
if((*out_data = tsk_realloc(*out_data, xsize))){

View File

@ -32,6 +32,10 @@
#if HAVE_LIBVPX
#if TDAV_UNDER_WINDOWS
# include <windows.h>
#endif
#include "tinyrtp/rtp/trtp_rtp_packet.h"
#include "tsk_memory.h"
@ -39,9 +43,15 @@
#include "tsk_debug.h"
#define TDAV_VP8_PAY_DESC_SIZE 1 /* |X|R|N|S|PartID| */
#define TDAV_SYSTEM_CORES_COUNT 1
#define TDAV_VP8_GOP_SIZE_IN_SECONDS 2
#define TDAV_VP8_RTP_PAYLOAD_MAX_SIZE 950
#define TDAV_SYSTEM_CORES_COUNT 0
#define TDAV_VP8_GOP_SIZE_IN_SECONDS 5
#define TDAV_VP8_RTP_PAYLOAD_MAX_SIZE 1050
#if !defined(TDAV_VP8_MAX_BANDWIDTH_KB)
# define TDAV_VP8_MAX_BANDWIDTH_KB 6000
#endif
#if !defined(TDAV_VP8_MIN_BANDWIDTH_KB)
# define TDAV_VP8_MIN_BANDWIDTH_KB 100
#endif
#define vp8_interface_enc (vpx_codec_vp8_cx())
#define vp8_interface_dec (vpx_codec_vp8_dx())
@ -57,9 +67,12 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self)
vpx_codec_enc_cfg_t enc_cfg;
vpx_codec_dec_cfg_t dec_cfg;
vpx_codec_caps_t dec_caps;
vpx_enc_frame_flags_t enc_flags;
vpx_codec_flags_t dec_flags = 0;
vpx_codec_err_t vpx_ret;
static vp8_postproc_cfg_t __pp = { VP8_DEBLOCK | VP8_DEMACROBLOCK, 4, 0};
if(!vp8){
TSK_DEBUG_ERROR("Invalid parameter");
@ -79,15 +92,27 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self)
enc_cfg.g_timebase.den = TMEDIA_CODEC_VIDEO(vp8)->out.fps;
enc_cfg.rc_target_bitrate = TMEDIA_CODEC_VIDEO(vp8)->out.width * TMEDIA_CODEC_VIDEO(vp8)->out.height * enc_cfg.rc_target_bitrate
/ enc_cfg.g_w / enc_cfg.g_h;
enc_cfg.rc_target_bitrate = TSK_CLAMP(TDAV_VP8_MIN_BANDWIDTH_KB, enc_cfg.rc_target_bitrate>>=0, TDAV_VP8_MAX_BANDWIDTH_KB);
enc_cfg.rc_end_usage = VPX_CBR;
enc_cfg.g_w = TMEDIA_CODEC_VIDEO(vp8)->out.width;
enc_cfg.g_h = TMEDIA_CODEC_VIDEO(vp8)->out.height;
enc_cfg.kf_mode = VPX_KF_DISABLED;
enc_cfg.g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT;
enc_cfg.g_lag_in_frames = 0;
#if TDAV_UNDER_WINDOWS
{
SYSTEM_INFO SystemInfo;
GetSystemInfo(&SystemInfo);
enc_cfg.g_threads = SystemInfo.dwNumberOfProcessors;
}
#else
enc_cfg.g_threads = TDAV_SYSTEM_CORES_COUNT;
#endif
enc_cfg.g_pass = VPX_RC_ONE_PASS;
enc_cfg.rc_min_quantizer = 4;
enc_cfg.rc_max_quantizer = 56;
enc_cfg.rc_min_quantizer = TSK_CLAMP(enc_cfg.rc_min_quantizer, 10, enc_cfg.rc_max_quantizer);
enc_cfg.rc_max_quantizer = TSK_CLAMP(enc_cfg.rc_min_quantizer, 51, enc_cfg.rc_max_quantizer);
enc_cfg.rc_resize_allowed = 0;
//enc_cfg.g_profile = 1;
enc_flags = 0; //VPX_EFLAG_XXX
@ -99,20 +124,44 @@ static int tdav_codec_vp8_open(tmedia_codec_t* self)
vp8->encoder.gop_size = TDAV_VP8_GOP_SIZE_IN_SECONDS * TMEDIA_CODEC_VIDEO(vp8)->out.fps;
vp8->encoder.initialized = tsk_true;
//vpx_codec_control(&vp8->encoder.context, VP8E_SET_CPUUSED, 0);
//vpx_codec_control(&vp8->encoder.context, VP8E_SET_SHARPNESS, 7);
//vpx_codec_control(&vp8->encoder.context, VP8E_SET_ENABLEAUTOALTREF, 1);
//
// Decoder
//
dec_cfg.w = TMEDIA_CODEC_VIDEO(vp8)->out.width;
dec_cfg.h = TMEDIA_CODEC_VIDEO(vp8)->out.height;
#if TDAV_UNDER_WINDOWS
{
SYSTEM_INFO SystemInfo;
GetSystemInfo(&SystemInfo);
dec_cfg.threads = SystemInfo.dwNumberOfProcessors;
}
#else
dec_cfg.threads = TDAV_SYSTEM_CORES_COUNT;
#endif
dec_caps = vpx_codec_get_caps(&vpx_codec_vp8_dx_algo);
if(dec_caps & VPX_CODEC_CAP_POSTPROC){
dec_flags |= VPX_CODEC_USE_POSTPROC;
}
if(dec_caps & VPX_CODEC_CAP_ERROR_CONCEALMENT){
dec_flags |= VPX_CODEC_USE_ERROR_CONCEALMENT;
}
if((vpx_ret = vpx_codec_dec_init(&vp8->decoder.context, vp8_interface_dec, &dec_cfg, dec_flags)) != VPX_CODEC_OK){
TSK_DEBUG_ERROR("vpx_codec_dec_init failed with error =%s", vpx_codec_err_to_string(vpx_ret));
return -4;
}
vp8->decoder.initialized = tsk_true;
if((vpx_ret = vpx_codec_control(&vp8->decoder.context, VP8_SET_POSTPROC, &__pp))){
TSK_DEBUG_WARN("vpx_codec_dec_init failed with error =%s", vpx_codec_err_to_string(vpx_ret));
}
vp8->decoder.initialized = tsk_true;
return 0;
}
@ -162,10 +211,11 @@ static tsk_size_t tdav_codec_vp8_encode(tmedia_codec_t* self, const void* in_dat
return 0;
}
// flip
if(self->video.flip.encoded){
#if !HAVE_FFMPEG// convert flip use FFmpeg
if(TMEDIA_CODEC_VIDEO(self)->out.flip){
vpx_img_flip(&image);
}
#endif
// encode data
++vp8->encoder.pts;
@ -208,8 +258,9 @@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat
tdav_codec_vp8_t* vp8 = (tdav_codec_vp8_t*)self;
const trtp_rtp_header_t* rtp_hdr = proto_hdr;
const uint8_t* pdata = in_data;
tsk_size_t xmax_size, ret = 0;
tsk_size_t ret = 0;
uint8_t X, R, N, S, PartID; // |X|R|N|S|PartID|
static tsk_size_t xmax_size = (1920 * 1080 * 3) >> 3;
if(!self || !in_data || in_size<1 || !out_data || !vp8->decoder.initialized){
TSK_DEBUG_ERROR("Invalid parameter");
@ -254,11 +305,10 @@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat
goto bail;
}
TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
vp8->decoder.frame_corrupted = tsk_true;
}
vp8->decoder.last_seq = rtp_hdr->seq_num;
xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1;
// start-accumulator
if(!vp8->decoder.accumulator){
if(in_size > xmax_size){
@ -295,36 +345,55 @@ static tsk_size_t tdav_codec_vp8_decode(tmedia_codec_t* self, const void* in_dat
// (vp8->decoder.last_PartID == 0 && vp8->decoder.last_S && S) => previous was "first decodable" and current is new one
if(rtp_hdr->marker /*|| (vp8->decoder.last_PartID == 0 && vp8->decoder.last_S)*/){
vpx_image_t *img;
vpx_codec_iter_t iter = tsk_null;
vpx_codec_err_t vpx_ret = vpx_codec_decode(&vp8->decoder.context, vp8->decoder.accumulator, vp8->decoder.accumulator_pos, tsk_null, 0);
vpx_codec_iter_t iter = tsk_null;
vpx_codec_err_t vpx_ret;
tsk_size_t pay_size = vp8->decoder.accumulator_pos;
// in all cases: reset accumulator
vp8->decoder.accumulator_pos = 0;
// libvpx will crash very ofen when the frame is corrupted => for now we decided not to decode such frame
// according to the latest release there is a function to check if the frame
// is corrupted or not => To be checked
if(vp8->decoder.frame_corrupted){
vp8->decoder.frame_corrupted = tsk_false;
goto bail;
}
vpx_ret = vpx_codec_decode(&vp8->decoder.context, vp8->decoder.accumulator, pay_size, tsk_null, 0);
if(vpx_ret != VPX_CODEC_OK){
TSK_DEBUG_ERROR("vpx_codec_decode failed with error =%s", vpx_codec_err_to_string(vpx_ret));
goto bail;
}
// update sizes
TMEDIA_CODEC_VIDEO(vp8)->in.width = vp8->decoder.context.config.dec->w;
TMEDIA_CODEC_VIDEO(vp8)->in.height = vp8->decoder.context.config.dec->h;
xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1;
// allocate destination buffer
if(*out_max_size <xmax_size){
if(!(*out_data = tsk_realloc(*out_data, xmax_size))){
TSK_DEBUG_ERROR("Failed to allocate new buffer");
vp8->decoder.accumulator_pos = 0;
*out_max_size = 0;
goto bail;
}
*out_max_size = xmax_size;
}
// copy decoded data
ret = 0;
while((img = vpx_codec_get_frame(&vp8->decoder.context, &iter))){
unsigned int plane, y;
// flip
if(self->video.flip.decoded){
// update sizes
TMEDIA_CODEC_VIDEO(vp8)->in.width = img->d_w;
TMEDIA_CODEC_VIDEO(vp8)->in.height = img->d_h;
xmax_size = (TMEDIA_CODEC_VIDEO(vp8)->in.width * TMEDIA_CODEC_VIDEO(vp8)->in.height * 3) >> 1;
// allocate destination buffer
if(*out_max_size <xmax_size){
if(!(*out_data = tsk_realloc(*out_data, xmax_size))){
TSK_DEBUG_ERROR("Failed to allocate new buffer");
vp8->decoder.accumulator_pos = 0;
*out_max_size = 0;
goto bail;
}
*out_max_size = xmax_size;
}
#if !HAVE_FFMPEG// convert flip use FFmpeg
if(TMEDIA_CODEC_VIDEO(vp8)->in.flip){
vpx_img_flip(img);
}
#endif
// layout picture
for(plane=0; plane < 3; plane++) {
unsigned char *buf =img->planes[plane];
for(y=0; y<img->d_h >> (plane ? 1 : 0); y++) {
@ -358,11 +427,11 @@ static tsk_bool_t tdav_codec_vp8_fmtp_match(const tmedia_codec_t* codec, const c
TSK_DEBUG_ERROR("Failed to match fmtp=%s", fmtp);
return tsk_false;
}
TMEDIA_CODEC_VIDEO(codec)->in.width = TMEDIA_CODEC_VIDEO(codec)->out.width = width;
TMEDIA_CODEC_VIDEO(codec)->in.height = TMEDIA_CODEC_VIDEO(codec)->out.height = height;
TMEDIA_CODEC_VIDEO(codec)->in.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps = fps;
return tsk_true;
}
@ -491,17 +560,30 @@ static void tdav_codec_vp8_encap(tdav_codec_vp8_t* self, const vpx_codec_cx_pkt_
// first partition (contains modes and motion vectors)
part_ID = 0; // The first VP8 partition(containing modes and motion vectors) MUST be labeled with PartID = 0
part_start = tsk_true;
part_size = (frame_ptr[2] << 16) | (frame_ptr[1] << 8) | frame_ptr[0];
part_size = (part_size >> 5) & 0x7FFFF;
if(part_size > pkt_size){
TSK_DEBUG_ERROR("part_size is > pkt_size(%u,%u)", part_size, pkt_size);
return;
}
part_start = tsk_true;
#if 0 // The first partition could be as big as 10kb for HD 720p video frames => we have to split it
tdav_codec_vp8_rtp_callback(self, &frame_ptr[index], part_size, part_ID, part_start, non_ref, (index + part_size)==pkt_size);
index += part_size;
#else
// first,first,....partitions (or fragment if part_size > TDAV_VP8_RTP_PAYLOAD_MAX_SIZE)
while(index<part_size){
uint32_t frag_size = TSK_MIN(TDAV_VP8_RTP_PAYLOAD_MAX_SIZE, (part_size - index));
tdav_codec_vp8_rtp_callback(self, &frame_ptr[index], frag_size, part_ID, part_start, non_ref, tsk_false);
part_start = tsk_false;
index += frag_size;
}
#endif
// second,third,... partitions (or fragment if part_size > TDAV_VP8_RTP_PAYLOAD_MAX_SIZE)
part_start = tsk_true;
while(index<pkt_size){
if(part_start){
/* PartID SHOULD be incremented for each subsequent partition,

View File

@ -57,6 +57,7 @@
#include "tinydav/codecs/h261/tdav_codec_h261.h"
#include "tinydav/codecs/h263/tdav_codec_h263.h"
#include "tinydav/codecs/h264/tdav_codec_h264.h"
#include "tinydav/codecs/h264/tdav_codec_h264_cuda.h"
#include "tinydav/codecs/theora/tdav_codec_theora.h"
#include "tinydav/codecs/mp4ves/tdav_codec_mp4ves.h"
#include "tinydav/codecs/vpx/tdav_codec_vp8.h"
@ -98,11 +99,17 @@
#else
# include "tinydav/audio/tdav_speakup_jitterbuffer.h"
#endif
#if TELEPRESENCE
# include "tinydav/video/tdav_video_jitterbuffer.h"
#endif
#if HAVE_FFMPEG
# include <libavcodec/avcodec.h>
#endif
static inline tsk_bool_t _tdav_codec_is_supported(tdav_codec_id_t codec, const tmedia_codec_plugin_def_t* plugin);
int tdav_init()
{
int ret = 0;
@ -181,6 +188,13 @@ int tdav_init()
#if HAVE_LIBVPX
tmedia_codec_plugin_register(tdav_codec_vp8_plugin_def_t);
#endif
#if HAVE_CUDA
if(tdav_codec_h264_cuda_is_supported()){
tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp10_plugin_def_t);
tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp20_plugin_def_t);
tmedia_codec_plugin_register(tdav_codec_h264_cuda_bp30_plugin_def_t);
}
#endif
#if HAVE_FFMPEG
tmedia_codec_plugin_register(tdav_codec_mp4ves_plugin_def_t);
# if !defined(HAVE_H264) || HAVE_H264
@ -253,6 +267,9 @@ int tdav_init()
#else
tmedia_jitterbuffer_plugin_register(tdav_speakup_jitterbuffer_plugin_def_t);
#endif
#if TELEPRESENCE
tmedia_jitterbuffer_plugin_register(tdav_video_jitterbuffer_plugin_def_t);
#endif
return ret;
}
@ -292,8 +309,14 @@ static tdav_codec_decl_t __codecs[] = {
{ tdav_codec_id_vp8, &tdav_codec_vp8_plugin_def_t },
#endif
#if HAVE_CUDA
// tdav_codec_h264_cuda_is_supported() will be used to check availability at runtime
{ tdav_codec_id_h264_bp30, &tdav_codec_h264_cuda_bp30_plugin_def_t },
{ tdav_codec_id_h264_bp20, &tdav_codec_h264_cuda_bp20_plugin_def_t },
{ tdav_codec_id_h264_bp10, &tdav_codec_h264_cuda_bp10_plugin_def_t },
#endif
#if HAVE_FFMPEG
# if !defined(HAVE_H264) || HAVE_H264
# if (!defined(HAVE_H264) || HAVE_H264) || HAVE_CUDA
{ tdav_codec_id_h264_bp30, &tdav_codec_h264_bp30_plugin_def_t },
{ tdav_codec_id_h264_bp20, &tdav_codec_h264_bp20_plugin_def_t },
{ tdav_codec_id_h264_bp10, &tdav_codec_h264_bp10_plugin_def_t },
@ -343,17 +366,19 @@ void tdav_set_codecs(tdav_codec_id_t codecs)
{
int i;
for(i=0; i<sizeof(__codecs)/sizeof(tdav_codec_decl_t); i++){
for(i=0; i<sizeof(__codecs)/sizeof(tdav_codec_decl_t); ++i){
if((codecs & __codecs[i].id)){
tmedia_codec_plugin_register(*__codecs[i].plugin);
if(_tdav_codec_is_supported(__codecs[i].id, *(__codecs[i].plugin))){
tmedia_codec_plugin_register(*(__codecs[i].plugin));
}
}
else{
tmedia_codec_plugin_unregister(*__codecs[i].plugin);
tmedia_codec_plugin_unregister(*(__codecs[i].plugin));
}
}
}
tsk_bool_t tdav_codec_is_supported(tdav_codec_id_t codec)
tsk_bool_t _tdav_codec_is_supported(tdav_codec_id_t codec, const tmedia_codec_plugin_def_t* plugin)
{
switch(codec){
@ -435,11 +460,25 @@ tsk_bool_t tdav_codec_is_supported(tdav_codec_id_t codec)
case tdav_codec_id_h264_bp10:
case tdav_codec_id_h264_bp20:
case tdav_codec_id_h264_bp30:
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
return tsk_true;
#else
return tsk_false;
{
if(plugin){
#if HAVE_CUDA
if(tdav_codec_h264_is_cuda_plugin(plugin) && tdav_codec_h264_cuda_is_supported()) return tsk_true;
#endif
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
if(tdav_codec_h264_is_ffmpeg_plugin(plugin)) return tsk_true;
#endif
}
else{
#if HAVE_CUDA
if(tdav_codec_h264_cuda_is_supported()) return tsk_true;
#endif
#if HAVE_FFMPEG && (!defined(HAVE_H264) || HAVE_H264)
return tsk_true;
#endif
}
return tsk_false;
}
case tdav_codec_id_amr_wb_oa:
case tdav_codec_id_amr_wb_be:
@ -450,6 +489,11 @@ tsk_bool_t tdav_codec_is_supported(tdav_codec_id_t codec)
}
}
tsk_bool_t tdav_codec_is_supported(tdav_codec_id_t codec)
{
return _tdav_codec_is_supported(codec, tsk_null);
}
int tdav_deinit()
{
int ret = 0;
@ -505,6 +549,13 @@ int tdav_deinit()
#if HAVE_LIBVPX
tmedia_codec_plugin_unregister(tdav_codec_vp8_plugin_def_t);
#endif
#if HAVE_CUDA
if(tdav_codec_h264_cuda_is_supported()){
tmedia_codec_plugin_unregister(tdav_codec_h264_cuda_bp10_plugin_def_t);
tmedia_codec_plugin_unregister(tdav_codec_h264_cuda_bp20_plugin_def_t);
tmedia_codec_plugin_unregister(tdav_codec_h264_cuda_bp30_plugin_def_t);
}
#endif
#if HAVE_FFMPEG
tmedia_codec_plugin_unregister(tdav_codec_mp4ves_plugin_def_t);
tmedia_codec_plugin_unregister(tdav_codec_h261_plugin_def_t);
@ -576,6 +627,9 @@ int tdav_deinit()
#else
tmedia_jitterbuffer_plugin_unregister(tdav_speakup_jitterbuffer_plugin_def_t);
#endif
#if TELEPRESENCE
tmedia_jitterbuffer_plugin_unregister(tdav_video_jitterbuffer_plugin_def_t);
#endif
return ret;
}

View File

@ -0,0 +1,203 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
/**@file tdav_consumer_video.c
* @brief Base class for all Video consumers.
*
* @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*/
#include "tinydav/video/tdav_consumer_video.h"
#include "tinymedia/tmedia_jitterbuffer.h"
#include "tinyrtp/rtp/trtp_rtp_header.h"
#include "tsk_debug.h"
#define TDAV_VIDEO_DEFAULT_WIDTH 176
#define TDAV_VIDEO_DEFAULT_HEIGHT 144
#define TDAV_VIDEO_DEFAULT_FPS 15
#define TDAV_VIDEO_DEFAULT_AUTORESIZE tsk_true
/** Initialize video consumer */
int tdav_consumer_video_init(tdav_consumer_video_t* self)
{
	int ret;
	if(!self){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	/* base */
	if((ret = tmedia_consumer_init(TMEDIA_CONSUMER(self)))){
		return ret;
	}
	/* self (should be update by prepare() by using the codec's info)*/
	TMEDIA_CONSUMER(self)->video.fps = TDAV_VIDEO_DEFAULT_FPS;
	TMEDIA_CONSUMER(self)->video.display.width = TDAV_VIDEO_DEFAULT_WIDTH;
	TMEDIA_CONSUMER(self)->video.display.height = TDAV_VIDEO_DEFAULT_HEIGHT;
	TMEDIA_CONSUMER(self)->video.display.auto_resize = TDAV_VIDEO_DEFAULT_AUTORESIZE;
	/* self:jitterbuffer */
	// NOTE(review): creation failure is only a warning, so self->jitterbuffer may
	// stay tsk_null after init — every user (put/get/tick/reset) must guard for it
	if(!self->jitterbuffer && !(self->jitterbuffer = tmedia_jitterbuffer_create(tmedia_video))){
		TSK_DEBUG_WARN("Failed to video create jitter buffer");
	}
	if(self->jitterbuffer){
		tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
	}
	tsk_safeobj_init(self);
	return 0;
}
/**
* Generic function to compare two consumers.
* @param consumer1 The first consumer to compare.
* @param consumer2 The second consumer to compare.
* @retval Returns an integral value indicating the relationship between the two consumers:
* <0 : @a consumer1 less than @a consumer2.<br>
* 0 : @a consumer1 identical to @a consumer2.<br>
* >0 : @a consumer1 greater than @a consumer2.<br>
*/
int tdav_consumer_video_cmp(const tsk_object_t* consumer1, const tsk_object_t* consumer2)
{
return (TDAV_CONSUMER_VIDEO(consumer1) - TDAV_CONSUMER_VIDEO(consumer2));
}
/** Applies a parameter to the video consumer.
 * No video-consumer-specific parameters are handled yet; the call only
 * validates its arguments.
 * @retval Zero if succeed and non-zero error code otherwise.
 */
int tdav_consumer_video_set(tdav_consumer_video_t* self, const tmedia_param_t* param)
{
	if(self){
		return 0;
	}
	TSK_DEBUG_ERROR("Invalid parameter");
	return -1;
}
/** Pushes an RTP payload into the consumer's video jitter buffer.
 * @param self The video consumer.
 * @param data The payload to buffer (copied by the jitter buffer).
 * @param data_size Size (in bytes) of @a data.
 * @param proto_hdr RTP header (trtp_rtp_header_t) carrying the sequence number.
 * @retval Zero if succeed and non-zero error code otherwise.
 */
int tdav_consumer_video_put(tdav_consumer_video_t* self, const void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
{
	const trtp_rtp_header_t* rtp_hdr = TRTP_RTP_HEADER(proto_hdr);
	int ret;
	if(!self || !data || !self->jitterbuffer || !rtp_hdr){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	tsk_safeobj_lock(self);
	// lazily open the jitter buffer on first use, with the current fps
	if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
		// NOTE(review): assumes video.fps != 0 (defaulted by init) — confirm
		uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps);
		uint32_t rate = 90000;//FIXME
		if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate))){
			TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
			tsk_safeobj_unlock(self);
			return ret;
		}
	}
	ret = tmedia_jitterbuffer_put(TMEDIA_JITTER_BUFFER(self->jitterbuffer), (void*)data, data_size, proto_hdr);
	tsk_safeobj_unlock(self);
	return ret;
}
/* get data from the jitter buffer (consumers should always have ptime of 20ms) */
tsk_size_t tdav_consumer_video_get(tdav_consumer_video_t* self, void* out_data, tsk_size_t out_size)
{
	tsk_size_t ret_size = 0;
	// FIX: was "if(!self && self->jitterbuffer)" which (a) dereferences a null
	// "self" because of the short-circuit evaluation order and (b) never rejects
	// a consumer whose jitter buffer failed to be created at init time
	if(!self || !self->jitterbuffer){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}
	tsk_safeobj_lock(self);
	// lazily open the jitter buffer on first use, with the current fps
	if(!TMEDIA_JITTER_BUFFER(self->jitterbuffer)->opened){
		int ret;
		// NOTE(review): assumes video.fps != 0 (defaulted by init) — confirm
		uint32_t frame_duration = (1000 / TMEDIA_CONSUMER(self)->video.fps);
		uint32_t rate = 90000;//FIXME
		if((ret = tmedia_jitterbuffer_open(TMEDIA_JITTER_BUFFER(self->jitterbuffer), frame_duration, rate))){
			TSK_DEBUG_ERROR("Failed to open jitterbuffer (%d)", ret);
			tsk_safeobj_unlock(self);
			return 0;
		}
	}
	ret_size = tmedia_jitterbuffer_get(TMEDIA_JITTER_BUFFER(self->jitterbuffer), out_data, out_size);
	tsk_safeobj_unlock(self);
	return ret_size;
}
/** Forwards the clock tick to the jitter buffer.
 * @param self The video consumer.
 * @retval Zero on success; zero is also returned on invalid parameter
 * (matching the original contract of this function).
 */
int tdav_consumer_video_tick(tdav_consumer_video_t* self)
{
	// FIX: also reject a null jitter buffer — init() only warns when the
	// jitter buffer cannot be created, so self->jitterbuffer can be tsk_null
	if(!self || !self->jitterbuffer){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}
	return tmedia_jitterbuffer_tick(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
}
/** Reset jitterbuffer */
int tdav_consumer_video_reset(tdav_consumer_video_t* self){
	int ret;
	if(!self){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	// NOTE(review): self->jitterbuffer is not checked and may be tsk_null if
	// creation failed in init() — confirm tmedia_jitterbuffer_reset() tolerates it
	tsk_safeobj_lock(self);
	ret = tmedia_jitterbuffer_reset(TMEDIA_JITTER_BUFFER(self->jitterbuffer));
	tsk_safeobj_unlock(self);
	return ret;
}
/* tsk_safeobj_lock(self); */
/* tsk_safeobj_unlock(self); */
/** DeInitialize video consumer */
int tdav_consumer_video_deinit(tdav_consumer_video_t* self)
{
	int ret;
	if(!self){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	/* base */
	if((ret = tmedia_consumer_deinit(TMEDIA_CONSUMER(self)))){
		/* return ret; */ // base failure deliberately ignored: keep releasing our own resources
	}
	/* self */
	TSK_OBJECT_SAFE_FREE(self->jitterbuffer);
	tsk_safeobj_deinit(self);
	return 0;
}

View File

@ -44,50 +44,51 @@
} \
}
static inline enum PixelFormat _tdav_converter_video_get_pixfmt(tmedia_chroma_t chroma)
{
switch(chroma){
case tmedia_chroma_rgb24:
return PIX_FMT_RGB24;
case tmedia_chroma_bgr24:
return PIX_FMT_BGR24;
case tmedia_chroma_rgb32:
return PIX_FMT_RGB32;
case tmedia_chroma_rgb565le:
return PIX_FMT_RGB565LE;
case tmedia_chroma_rgb565be:
return PIX_FMT_RGB565BE;
case tmedia_chroma_nv21:
return PIX_FMT_NV21;
case tmedia_chroma_nv12:
return PIX_FMT_NV12;
case tmedia_chroma_yuv422p:
return PIX_FMT_YUV422P;
case tmedia_chroma_uyvy422:
return PIX_FMT_UYVY422;
case tmedia_chroma_yuv420p:
return PIX_FMT_YUV420P;
default:
TSK_DEBUG_ERROR("Invalid chroma %d", (int)chroma);
return PIX_FMT_NONE;
}
}
tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t chroma, tsk_bool_t toYUV420)
tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_size_t srcHeight, tmedia_chroma_t srcChroma, tsk_size_t dstWidth, tsk_size_t dstHeight, tmedia_chroma_t dstChroma)
{
#if HAVE_FFMPEG || HAVE_SWSSCALE
tdav_converter_video_t* converter;
enum PixelFormat pixfmt;
enum PixelFormat srcPixfmt, dstPixfmt;
TSK_DEBUG_INFO("Creating new Video Converter src=(%dx%d) dst=(%dx%d)", srcWidth, srcHeight, dstWidth, dstHeight);
switch(chroma){
case tmedia_rgb24:
pixfmt = PIX_FMT_RGB24;
break;
case tmedia_bgr24:
pixfmt = PIX_FMT_BGR24;
break;
case tmedia_rgb32:
pixfmt = PIX_FMT_RGB32;
break;
case tmedia_rgb565le:
pixfmt = PIX_FMT_RGB565LE;
break;
case tmedia_rgb565be:
pixfmt = PIX_FMT_RGB565BE;
break;
case tmedia_nv21:
pixfmt = PIX_FMT_NV21;
break;
case tmedia_nv12:
pixfmt = PIX_FMT_NV12;
break;
case tmedia_yuv422p:
pixfmt = PIX_FMT_YUV422P;
break;
case tmedia_uyvy422:
pixfmt = PIX_FMT_UYVY422;
break;
case tmedia_yuv420p:
pixfmt = PIX_FMT_YUV420P;
break;
default:
TSK_DEBUG_ERROR("Invalid chroma");
return tsk_null;
if((srcPixfmt = _tdav_converter_video_get_pixfmt(srcChroma)) == PIX_FMT_NONE){
TSK_DEBUG_ERROR("Invalid source chroma");
return tsk_null;
}
if((dstPixfmt = _tdav_converter_video_get_pixfmt(dstChroma)) == PIX_FMT_NONE){
TSK_DEBUG_ERROR("Invalid destination chroma");
return tsk_null;
}
if(!(converter = tsk_object_new(tdav_converter_video_def_t))){
@ -96,8 +97,8 @@ tdav_converter_video_t* tdav_converter_video_create(tsk_size_t srcWidth, tsk_siz
}
// Set values
converter->toYUV420 = toYUV420;
converter->pixfmt = pixfmt;
converter->srcFormat = srcPixfmt;
converter->dstFormat = dstPixfmt;
converter->srcWidth = srcWidth ? srcWidth : dstWidth;
converter->srcHeight = srcHeight ? srcHeight : dstHeight;
converter->dstWidth = dstWidth ? dstWidth : srcWidth;
@ -113,17 +114,12 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
{
#if HAVE_FFMPEG || HAVE_SWSSCALE
int ret, size;
enum PixelFormat srcFormat, dstFormat;
if(!self || !buffer || !output){
TSK_DEBUG_ERROR("Invalid parameter");
return 0;
}
/* Formats */
srcFormat = self->toYUV420 ? self->pixfmt : PIX_FMT_YUV420P;
dstFormat = self->toYUV420 ? PIX_FMT_YUV420P : self->pixfmt;
/* Pictures */
if(!self->srcFrame){
if(!(self->srcFrame = avcodec_alloc_frame())){
@ -138,7 +134,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
}
}
size = avpicture_get_size(dstFormat, self->dstWidth, self->dstHeight);
size = avpicture_get_size(self->dstFormat, self->dstWidth, self->dstHeight);
if((int)*output_max_size <size){
if(!(*output = tsk_realloc(*output, (size + FF_INPUT_BUFFER_PADDING_SIZE)))){
*output_max_size = 0;
@ -149,16 +145,16 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
}
/* Wrap the source buffer */
ret = avpicture_fill((AVPicture *)self->srcFrame, (uint8_t*)buffer, srcFormat, self->srcWidth, self->srcHeight);
ret = avpicture_fill((AVPicture *)self->srcFrame, (uint8_t*)buffer, self->srcFormat, self->srcWidth, self->srcHeight);
/* Wrap the destination buffer */
ret = avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, dstFormat, self->dstWidth, self->dstHeight);
ret = avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, self->dstWidth, self->dstHeight);
/* === performs conversion === */
/* Context */
if(!self->context){
self->context = sws_getContext(
self->srcWidth, self->srcHeight, srcFormat,
self->dstWidth, self->dstHeight, dstFormat,
self->srcWidth, self->srcHeight, self->srcFormat,
self->dstWidth, self->dstHeight, self->dstFormat,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if(!self->context){
@ -166,6 +162,11 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
return 0;
}
}
// flip
if(self->flip){
tdav_converter_video_flip(self->srcFrame, self->srcHeight);
}
// chroma conversion
ret = sws_scale(self->context, (const uint8_t* const*)self->srcFrame->data, self->srcFrame->linesize, 0, self->srcHeight,
self->dstFrame->data, self->dstFrame->linesize);
@ -175,7 +176,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
}
// Rotation
if(self->rotation && (PIX_FMT_YUV420P == dstFormat) && self->rotation==90/*FIXME: For now only 90° rotation is supported */){
if(self->rotation && (PIX_FMT_YUV420P == self->dstFormat) && self->rotation==90/*FIXME: For now only 90° rotation is supported */){
// because we rotated 90 width = original height, height = original width
int w = self->dstHeight;
int h = self->dstWidth;
@ -189,7 +190,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
// allocate rotation temporary buffer
if(!self->rot.buffer){
int buff_size = avpicture_get_size(dstFormat, w, h);
int buff_size = avpicture_get_size(self->dstFormat, w, h);
if (!(self->rot.buffer = (uint8_t *)av_malloc(buff_size))){
TSK_DEBUG_ERROR("failed to allocate new buffer for the frame");
TSK_FREE(*output);
@ -198,7 +199,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
}
//wrap
avpicture_fill((AVPicture *)self->rot.frame, self->rot.buffer, dstFormat, w, h);
avpicture_fill((AVPicture *)self->rot.frame, self->rot.buffer, self->dstFormat, w, h);
// rotate
rotate90(self->dstWidth, self->dstHeight, self->dstFrame->data[0], self->rot.frame->data[0]);
rotate90(self->dstWidth/2, self->dstHeight/2, self->dstFrame->data[1], self->rot.frame->data[1]);
@ -220,14 +221,14 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
top_band = (r_h-self->dstHeight)/3;
if(!self->rot.context){
if(!(self->rot.context = sws_getContext(w, h, dstFormat, r_w, r_h, dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){
if(!(self->rot.context = sws_getContext(w, h, self->dstFormat, r_w, r_h, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){
TSK_DEBUG_ERROR("Failed to create context");
TSK_FREE(*output);
return 0;
}
}
r_size = avpicture_get_size(dstFormat, r_w, r_h);
r_size = avpicture_get_size(self->dstFormat, r_w, r_h);
if((int)*output_max_size <r_size){
if(!(*output = tsk_realloc(*output, (r_size + FF_INPUT_BUFFER_PADDING_SIZE)))){
*output_max_size = 0;
@ -238,7 +239,7 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
}
// re-wrap
avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, dstFormat, r_w, r_h);
avpicture_fill((AVPicture *)self->dstFrame, (uint8_t*)*output, self->dstFormat, r_w, r_h);
// pad
sws_scale(self->rot.context, (const uint8_t* const*)self->rot.frame->data, self->rot.frame->linesize,
@ -249,15 +250,15 @@ tsk_size_t tdav_converter_video_convert(tdav_converter_video_t* self, const void
self->dstFrame->data[1] = self->dstFrame->data[1] + ((top_band >> y_shift) * self->dstFrame->linesize[1]) + (left_band >> x_shift);
self->dstFrame->data[2] = self->dstFrame->data[2] + ((top_band >> y_shift) * self->dstFrame->linesize[2]) + (left_band >> x_shift);
avpicture_layout((const AVPicture*)self->dstFrame, dstFormat, self->dstWidth, self->dstHeight, *output, *output_max_size);
avpicture_layout((const AVPicture*)self->dstFrame, self->dstFormat, self->dstWidth, self->dstHeight, *output, *output_max_size);
}
#else // Crash
#else
// Context
if(!self->rot.context){
if(!(self->rot.context = sws_getContext(w, h, dstFormat, h, w, dstFormat, SWS_BICUBIC, NULL, NULL, NULL))){
if(!(self->rot.context = sws_getContext(w, h,self->dstFormat, h, w, self->dstFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL))){
TSK_DEBUG_ERROR("Failed to create context");
TSK_FREE(*output);
return 0;

View File

@ -25,7 +25,6 @@
*
* @author Mamadou Diop <diopmamadou(at)doubango.org>
*
*/
#include "tinydav/video/tdav_session_video.h"
@ -81,16 +80,17 @@ static int tdav_session_video_rtp_cb(const void* callback_data, const struct trt
}
// Convert decoded data to the consumer chroma and size
#define CONSUMER_INSIZE_CHANGED ((session->consumer->video.in.width * session->consumer->video.in.height * 3)/2 != out_size)// we have good reasons not to use 1.5f
#define CONSUMER_DISPLAY_NEED_RESIZE (session->consumer->video.in.width != session->consumer->video.display.width || session->consumer->video.in.height != session->consumer->video.display.height)
#define CONSUMER_DECODED_HAS_DIFF_SIZE (session->consumer->video.display.width != TMEDIA_CODEC_VIDEO(codec)->in.width || session->consumer->video.display.height != TMEDIA_CODEC_VIDEO(codec)->in.height)
#define CONSUMER_DISPLAY_NEED_CHROMACHANGE (session->consumer->video.display.chroma != tmedia_yuv420p)
#define CONSUMER_INSIZE_MISMATCH ((session->consumer->video.in.width * session->consumer->video.in.height * 3)>>1 != out_size)// we have good reasons not to use 1.5f
#define CONSUMER_IN_N_DISPLAY_MISMATCH (session->consumer->video.in.width != session->consumer->video.display.width || session->consumer->video.in.height != session->consumer->video.display.height)
#define CONSUMER_DISPLAY_N_CODEC_MISMATCH (session->consumer->video.display.width != TMEDIA_CODEC_VIDEO(codec)->in.width || session->consumer->video.display.height != TMEDIA_CODEC_VIDEO(codec)->in.height)
#define CONSUMER_DISPLAY_N_CONVERTER_MISMATCH ( (session->conv.fromYUV420 && session->conv.fromYUV420->dstWidth != session->consumer->video.display.width) || (session->conv.fromYUV420 && session->conv.fromYUV420->dstHeight != session->consumer->video.display.height) )
#define CONSUMER_CHROMA_MISMATCH (session->consumer->video.display.chroma != TMEDIA_CODEC_VIDEO(codec)->in.chroma)
#define DECODED_NEED_FLIP (TMEDIA_CODEC_VIDEO(codec)->in.flip)
if((CONSUMER_DISPLAY_NEED_CHROMACHANGE || CONSUMER_DECODED_HAS_DIFF_SIZE || CONSUMER_DISPLAY_NEED_RESIZE || CONSUMER_INSIZE_CHANGED)){
tsk_size_t _output_size;
if((CONSUMER_CHROMA_MISMATCH || CONSUMER_DISPLAY_N_CODEC_MISMATCH || CONSUMER_IN_N_DISPLAY_MISMATCH || CONSUMER_INSIZE_MISMATCH || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH || DECODED_NEED_FLIP)){
// Create video converter if not already done
if(!session->conv.fromYUV420 || CONSUMER_DECODED_HAS_DIFF_SIZE || CONSUMER_INSIZE_CHANGED){
if(!session->conv.fromYUV420 || CONSUMER_DISPLAY_N_CONVERTER_MISMATCH || CONSUMER_INSIZE_MISMATCH){
TSK_OBJECT_SAFE_FREE(session->conv.fromYUV420);
// update in (set by the codec)
session->consumer->video.in.width = TMEDIA_CODEC_VIDEO(codec)->in.width;//decoded width
@ -102,22 +102,27 @@ static int tdav_session_video_rtp_cb(const void* callback_data, const struct trt
session->consumer->video.display.height = session->consumer->video.in.height;
}
// create converter
if(!(session->conv.fromYUV420 = tdav_converter_video_create(TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, session->consumer->video.display.width, session->consumer->video.display.height,
session->consumer->video.display.chroma, tsk_false))){
if(!(session->conv.fromYUV420 = tdav_converter_video_create(TMEDIA_CODEC_VIDEO(codec)->in.width, TMEDIA_CODEC_VIDEO(codec)->in.height, TMEDIA_CODEC_VIDEO(codec)->in.chroma, session->consumer->video.display.width, session->consumer->video.display.height,
session->consumer->video.display.chroma))){
TSK_DEBUG_ERROR("Failed to create video converter");
ret = -3;
goto bail;
}
}
}
if(session->conv.fromYUV420){
// update one-shot parameters
tdav_converter_video_init(session->conv.fromYUV420, 0/*rotation*/, TMEDIA_CODEC_VIDEO(codec)->in.flip);
// convert data to the consumer's chroma
_output_size = tdav_converter_video_convert(session->conv.fromYUV420, session->decoder.buffer, &session->decoder.conv_buffer, &session->decoder.conv_buffer_size);
if(!_output_size || !session->decoder.conv_buffer){
out_size = tdav_converter_video_convert(session->conv.fromYUV420, session->decoder.buffer, &session->decoder.conv_buffer, &session->decoder.conv_buffer_size);
if(!out_size || !session->decoder.conv_buffer){
TSK_DEBUG_ERROR("Failed to convert YUV420 buffer to consumer's chroma");
ret = -4;
goto bail;
}
tmedia_consumer_consume(session->consumer, session->decoder.conv_buffer, _output_size, packet->header);
tmedia_consumer_consume(session->consumer, session->decoder.conv_buffer, out_size, packet->header);
if(!session->decoder.conv_buffer){
/* taken by the consumer */
session->decoder.conv_buffer_size = 0;
@ -189,8 +194,9 @@ static int tdav_session_video_producer_enc_cb(const void* callback_data, const v
#define PRODUCER_SIZE_CHANGED (session->conv.producerWidth != session->producer->video.width) || (session->conv.producerHeight != session->producer->video.height) \
|| (session->conv.xProducerSize != size)
#define ENCODED_NEED_FLIP TMEDIA_CODEC_VIDEO(codec)->out.flip
// Video codecs only accept YUV420P buffers ==> do conversion if needed or producer doesn't have the right size
if((session->producer->video.chroma != tmedia_yuv420p) || PRODUCER_SIZE_CHANGED){
if((session->producer->video.chroma != TMEDIA_CODEC_VIDEO(codec)->out.chroma) || PRODUCER_SIZE_CHANGED || ENCODED_NEED_FLIP){
// Create video converter if not already done or producer size has changed
if(!session->conv.toYUV420 || PRODUCER_SIZE_CHANGED){
TSK_OBJECT_SAFE_FREE(session->conv.toYUV420);
@ -198,15 +204,18 @@ static int tdav_session_video_producer_enc_cb(const void* callback_data, const v
session->conv.producerHeight = session->producer->video.height;
session->conv.xProducerSize = size;
if(!(session->conv.toYUV420 = tdav_converter_video_create(session->producer->video.width, session->producer->video.height, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height,
session->producer->video.chroma, tsk_true))){
if(!(session->conv.toYUV420 = tdav_converter_video_create(session->producer->video.width, session->producer->video.height, session->producer->video.chroma, TMEDIA_CODEC_VIDEO(codec)->out.width, TMEDIA_CODEC_VIDEO(codec)->out.height,
TMEDIA_CODEC_VIDEO(codec)->out.chroma))){
TSK_DEBUG_ERROR("Failed to create video converter");
ret = -5;
goto bail;
}
}
}
if(session->conv.toYUV420){
// update one-shot parameters
tdav_converter_video_init(session->conv.toYUV420, session->producer->video.rotation);
tdav_converter_video_init(session->conv.toYUV420, session->producer->video.rotation, TMEDIA_CODEC_VIDEO(codec)->out.flip);
// convert data to yuv420p
yuv420p_size = tdav_converter_video_convert(session->conv.toYUV420, buffer, &session->encoder.conv_buffer, &session->encoder.conv_buffer_size);
if(!yuv420p_size || !session->encoder.conv_buffer){
@ -267,7 +276,7 @@ int tmedia_session_video_set(tmedia_session_t* self, const tmedia_param_t* param
tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value);
tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs);
tsk_list_foreach(item, codecs){
((tmedia_codec_t*)item->data)->video.flip.decoded = flip;
TMEDIA_CODEC_VIDEO(item->data)->in.flip = flip;
}
tsk_object_unref(codecs);
}
@ -285,7 +294,7 @@ int tmedia_session_video_set(tmedia_session_t* self, const tmedia_param_t* param
tsk_bool_t flip = (tsk_bool_t)TSK_TO_INT32((uint8_t*)param->value);
tmedia_codecs_L_t *codecs = tsk_object_ref(self->codecs);
tsk_list_foreach(item, codecs){
((tmedia_codec_t*)item->data)->video.flip.encoded = flip;
TMEDIA_CODEC_VIDEO(item->data)->out.flip = flip;
}
tsk_object_unref(codecs);
}

View File

@ -0,0 +1,296 @@
/*
* Copyright (C) 2011 Doubango Telecom <http://www.doubango.org>
*
* Contact: Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*
* This file is part of Open Source Doubango Framework.
*
* DOUBANGO is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DOUBANGO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DOUBANGO.
*
*/
/**@file tdav_video_jitterbuffer.c
* @brief Video Jitter Buffer
*
* @author Mamadou Diop <diopmamadou(at)doubango(DOT)org>
*/
#include "tinydav/video/tdav_video_jitterbuffer.h"
#include "tinyrtp/rtp/trtp_rtp_header.h"
#include "tsk_debug.h"
#define TDAV_VIDEO_JB_TAIL 1000 // in milliseconds
// Internal functions
static tdav_video_jitterbuffer_packet_t* _tdav_video_jitterbuffer_packet_create(void* data, tsk_size_t data_size);
/* Applies a parameter to the video jitter buffer.
 * No runtime parameters are supported by this implementation yet. */
static int tdav_video_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
{
	TSK_DEBUG_ERROR("Not implemented");
	return -2;
}
/* Opens the video jitter buffer: allocates the packet list and the timer
 * manager, then derives the ring capacity from the configured tail.
 * @param frame_duration Duration (ms) of one video frame; must be non-zero.
 * @param rate RTP clock rate (currently unused by this implementation).
 * @retval Zero if succeed and non-zero error code otherwise. */
static int tdav_video_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate)
{
	tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self;
	// FIX: frame_duration is used as a divisor below — reject zero instead of crashing
	if(!frame_duration){
		TSK_DEBUG_ERROR("Invalid frame duration");
		return -1;
	}
	if(!jb->packets && !(jb->packets = tsk_list_create())){
		TSK_DEBUG_ERROR("Failed to create list");
		return -2;
	}
	if(!jb->timer && !(jb->timer = tsk_timer_manager_create())){
		// FIX: error message was a copy/paste of the list-creation message
		TSK_DEBUG_ERROR("Failed to create timer manager");
		return -3;
	}
	jb->frame_duration = frame_duration;
	// capacity of the ring = how many frame slots fit in the configured tail (ms)
	jb->frame_max_count = (jb->tail / jb->frame_duration);
	return 0;
}
/* Clock tick: intentionally a no-op — ticks only matter for the audio
 * jitter buffer; the video implementation is purely event-driven. */
static int tdav_video_jitterbuffer_tick(tmedia_jitterbuffer_t* self)
{
	(void)self;
	return 0;
}
/* Stores an RTP payload in the ring of packet slots.
 * The slot at frame_curr_index is recycled when it already exists; otherwise
 * a new slot is created and inserted in ascending seq-num order.
 * @param proto_hdr RTP header (trtp_rtp_header_t) carrying the sequence number.
 * @retval Zero if succeed and non-zero error code otherwise. */
static int tdav_video_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr)
{
	tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self;
	const trtp_rtp_header_t* rtp_hdr;
	register uint32_t i = 0;
	tsk_bool_t item_found = tsk_false;
	const tsk_list_item_t *item;
	tdav_video_jitterbuffer_packet_t *jb_packet = tsk_null;
	int ret = 0;

	if(!data || !data_size || !proto_hdr){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	rtp_hdr = TRTP_RTP_HEADER(proto_hdr);

	tsk_safeobj_lock(jb);
	// try to recycle the slot currently at the write index
	tsk_list_foreach(item, jb->packets){
		if(i++ == jb->frame_curr_index){
			if((jb_packet = (tdav_video_jitterbuffer_packet_t*)item->data)){
				if((ret = tsk_buffer_copy(jb_packet->data, 0, data, data_size))){
					TSK_DEBUG_ERROR("Failed to copy buffer");
					goto done;
				}
			}
			else{
				TSK_DEBUG_ERROR("Item contains null data");
				goto done;
			}
			// update values
			jb_packet->taken = tsk_false;
			jb_packet->seq_num = rtp_hdr->seq_num;
			item_found = tsk_true;
			break; // the write index matches at most one item — stop scanning
		}
	}
	if(!item_found && !jb_packet){
		if((jb_packet = _tdav_video_jitterbuffer_packet_create(data, data_size))){
			// update values (use constructor)
			jb_packet->seq_num = rtp_hdr->seq_num;
			tsk_list_push_ascending_data(jb->packets, (void**)&jb_packet);
			TSK_OBJECT_SAFE_FREE(jb_packet);
		}
		else{
			TSK_DEBUG_ERROR("Failed to create jb packet");
			ret = -2;
			goto done;
		}
	}

done:
	if(ret == 0){
		// FIX: was "jb->frame_curr_index = (++jb->frame_curr_index % jb->frame_max_count)"
		// which modifies frame_curr_index twice without a sequence point — undefined
		// behavior in C. Assumes frame_max_count > 0 (set by open()).
		jb->frame_curr_index = (jb->frame_curr_index + 1) % jb->frame_max_count;
	}
	tsk_safeobj_unlock(jb);
	return ret;
}
/* Pops the next not-yet-consumed packet out of the jitter buffer.
 * The first packet whose 'taken' flag is clear is copied (truncated to
 * 'out_size' if needed) into the caller's buffer and marked as consumed.
 * @param self     the jitter buffer
 * @param out_data destination buffer (required)
 * @param out_size destination capacity in bytes (required)
 * @retval number of bytes copied, zero if nothing is available */
static tsk_size_t tdav_video_jitterbuffer_get(tmedia_jitterbuffer_t* self, void* out_data, tsk_size_t out_size)
{
	tdav_video_jitterbuffer_t *jb = (tdav_video_jitterbuffer_t *)self;
	const tsk_list_item_t *curr;
	tsk_size_t copied = 0;

	if(!out_data || !out_size){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	tsk_safeobj_lock(jb);
	tsk_list_foreach(curr, jb->packets){
		tdav_video_jitterbuffer_packet_t *pkt = (tdav_video_jitterbuffer_packet_t*)curr->data;
		if(pkt && !pkt->taken){
			copied = TSK_MIN(TSK_BUFFER_SIZE(pkt->data), out_size);
			memcpy(out_data, TSK_BUFFER_DATA(pkt->data), copied);
			pkt->taken = tsk_true;
			break;
		}
	}
	tsk_safeobj_unlock(jb);

	return copied;
}
/* Resets the jitter buffer. Currently a stub: queued packets are NOT
 * dropped and the write index is NOT rewound.
 * NOTE(review): a real reset would probably clear 'packets' and reset
 * 'frame_curr_index' -- confirm intended behavior.
 * (fix: removed the unused local 'jb' which triggered a compiler warning) */
static int tdav_video_jitterbuffer_reset(tmedia_jitterbuffer_t* self)
{
	(void)self;
	return 0;
}
/* Closes the jitter buffer. Currently a stub: the packet list and the
 * timer manager allocated by open() are only released by the destructor.
 * NOTE(review): should close() drop queued packets and stop the timer?
 * (fix: removed the unused local 'jb' which triggered a compiler warning) */
static int tdav_video_jitterbuffer_close(tmedia_jitterbuffer_t* self)
{
	(void)self;
	return 0;
}
//
// Video jitter buffer packet object definition
//
/* Creates a jitter-buffer packet wrapping a copy of 'data'.
 * @param data      payload to copy into the packet's buffer
 * @param data_size payload size in bytes
 * @retval new packet object (caller owns the reference), tsk_null on failure */
static tdav_video_jitterbuffer_packet_t* _tdav_video_jitterbuffer_packet_create(void* data, tsk_size_t data_size)
{
	tdav_video_jitterbuffer_packet_t *jb_packet;
	if((jb_packet = tsk_object_new(tdav_video_jitterbuffer_packet_def_t))){
		if(!(jb_packet->data = tsk_buffer_create(data, data_size))){
			/* fix: a packet with a null buffer used to escape here and
			 * would be dereferenced later by put()/get() */
			TSK_DEBUG_ERROR("Failed to create buffer");
			TSK_OBJECT_SAFE_FREE(jb_packet); /* sets jb_packet to tsk_null */
		}
	}
	return jb_packet;
}
/* constructor: initializes the packet's lock; 'data' is created later by
 * _tdav_video_jitterbuffer_packet_create() */
static tsk_object_t* tdav_video_jitterbuffer_packet_ctor(tsk_object_t * self, va_list * app)
{
tdav_video_jitterbuffer_packet_t *jb_packet = self;
if(jb_packet){
/* NOTE(review): freeing 'data' in a constructor is suspicious -- memory from
 * tsk_object_new() should be zeroed, making this a no-op; looks like a
 * copy/paste from a destructor. Confirm before removing. */
TSK_OBJECT_SAFE_FREE(jb_packet->data);
tsk_safeobj_init(jb_packet);
}
return self;
}
/* destructor: releases the payload buffer and the packet's lock */
static tsk_object_t* tdav_video_jitterbuffer_packet_dtor(tsk_object_t * self)
{
	tdav_video_jitterbuffer_packet_t *jb_packet = self;
	if(jb_packet){
		/* fix: 'data' (created by _tdav_video_jitterbuffer_packet_create)
		 * was never freed -> one tsk_buffer leaked per packet */
		TSK_OBJECT_SAFE_FREE(jb_packet->data);
		tsk_safeobj_deinit(jb_packet);
	}
	return self;
}
/* comparator: orders packets by RTP sequence number (used by
 * tsk_list_push_ascending_data in put()).
 * @retval <0, 0 or >0 following the usual comparator convention */
static int tdav_video_jitterbuffer_packet_cmp(const tsk_object_t *_p1, const tsk_object_t *_p2)
{
	const tdav_video_jitterbuffer_packet_t *pkt1 = _p1;
	const tdav_video_jitterbuffer_packet_t *pkt2 = _p2;
	if(!pkt1 || !pkt2){
		/* two nulls compare equal; a single null sorts first */
		return (!pkt1 && !pkt2) ? 0 : -1;
	}
	/* NOTE(review): plain subtraction does not account for RTP sequence
	 * number wrap-around -- confirm whether that matters here */
	return (int)(pkt1->seq_num - pkt2->seq_num);
}
/* object definition: vtable binding size/ctor/dtor/comparator for the
 * jitter-buffer packet object */
static const tsk_object_def_t tdav_video_jitterbuffer_packet_def_s =
{
sizeof(tdav_video_jitterbuffer_packet_t), /* object size */
tdav_video_jitterbuffer_packet_ctor, /* constructor */
tdav_video_jitterbuffer_packet_dtor, /* destructor */
tdav_video_jitterbuffer_packet_cmp, /* comparator (ascending seq_num) */
};
const tsk_object_def_t *tdav_video_jitterbuffer_packet_def_t = &tdav_video_jitterbuffer_packet_def_s;
//
// Video jitter buffer plugin definition
//
/* constructor: initializes the base jitter buffer, the lock and the default
 * tail size; 'packets' and 'timer' are created lazily in open() */
static tsk_object_t* tdav_video_jitterbuffer_ctor(tsk_object_t * self, va_list * app)
{
	tdav_video_jitterbuffer_t *vjb = self;
	if(!vjb){
		return self;
	}
	/* init base */
	tmedia_jitterbuffer_init(TMEDIA_JITTER_BUFFER(vjb));
	/* init self */
	tsk_safeobj_init(vjb);
	vjb->tail = TDAV_VIDEO_JB_TAIL; /* default tail, in milliseconds */
	return self;
}
/* destructor: releases everything the constructor and open() allocated.
 * Order matters: base first, then the lock-protected members, then the lock. */
static tsk_object_t* tdav_video_jitterbuffer_dtor(tsk_object_t * self)
{
tdav_video_jitterbuffer_t *jb = self;
if(jb){
/* deinit base */
tmedia_jitterbuffer_deinit(TMEDIA_JITTER_BUFFER(jb));
/* deinit self: packet list and timer manager were created in open() */
TSK_OBJECT_SAFE_FREE(jb->packets);
if(jb->timer){
tsk_timer_manager_destroy(&jb->timer);
}
tsk_safeobj_deinit(jb);
}
return self;
}
/* object definition: vtable for the video jitter buffer object itself */
static const tsk_object_def_t tdav_video_jitterbuffer_def_s =
{
sizeof(tdav_video_jitterbuffer_t), /* object size */
tdav_video_jitterbuffer_ctor, /* constructor */
tdav_video_jitterbuffer_dtor, /* destructor */
tsk_null, /* no comparator */
};
/* plugin definition: registered with the tinyMEDIA jitter-buffer framework
 * and selected by media type (tmedia_video) */
static const tmedia_jitterbuffer_plugin_def_t tdav_video_jitterbuffer_plugin_def_s =
{
&tdav_video_jitterbuffer_def_s,
tmedia_video, /* media type this plugin handles */
"Native Video JitterBuffer",
tdav_video_jitterbuffer_set,
tdav_video_jitterbuffer_open,
tdav_video_jitterbuffer_tick,
tdav_video_jitterbuffer_put,
tdav_video_jitterbuffer_get,
tdav_video_jitterbuffer_reset,
tdav_video_jitterbuffer_close,
};
const tmedia_jitterbuffer_plugin_def_t *tdav_video_jitterbuffer_plugin_def_t = &tdav_video_jitterbuffer_plugin_def_s;

View File

@ -54,7 +54,7 @@
"a=rtpmap:115 BV16/8000\r\n" \
"a=fmtp:102 octet-align=0; mode-set=0,1,2,3,4,5,6,7; mode-change-period=1; mode-change-capability=2; mode-change-neighbor=0\r\n" \
"a=fmtp:103 octet-align=1; mode-set=0,1,2,3,4,5,6,7; mode-change-period=1; mode-change-capability=2; mode-change-neighbor=0\r\n" \
"m=video 6060 RTP/AVP 125 111 98 121 31 126 34 32\r\n" \
"m=video 6060 RTP/AVP 125 98 111 121 31 126 34 32\r\n" \
"i=Video line\r\n" \
"b=A-YZ:92\r\n" \
"b=B-YZ:256\r\n" \

View File

@ -41,8 +41,8 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="&quot;$(PSDK_DIR)include&quot;;&quot;$(DXSDK_DIR)include&quot;;..\thirdparties\win32\include;include;..\tinyMSRP\include;..\tinyRTP\include;..\tinyMEDIA\include;..\tinySDP\include;..\tinyNET\src;..\tinyDSHOW\include;..\tinySAK\src;..\thirdparties\win32\include\BroadVoice16\bvcommon;..\thirdparties\win32\include\BroadVoice16\bv16"
PreprocessorDefinitions="HAVE_G729=0;HAVE_BV16=0;HAVE_OPENCORE_AMR=1;HAVE_ILBC=0;HAVE_LIBGSM=1;HAVE_TINYDSHOW=1;HAVE_DSOUND_H=1;HAVE_WAVE_API=0;HAVE_FFMPEG=1;HAVE_SPEEX_DSP=1;HAVE_WEBRTC=1;HAVE_SPEEX_JB=1;HAVE_LIB_SPEEX=1;HAVE_LIBVPX=1;G192BITSTREAM=0;DEBUG_LEVEL=DEBUG_LEVEL_INFO;WIN32;_DEBUG;_WINDOWS;_USRDLL;_WIN32_WINNT=0x0501;TINYDAV_EXPORTS"
AdditionalIncludeDirectories="&quot;$(PSDK_DIR)include&quot;;&quot;$(DXSDK_DIR)include&quot;;&quot;$(CUDA_PATH)\include&quot;;..\thirdparties\win32\include;include;..\tinyMSRP\include;..\tinyRTP\include;..\tinyMEDIA\include;..\tinySDP\include;..\tinyNET\src;..\tinyDSHOW\include;..\tinySAK\src;..\thirdparties\win32\include\BroadVoice16\bvcommon;..\thirdparties\win32\include\BroadVoice16\bv16"
PreprocessorDefinitions="HAVE_CUDA=0;HAVE_G729=0;HAVE_BV16=0;HAVE_OPENCORE_AMR=1;HAVE_ILBC=0;HAVE_LIBGSM=1;HAVE_TINYDSHOW=1;HAVE_DSOUND_H=1;HAVE_WAVE_API=0;HAVE_FFMPEG=1;HAVE_SPEEX_DSP=1;HAVE_WEBRTC=1;HAVE_SPEEX_JB=1;HAVE_LIB_SPEEX=1;HAVE_LIBVPX=1;G192BITSTREAM=0;DEBUG_LEVEL=DEBUG_LEVEL_INFO;WIN32;_DEBUG;_WINDOWS;_USRDLL;_WIN32_WINNT=0x0501;TINYDAV_EXPORTS"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@ -51,7 +51,7 @@
WarnAsError="true"
Detect64BitPortabilityProblems="false"
DebugInformationFormat="4"
CompileAs="1"
CompileAs="0"
/>
<Tool
Name="VCManagedResourceCompilerTool"
@ -64,7 +64,7 @@
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="Winmm.lib $(OutDir)\tinySAK.lib $(OutDir)\tinyNET.lib $(OutDir)\tinyRTP.lib $(OutDir)\tinyMSRP.lib $(OutDir)\tinySDP.lib $(OutDir)\tinyMEDIA.lib $(OutDir)\tinyDSHOW.lib &quot;..\thirdparties\win32\lib\gsm\libgsm.a&quot; &quot;..\thirdparties\win32\lib\ilbc\libiLBC.a&quot; &quot;..\thirdparties\win32\lib\speex\libspeex.a&quot; &quot;..\thirdparties\win32\lib\speex\libspeexdsp.a&quot; ..\thirdparties\win32\lib\libgcc.a ..\thirdparties\win32\lib\libmingwex.a &quot;..\thirdparties\win32\lib\ffmpeg\libavcodec.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libavutil.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libswscale.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libavcore.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libx264.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libtheora.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libogg.a&quot; &quot;..\thirdparties\win32\lib\webrtc\aec.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\apm_util.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\system_wrappers.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\spl.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\ns.lib&quot; &quot;..\thirdparties\win32\lib\vpx\vpxmd.lib&quot;"
AdditionalDependencies="Winmm.lib &quot;$(OutDir)\tinySAK.lib&quot; &quot;$(OutDir)\tinyNET.lib&quot; &quot;$(OutDir)\tinyRTP.lib&quot; &quot;$(OutDir)\tinyMSRP.lib&quot; &quot;$(OutDir)\tinySDP.lib&quot; &quot;$(OutDir)\tinyMEDIA.lib&quot; &quot;$(OutDir)\tinyDSHOW.lib&quot; &quot;..\thirdparties\win32\lib\gsm\libgsm.a&quot; &quot;..\thirdparties\win32\lib\ilbc\libiLBC.a&quot; &quot;..\thirdparties\win32\lib\speex\libspeex.a&quot; &quot;..\thirdparties\win32\lib\speex\libspeexdsp.a&quot; ..\thirdparties\win32\lib\libgcc.a ..\thirdparties\win32\lib\libmingwex.a &quot;..\thirdparties\win32\lib\ffmpeg\libavcodec.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libavutil.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libswscale.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libavcore.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libx264.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libtheora.a&quot; &quot;..\thirdparties\win32\lib\ffmpeg\libogg.a&quot; &quot;..\thirdparties\win32\lib\webrtc\aec.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\apm_util.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\system_wrappers.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\spl.lib&quot; &quot;..\thirdparties\win32\lib\webrtc\ns.lib&quot; &quot;..\thirdparties\win32\lib\vpx\vpxmd.lib&quot;"
LinkIncremental="2"
IgnoreDefaultLibraryNames="MSVCRT;LIBCMTD;LIBCMT"
GenerateDebugInformation="true"
@ -122,7 +122,7 @@
Name="VCCLCompilerTool"
EnableIntrinsicFunctions="false"
AdditionalIncludeDirectories="&quot;$(PSDK_DIR)include&quot;;&quot;$(DXSDK_DIR)include&quot;;..\thirdparties\win32\include;include;..\tinyMSRP\include;..\tinyRTP\include;..\tinyMEDIA\include;..\tinySDP\include;..\tinyNET\src;..\tinyDSHOW\include;..\tinySAK\src;..\thirdparties\win32\include\BroadVoice16\bvcommon;..\thirdparties\win32\include\BroadVoice16\bv16"
PreprocessorDefinitions="WIN32;NDEBUG;_WINDOWS;_USRDLL;TINYDAV_EXPORTS;DEBUG_LEVEL=DEBUG_LEVEL_INFOS;HAVE_G729=0;HAVE_BV16=0;HAVE_OPENCORE_AMR=1;HAVE_ILBC=0;HAVE_LIBGSM=1;HAVE_TINYDSHOW=1;HAVE_DSOUND_H=1;HAVE_WAVE_API=0;HAVE_FFMPEG=1;HAVE_SPEEX_DSP=1;HAVE_WEBRTC=1;HAVE_SPEEX_JB=1;HAVE_LIB_SPEEX=1;HAVE_LIBVPX=1;G192BITSTREAM=0;_WIN32_WINNT=0x0501"
PreprocessorDefinitions="WIN32;NDEBUG;_WINDOWS;_USRDLL;TINYDAV_EXPORTS;DEBUG_LEVEL=DEBUG_LEVEL_INFOS;HAVE_CUDA=0;HAVE_G729=0;HAVE_BV16=0;HAVE_OPENCORE_AMR=1;HAVE_ILBC=0;HAVE_LIBGSM=1;HAVE_TINYDSHOW=1;HAVE_DSOUND_H=1;HAVE_WAVE_API=0;HAVE_FFMPEG=1;HAVE_SPEEX_DSP=1;HAVE_WEBRTC=1;HAVE_SPEEX_JB=1;HAVE_LIB_SPEEX=1;HAVE_LIBVPX=1;G192BITSTREAM=0;_WIN32_WINNT=0x0501"
RuntimeLibrary="2"
EnableFunctionLevelLinking="false"
UsePrecompiledHeader="0"
@ -265,6 +265,14 @@
RelativePath=".\include\tinydav\codecs\h264\tdav_codec_h264.h"
>
</File>
<File
RelativePath=".\include\tinydav\codecs\h264\tdav_codec_h264_common.h"
>
</File>
<File
RelativePath=".\include\tinydav\codecs\h264\tdav_codec_h264_cuda.h"
>
</File>
<File
RelativePath=".\include\tinydav\codecs\h264\tdav_codec_h264_rtp.h"
>
@ -450,6 +458,10 @@
<Filter
Name="video"
>
<File
RelativePath=".\include\tinydav\video\tdav_consumer_video.h"
>
</File>
<File
RelativePath=".\include\tinydav\video\tdav_converter_video.h"
>
@ -462,6 +474,10 @@
RelativePath=".\include\tinydav\video\tdav_session_video.h"
>
</File>
<File
RelativePath=".\include\tinydav\video\tdav_video_jitterbuffer.h"
>
</File>
<Filter
Name="android"
>
@ -573,6 +589,10 @@
RelativePath=".\src\codecs\h264\tdav_codec_h264.c"
>
</File>
<File
RelativePath=".\src\codecs\h264\tdav_codec_h264_cuda.cxx"
>
</File>
<File
RelativePath=".\src\codecs\h264\tdav_codec_h264_rtp.c"
>
@ -758,6 +778,10 @@
<Filter
Name="video"
>
<File
RelativePath=".\src\video\tdav_consumer_video.c"
>
</File>
<File
RelativePath=".\src\video\tdav_converter_video.c"
>
@ -770,6 +794,10 @@
RelativePath=".\src\video\tdav_session_video.c"
>
</File>
<File
RelativePath=".\src\video\tdav_video_jitterbuffer.c"
>
</File>
<Filter
Name="android"
>

View File

@ -83,7 +83,6 @@ private:
#ifdef _WIN32_WCE
IBaseFilter *colorConvertor565; //http://msdn.microsoft.com/en-us/library/aa926076.aspx
#else
IBaseFilter *decompressorFilter;
DSFrameRateFilter *frameRateFilter;
#endif

View File

@ -48,7 +48,6 @@ DSCaptureGraph::DSCaptureGraph(ISampleGrabberCB* callback, HRESULT *hr)
this->colorConvertor565 = NULL;
#else
this->frameRateFilter = NULL;
this->decompressorFilter = NULL;
#endif
this->nullRendererFilter = NULL;
@ -75,7 +74,6 @@ DSCaptureGraph::~DSCaptureGraph()
#ifdef _WIN32_WCE
SAFE_RELEASE(this->colorConvertor565);
#else
SAFE_RELEASE(this->decompressorFilter);
#endif
SAFE_RELEASE(this->nullRendererFilter);
@ -212,8 +210,9 @@ HRESULT DSCaptureGraph::connect()
hr = ConnectFilters(this->graphBuilder, this->colorConvertor565, this->sampleGrabberFilter) ; if(FAILED(hr))return hr;
hr = ConnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr))return hr;
#else
hr = ConnectFilters(this->graphBuilder, this->sourceFilter, this->decompressorFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; }
hr = ConnectFilters(this->graphBuilder, this->decompressorFilter, this->frameRateFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; }
// No convertor needed
// AVI Decompressor Filter is automatically added by the Filter Graph Manager when needed
hr = ConnectFilters(this->graphBuilder, this->sourceFilter, this->frameRateFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; }
hr = ConnectFilters(this->graphBuilder, this->frameRateFilter, this->sampleGrabberFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; }
hr = ConnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr)) { TSK_DEBUG_ERROR("ConnectFilters failed"); return hr; }
#endif
@ -255,8 +254,7 @@ HRESULT DSCaptureGraph::disconnect()
hr = DisconnectFilters(this->graphBuilder, this->colorConvertor565, this->sampleGrabberFilter); if(FAILED(hr))return hr;
hr = DisconnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter); if(FAILED(hr))return hr;
#else
hr = DisconnectFilters(this->graphBuilder, this->sourceFilter, this->decompressorFilter);
hr = DisconnectFilters(this->graphBuilder, this->decompressorFilter, this->frameRateFilter);
hr = DisconnectFilters(this->graphBuilder, this->sourceFilter, this->frameRateFilter);
hr = DisconnectFilters(this->graphBuilder, this->frameRateFilter, this->sampleGrabberFilter);
hr = DisconnectFilters(this->graphBuilder, this->sampleGrabberFilter, this->nullRendererFilter);
#endif
@ -375,10 +373,6 @@ HRESULT DSCaptureGraph::createCaptureGraph()
hr = COCREATE(CLSID_SampleGrabber, IID_IBaseFilter, this->sampleGrabberFilter);
if(FAILED(hr)) return hr;
// Create the AVI decoder filter
hr = COCREATE(CLSID_AVIDec, IID_IBaseFilter, this->decompressorFilter);
if(FAILED(hr)) return hr;
// Create tdshow filter
LPUNKNOWN pUnk = NULL;
this->frameRateFilter = new DSFrameRateFilter(FILTER_FRAMERATE, pUnk, &hr);
@ -400,22 +394,10 @@ HRESULT DSCaptureGraph::createCaptureGraph()
hr = this->graphBuilder->AddFilter(this->frameRateFilter, FILTER_FRAMERATE);
if(FAILED(hr)) return hr;
// Add AVIDec to the graph
hr = this->graphBuilder->AddFilter(this->decompressorFilter, FILTER_AVI_DECOMPRESSOR);
if(FAILED(hr)) return hr;
// Find media control
hr = QUERY(this->graphBuilder, IID_IMediaControl, this->mediaController);
if(FAILED(hr)) return hr;
// Disable timing
/*IMediaFilter *mediaFilterController;
hr = QUERY(this->graphBuilder, IID_IMediaFilter, mediaFilterController);
if(FAILED(hr)) return hr;
mediaFilterController->SetSyncSource(NULL);
SAFE_RELEASE(mediaFilterController);*/
// Create the sample grabber
hr = QUERY(this->sampleGrabberFilter, IID_ISampleGrabber, this->grabberController);
if(FAILED(hr)) return hr;

View File

@ -50,8 +50,8 @@ DSGrabber::DSGrabber(HRESULT *hr)
// Init the bitmap info header with default values
memset(&(this->bitmapInfo), 0, sizeof(BITMAPINFOHEADER));
this->bitmapInfo.biSize = sizeof(BITMAPINFOHEADER);
this->bitmapInfo.biWidth = 176;
this->bitmapInfo.biHeight = 144;
this->bitmapInfo.biWidth = 352;
this->bitmapInfo.biHeight = 288;
this->bitmapInfo.biPlanes = 1;
this->bitmapInfo.biBitCount = 24;
this->bitmapInfo.biCompression = 0;
@ -89,7 +89,9 @@ void DSGrabber::start()
if (!this->graph->isRunning()){
first_buffer = true;
this->preview->start();
if(this->preview){
this->preview->start();
}
this->graph->connect();
this->graph->start();
}
@ -97,7 +99,9 @@ void DSGrabber::start()
void DSGrabber::stop()
{
if (this->graph->isRunning()){
this->preview->stop();
if(this->preview){
this->preview->stop();
}
this->graph->stop();
this->graph->disconnect();
}
@ -140,8 +144,10 @@ bool DSGrabber::setCaptureParameters(int w, int h, int f)
// Setup source filter in the graph
HRESULT hr = this->graph->setParameters(fmt, this->fps);
// Set preview parameters
this->preview->setFps(this->fps);
this->preview->setSize(this->width, this->height);
if(this->preview){
this->preview->setFps(this->fps);
this->preview->setSize(this->width, this->height);
}
tsk_mutex_unlock(this->mutex_buffer);

View File

@ -50,8 +50,8 @@ DSOutputStream::DSOutputStream(HRESULT *phr, DSOutputFilter *pParent, LPCWSTR pP
this->frameLength = (1000)/DEFAULT_FPS;
this->fps = DEFAULT_FPS;
this->width = 176;
this->height = 144;
this->width = 352;
this->height = 288;
this->overlay = false;

View File

@ -214,7 +214,7 @@ static tsk_object_t* tdshow_consumer_ctor(tsk_object_t * self, va_list * app)
/* init base */
tmedia_consumer_init(TMEDIA_CONSUMER(consumer));
TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_bgr24; // RGB24 on x86 (little endians) stored as BGR24
TMEDIA_CONSUMER(consumer)->video.display.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24
/* init self */
TMEDIA_CONSUMER(consumer)->video.fps = 15;

View File

@ -88,7 +88,6 @@ int tdshow_producer_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec
}
TMEDIA_PRODUCER(producer)->video.fps = TMEDIA_CODEC_VIDEO(codec)->out.fps;
// FIXME
TMEDIA_PRODUCER(producer)->video.width = TMEDIA_CODEC_VIDEO(codec)->out.width;
TMEDIA_PRODUCER(producer)->video.height = TMEDIA_CODEC_VIDEO(codec)->out.height;
@ -207,11 +206,11 @@ static tsk_object_t* tdshow_producer_ctor(tsk_object_t * self, va_list * app)
/* init base */
tmedia_producer_init(TMEDIA_PRODUCER(producer));
TMEDIA_PRODUCER(producer)->video.chroma = tmedia_bgr24; // RGB24 on x86 (little endians) stored as BGR24
TMEDIA_PRODUCER(producer)->video.chroma = tmedia_chroma_bgr24; // RGB24 on x86 (little endians) stored as BGR24
/* init self with default values*/
TMEDIA_PRODUCER(producer)->video.fps = 15;
TMEDIA_PRODUCER(producer)->video.width = 176;
TMEDIA_PRODUCER(producer)->video.height = 144;
TMEDIA_PRODUCER(producer)->video.width = 352;
TMEDIA_PRODUCER(producer)->video.height = 288;
if(IsMainThread()){
producer->grabber = new DSGrabber(&hr);

View File

@ -147,17 +147,6 @@ typedef struct tmedia_codec_s
//! the negociated format (only useful for codecs with dyn. payload type)
char* neg_format;
struct {
unsigned __FIXME__:1;
} audio;
struct {
struct {
unsigned encoded:1;
unsigned decoded:1;
} flip;
} video;
//! plugin used to create the codec
const struct tmedia_codec_plugin_def_s* plugin;
}
@ -272,6 +261,8 @@ typedef struct tmedia_codec_video_s
unsigned fps;
unsigned max_br;
unsigned max_mbps;
tmedia_chroma_t chroma;
tsk_bool_t flip;
}in;// decoded
struct{
unsigned width;
@ -279,6 +270,8 @@ typedef struct tmedia_codec_video_s
unsigned fps;
unsigned max_br;
unsigned max_mbps;
tmedia_chroma_t chroma;
tsk_bool_t flip;
}out;// encoded

View File

@ -89,16 +89,17 @@ tmedia_video_size_t;
// used by tinyWRAP
typedef enum tmedia_chroma_e
{
tmedia_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7
tmedia_bgr24, // used by windows consumer (DirectShow) -
tmedia_rgb32, // used by iOS4 consumer (iPhone and iPod touch)
tmedia_rgb565le, // (used by both android and wince consumers)
tmedia_rgb565be,
tmedia_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4)
tmedia_nv21, // Yuv420 SP (used by android producer)
tmedia_yuv422p,
tmedia_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G)
tmedia_yuv420p, // Default
tmedia_chroma_none=0,
tmedia_chroma_rgb24, // will be stored as bgr24 on x86 (little endians) machines; e.g. WindowsPhone7
tmedia_chroma_bgr24, // used by windows consumer (DirectShow) -
tmedia_chroma_rgb32, // used by iOS4 consumer (iPhone and iPod touch)
tmedia_chroma_rgb565le, // (used by both android and wince consumers)
tmedia_chroma_rgb565be,
tmedia_chroma_nv12, // used by iOS4 producer (iPhone and iPod Touch 3GS and 4)
tmedia_chroma_nv21, // Yuv420 SP (used by android producer)
tmedia_chroma_yuv422p,
tmedia_chroma_uyvy422, // used by iOS4 producer (iPhone and iPod Touch 3G)
tmedia_chroma_yuv420p, // Default
}
tmedia_chroma_t;

View File

@ -44,13 +44,17 @@ TMEDIA_BEGIN_DECLS
#define TMEDIA_CONSUMER_PTIME_DEFAULT 20
/**Max number of plugins (consumer types) we can create */
#define TMED_CONSUMER_MAX_PLUGINS 0x0F
#if !defined(TMED_CONSUMER_MAX_PLUGINS)
# define TMED_CONSUMER_MAX_PLUGINS 0x0F
#endif
/** cast any pointer to @ref tmedia_consumer_t* object */
#define TMEDIA_CONSUMER(self) ((tmedia_consumer_t*)(self))
/** Default Video chroma */
#define TMEDIA_CONSUMER_CHROMA_DEFAULT tmedia_yuv420p
#if !defined(TMEDIA_CONSUMER_CHROMA_DEFAULT)
# define TMEDIA_CONSUMER_CHROMA_DEFAULT tmedia_chroma_yuv420p
#endif
/** Base object for all Consumers */
typedef struct tmedia_consumer_s

View File

@ -21,7 +21,7 @@
*/
/**@file tmedia_jitterbuffer.h
* @brief JitterBuffer Plugin
* @brief Audio/Video JitterBuffer Plugin
*
* @author Mamadou Diop <diopmamadou(at)doubango.org>
*/
@ -30,6 +30,10 @@
#include "tinymedia_config.h"
#include "tinymedia/tmedia_params.h"
#include "tmedia_common.h"
#include "tsk_object.h"
TMEDIA_BEGIN_DECLS
@ -37,6 +41,11 @@ TMEDIA_BEGIN_DECLS
/** cast any pointer to @ref tmedia_jitterbuffer_t* object */
#define TMEDIA_JITTER_BUFFER(self) ((tmedia_jitterbuffer_t*)(self))
/**Max number of plugins (jb types) we can create */
#if !defined(TMED_JITTER_BUFFER_MAX_PLUGINS)
# define TMED_JITTER_BUFFER_MAX_PLUGINS 0x0F
#endif
/** Base object for all JitterBuffers */
typedef struct tmedia_jitterbuffer_s
{
@ -56,9 +65,13 @@ typedef struct tmedia_jitterbuffer_plugin_def_s
//! object definition used to create an instance of the jitterbufferr
const tsk_object_def_t* objdef;
//! the type of the jitter buffer
tmedia_type_t type;
//! full description (usefull for debugging)
const char* desc;
int (*set) (tmedia_jitterbuffer_t* , const tmedia_param_t*);
int (* open) (tmedia_jitterbuffer_t*, uint32_t frame_duration, uint32_t rate);
int (* tick) (tmedia_jitterbuffer_t*);
int (* put) (tmedia_jitterbuffer_t*, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr);
@ -69,6 +82,7 @@ typedef struct tmedia_jitterbuffer_plugin_def_s
tmedia_jitterbuffer_plugin_def_t;
TINYMEDIA_API int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self);
TINYMEDIA_API int tmedia_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param);
TINYMEDIA_API int tmedia_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate);
TINYMEDIA_API int tmedia_jitterbuffer_tick(tmedia_jitterbuffer_t* self);
TINYMEDIA_API int tmedia_jitterbuffer_put(tmedia_jitterbuffer_t* self, void* data, tsk_size_t data_size, const tsk_object_t* proto_hdr);
@ -79,7 +93,8 @@ TINYMEDIA_API int tmedia_jitterbuffer_deinit(tmedia_jitterbuffer_t* self);
TINYMEDIA_API int tmedia_jitterbuffer_plugin_register(const tmedia_jitterbuffer_plugin_def_t* plugin);
TINYMEDIA_API int tmedia_jitterbuffer_plugin_unregister();
TINYMEDIA_API tmedia_jitterbuffer_t* tmedia_jitterbuffer_create();
TINYMEDIA_API int tmedia_jitter_buffer_plugin_unregister_by_type(tmedia_type_t type);
TINYMEDIA_API tmedia_jitterbuffer_t* tmedia_jitterbuffer_create(tmedia_type_t type);
TMEDIA_END_DECLS

View File

@ -52,7 +52,7 @@ typedef int (*tmedia_producer_enc_cb_f)(const void* callback_data, const void* b
typedef int (*tmedia_producer_raw_cb_f)(const void* callback_data, const void* buffer, tsk_size_t size, uint32_t duration, tsk_bool_t marker);
/** Default Video chroma */
#define TMEDIA_PRODUCER_CHROMA_DEFAULT tmedia_yuv420p
#define TMEDIA_PRODUCER_CHROMA_DEFAULT tmedia_chroma_yuv420p
/** Base object for all Producers */
typedef struct tmedia_producer_s

View File

@ -82,16 +82,14 @@ int tmedia_codec_init(tmedia_codec_t* self, tmedia_type_t type, const char* name
// Video flipping: For backward compatibility we have to initialize the default values
// according to the CFLAGS: 'FLIP_ENCODED_PICT' and 'FLIP_DECODED_PICT'. At any time you
// can update thse values (e.g. when the device switch from landscape to portrait) using video_session->set();
if(type & tmedia_video){
#if FLIP_ENCODED_PICT
self->video.flip.encoded = tsk_true;
#else
self->video.flip.encoded = tsk_false;
TMEDIA_CODEC_VIDEO(self)->out.flip = tsk_true;
#endif
#if FLIP_DECODED_PICT
self->video.flip.decoded = tsk_true;
#else
self->video.flip.decoded = tsk_false;
TMEDIA_CODEC_VIDEO(self)->in.flip = tsk_true;
#endif
}
return 0;
}
@ -301,9 +299,11 @@ tmedia_codec_t* tmedia_codec_create(const char* format)
{ /* Video codec */
tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(codec);
tmedia_codec_video_init(TMEDIA_CODEC(video), plugin->name, plugin->desc, plugin->format);
video->in.width = video->out.width = plugin->video.width;
video->in.height = video->out.height = plugin->video.height;
video->in.fps = video->out.fps = plugin->video.fps;
if(!video->in.width)video->in.width = video->out.width = plugin->video.width;
if(!video->in.height)video->in.height = video->out.height = plugin->video.height;
if(!video->in.fps)video->in.fps = video->out.fps = plugin->video.fps;
if(video->in.chroma==tmedia_chroma_none)video->in.chroma = tmedia_chroma_yuv420p;
if(video->out.chroma==tmedia_chroma_none)video->out.chroma = tmedia_chroma_yuv420p;
break;
}
case tmedia_msrp:

View File

@ -222,27 +222,27 @@ const tmedia_video_size_t* tmedia_get_video_size(tmedia_chroma_t chroma, tsk_siz
tsk_size_t i;
switch(chroma)
{
case tmedia_rgb24:
case tmedia_bgr24:
case tmedia_chroma_rgb24:
case tmedia_chroma_bgr24:
factor = 3.f;
break;
case tmedia_rgb565le:
case tmedia_rgb565be:
case tmedia_chroma_rgb565le:
case tmedia_chroma_rgb565be:
factor = 2.f;
break;
case tmedia_rgb32:
case tmedia_chroma_rgb32:
factor = 4.f;
break;
case tmedia_nv21:
case tmedia_nv12:
case tmedia_yuv420p:
case tmedia_chroma_nv21:
case tmedia_chroma_nv12:
case tmedia_chroma_yuv420p:
factor = 1.5f;
break;
case tmedia_yuv422p:
case tmedia_uyvy422:
case tmedia_chroma_yuv422p:
case tmedia_chroma_uyvy422:
factor = 2.f;
break;
}

View File

@ -35,7 +35,7 @@
*/
/* pointer to all registered consumers */
const tmedia_consumer_plugin_def_t* __tmedia_consumer_plugins[TMED_CONSUMER_MAX_PLUGINS] = {0};
static const tmedia_consumer_plugin_def_t* __tmedia_consumer_plugins[TMED_CONSUMER_MAX_PLUGINS] = {0};
/**@ingroup tmedia_consumer_group
* Initialize the consumer.

View File

@ -21,7 +21,7 @@
*/
/**@file tmedia_jitterbuffer.c
* @brief JitterBuffer plugin
* @brief Audio/Video JitterBuffer plugin
*
* @author Mamadou Diop <diopmamadou(at)doubango.org>
*/
@ -29,7 +29,8 @@
#include "tsk_debug.h"
static const tmedia_jitterbuffer_plugin_def_t* __tmedia_jitterbuffer_plugin = tsk_null;
/* pointer to all registered jitter_buffers */
static const tmedia_jitterbuffer_plugin_def_t* __tmedia_jitterbuffer_plugins[TMED_JITTER_BUFFER_MAX_PLUGINS] = {0};
int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self)
{
@ -40,6 +41,15 @@ int tmedia_jitterbuffer_init(tmedia_jitterbuffer_t* self)
return 0;
}
int tmedia_jitterbuffer_set(tmedia_jitterbuffer_t *self, const tmedia_param_t* param)
{
if(!self || !self->plugin || !param){
TSK_DEBUG_ERROR("Invalid parameter");
return 0;
}
return self->plugin->set ? self->plugin->set(self, param) : 0;
}
int tmedia_jitterbuffer_open(tmedia_jitterbuffer_t* self, uint32_t frame_duration, uint32_t rate)
{
int ret;
@ -164,30 +174,108 @@ int tmedia_jitterbuffer_deinit(tmedia_jitterbuffer_t* self)
return 0;
}
tmedia_jitterbuffer_t* tmedia_jitterbuffer_create(tmedia_type_t type)
{
tmedia_jitterbuffer_t* jitter_buffer = tsk_null;
const tmedia_jitterbuffer_plugin_def_t* plugin;
tsk_size_t i = 0;
while((i < TMED_JITTER_BUFFER_MAX_PLUGINS) && (plugin = __tmedia_jitterbuffer_plugins[i++])){
if(plugin->objdef && plugin->type == type){
if((jitter_buffer = tsk_object_new(plugin->objdef))){
/* initialize the newly created jitter_buffer */
jitter_buffer->plugin = plugin;
break;
}
}
}
return jitter_buffer;
}
int tmedia_jitterbuffer_plugin_register(const tmedia_jitterbuffer_plugin_def_t* plugin)
{
tsk_size_t i;
if(!plugin){
TSK_DEBUG_ERROR("Invalid parameter");
return -1;
}
__tmedia_jitterbuffer_plugin = plugin;
return 0;
}
int tmedia_jitterbuffer_plugin_unregister()
{
__tmedia_jitterbuffer_plugin = tsk_null;
return 0;
}
tmedia_jitterbuffer_t* tmedia_jitterbuffer_create()
{
tmedia_jitterbuffer_t* jitterbuffer = tsk_null;
if(__tmedia_jitterbuffer_plugin){
if((jitterbuffer = tsk_object_new(__tmedia_jitterbuffer_plugin->objdef))){
jitterbuffer->plugin = __tmedia_jitterbuffer_plugin;
/* add or replace the plugin */
for(i = 0; i<TMED_JITTER_BUFFER_MAX_PLUGINS; i++){
if(!__tmedia_jitterbuffer_plugins[i] || (__tmedia_jitterbuffer_plugins[i] == plugin)){
__tmedia_jitterbuffer_plugins[i] = plugin;
return 0;
}
}
return jitterbuffer;
TSK_DEBUG_ERROR("There are already %d plugins.", TMED_JITTER_BUFFER_MAX_PLUGINS);
return -2;
}
/**@ingroup tmedia_jitterbuffer_group
* UnRegisters a jitter_buffer plugin.
* @param plugin the definition of the plugin.
* @retval Zero if succeed and non-zero error code otherwise.
*/
int tmedia_jitterbuffer_plugin_unregister(const tmedia_jitterbuffer_plugin_def_t* plugin)
{
tsk_size_t i;
tsk_bool_t found = tsk_false;
if(!plugin){
TSK_DEBUG_ERROR("Invalid Parameter");
return -1;
}
/* find the plugin to unregister */
for(i = 0; i<TMED_JITTER_BUFFER_MAX_PLUGINS && __tmedia_jitterbuffer_plugins[i]; i++){
if(__tmedia_jitterbuffer_plugins[i] == plugin){
__tmedia_jitterbuffer_plugins[i] = tsk_null;
found = tsk_true;
break;
}
}
/* compact */
if(found){
for(; i<(TMED_JITTER_BUFFER_MAX_PLUGINS - 1); i++){
if(__tmedia_jitterbuffer_plugins[i+1]){
__tmedia_jitterbuffer_plugins[i] = __tmedia_jitterbuffer_plugins[i+1];
}
else{
break;
}
}
__tmedia_jitterbuffer_plugins[i] = tsk_null;
}
return (found ? 0 : -2);
}
int tmedia_jitterbuffer_plugin_unregister_by_type(tmedia_type_t type)
{
tsk_size_t i;
tsk_bool_t found = tsk_false;
/* find the plugin to unregister */
for(i = 0; i<TMED_JITTER_BUFFER_MAX_PLUGINS && __tmedia_jitterbuffer_plugins[i]; i++){
if((__tmedia_jitterbuffer_plugins[i]->type & type) == __tmedia_jitterbuffer_plugins[i]->type){
__tmedia_jitterbuffer_plugins[i] = tsk_null;
found = tsk_true;
break;
}
}
/* compact */
if(found){
for(; i<(TMED_JITTER_BUFFER_MAX_PLUGINS - 1); i++){
if(__tmedia_jitterbuffer_plugins[i+1]){
__tmedia_jitterbuffer_plugins[i] = __tmedia_jitterbuffer_plugins[i+1];
}
else{
break;
}
}
__tmedia_jitterbuffer_plugins[i] = tsk_null;
}
return (found ? 0 : -2);
}

View File

@ -216,7 +216,7 @@ static tsk_object_t* tnet_socket_ctor(tsk_object_t * self, va_list * app)
/* Find our address. */
for(ptr = result; ptr; ptr = ptr->ai_next){
sock->fd = socket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol);
sock->fd = tnet_soccket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol);
if(ptr->ai_family != AF_INET6 && ptr->ai_family != AF_INET){
continue;
}

View File

@ -516,8 +516,6 @@ void *tnet_transport_mainthread(void *param)
transport_socket_t* active_socket;
int index;
SetPriorityClass(GetCurrentThread(), REALTIME_PRIORITY_CLASS);
TSK_DEBUG_INFO("Starting [%s] server with IP {%s} on port {%d}...", transport->description, transport->master->ip, transport->master->port);
while(TSK_RUNNABLE(transport)->running || TSK_RUNNABLE(transport)->started)

View File

@ -114,6 +114,14 @@ TINYNET_API int tnet_get_peerip_n_port(tnet_fd_t localFD, tnet_ip_t *ip, tnet_po
# define tnet_get_sockaddr_size(psockaddr) ((psockaddr)->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6): ((psockaddr)->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(*(psockaddr))))
#endif
#if TNET_UNDER_WINDOWS
# define tnet_ioctlt ioctlsocket /* FIXME: use WSAIoctl */
# define tnet_soccket(family, type, protocol) WSASocket((family), (type), (protocol), NULL, 0, WSA_FLAG_OVERLAPPED)
#else
# define tnet_ioctlt ioctl
# define tnet_soccket(family, type, protocol) socket((family), (type), (protocol))
#endif
TINYNET_API int tnet_getnameinfo(const struct sockaddr *sa, socklen_t salen, char* node, socklen_t nodelen, char* service, socklen_t servicelen, int flags);
TINYNET_API int tnet_gethostname(tnet_host_t* result);
@ -156,13 +164,6 @@ TINYNET_API int tnet_sockfd_close(tnet_fd_t *fd);
}
#if TSK_UNDER_WINDOWS
# define tnet_ioctlt ioctlsocket /* FIXME: use WSAIoctl */
#else
# define tnet_ioctlt ioctl
#endif
tnet_interface_t* tnet_interface_create(const char* description, const void* mac_address, tsk_size_t mac_address_length);
tnet_address_t* tnet_address_create(const char* ip);

View File

@ -35,8 +35,8 @@
#include "tsk_debug.h"
#define TINY_RCVBUF (256/2/*Will be doubled and min on linux is 256*/) /* tiny buffer used to disable receiving */
#define BIG_RCVBUF 64000
#define BIG_SNDBUF 64000
#define BIG_RCVBUF (64 * 1024)
#define BIG_SNDBUF (64 * 1024)
// TODO: Add support for outbound DTMF (http://www.ietf.org/rfc/rfc2833.txt)
@ -317,7 +317,7 @@ int trtp_manager_start(trtp_manager_t* self)
/* Flush buffers and re-enable sockets */
{
char buff[1024];
char buff[2048];
// re-enable sockets
_trtp_manager_enable_sockets(self);

View File

@ -43,9 +43,10 @@ typedef int tsk_boolean_t;
#define tsk_true 1
#define tsk_false 0
#define TSK_MIN(a,b) (((a) < (b)) ? (a) : (b))
#define TSK_MAX(a,b) (((a) > (b)) ? (a) : (b))
#define TSK_ABS(a) (((a)< 0) ? -(a) : (a))
#define TSK_MIN(a,b) (((a) < (b)) ? (a) : (b))
#define TSK_MAX(a,b) (((a) > (b)) ? (a) : (b))
#define TSK_ABS(a) (((a)< 0) ? -(a) : (a))
#define TSK_CLAMP(nMin, nVal, nMax) ((nVal) > (nMax)) ? (nMax) : (((nVal) < (nMin)) ? (nMin) : (nVal))
// used to avoid doing *((uint32_t*)ptr) which don't respect memory alignment on
// some embedded (ARM,?...) platforms

View File

@ -89,22 +89,27 @@
* @sa @ref tsk_semaphore_destroy
*/
tsk_semaphore_handle_t* tsk_semaphore_create()
{
return tsk_semaphore_create_2(0);
}
tsk_semaphore_handle_t* tsk_semaphore_create_2(int initial_val)
{
SEMAPHORE_T handle = 0;
#if TSK_UNDER_WINDOWS
handle = CreateSemaphore(NULL, 0, 0x7FFFFFFF, NULL);
handle = CreateSemaphore(NULL, initial_val, 0x7FFFFFFF, NULL);
#else
handle = tsk_calloc(1, sizeof(SEMAPHORE_S));
#if TSK_USE_NAMED_SEM
named_sem_t * nsem = (named_sem_t*)handle;
tsk_sprintf(&(nsem->name), "/sem-%d", sem_count++);
if((nsem->sem = sem_open(nsem->name, O_CREAT /*| O_EXCL*/, S_IRUSR | S_IWUSR, 0)) == SEM_FAILED)
if((nsem->sem = sem_open(nsem->name, O_CREAT /*| O_EXCL*/, S_IRUSR | S_IWUSR, initial_val)) == SEM_FAILED)
{
TSK_FREE(nsem->name);
#else
if(sem_init((SEMAPHORE_T)handle, 0, 0))
if(sem_init((SEMAPHORE_T)handle, 0, initial_val))
{
#endif
TSK_FREE(handle);

View File

@ -37,6 +37,7 @@ TSK_BEGIN_DECLS
typedef void tsk_semaphore_handle_t;
TINYSAK_API tsk_semaphore_handle_t* tsk_semaphore_create();
TINYSAK_API tsk_semaphore_handle_t* tsk_semaphore_create_2(int initial_val);
TINYSAK_API int tsk_semaphore_increment(tsk_semaphore_handle_t* handle);
TINYSAK_API int tsk_semaphore_decrement(tsk_semaphore_handle_t* handle);
TINYSAK_API void tsk_semaphore_destroy(tsk_semaphore_handle_t** handle);

View File

@ -138,7 +138,8 @@ typedef struct tsip_dialog_s
tsip_uri_t* uri_remote;
tsip_uri_t* uri_remote_target;
struct sockaddr_storage remote_addr; // Only valid for Dgram
uint32_t cseq_value;
char* cseq_method;
@ -162,7 +163,7 @@ typedef tsk_list_t tsip_dialogs_L_t;
tsip_request_t *tsip_dialog_request_new(const tsip_dialog_t *self, const char* method);
int tsip_dialog_request_send(const tsip_dialog_t *self, tsip_request_t* request);
tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request);
tsip_response_t *tsip_dialog_response_new(tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request);
int tsip_dialog_response_send(const tsip_dialog_t *self, tsip_response_t* response);
int tsip_dialog_apply_action(tsip_message_t* message, const tsip_action_t* action);

View File

@ -391,6 +391,8 @@ tsip_request_t *tsip_dialog_request_new(const tsip_dialog_t *self, const char* m
request->sigcomp_id = tsk_strdup(self->ss->sigcomp_id);
}
/* Remote Address: Used if "Server mode" otherwise Proxy-CSCF will be used */
request->remote_addr = self->remote_addr;
TSK_OBJECT_SAFE_FREE(request_uri);
TSK_OBJECT_SAFE_FREE(from_uri);
@ -441,7 +443,7 @@ int tsip_dialog_request_send(const tsip_dialog_t *self, tsip_request_t* request)
return ret;
}
tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request)
tsip_response_t *tsip_dialog_response_new(tsip_dialog_t *self, short status, const char* phrase, const tsip_request_t* request)
{
/* Reponse is created as per RFC 3261 subclause 8.2.6 and (headers+tags) are copied
* as per subclause 8.2.6.2.
@ -480,6 +482,8 @@ tsip_response_t *tsip_dialog_response_new(const tsip_dialog_t *self, short statu
* it's up to the transport layer to copy it to these headers */
response->sigcomp_id = tsk_strdup(self->ss->sigcomp_id);
}
/* Remote Addr: used to send requests if "Server Mode" otherwise Proxy-CSCF address will be used */
self->remote_addr = request->remote_addr;
}
return response;
}

View File

@ -698,12 +698,19 @@ int x0000_Any_2_Any_X_i2xxINVITEorUPDATE(va_list *app)
int x0000_Any_2_Trying_X_oBYE(va_list *app)
{
tsip_dialog_invite_t *self = va_arg(*app, tsip_dialog_invite_t *);
int ret;
/* Alert the user */
TSIP_DIALOG_SIGNAL(self, tsip_event_code_dialog_terminating, "Terminating dialog");
/* send BYE */
return send_BYE(self);
if((ret = send_BYE(self)) == 0){
// stop session manager
if(self->msession_mgr && self->msession_mgr->started){
tmedia_session_mgr_stop(self->msession_mgr);
}
}
return ret;
}
/* Any -> (iBYE) -> Terminated */

View File

@ -296,10 +296,11 @@ tsk_size_t tsip_transport_send(const tsip_transport_t* self, const char *branch,
}
}
else{
if(self->stack->network.mode_server && TSIP_MESSAGE_IS_RESPONSE(msg)){ // In server mode we will never send request. At least for now ;)
if(self->stack->network.mode_server){
ret = tsip_transport_send_raw(self, (const struct sockaddr*)&msg->remote_addr, buffer->data, buffer->size);
}
else{
// always send to the Proxy-CSCF
ret = tsip_transport_send_raw(self, tsk_null/* Use P-CSCF addr */, buffer->data, buffer->size);
}