Worked around a possible endless loop at MGCP library shutdown.
Fixed a one-time memory leak.

git-svn-id: http://voip.null.ro/svn/yate@5306 acf43c95-373e-0410-b603-e72c3f656dc1
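Notes on the change (inferred from the diff below): MGCPEngine::cleanup() previously waited in an unbounded while/yield loop for the private threads to remove themselves from m_threads, so a thread that never deregistered would hang shutdown forever. The wait is now bounded to roughly two seconds (2000000 microseconds) and releases the engine lock while idling, so exiting threads can acquire it to deregister. Separately, threads appended to m_threads are now marked setDelete(false) and each receive thread owns its SocketAddr (m_addr), which is presumably where the one-time leak was plugged.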
@@ -41,6 +41,7 @@ public:
     virtual void run();
 private:
     MGCPEngine* m_engine;
+    SocketAddr m_addr;
     Action m_action;
 };
 
@@ -67,6 +68,7 @@ MGCPPrivateThread::MGCPPrivateThread(MGCPEngine* engine, bool process,
     Thread::Priority priority)
     : Thread(process?"MGCP Process":"MGCP Receive",priority),
     m_engine(engine),
+    m_addr(AF_INET),
     m_action(process?Process:Receive)
 {
     DDebug(m_engine,DebugInfo,"MGCPPrivateThread::MGCPPrivateThread() [%p]",this);
@@ -91,7 +93,7 @@ void MGCPPrivateThread::run()
 	    m_engine->runProcess();
 	    break;
 	case Receive:
-	    m_engine->runReceive();
+	    m_engine->runReceive(m_addr);
 	    break;
     }
 }
@@ -454,9 +456,8 @@ bool MGCPEngine::process(u_int64_t time)
 }
 
 // Repeatedly calls receive() until the calling thread terminates
-void MGCPEngine::runReceive()
+void MGCPEngine::runReceive(SocketAddr& addr)
 {
-    SocketAddr addr(AF_INET);
     if (m_recvBuf)
 	delete[] m_recvBuf;
     m_recvBuf = new unsigned char[maxRecvPacket()];
@@ -468,6 +469,13 @@ void MGCPEngine::runReceive()
 	Thread::check(true);
     }
 
+// Repeatedly calls receive() until the calling thread terminates
+void MGCPEngine::runReceive()
+{
+    SocketAddr addr(AF_INET);
+    runReceive(addr);
+}
+
 // Repeatedly calls process() until the calling thread terminates
 void MGCPEngine::runProcess()
 {
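The parameterless runReceive() is kept as a thin wrapper around the new overload, so existing callers keep working while the receive thread can pass in its own long-lived m_addr. A minimal sketch of this wrapper-overload pattern, using hypothetical names unrelated to the library:

    #include <string>

    void serveLoop(std::string& addr);  // overload doing the real work

    // Compatibility wrapper: keeps the old signature, owns a local address
    void serveLoop()
    {
        std::string addr("0.0.0.0");
        serveLoop(addr);
    }

    void serveLoop(std::string& addr)
    {
        // ... receive repeatedly, recording the sender in addr ...
        (void)addr;
    }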
@@ -553,7 +561,7 @@ void MGCPEngine::cleanup(bool gracefully, const char* text)
 	String::boolText(gracefully),text);
 
     // Terminate transactions
-    lock();
+    Lock mylock(this);
     if (gracefully)
 	for (ObjList* o = m_transactions.skipNull(); o; o = o->skipNext()) {
 	    MGCPTransaction* tr = static_cast<MGCPTransaction*>(o->get());
@@ -561,7 +569,6 @@ void MGCPEngine::cleanup(bool gracefully, const char* text)
 	    tr->setResponse(400,text);
 	}
     m_transactions.clear();
-    unlock();
 
     // Check if we have any private threads to wait
     if (!m_threads.skipNull())
@@ -569,14 +576,20 @@ void MGCPEngine::cleanup(bool gracefully, const char* text)
 
     // Terminate private threads
     Debug(this,DebugAll,"Terminating %u private threads",m_threads.count());
-    lock();
     ListIterator iter(m_threads);
     for (GenObject* o = 0; 0 != (o = iter.get());)
 	static_cast<MGCPPrivateThread*>(o)->cancel(!gracefully);
-    unlock();
     DDebug(this,DebugAll,"Waiting for private threads to terminate");
-    while (m_threads.skipNull())
-	Thread::yield();
+    u_int64_t maxWait = Time::now() + 2000000;
+    while (m_threads.skipNull()) {
+	mylock.drop();
+	if (Time::now() > maxWait) {
+	    Debug(this,DebugGoOn,"Private threads did not terminate!");
+	    return;
+	}
+	Thread::idle();
+	mylock.acquire(this);
+    }
     DDebug(this,DebugAll,"Private threads terminated");
 }
 
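This rewritten wait is the endless-loop workaround named in the commit message: the lock is dropped before idling so an exiting thread can take it to remove itself from m_threads, and the wait gives up after a fixed deadline instead of spinning forever. A standalone sketch of the same pattern in portable C++ (std::mutex and std::list are illustrative stand-ins, not the library's API):

    #include <chrono>
    #include <cstdio>
    #include <list>
    #include <mutex>
    #include <thread>

    std::mutex engineLock;
    std::list<int> threads;  // stand-in for the private thread list

    // Returns true if all threads deregistered before the deadline
    bool waitForThreads()
    {
        using clock = std::chrono::steady_clock;
        const auto deadline = clock::now() + std::chrono::seconds(2);
        std::unique_lock<std::mutex> lock(engineLock);
        while (!threads.empty()) {
            lock.unlock();     // let exiting threads lock and deregister
            if (clock::now() > deadline) {
                std::fprintf(stderr, "threads did not terminate!\n");
                return false;  // bail out instead of spinning forever
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
            lock.lock();       // re-check the list while holding the lock
        }
        return true;
    }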
@@ -630,7 +643,7 @@ void MGCPEngine::appendThread(MGCPPrivateThread* thread)
     if (!thread)
 	return;
     Lock lock(this);
-    m_threads.append(thread);
+    m_threads.append(thread)->setDelete(false);
     XDebug(this,DebugAll,"Added private thread (%p)",thread);
 }
 
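setDelete(false) on the freshly appended item tells Yate's ObjList not to destroy the stored object when the item is removed or the list is cleared; Yate Thread objects destroy themselves when they exit, so letting the list also delete them risks a double destruction. A small sketch of the ownership rule, assuming Yate's yateclass.h:

    #include <yateclass.h>
    using namespace TelEngine;

    int main()
    {
        ObjList list;
        String* s = new String("demo");
        list.append(s)->setDelete(false); // list holds, but does not own, s
        list.clear();                     // removes the item, leaves *s alive
        TelEngine::destruct(s);           // the caller stays responsible for s
        return 0;
    }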
@@ -975,6 +975,12 @@ public:
      */
     bool process(u_int64_t time = Time());
 
+    /**
+     * Repeatedly calls @ref receive() until the calling thread terminates
+     * @param addr The sender's address if received any data
+     */
+    void runReceive(SocketAddr& addr);
+
     /**
      * Repeatedly calls @ref receive() until the calling thread terminates
      */