forked from osmocom/wireshark
RTP Player: Memory consumption improvements
Audio for play is now decoded and stored without silence parts. Changes: - ui/qt/utils/rtp_audio_file.cpp created to handle silence skipping - ui/qt/rtp_audio_stream.cpp refactored to support it - Fixed issue with exporting streams: File synchronized export was missing leading silence. - No line is shown in waveform graph if there is silence
This commit is contained in:
parent
d5bd5a920b
commit
54b7886a5e
|
@ -66,6 +66,7 @@ set(WIRESHARK_UTILS_HEADERS
|
|||
utils/rtp_audio_routing.h
|
||||
utils/rtp_audio_routing_filter.h
|
||||
utils/rtp_audio_silence_generator.h
|
||||
utils/rtp_audio_file.h
|
||||
utils/stock_icon.h
|
||||
utils/tango_colors.h
|
||||
utils/variant_pointer.h
|
||||
|
@ -309,6 +310,7 @@ set(WIRESHARK_UTILS_SRCS
|
|||
utils/rtp_audio_routing.cpp
|
||||
utils/rtp_audio_routing_filter.cpp
|
||||
utils/rtp_audio_silence_generator.cpp
|
||||
utils/rtp_audio_file.cpp
|
||||
utils/stock_icon.cpp
|
||||
utils/wireshark_mime_data.cpp
|
||||
utils/wireshark_zip_helper.cpp
|
||||
|
|
|
@ -29,15 +29,12 @@
|
|||
#include <wsutil/nstime.h>
|
||||
|
||||
#include <ui/qt/utils/rtp_audio_routing_filter.h>
|
||||
#include <ui/qt/utils/rtp_audio_file.h>
|
||||
|
||||
#include <QAudioFormat>
|
||||
#include <QAudioOutput>
|
||||
#include <QDir>
|
||||
#include <QTemporaryFile>
|
||||
#include <QVariant>
|
||||
#include <QTimer>
|
||||
#include <QDebug>
|
||||
#include <QBuffer>
|
||||
|
||||
// To do:
|
||||
// - Only allow one rtpstream_info_t per RtpAudioStream?
|
||||
|
@ -47,8 +44,6 @@ static const spx_int16_t visual_sample_rate_ = 1000;
|
|||
RtpAudioStream::RtpAudioStream(QObject *parent, rtpstream_id_t *id, bool stereo_required) :
|
||||
QObject(parent)
|
||||
, first_packet_(true)
|
||||
, sample_file_(NULL)
|
||||
, sample_file_frame_(NULL)
|
||||
, decoders_hash_(rtp_decoder_hash_table_new())
|
||||
, global_start_rel_time_(0.0)
|
||||
, start_abs_offset_(0.0)
|
||||
|
@ -75,26 +70,13 @@ RtpAudioStream::RtpAudioStream(QObject *parent, rtpstream_id_t *id, bool stereo_
|
|||
visual_resampler_ = speex_resampler_init(1, visual_sample_rate_,
|
||||
visual_sample_rate_, SPEEX_RESAMPLER_QUALITY_MIN, NULL);
|
||||
|
||||
QString tempname = QString("%1/wireshark_rtp_stream").arg(QDir::tempPath());
|
||||
sample_file_ = new QTemporaryFile(tempname, this);
|
||||
if (!sample_file_->open(QIODevice::ReadWrite)) {
|
||||
// We are out of file resources
|
||||
delete sample_file_;
|
||||
try {
|
||||
// RtpAudioFile is ready for writing Frames
|
||||
audio_file_ = new RtpAudioFile();
|
||||
} catch (...) {
|
||||
speex_resampler_destroy(visual_resampler_);
|
||||
rtpstream_info_free_data(&rtpstream_);
|
||||
rtpstream_id_free(&id_);
|
||||
qWarning() << "Can't create temp file in " << tempname;
|
||||
throw -1;
|
||||
}
|
||||
sample_file_frame_ = new QBuffer(this);
|
||||
if (! sample_file_frame_->open(QIODevice::ReadWrite)) {
|
||||
// We are out of file resources
|
||||
delete sample_file_;
|
||||
delete sample_file_frame_;
|
||||
speex_resampler_destroy(visual_resampler_);
|
||||
rtpstream_info_free_data(&rtpstream_);
|
||||
rtpstream_id_free(&id_);
|
||||
qWarning() << "Can't create temp file in " << tempname;
|
||||
throw -1;
|
||||
}
|
||||
|
||||
|
@ -114,8 +96,7 @@ RtpAudioStream::~RtpAudioStream()
|
|||
speex_resampler_destroy(visual_resampler_);
|
||||
rtpstream_info_free_data(&rtpstream_);
|
||||
rtpstream_id_free(&id_);
|
||||
if (sample_file_) delete sample_file_;
|
||||
if (sample_file_frame_) delete sample_file_frame_;
|
||||
if (audio_file_) delete audio_file_;
|
||||
// temp_file_ is released by audio_output_
|
||||
if (audio_output_) delete audio_output_;
|
||||
}
|
||||
|
@ -188,21 +169,6 @@ void RtpAudioStream::reset(double global_start_time)
|
|||
visual_samples_.clear();
|
||||
out_of_seq_timestamps_.clear();
|
||||
jitter_drop_timestamps_.clear();
|
||||
|
||||
// Create new temp files
|
||||
if (sample_file_) delete sample_file_;
|
||||
if (sample_file_frame_) delete sample_file_frame_;
|
||||
QString tempname = QString("%1/wireshark_rtp_stream").arg(QDir::tempPath());
|
||||
sample_file_ = new QTemporaryFile(tempname, this);
|
||||
if (!sample_file_->open(QIODevice::ReadWrite)) {
|
||||
qWarning() << "Can't create temp file in " << tempname << " during retap";
|
||||
}
|
||||
sample_file_frame_ = new QBuffer(this);
|
||||
if (!sample_file_frame_->open(QIODevice::ReadWrite)) {
|
||||
qWarning() << "Can't create temp file in " << tempname << " during retap";
|
||||
}
|
||||
|
||||
// RTP_STREAM_DEBUG("Writing to %s", tempname.toUtf8().constData());
|
||||
}
|
||||
|
||||
AudioRouting RtpAudioStream::getAudioRouting()
|
||||
|
@ -215,11 +181,6 @@ void RtpAudioStream::setAudioRouting(AudioRouting audio_routing)
|
|||
audio_routing_ = audio_routing;
|
||||
}
|
||||
|
||||
/* Fix for bug 4119/5902: don't insert too many silence frames.
|
||||
* XXX - is there a better thing to do here?
|
||||
*/
|
||||
static const qint64 max_silence_samples_ = MAX_SILENCE_FRAMES;
|
||||
|
||||
void RtpAudioStream::decode(QAudioDeviceInfo out_device)
|
||||
{
|
||||
if (rtp_packets_.size() < 1) return;
|
||||
|
@ -227,10 +188,15 @@ void RtpAudioStream::decode(QAudioDeviceInfo out_device)
|
|||
if (audio_resampler_) {
|
||||
speex_resampler_reset_mem(audio_resampler_);
|
||||
}
|
||||
audio_file_->setFrameWriteStage();
|
||||
decodeAudio(out_device);
|
||||
|
||||
// Skip silence at begin of the stream
|
||||
audio_file_->setFrameReadStage(prepend_samples_);
|
||||
|
||||
speex_resampler_reset_mem(visual_resampler_);
|
||||
decodeVisual();
|
||||
audio_file_->setDataReadStage();
|
||||
}
|
||||
|
||||
// Side effect: it creates and initiates resampler if needed
|
||||
|
@ -296,8 +262,6 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
|
||||
size_t decoded_bytes_prev = 0;
|
||||
|
||||
rtp_frame_info frame_info;
|
||||
|
||||
for (int cur_packet = 0; cur_packet < rtp_packets_.size(); cur_packet++) {
|
||||
SAMPLE *decode_buff = NULL;
|
||||
// TODO: Update a progress bar here.
|
||||
|
@ -354,6 +318,7 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
audio_out_rate_ = calculateAudioOutRate(out_device, sample_rate, audio_requested_out_rate_);
|
||||
|
||||
// Calculate count of prepend samples for the stream
|
||||
// The earliest stream starts at 0.
|
||||
// Note: Order of operations and separation to two formulas is
|
||||
// important.
|
||||
// When joined, calculated incorrectly - probably caused by
|
||||
|
@ -361,10 +326,9 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
prepend_samples_ = (start_rel_time_ - global_start_rel_time_) * sample_rate;
|
||||
prepend_samples_ = prepend_samples_ * audio_out_rate_ / sample_rate;
|
||||
|
||||
sample_file_->seek(0);
|
||||
// Prepend silence to match our sibling streams.
|
||||
if (prepend_samples_ > 0) {
|
||||
writeSilence(prepend_samples_);
|
||||
if ((prepend_samples_ > 0) && (audio_out_rate_ != 0)) {
|
||||
audio_file_->frameWriteSilence(rtp_packet->frame_num, prepend_samples_);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -396,19 +360,10 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
|
||||
silence_samples = (qint64)((arrive_time - arrive_time_prev)*sample_rate - decoded_bytes_prev / SAMPLE_BYTES);
|
||||
silence_samples = silence_samples * audio_out_rate_ / sample_rate;
|
||||
/* Fix for bug 4119/5902: don't insert too many silence frames.
|
||||
* XXX - is there a better thing to do here?
|
||||
*/
|
||||
silence_samples = qMin(silence_samples, max_silence_samples_);
|
||||
silence_timestamps_.append(stop_rel_time_);
|
||||
// Timestamp shift can make silence calculation negative
|
||||
if (silence_samples > 0) {
|
||||
writeSilence(silence_samples);
|
||||
|
||||
// Record frame info to separate file
|
||||
frame_info.len = silence_samples * SAMPLE_BYTES;
|
||||
frame_info.frame_num = rtp_packet->frame_num;
|
||||
sample_file_frame_->write((char *)&frame_info, sizeof(frame_info));
|
||||
if ((silence_samples > 0) && (audio_out_rate_ != 0)) {
|
||||
audio_file_->frameWriteSilence(rtp_packet->frame_num, silence_samples);
|
||||
}
|
||||
|
||||
decoded_bytes_prev = 0;
|
||||
|
@ -435,17 +390,10 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
}
|
||||
|
||||
if (silence_samples > 0) {
|
||||
/* Fix for bug 4119/5902: don't insert too many silence frames.
|
||||
* XXX - is there a better thing to do here?
|
||||
*/
|
||||
silence_samples = qMin(silence_samples, max_silence_samples_);
|
||||
silence_timestamps_.append(stop_rel_time_);
|
||||
writeSilence(silence_samples);
|
||||
|
||||
// Record frame info to separate file
|
||||
frame_info.len = silence_samples * SAMPLE_BYTES;
|
||||
frame_info.frame_num = rtp_packet->frame_num;
|
||||
sample_file_frame_->write((char *)&frame_info, sizeof(frame_info));
|
||||
if ((silence_samples > 0) && (audio_out_rate_ != 0)) {
|
||||
audio_file_->frameWriteSilence(rtp_packet->frame_num, silence_samples);
|
||||
}
|
||||
}
|
||||
|
||||
// XXX rtp_player.c:696 adds audio here.
|
||||
|
@ -476,13 +424,8 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
// We should write only newer data to avoid duplicates in replay
|
||||
if (last_sequence_w < last_sequence) {
|
||||
// Write the decoded, possibly-resampled audio to our temp file.
|
||||
sample_file_->write(write_buff, write_bytes);
|
||||
audio_file_->frameWriteSamples(rtp_packet->frame_num, write_buff, write_bytes);
|
||||
last_sequence_w = last_sequence;
|
||||
|
||||
// Record frame info to separate file
|
||||
frame_info.len = write_bytes;
|
||||
frame_info.frame_num = rtp_packet->frame_num;
|
||||
sample_file_frame_->write((char *)&frame_info, sizeof(frame_info));
|
||||
}
|
||||
|
||||
g_free(decode_buff);
|
||||
|
@ -495,48 +438,44 @@ void RtpAudioStream::decodeAudio(QAudioDeviceInfo out_device)
|
|||
#define VISUAL_BUFF_BYTES (SAMPLE_BYTES * VISUAL_BUFF_LEN)
|
||||
void RtpAudioStream::decodeVisual()
|
||||
{
|
||||
guint64 read_bytes = 0;
|
||||
spx_uint32_t read_len = 0;
|
||||
gint32 read_buff_bytes = VISUAL_BUFF_BYTES;
|
||||
SAMPLE *read_buff = (SAMPLE *) g_malloc(read_buff_bytes);
|
||||
gint32 resample_buff_bytes = VISUAL_BUFF_BYTES;
|
||||
SAMPLE *resample_buff = (SAMPLE *) g_malloc(resample_buff_bytes);
|
||||
unsigned int sample_no = 0;
|
||||
rtp_frame_info frame_info;
|
||||
spx_uint32_t out_len;
|
||||
guint32 frame_num;
|
||||
rtp_frame_type type;
|
||||
|
||||
speex_resampler_set_rate(visual_resampler_, audio_out_rate_, visual_sample_rate_);
|
||||
|
||||
// Skip silence at begin of the stream
|
||||
sample_file_->seek(prepend_samples_ * SAMPLE_BYTES);
|
||||
sample_file_frame_->seek(0);
|
||||
|
||||
// Loop over every frame record
|
||||
while(sample_file_frame_->read((char *)&frame_info, sizeof(frame_info))) {
|
||||
// Resize buffer when needed
|
||||
if (frame_info.len > read_buff_bytes) {
|
||||
while ((frame_info.len > read_buff_bytes)) {
|
||||
read_buff_bytes *= 2;
|
||||
}
|
||||
read_buff = (SAMPLE *) g_realloc(read_buff, read_buff_bytes);
|
||||
}
|
||||
|
||||
read_bytes = sample_file_->read((char *)read_buff, frame_info.len);
|
||||
read_len = (spx_uint32_t)(read_bytes / SAMPLE_BYTES);
|
||||
// readFrameSamples() maintains size of buffer for us
|
||||
while (audio_file_->readFrameSamples(&read_buff_bytes, &read_buff, &read_len, &frame_num, &type)) {
|
||||
out_len = (spx_uint32_t)((read_len * visual_sample_rate_ ) / audio_out_rate_);
|
||||
|
||||
resample_buff = resizeBufferIfNeeded(resample_buff, &resample_buff_bytes, out_len * SAMPLE_BYTES);
|
||||
if (type == RTP_FRAME_AUDIO) {
|
||||
// We resample only audio samples
|
||||
resample_buff = resizeBufferIfNeeded(resample_buff, &resample_buff_bytes, out_len * SAMPLE_BYTES);
|
||||
|
||||
// Resample
|
||||
speex_resampler_process_int(visual_resampler_, 0, read_buff, &read_len, resample_buff, &out_len);
|
||||
// Resample
|
||||
speex_resampler_process_int(visual_resampler_, 0, read_buff, &read_len, resample_buff, &out_len);
|
||||
|
||||
// Create timestamp and visual sample
|
||||
for (unsigned i = 0; i < out_len; i++) {
|
||||
// Create timestamp and visual sample
|
||||
for (unsigned i = 0; i < out_len; i++) {
|
||||
double time = start_rel_time_ + (double) sample_no / visual_sample_rate_;
|
||||
packet_timestamps_[time] = frame_num;
|
||||
if (qAbs(resample_buff[i]) > max_sample_val_) max_sample_val_ = qAbs(resample_buff[i]);
|
||||
visual_samples_.append(resample_buff[i]);
|
||||
sample_no++;
|
||||
}
|
||||
} else {
|
||||
// Insert end of line mark
|
||||
double time = start_rel_time_ + (double) sample_no / visual_sample_rate_;
|
||||
packet_timestamps_[time] = frame_info.frame_num;
|
||||
if (qAbs(resample_buff[i]) > max_sample_val_) max_sample_val_ = qAbs(resample_buff[i]);
|
||||
visual_samples_.append(resample_buff[i]);
|
||||
sample_no++;
|
||||
packet_timestamps_[time] = frame_num;
|
||||
visual_samples_.append(SAMPLE_NaN);
|
||||
sample_no += out_len;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -572,7 +511,12 @@ const QVector<double> RtpAudioStream::visualSamples(int y_offset)
|
|||
QVector<double> adj_samples;
|
||||
double scaled_offset = y_offset * stack_offset_;
|
||||
for (int i = 0; i < visual_samples_.size(); i++) {
|
||||
adj_samples.append(((double)visual_samples_[i] * G_MAXINT16 / max_sample_val_used_) + scaled_offset);
|
||||
if (SAMPLE_NaN != visual_samples_[i]) {
|
||||
adj_samples.append(((double)visual_samples_[i] * G_MAXINT16 / max_sample_val_used_) + scaled_offset);
|
||||
} else {
|
||||
// Convert to break in graph line
|
||||
adj_samples.append(qQNaN());
|
||||
}
|
||||
}
|
||||
return adj_samples;
|
||||
}
|
||||
|
@ -763,14 +707,15 @@ bool RtpAudioStream::prepareForPlay(QAudioDeviceInfo out_device)
|
|||
start_pos = (qint64)(start_play_time_ * SAMPLE_BYTES * audio_out_rate_);
|
||||
// Round to SAMPLE_BYTES boundary
|
||||
start_pos = (start_pos / SAMPLE_BYTES) * SAMPLE_BYTES;
|
||||
size = sample_file_->size();
|
||||
size = audio_file_->sampleFileSize();
|
||||
if (stereo_required_) {
|
||||
// There is 2x more samples for stereo
|
||||
start_pos *= 2;
|
||||
size *= 2;
|
||||
}
|
||||
if (start_pos < size) {
|
||||
temp_file_ = new AudioRoutingFilter(sample_file_, stereo_required_, audio_routing_);
|
||||
audio_file_->setDataReadStage();
|
||||
temp_file_ = new AudioRoutingFilter(audio_file_, stereo_required_, audio_routing_);
|
||||
temp_file_->seek(start_pos);
|
||||
if (audio_output_) delete audio_output_;
|
||||
audio_output_ = new QAudioOutput(out_device, format, this);
|
||||
|
@ -827,18 +772,6 @@ void RtpAudioStream::stopPlaying()
|
|||
}
|
||||
}
|
||||
|
||||
void RtpAudioStream::writeSilence(qint64 samples)
|
||||
{
|
||||
if (samples < 1 || audio_out_rate_ == 0) return;
|
||||
|
||||
qint64 silence_bytes = samples * SAMPLE_BYTES;
|
||||
char *silence_buff = (char *) g_malloc0(silence_bytes);
|
||||
|
||||
RTP_STREAM_DEBUG("Writing " G_GUINT64_FORMAT " silence samples", samples);
|
||||
sample_file_->write(silence_buff, silence_bytes);
|
||||
g_free(silence_buff);
|
||||
}
|
||||
|
||||
void RtpAudioStream::outputStateChanged(QAudio::State new_state)
|
||||
{
|
||||
if (!audio_output_) return;
|
||||
|
@ -890,14 +823,14 @@ SAMPLE *RtpAudioStream::resizeBufferIfNeeded(SAMPLE *buff, gint32 *buff_bytes, q
|
|||
return buff;
|
||||
}
|
||||
|
||||
void RtpAudioStream::sampleFileSeek(qint64 samples)
|
||||
void RtpAudioStream::seekSample(qint64 samples)
|
||||
{
|
||||
sample_file_->seek(sizeof(SAMPLE) * samples);
|
||||
audio_file_->seekSample(samples);
|
||||
}
|
||||
|
||||
qint64 RtpAudioStream::sampleFileRead(SAMPLE *sample)
|
||||
qint64 RtpAudioStream::readSample(SAMPLE *sample)
|
||||
{
|
||||
return sample_file_->read((char *)sample, sizeof(SAMPLE));
|
||||
return audio_file_->readSample(sample);
|
||||
}
|
||||
|
||||
bool RtpAudioStream::savePayload(QIODevice *file)
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <epan/address.h>
|
||||
#include <ui/rtp_stream.h>
|
||||
#include <ui/qt/utils/rtp_audio_routing.h>
|
||||
#include <ui/qt/utils/rtp_audio_file.h>
|
||||
#include <ui/rtp_media.h>
|
||||
|
||||
#include <QAudio>
|
||||
|
@ -34,14 +35,6 @@ class QAudioFormat;
|
|||
class QAudioOutput;
|
||||
class QIODevice;
|
||||
|
||||
struct _rtp_info;
|
||||
struct _rtp_sample;
|
||||
|
||||
// Structure used for storing frame num during visual waveform decoding
|
||||
typedef struct {
|
||||
qint64 len;
|
||||
guint32 frame_num;
|
||||
} rtp_frame_info;
|
||||
|
||||
class RtpAudioStream : public QObject
|
||||
{
|
||||
|
@ -149,10 +142,10 @@ public:
|
|||
void setStereoRequired(bool stereo_required) { stereo_required_ = stereo_required; }
|
||||
qint16 getMaxSampleValue() { return max_sample_val_; }
|
||||
void setMaxSampleValue(gint16 max_sample_val) { max_sample_val_used_ = max_sample_val; }
|
||||
void sampleFileSeek(qint64 samples);
|
||||
qint64 sampleFileRead(SAMPLE *sample);
|
||||
void seekSample(qint64 samples);
|
||||
qint64 readSample(SAMPLE *sample);
|
||||
qint64 getLeadSilenceSamples() { return prepend_samples_; }
|
||||
qint64 getTotalSamples() { return (sample_file_->size()/(qint64)sizeof(SAMPLE)); }
|
||||
qint64 getTotalSamples() { return (audio_file_->getTotalSamples()); }
|
||||
bool savePayload(QIODevice *file);
|
||||
guint getHash() { return rtpstream_id_to_hash(&(id_)); }
|
||||
rtpstream_id_t *getID() { return &(id_); }
|
||||
|
@ -172,8 +165,7 @@ private:
|
|||
bool first_packet_;
|
||||
|
||||
QVector<struct _rtp_packet *>rtp_packets_;
|
||||
QIODevice *sample_file_; // Stores waveform samples
|
||||
QIODevice *sample_file_frame_; // Stores rtp_packet_info per packet
|
||||
RtpAudioFile *audio_file_; // Stores waveform samples in sparse file
|
||||
QIODevice *temp_file_;
|
||||
struct _GHashTable *decoders_hash_;
|
||||
double global_start_rel_time_;
|
||||
|
@ -204,7 +196,6 @@ private:
|
|||
TimingMode timing_mode_;
|
||||
double start_play_time_;
|
||||
|
||||
void writeSilence(qint64 samples);
|
||||
const QString formatDescription(const QAudioFormat & format);
|
||||
QString currentOutputDevice();
|
||||
|
||||
|
|
|
@ -2121,6 +2121,22 @@ qint64 RtpPlayerDialog::saveAudioHeaderWAV(QFile *save_file, int channels, unsig
|
|||
return save_file->pos();
|
||||
}
|
||||
|
||||
// Write "samples" silence samples for each of "stream_count" streams to the
// export file. Returns false as soon as any write fails.
// Silence is all-zero bytes, which is byte-order independent, so instead of
// issuing one two-byte write() per sample we emit the zeros in larger chunks
// — the produced file is byte-identical, just far fewer I/O calls.
bool RtpPlayerDialog::writeAudioSilenceSamples(QFile *out_file, qint64 samples, int stream_count)
{
    char silence[1024] = { 0 };

    for (int s = 0; s < stream_count; s++) {
        qint64 remaining_bytes = samples * (qint64)sizeof(SAMPLE);

        while (remaining_bytes > 0) {
            qint64 chunk = qMin(remaining_bytes, (qint64)sizeof(silence));
            if (chunk != out_file->write(silence, chunk)) {
                return false;
            }
            remaining_bytes -= chunk;
        }
    }

    return true;
}
|
||||
|
||||
bool RtpPlayerDialog::writeAudioStreamsSamples(QFile *out_file, QVector<RtpAudioStream *> streams, bool swap_bytes)
|
||||
{
|
||||
SAMPLE sample;
|
||||
|
@ -2133,7 +2149,7 @@ bool RtpPlayerDialog::writeAudioStreamsSamples(QFile *out_file, QVector<RtpAudio
|
|||
read = false;
|
||||
// Loop over all streams, read one sample from each, write to output
|
||||
foreach(RtpAudioStream *audio_stream, streams) {
|
||||
if (sizeof(sample) == audio_stream->sampleFileRead(&sample)) {
|
||||
if (sizeof(sample) == audio_stream->readSample(&sample)) {
|
||||
if (swap_bytes) {
|
||||
// same as phton16(), but more clear in compare
|
||||
// to else branch
|
||||
|
@ -2247,6 +2263,7 @@ void RtpPlayerDialog::saveAudio(bool sync_to_stream)
|
|||
{
|
||||
qint64 minSilenceSamples;
|
||||
qint64 startSample;
|
||||
qint64 lead_silence_samples;
|
||||
qint64 maxSample;
|
||||
QString path;
|
||||
QVector<RtpAudioStream *>streams;
|
||||
|
@ -2283,16 +2300,18 @@ void RtpPlayerDialog::saveAudio(bool sync_to_stream)
|
|||
}
|
||||
|
||||
if (sync_to_stream) {
|
||||
// Start of first stream
|
||||
// Skip start of first stream, no lead silence
|
||||
startSample = minSilenceSamples;
|
||||
lead_silence_samples = 0;
|
||||
} else {
|
||||
// Start of file
|
||||
// Full first stream, lead silence
|
||||
startSample = 0;
|
||||
lead_silence_samples = first_stream_rel_start_time_ * save_audio_rate;
|
||||
}
|
||||
|
||||
// Seek to correct start
|
||||
foreach(RtpAudioStream *audio_stream, streams) {
|
||||
audio_stream->sampleFileSeek(startSample);
|
||||
audio_stream->seekSample(startSample);
|
||||
}
|
||||
|
||||
QFile file(path);
|
||||
|
@ -2307,15 +2326,25 @@ void RtpPlayerDialog::saveAudio(bool sync_to_stream)
|
|||
QMessageBox::warning(this, tr("Error"), tr("Can't write header of AU file"));
|
||||
return;
|
||||
}
|
||||
if (lead_silence_samples > 0) {
|
||||
if (!writeAudioSilenceSamples(&file, lead_silence_samples, streams.count())) {
|
||||
QMessageBox::warning(this, tr("Warning"), tr("Save failed!"));
|
||||
}
|
||||
}
|
||||
if (!writeAudioStreamsSamples(&file, streams, true)) {
|
||||
QMessageBox::warning(this, tr("Warning"), tr("Save failed!"));
|
||||
}
|
||||
break;
|
||||
case save_audio_wav:
|
||||
if (-1 == saveAudioHeaderWAV(&file, streams.count(), save_audio_rate, (maxSample - startSample))) {
|
||||
if (-1 == saveAudioHeaderWAV(&file, streams.count(), save_audio_rate, (maxSample - startSample) + lead_silence_samples)) {
|
||||
QMessageBox::warning(this, tr("Error"), tr("Can't write header of WAV file"));
|
||||
return;
|
||||
}
|
||||
if (lead_silence_samples > 0) {
|
||||
if (!writeAudioSilenceSamples(&file, lead_silence_samples, streams.count())) {
|
||||
QMessageBox::warning(this, tr("Warning"), tr("Save failed!"));
|
||||
}
|
||||
}
|
||||
if (!writeAudioStreamsSamples(&file, streams, false)) {
|
||||
QMessageBox::warning(this, tr("Warning"), tr("Save failed!"));
|
||||
}
|
||||
|
|
|
@ -241,6 +241,7 @@ private:
|
|||
|
||||
qint64 saveAudioHeaderAU(QFile *save_file, int channels, unsigned audio_rate);
|
||||
qint64 saveAudioHeaderWAV(QFile *save_file, int channels, unsigned audio_rate, qint64 samples);
|
||||
bool writeAudioSilenceSamples(QFile *out_file, qint64 samples, int stream_count);
|
||||
bool writeAudioStreamsSamples(QFile *out_file, QVector<RtpAudioStream *> streams, bool swap_bytes);
|
||||
save_audio_t selectFileAudioFormatAndName(QString *file_path);
|
||||
save_payload_t selectFilePayloadFormatAndName(QString *file_path);
|
||||
|
|
|
@ -0,0 +1,360 @@
|
|||
/* rtp_audio_file.cpp
|
||||
*
|
||||
* Wireshark - Network traffic analyzer
|
||||
* By Gerald Combs <gerald@wireshark.org>
|
||||
* Copyright 1998 Gerald Combs
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
/*
 * RTP samples are stored in a "sparse" file. The file knows where the
 * silence gaps are, and they are handled in a special way (not stored).
 *
 * The file uses a Frame as its unit of information. One Frame matches the
 * audio of one decoded packet, or the audio silence between packets. A Frame
 * holds the frame type (audio/silence), its length, its realtime position
 * and its sample position (where the decoded audio is really stored, with
 * gaps omitted).
 *
 * There are three stages of use of the object:
 * - writing data frame by frame while decoding the stream
 * - reading data frame by frame while creating the visual waveform
 * - reading data by bytes/samples during audio playback or audio save
 *
 * There is no stage indication in the object, but different calls are used
 * by the code. For the last stage the object behaves like a QIODevice,
 * therefore any read of it looks like reading a sequence of bytes.
 *
 * If the audio starts later than the start of the file, the first Frame
 * contains a silence record. It is left out in some cases.
 */
|
||||
|
||||
#include "rtp_audio_file.h"
|
||||
#include <ws_attributes.h>
|
||||
|
||||
// Create the backing stores for a sparse audio file: an on-disk temp file
// for decoded samples and an in-memory buffer for the frame index.
// Throws -1 when either store cannot be opened.
RtpAudioFile::RtpAudioFile():
    real_pos_(0)
    , real_size_(0)
    , sample_pos_(0)
    , sample_size_(0)
{
    // The QIODevice face of this object is read-only; writing goes
    // through the frame* API directly to the backing stores
    QIODevice::open(QIODevice::ReadOnly);

    QString temp_pattern = QString("%1/wireshark_rtp_stream").arg(QDir::tempPath());

    sample_file_ = new QTemporaryFile(temp_pattern, this);
    if (!sample_file_->open(QIODevice::ReadWrite)) {
        // We are out of file resources
        delete sample_file_;
        qWarning() << "Can't create temp file in " << temp_pattern;
        throw -1;
    }

    sample_file_frame_ = new QBuffer(this);
    if (!sample_file_frame_->open(QIODevice::ReadWrite)) {
        // We are out of file resources
        delete sample_file_;
        delete sample_file_frame_;
        qWarning() << "Can't create temp file in memory";
        throw -1;
    }
}
|
||||
|
||||
// Release both backing stores; deleting a null pointer is a safe no-op,
// so no explicit null checks are needed.
RtpAudioFile::~RtpAudioFile()
{
    delete sample_file_;
    delete sample_file_frame_;
}
|
||||
|
||||
/*
|
||||
* Functions for writing Frames
|
||||
*/
|
||||
void RtpAudioFile::setFrameWriteStage()
|
||||
{
|
||||
sample_file_->seek(0);
|
||||
sample_file_frame_->seek(0);
|
||||
real_pos_ = 0;
|
||||
real_size_ = 0;
|
||||
sample_pos_ = 0;
|
||||
sample_size_ = 0;
|
||||
}
|
||||
|
||||
// Advance the "real" (virtual, silence-inclusive) position by written_bytes
// and grow real_size_ whenever the write extends past the current end.
void RtpAudioFile::frameUpdateRealCounters(qint64 written_bytes)
{
    const qint64 end_after_write = real_pos_ + written_bytes;

    if (real_pos_ < real_size_) {
        // Overwriting existing data; extend only by the part past the end
        if (end_after_write > real_size_) {
            real_size_ = end_after_write;
        }
    } else {
        // Appending at (or past) the end
        real_size_ += written_bytes;
    }
    real_pos_ = end_after_write;
}
|
||||
|
||||
// Advance the sample-file position (on-disk, silence excluded) by
// written_bytes and grow sample_size_ when the write passed the old end.
void RtpAudioFile::frameUpdateSampleCounters(qint64 written_bytes)
{
    if (sample_pos_ >= sample_size_) {
        // Plain append: the file grows by exactly what was written
        sample_size_ += written_bytes;
    } else {
        // Rewriting inside the file: grow only by the overhang, if any
        qint64 overhang = sample_pos_ + written_bytes - sample_size_;
        if (overhang > 0) {
            sample_size_ += overhang;
        }
    }
    sample_pos_ += written_bytes;
}
|
||||
|
||||
// Append one frame descriptor to the in-memory frame index.
// Returns the number of bytes written, or -1 on failure.
qint64 RtpAudioFile::frameWriteFrame(guint32 frame_num, qint64 real_pos, qint64 sample_pos, qint64 len, rtp_frame_type type)
{
    rtp_frame_info info;

    info.type = type;
    info.frame_num = frame_num;
    info.len = len;
    info.real_pos = real_pos;
    info.sample_pos = sample_pos;

    return sample_file_frame_->write((char *)&info, sizeof(info));
}
|
||||
|
||||
// Record a stretch of silence. Silence produces an index record only;
// no bytes are written to the sample file ("sparse" storage).
void RtpAudioFile::frameWriteSilence(guint32 frame_num, qint64 samples)
{
    if (samples < 1) return;

    const qint64 silence_len = samples * SAMPLE_BYTES;

    frameWriteFrame(frame_num, real_pos_, sample_pos_, silence_len, RTP_FRAME_SILENCE);
    // Silence advances the virtual (real) counters only, not the sample file
    frameUpdateRealCounters(silence_len);
}
|
||||
|
||||
// Store decoded audio in the sample file and record a matching index frame.
// Returns the number of bytes written, or -1 when the write failed
// (in which case no frame is recorded and no counter moves).
qint64 RtpAudioFile::frameWriteSamples(guint32 frame_num, const char *data, qint64 max_size)
{
    const gint64 written = sample_file_->write(data, max_size);

    if (written == -1) {
        return written;
    }

    frameWriteFrame(frame_num, real_pos_, sample_pos_, written, RTP_FRAME_AUDIO);
    // Audio advances both the virtual position and the on-disk sample position
    frameUpdateRealCounters(written);
    frameUpdateSampleCounters(written);

    return written;
}
|
||||
|
||||
/*
|
||||
* Functions for reading Frames
|
||||
*/
|
||||
|
||||
/*
 * Functions for reading Frames
 */

// Rewind the frame index for a frame-by-frame read pass (waveform decode).
// When the stream was prepended with silence, the very first frame is that
// opening-silence record; consume it so reading starts at real data.
void RtpAudioFile::setFrameReadStage(qint64 prepend_samples)
{
    sample_file_frame_->seek(0);
    if (prepend_samples > 0) {
        sample_file_frame_->read((char *)&cur_frame_, sizeof(cur_frame_));
    }
}
|
||||
|
||||
// Read the next frame from the index and, for audio frames, its samples
// from the sample file, growing the caller's buffer as needed.
// Outputs the frame's sample count, frame number and type.
// Returns false at end of index or on any read error.
//
// Fixes vs. original:
// - QIODevice::read() returns qint64 and yields -1 on error, which "!read()"
//   treated as success; check for an exact-size index read instead.
// - A failed sample read (-1) was stored into an unsigned variable and
//   reported as a huge sample count; now it returns false.
// - The buffer-doubling loop would never terminate if *read_buff_bytes
//   were 0 or negative; guard against that.
bool RtpAudioFile::readFrameSamples(gint32 *read_buff_bytes, SAMPLE **read_buff, spx_uint32_t *read_len, guint32 *frame_num, rtp_frame_type *type)
{
    rtp_frame_info frame_info;
    qint64 read_bytes = 0;

    if (sample_file_frame_->read((char *)&frame_info, sizeof(frame_info)) != sizeof(frame_info)) {
        // No more frames, or the index read failed
        return false;
    }

    *frame_num = frame_info.frame_num;
    *type = frame_info.type;

    if (frame_info.type == RTP_FRAME_AUDIO) {
        // Resize buffer when needed
        if (frame_info.len > *read_buff_bytes) {
            if (*read_buff_bytes < 1) {
                // Avoid an endless doubling loop on a zero/negative size
                *read_buff_bytes = 1;
            }
            while (frame_info.len > *read_buff_bytes) {
                *read_buff_bytes *= 2;
            }
            *read_buff = (SAMPLE *) g_realloc(*read_buff, *read_buff_bytes);
        }

        sample_file_->seek(frame_info.sample_pos);
        read_bytes = sample_file_->read((char *)*read_buff, frame_info.len);
        if (read_bytes < 0) {
            // Sample read failed; don't report a bogus length to the caller
            return false;
        }
    } else {
        // Silence frames store no bytes; just report their length
        read_bytes = frame_info.len;
    }

    *read_len = (spx_uint32_t)(read_bytes / SAMPLE_BYTES);

    return true;
}
|
||||
|
||||
/*
|
||||
* Functions for reading data during play
|
||||
*/
|
||||
void RtpAudioFile::setDataReadStage()
|
||||
{
|
||||
sample_file_frame_->seek(0);
|
||||
sample_file_frame_->read((char *)&cur_frame_, sizeof(cur_frame_));
|
||||
real_pos_ = cur_frame_.real_pos;
|
||||
}
|
||||
|
||||
// The device was already opened read-only in the constructor; accept only
// a matching re-open request and reject everything else.
bool RtpAudioFile::open(QIODevice::OpenMode mode)
{
    return (mode == QIODevice::ReadOnly);
}
|
||||
|
||||
// Size of the virtual stream in bytes, silence gaps included.
qint64 RtpAudioFile::size() const
{
    return real_size_;
}
|
||||
|
||||
// Current position within the virtual stream, silence gaps included.
qint64 RtpAudioFile::pos() const
{
    return real_pos_;
}
|
||||
|
||||
/*
|
||||
* Seek starts from beginning of Frames and search one where offset belongs
|
||||
* to. It looks inefficient, but seek is used usually just to jump to 0 or
|
||||
* to skip first Frame where silence is stored.
|
||||
*/
|
||||
/*
 * Seek starts from the beginning of the Frames and searches for the one the
 * offset belongs to. It looks inefficient, but seek is usually used just to
 * jump to 0 or to skip the first Frame where silence is stored.
 *
 * On success cur_frame_ holds the frame containing "off" and real_pos_ is
 * set to "off"; for audio frames the sample file is repositioned as well.
 * NOTE(review): an offset exactly equal to real_size_ (end of file) is
 * rejected — confirm callers never need to seek to the very end.
 */
bool RtpAudioFile::seek(qint64 off)
{
    if (real_size_ <= off) {
        // Can't seek above end of file
        return false;
    }

    // Linear search for the frame containing the offset, from the first frame
    sample_file_frame_->seek(0);
    while (1) {
        // Read the next frame record from the index
        if (!sample_file_frame_->read((char *)&cur_frame_, sizeof(cur_frame_))) {
            // Can't read frame, some error occurred
            return false;
        }

        if ((cur_frame_.real_pos + cur_frame_.len) > off) {
            // We found the correct frame
            // Calculate the offset within the frame
            qint64 diff = off - cur_frame_.real_pos;
            qint64 new_real_pos = cur_frame_.real_pos + diff;
            qint64 new_sample_pos = cur_frame_.sample_pos + diff;

            if (cur_frame_.type == RTP_FRAME_AUDIO) {
                // For an audio frame we must also seek the sample file
                if (!sample_file_->seek(new_sample_pos)) {
                    return false;
                }
                // Real seek was successful
                real_pos_ = new_real_pos;
                return true;
            } else {
                // For a silence frame nothing is stored on disk,
                // so we blindly confirm it
                real_pos_ = new_real_pos;
                return true;
            }
        }
    }
    // Unreachable: the loop only exits via return
    return false;
}
|
||||
|
||||
// Same value as size(): the virtual length in bytes, silence included.
qint64 RtpAudioFile::sampleFileSize()
{
    return real_size_;
}
|
||||
|
||||
// Seek expressed in samples: convert to a byte offset and delegate.
void RtpAudioFile::seekSample(qint64 samples)
{
    seek(samples * sizeof(SAMPLE));
}
|
||||
|
||||
// Read up to want_read bytes from the current frame into data.
// The caller must never request more than the frame still holds; such a
// call returns -1. Audio frames are read from the sample file; silence
// frames are synthesized as zeros without touching the disk.
// Returns the number of bytes produced, or -1 on error.
//
// Fix vs. original: a failed sample-file read returns -1, and the original
// unconditionally did "real_pos_ += was_read", moving the virtual position
// backwards by one on error. Advance only by what was actually read.
qint64 RtpAudioFile::readFrameData(char *data, qint64 want_read)
{
    // Calculate the data remaining in the current frame
    qint64 remaining = cur_frame_.len - (real_pos_ - cur_frame_.real_pos);
    qint64 was_read;

    if (remaining < want_read) {
        // Incorrect call, can't read more than is stored in the frame
        return -1;
    }

    if (cur_frame_.type == RTP_FRAME_AUDIO) {
        was_read = sample_file_->read(data, want_read);
        if (was_read > 0) {
            real_pos_ += was_read;
        }
    } else {
        // Silence is not stored; produce zeros directly
        memset(data, 0, want_read);
        real_pos_ += want_read;
        was_read = want_read;
    }

    return was_read;
}
|
||||
|
||||
// Read exactly one sample through the QIODevice interface.
// Returns the byte count read (sizeof(SAMPLE) on success).
qint64 RtpAudioFile::readSample(SAMPLE *sample)
{
    return read((char *)sample, sizeof(SAMPLE));
}
|
||||
|
||||
// Virtual stream length expressed in samples rather than bytes.
qint64 RtpAudioFile::getTotalSamples()
{
    return real_size_ / (qint64)sizeof(SAMPLE);
}
|
||||
|
||||
/*
 * QIODevice read implementation. Satisfies a request of maxSize bytes by
 * stitching together data from consecutive frames: each iteration reads as
 * much as the current frame can still supply, then loads the next frame
 * when the current one is exhausted. Returns the number of bytes read
 * (possibly short at end of file) or -1 on error.
 */
qint64 RtpAudioFile::readData(char *data, qint64 maxSize)
{
    qint64 to_read = maxSize;
    qint64 can_read;
    qint64 was_read = 0;
    qint64 remaining;

    while (1) {
        // Calculate the data remaining in the current frame
        remaining = cur_frame_.len - (real_pos_ - cur_frame_.real_pos);
        if (remaining > to_read) {
            // Even if we want to read more, we can read just till frame end
            can_read = to_read;
        } else {
            can_read = remaining;
        }
        if (can_read==readFrameData(data, can_read)) {
            to_read -= can_read;
            data += can_read;
            was_read += can_read;
            if (real_pos_ >= cur_frame_.real_pos + cur_frame_.len) {
                // We exhausted the frame, read the next one
                if (!sample_file_frame_->read((char *)&cur_frame_, sizeof(cur_frame_))) {
                    // We are at the end of the file; return the short count
                    return was_read;
                }
                // Audio frames need the sample file repositioned to their data;
                // silence frames have nothing stored, so no seek is required
                if ((cur_frame_.type == RTP_FRAME_AUDIO) && (!sample_file_->seek(cur_frame_.sample_pos))) {
                    // We tried to seek to the correct place, but it failed
                    return -1;
                }
            }
            if (to_read == 0) {
                // Request fully satisfied
                return was_read;
            }
        } else {
            // Partial or failed frame read counts as an error
            return -1;
        }
    }
}
|
||||
|
||||
qint64 RtpAudioFile::writeData(const char *data _U_, qint64 maxSize _U_)
{
    // RtpAudioFile is a read-only device; every write attempt fails.
    return -1;
}
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
/* rtp_audio_file.h
|
||||
*
|
||||
* Wireshark - Network traffic analyzer
|
||||
* By Gerald Combs <gerald@wireshark.org>
|
||||
* Copyright 1998 Gerald Combs
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef RTP_AUDIO_FILE_H
|
||||
#define RTP_AUDIO_FILE_H
|
||||
|
||||
#include "config.h"
|
||||
#include <ui/rtp_media.h>
|
||||
|
||||
#ifdef HAVE_SPEEXDSP
|
||||
#include <speex/speex_resampler.h>
|
||||
#else
|
||||
#include "../../speexdsp/speex_resampler.h"
|
||||
#endif /* HAVE_SPEEXDSP */
|
||||
|
||||
#include <QIODevice>
|
||||
#include <QDir>
|
||||
#include <QTemporaryFile>
|
||||
#include <QDebug>
|
||||
#include <QBuffer>
|
||||
|
||||
struct _rtp_info;
|
||||
|
||||
// Kind of a stored frame: real decoded audio (payload kept in the
// sample file) or a silence gap (no payload stored; synthesized as
// zero samples when read back).
typedef enum {
    RTP_FRAME_AUDIO = 0,
    RTP_FRAME_SILENCE
} rtp_frame_type;
|
||||
|
||||
// Structure used for storing frame num during visual waveform decoding
typedef struct {
    qint64 real_pos;        // byte offset of the frame in the logical (silence-expanded) stream
    qint64 sample_pos;      // byte offset of the frame's payload in the on-disk sample file
    qint64 len;             // frame length in bytes
    guint32 frame_num;      // number of the frame/packet this record was decoded from
    rtp_frame_type type;    // RTP_FRAME_AUDIO or RTP_FRAME_SILENCE
} rtp_frame_info;
|
||||
|
||||
|
||||
// QIODevice that stores decoded RTP audio without materializing silence:
// only real audio samples are written to a backing sample file, while a
// parallel frame file keeps per-packet records; silence gaps are
// reconstructed as zero samples on read.
class RtpAudioFile: public QIODevice
{
    Q_OBJECT

public:
    explicit RtpAudioFile();
    ~RtpAudioFile();

    // Functions for writing Frames
    void setFrameWriteStage();
    void frameWriteSilence(guint32 frame_num, qint64 samples);
    qint64 frameWriteSamples(guint32 frame_num, const char *data, qint64 max_size);

    // Functions for reading Frames
    void setFrameReadStage(qint64 prepend_samples);
    bool readFrameSamples(gint32 *read_buff_bytes, SAMPLE **read_buff, spx_uint32_t *read_len, guint32 *frame_num, rtp_frame_type *type);

    // Functions for reading data during play
    void setDataReadStage();
    bool open(QIODevice::OpenMode mode);
    qint64 size() const override;
    qint64 pos() const override;
    bool seek(qint64 off) override;
    qint64 sampleFileSize();
    void seekSample(qint64 samples);
    qint64 readSample(SAMPLE *sample);
    qint64 getTotalSamples();

protected:
    // QIODevice backend; writeData() always returns -1 (read-only device)
    qint64 readData(char *data, qint64 maxSize) override;
    qint64 writeData(const char *data, qint64 maxSize) override;

private:
    QIODevice *sample_file_;        // Stores waveform samples
    QIODevice *sample_file_frame_;  // Stores rtp_packet_info per packet
    qint64 real_pos_;               // current byte position in the logical (silence-expanded) stream
    qint64 real_size_;              // total byte size of the logical stream
    qint64 sample_pos_;             // byte position in the sample file (presumably write-side counter - maintained outside this chunk)
    qint64 sample_size_;            // byte size of the sample file (presumably write-side counter - maintained outside this chunk)
    rtp_frame_info cur_frame_;      // frame record the current read position falls into

    // Functions for writing Frames
    qint64 frameWriteFrame(guint32 frame_num, qint64 real_pos, qint64 sample_pos, qint64 len, rtp_frame_type type);
    void frameUpdateRealCounters(qint64 written_bytes);
    void frameUpdateSampleCounters(qint64 written_bytes);

    // Functions for reading Frames

    // Functions for reading data during play
    qint64 readFrameData(char *data , qint64 want_read);
};
|
||||
|
||||
#endif // RTP_AUDIO_FILE_H
|
|
@ -33,6 +33,7 @@ extern "C" {
|
|||
typedef gint16 SAMPLE;      // one audio sample (signed 16-bit)
#define SAMPLE_MAX G_MAXINT16
#define SAMPLE_MIN G_MININT16
#define SAMPLE_NaN SAMPLE_MIN   // "invalid sample" sentinel; reuses SAMPLE_MIN
#define SAMPLE_BYTES (sizeof(SAMPLE) / sizeof(char))
|
||||
|
||||
/* Defines an RTP packet */
|
||||
|
|
Loading…
Reference in New Issue