RTP Player: Export of audio can start at position of play cursor

Added an additional audio export option, 'From cursor'.
Documentation updated.
Jirka Novak 2021-04-27 22:01:16 +02:00 committed by Wireshark GitLab Utility
parent ebdb6784fa
commit 21b334fd82
4 changed files with 64 additions and 18 deletions

View File

@@ -510,12 +510,13 @@ Wireshark is able to export decoded audio in .au or .wav file format. Prior to v
Export options available:
* for one or more selected non-muted streams
** Stream Synchronized Audio - streams are synchronized to earliest stream in export (there is no silence at beginning of it)
** File Synchronized Audio - streams starts at beginning of file, therefore silence can be at start of file
** From cursor - Streams are saved starting at the play start cursor. Streams that are shorter than the cursor position are removed from the list before saving, so the count of saved streams can be lower than the count of selected streams (see the sketch below).
** Stream Synchronized Audio - The file starts at the beginning of the earliest stream in the export, therefore there is no silence at the beginning of the exported file.
** File Synchronized Audio - Streams start at the beginning of the file, therefore there can be silence at the start of the file.
* for just one selected stream
** Payload - just the payload, with no information about the codec, is stored in the file
Audio is exported as multi-channel file - one channel per RTP stream. One or two channels are equal to mono or stereo, but Wireshark can export e g. 100 channels. For later playing a tool with multi-channel support must be used (e.g. https://www.audacityteam.org/).
Audio is exported as a multi-channel file - one channel per RTP stream. One or two channels correspond to mono or stereo, but Wireshark can export e.g. 100 channels. For playing, a tool with multi-channel support must be used (e.g. https://www.audacityteam.org/).
The payload export function is useful for codecs not supported by Wireshark.
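
The mapping from export mode to the first saved sample, described in the list above, can be condensed into a small self-contained sketch. This is not the Wireshark implementation: the enum mirrors the new save_mode_t, but exportStart(), cursor_time, first_stream_start, min_silence_samples and sample_rate are illustrative stand-ins for the dialog's members, and the "time of day" handling and the removal of too-short streams done by the real code (see the .cpp diff below) are omitted.

#include <cstdint>
#include <cstdio>

typedef enum {
    save_mode_from_cursor,
    save_mode_sync_stream,
    save_mode_sync_file
} save_mode_t;

struct ExportStart {
    int64_t start_sample;          // first sample taken from every stream
    int64_t lead_silence_samples;  // silence written before the streams
};

// Sketch of the mode -> start sample mapping; all parameters are placeholders.
static ExportStart exportStart(save_mode_t mode,
                               double cursor_time,          // play start marker, seconds
                               double first_stream_start,   // offset of earliest stream, seconds
                               int64_t min_silence_samples, // samples before the earliest stream
                               int sample_rate)
{
    ExportStart e = { 0, 0 };
    switch (mode) {
    case save_mode_from_cursor:
        // Export begins at the play cursor; no lead silence.
        e.start_sample = (int64_t)((cursor_time - first_stream_start) * sample_rate);
        break;
    case save_mode_sync_stream:
        // Skip everything before the earliest stream; no lead silence.
        e.start_sample = min_silence_samples;
        break;
    case save_mode_sync_file:
    default:
        // Keep the full first stream and prepend silence up to its offset.
        e.lead_silence_samples = (int64_t)(first_stream_start * sample_rate);
        break;
    }
    return e;
}

int main()
{
    // Example: cursor at 2.5 s, earliest stream starts at 1.0 s, 8 kHz audio.
    ExportStart e = exportStart(save_mode_from_cursor, 2.5, 1.0, 8000, 8000);
    printf("start_sample=%lld lead_silence_samples=%lld\n",
           (long long)e.start_sample, (long long)e.lead_silence_samples);
    return 0;
}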

View File

@@ -2260,7 +2260,7 @@ QVector<RtpAudioStream *>RtpPlayerDialog::getSelectedAudibleNonmutedAudioStreams
return streams;
}
void RtpPlayerDialog::saveAudio(bool sync_to_stream)
void RtpPlayerDialog::saveAudio(save_mode_t save_mode)
{
qint64 minSilenceSamples;
qint64 startSample;
@@ -2300,19 +2300,43 @@ void RtpPlayerDialog::saveAudio(bool sync_to_stream)
}
}
if (sync_to_stream) {
// Skip start of first stream, no lead silence
startSample = minSilenceSamples;
lead_silence_samples = 0;
} else {
// Full first stream, lead silence
startSample = 0;
lead_silence_samples = first_stream_rel_start_time_ * save_audio_rate;
switch (save_mode) {
case save_mode_from_cursor:
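// Start export at the position of the play cursor, no lead silence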
if (ui->todCheckBox->isChecked()) {
startSample = start_marker_time_ * save_audio_rate;
} else {
startSample = (start_marker_time_ - first_stream_rel_start_time_) * save_audio_rate;
}
lead_silence_samples = 0;
break;
case save_mode_sync_stream:
// Skip start of first stream, no lead silence
startSample = minSilenceSamples;
lead_silence_samples = 0;
break;
case save_mode_sync_file:
default:
// Full first stream, lead silence
startSample = 0;
lead_silence_samples = first_stream_rel_start_time_ * save_audio_rate;
break;
}
// Seek to correct start
foreach(RtpAudioStream *audio_stream, streams) {
audio_stream->seekSample(startSample);
QVector<RtpAudioStream *>temp = QVector<RtpAudioStream *>(streams);
// Remove streams shorter than startSample and
// seek to correct start for longer ones
foreach(RtpAudioStream *audio_stream, temp) {
if (startSample > audio_stream->getTotalSamples()) {
streams.removeAll(audio_stream);
} else {
audio_stream->seekSample(startSample);
}
}
if (streams.count() < 1) {
QMessageBox::warning(this, tr("Warning"), tr("No streams are suitable for save"));
return;
}
QFile file(path);
@@ -2390,14 +2414,19 @@ void RtpPlayerDialog::savePayload()
file.close();
}
void RtpPlayerDialog::on_actionSaveAudioFromCursor_triggered()
{
saveAudio(save_mode_from_cursor);
}
void RtpPlayerDialog::on_actionSaveAudioSyncStream_triggered()
{
saveAudio(true);
saveAudio(save_mode_sync_stream);
}
void RtpPlayerDialog::on_actionSaveAudioSyncFile_triggered()
{
saveAudio(false);
saveAudio(save_mode_sync_file);
}
void RtpPlayerDialog::on_actionSavePayload_triggered()

View File

@@ -48,6 +48,12 @@ typedef enum {
save_payload_data
} save_payload_t;
typedef enum {
save_mode_from_cursor,
save_mode_sync_stream,
save_mode_sync_file
} save_mode_t;
class RtpPlayerDialog : public WiresharkDialog
{
Q_OBJECT
@@ -166,6 +172,7 @@ private slots:
void outputNotify();
void on_actionPlay_triggered();
void on_actionStop_triggered();
void on_actionSaveAudioFromCursor_triggered();
void on_actionSaveAudioSyncStream_triggered();
void on_actionSaveAudioSyncFile_triggered();
void on_actionSavePayload_triggered();
@@ -246,7 +253,7 @@ private:
save_audio_t selectFileAudioFormatAndName(QString *file_path);
save_payload_t selectFilePayloadFormatAndName(QString *file_path);
QVector<RtpAudioStream *>getSelectedAudibleNonmutedAudioStreams();
void saveAudio(bool sync_to_stream);
void saveAudio(save_mode_t save_mode);
void savePayload();
void lockUI();
void unlockUI();

View File

@@ -343,11 +343,20 @@
<property name="toolTipsVisible">
<bool>true</bool>
</property>
<addaction name="actionSaveAudioFromCursor"/>
<addaction name="actionSaveAudioSyncStream"/>
<addaction name="actionSaveAudioSyncFile"/>
<addaction name="separator"/>
<addaction name="actionSavePayload"/>
</widget>
<action name="actionSaveAudioFromCursor">
<property name="text">
<string>From &amp;cursor</string>
</property>
<property name="toolTip">
<string>Save audio data starting at the cursor</string>
</property>
</action>
<action name="actionSaveAudioSyncStream">
<property name="text">
<string>&amp;Stream Synchronized Audio</string>