Impose limits on the number of records we read.

Start the limit at 2^32-1, as we use a guint32 to store the frame
number.

With Qt prior to Qt 6, lower the limit to 53 million packets; this
should fix issue #16908.
This commit is contained in:
Guy Harris 2020-10-26 16:00:40 -07:00
parent a88d72dc8e
commit 639891651f
3 changed files with 75 additions and 1 deletions

40
file.c
View File

@@ -116,6 +116,23 @@ static void ref_time_packets(capture_file *cf);
/* Show the progress bar after this many seconds. */
#define PROGBAR_SHOW_DELAY 0.5
/*
 * Maximum number of records we support in a file.
 *
 * It can never exceed G_MAXUINT32, because a guint32 holds the
 * frame number.
 *
 * A lower cap may be imposed at startup; issue #16908 explains
 * why that's sometimes necessary. Thanks, Qt!
 */
static guint32 max_records = G_MAXUINT32;

/*
 * Lower (or restore) the per-file record cap; called early in startup,
 * before any capture file is read.
 */
void
cf_set_max_records(guint new_limit)
{
    max_records = new_limit;
}
/*
* We could probably use g_signal_...() instead of the callbacks below but that
* would require linking our CLI programs to libgobject and creating an object
@@ -482,6 +499,7 @@ cf_read(capture_file *cf, gboolean reloading)
{
int err = 0;
gchar *err_info = NULL;
volatile gboolean too_many_records = FALSE;
gchar *name_ptr;
progdlg_t *volatile progbar = NULL;
GTimer *prog_timer = g_timer_new();
@@ -569,7 +587,7 @@ cf_read(capture_file *cf, gboolean reloading)
ws_buffer_init(&buf, 1514);
TRY {
int count = 0;
guint32 count = 0;
gint64 file_pos;
gint64 data_offset;
@@ -580,6 +598,14 @@ cf_read(capture_file *cf, gboolean reloading)
while ((wtap_read(cf->provider.wth, &rec, &buf, &err, &err_info,
&data_offset))) {
if (size >= 0) {
if (cf->count == max_records) {
/*
* Quit if we've already read the maximum number of
* records allowed.
*/
too_many_records = TRUE;
break;
}
count++;
file_pos = wtap_read_so_far(cf->provider.wth);
@@ -732,6 +758,18 @@ cf_read(capture_file *cf, gboolean reloading)
if any. */
cfile_read_failure_alert_box(NULL, err, err_info);
return CF_READ_ERROR;
} else if (too_many_records) {
simple_message_box(ESD_TYPE_WARN, NULL,
"The remaining packets in the file were discarded.\n"
"\n"
"As a lot of packets from the original file will be missing,\n"
"remember to be careful when saving the current content to a file.\n"
"\n"
"The command-line utility editcap can be used to split "
"the file into multiple smaller files",
"The file contains more records than the maximum "
"supported number of records, %u.", max_records);
return CF_READ_ERROR;
} else
return CF_READ_OK;
}

8
file.h
View File

@@ -80,6 +80,14 @@ typedef struct {
field_info *finfo;
} match_data;
/**
 * Set maximum number of records per capture file.
 *
 * The default is G_MAXUINT32, the largest value a guint32 frame number
 * can hold; callers may lower it (see issue #16908, where a Qt 5
 * container limitation forces a smaller cap on the packet list).
 *
 * @param max_records maximum number of records to support.
 */
extern void
cf_set_max_records(guint max_records);
/**
* Add a capture file event callback.
*

View File

@@ -407,6 +407,34 @@ int main(int argc, char *qt_argv[])
/* Start time in microseconds */
guint64 start_time = g_get_monotonic_time();
#if QT_VERSION < QT_VERSION_CHECK(6, 0, 0)
/*
* See:
*
* issue #16908;
*
* https://doc.qt.io/qt-5/qvector.html#maximum-size-and-out-of-memory-conditions
*
* https://forum.qt.io/topic/114950/qvector-realloc-throwing-sigsegv-when-very-large-surface3d-is-rendered
*
* for why we're doing this; the widget we use for the packet list
* uses QVector, so those limitations apply to it.
*
* Apparently, this will be fixed in Qt 6:
*
* https://github.com/qt/qtbase/commit/215ca735341b9487826023a7983382851ce8bf26
*
* https://github.com/qt/qtbase/commit/2a6cdec718934ca2cc7f6f9c616ebe62f6912123#diff-724f419b0bb0487c2629bb16cf534c4b268ddcee89b5177189b607f940cfd83dR192
*
* Hopefully QList won't cause any performance hits relative to
* QVector.
*
* We pick 53 million records as a value that should avoid the problem;
* see the Wireshark issue for why that value was chosen.
*/
cf_set_max_records(53000000);
#endif
/* Enable destinations for logging earlier in startup */
set_console_log_handler();
qInstallMessageHandler(g_log_message_handler);