Prime the epan_dissect_t with postdissector wanted fields if necessary.

This makes sure that postdissectors that indicate they need certain
fields will get them on the first pass.
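
For illustration only, a minimal sketch of the intended first-pass pattern (it mirrors the file.c change below; fdata, edt, cf, phdr, buf and cinfo stand in for the caller's own state):

    /* Prime only on the first pass, i.e. before the frame has been
       visited, then dissect as usual. */
    if (!fdata->flags.visited) {
        prime_epan_dissect_with_postdissector_wanted_fields(edt);
    }
    epan_dissect_run_with_taps(edt, cf->cd_t, phdr,
                               frame_tvbuff_new(fdata, buf), fdata, cinfo);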

While we're at it:

Fix the field-fetching code in TRANSUM not to assume it got any
instances of the field being fetched.

Rename process_packet_first_pass() in sharkd to process_packet(), as
it's the only routine in sharkd that processes packets.

Rename process_packet() in tshark and tfshark to
process_packet_single_pass(), as it's what's used if we're only doing
one-pass analysis.

Clean up comments and whitespace.

Change-Id: I3769af952c66f5ca4b68002ad6213858ab9cab9b
Reviewed-on: https://code.wireshark.org/review/21063
Reviewed-by: Guy Harris <guy@alum.mit.edu>
Guy Harris 2017-04-12 13:52:07 -07:00
parent 74f9b279e9
commit 847c25c5a7
9 changed files with 235 additions and 194 deletions


@ -541,6 +541,17 @@ epan_dissect_prime_with_hfid(epan_dissect_t *edt, int hfid)
proto_tree_prime_with_hfid(edt->tree, hfid);
}
void
epan_dissect_prime_with_hfid_array(epan_dissect_t *edt, GArray *hfids)
{
guint i;
for (i = 0; i < hfids->len; i++) {
proto_tree_prime_with_hfid(edt->tree,
g_array_index(hfids, int, i));
}
}
/* ----------------------- */
const gchar *
epan_custom_set(epan_dissect_t *edt, GSList *field_ids,
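
A hypothetical caller-side sketch of the new helper (hfid_a and hfid_b are placeholders for field ids obtained elsewhere, e.g. from proto_registrar_get_byname(); as the body above shows, the helper only reads the array, so the caller keeps ownership):

    GArray *hfids = g_array_new(FALSE, FALSE, sizeof(int));
    g_array_append_val(hfids, hfid_a);
    g_array_append_val(hfids, hfid_b);
    epan_dissect_prime_with_hfid_array(edt, hfids); /* primes edt->tree with both hfids */
    g_array_free(hfids, TRUE);                      /* the helper does not keep the array */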


@ -225,6 +225,11 @@ WS_DLL_PUBLIC
void
epan_dissect_prime_with_hfid(epan_dissect_t *edt, int hfid);
/** Prime an epan_dissect_t's proto_tree with a set of fields/protocols specified by their hfids in a GArray */
WS_DLL_PUBLIC
void
epan_dissect_prime_with_hfid_array(epan_dissect_t *edt, GArray *hfids);
/** fill the dissect run output into the packet list columns */
WS_DLL_PUBLIC
void


@ -3350,6 +3350,25 @@ postdissectors_want_fields(void)
return FALSE;
}
void
prime_epan_dissect_with_postdissector_wanted_fields(epan_dissect_t *edt)
{
guint i;
if (postdissectors == NULL) {
/*
* No postdissector expressed an interest in any fields.
*/
return;
}
for (i = 0; i < postdissectors->len; i++) {
if (POSTDISSECTORS(i).wanted_fields != NULL &&
POSTDISSECTORS(i).wanted_fields->len != 0)
epan_dissect_prime_with_hfid_array(edt,
POSTDISSECTORS(i).wanted_fields);
}
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*


@ -25,6 +25,7 @@
#include "proto.h"
#include "tvbuff.h"
#include "epan.h"
#include "value_string.h"
#include "frame_data.h"
#include "packet_info.h"
@ -803,12 +804,15 @@ extern void call_all_postdissectors(tvbuff_t *tvb, packet_info *pinfo, proto_tre
/*
* Return TRUE if at least one postdissector wants fields, FALSE otherwise.
* XXX - at some point this should return a bag of all fields requested by
* all postdissectors, so we can prime the epan_dissect_t with them rather
* than constructing a bogus tap with a bogus filter.
*/
WS_DLL_PUBLIC gboolean postdissectors_want_fields(void);
/*
* Prime an epan_dissect_t with all the fields wanted by postdissectors.
*/
WS_DLL_PUBLIC void
prime_epan_dissect_with_postdissector_wanted_fields(epan_dissect_t *edt);
/** @} */
#ifdef __cplusplus
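
A hypothetical sketch of how a one-pass caller might combine the two declarations above when setting up a dissection (this paraphrases the tshark/tfshark/sharkd call sites rather than quoting them; cf is a placeholder capture_file):

    /* A protocol tree is needed if anything will read fields out of it,
       including postdissectors that declared wanted fields. */
    gboolean create_proto_tree =
        (cf->dfcode != NULL) || postdissectors_want_fields();
    epan_dissect_t *edt = epan_dissect_new(cf->epan, create_proto_tree, FALSE);

    if (cf->dfcode)
        epan_dissect_prime_with_dfilter(edt, cf->dfcode);
    /* One pass only, so it is also the first pass: prime the wanted fields. */
    prime_epan_dissect_with_postdissector_wanted_fields(edt);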

file.c

@ -1163,6 +1163,12 @@ add_packet_to_packet_list(frame_data *fdata, capture_file *cf,
}
#endif
if (!fdata->flags.visited) {
/* This is the first pass, so prime the epan_dissect_t with the
fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
}
/* Dissect the frame. */
epan_dissect_run_with_taps(edt, cf->cd_t, phdr, frame_tvbuff_new(fdata, buf), fdata, cinfo);


@ -157,46 +157,44 @@ int decode_smb(packet_info *pinfo _U_, proto_tree *tree, PKT_INFO* pkt_info, PKT
else
pkt_info->rrpd.c2s = FALSE;
extract_uint(tree, hf_of_interest[HF_INTEREST_SMB_MID].hf, field_uint, &field_value_count);
if (field_value_count)
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_SMB_MID].hf, field_uint, &field_value_count))
{
pkt_info->rrpd.calculation = RTE_CALC_SMB1;
pkt_info->pkt_of_interest = FALSE; /* can't process SMB1 at the moment */
return 0;
}
else
{
/* Default in case we don't have header information */
pkt_info->rrpd.session_id = 0;
pkt_info->rrpd.msg_id = 0;
pkt_info->rrpd.suffix = 1;
pkt_info->rrpd.decode_based = TRUE;
pkt_info->rrpd.calculation = RTE_CALC_SMB2;
pkt_info->pkt_of_interest = TRUE;
extract_ui64(tree, hf_of_interest[HF_INTEREST_SMB2_MSG_ID].hf, msg_id, &msg_id_count);
if (msg_id_count) /* test for header information */
if (field_value_count)
{
extract_ui64(tree, hf_of_interest[HF_INTEREST_SMB2_SES_ID].hf, ses_id, &ses_id_count);
for (size_t i = 0; (i < msg_id_count) && (i < MAX_SUBPKTS_PER_PACKET); i++)
{
subpackets[i].rrpd.c2s = pkt_info->rrpd.c2s;
subpackets[i].rrpd.ip_proto = pkt_info->rrpd.ip_proto;
subpackets[i].rrpd.stream_no = pkt_info->rrpd.stream_no;
subpackets[i].rrpd.session_id = ses_id[i];
subpackets[i].rrpd.msg_id = msg_id[i];
subpackets[i].rrpd.suffix = 1;
subpackets[i].rrpd.decode_based = TRUE;
subpackets[i].rrpd.calculation = RTE_CALC_SMB2;
subpackets[i].pkt_of_interest = TRUE;
}
return (int)msg_id_count;
pkt_info->rrpd.calculation = RTE_CALC_SMB1;
pkt_info->pkt_of_interest = FALSE; /* can't process SMB1 at the moment */
return 0;
}
}
/* Default in case we don't have header information */
pkt_info->rrpd.session_id = 0;
pkt_info->rrpd.msg_id = 0;
pkt_info->rrpd.suffix = 1;
pkt_info->rrpd.decode_based = TRUE;
pkt_info->rrpd.calculation = RTE_CALC_SMB2;
pkt_info->pkt_of_interest = TRUE;
extract_ui64(tree, hf_of_interest[HF_INTEREST_SMB2_MSG_ID].hf, msg_id, &msg_id_count);
if (msg_id_count) /* test for header information */
{
extract_ui64(tree, hf_of_interest[HF_INTEREST_SMB2_SES_ID].hf, ses_id, &ses_id_count);
for (size_t i = 0; (i < msg_id_count) && (i < MAX_SUBPKTS_PER_PACKET); i++)
{
subpackets[i].rrpd.c2s = pkt_info->rrpd.c2s;
subpackets[i].rrpd.ip_proto = pkt_info->rrpd.ip_proto;
subpackets[i].rrpd.stream_no = pkt_info->rrpd.stream_no;
subpackets[i].rrpd.session_id = ses_id[i];
subpackets[i].rrpd.msg_id = msg_id[i];
subpackets[i].rrpd.suffix = 1;
subpackets[i].rrpd.decode_based = TRUE;
subpackets[i].rrpd.calculation = RTE_CALC_SMB2;
subpackets[i].pkt_of_interest = TRUE;
}
return (int)msg_id_count;
}
return 1;
}
@ -208,29 +206,43 @@ int decode_gtcp(packet_info *pinfo, proto_tree *tree, PKT_INFO* pkt_info)
gboolean field_bool[MAX_RETURNED_ELEMENTS]; /* An extracted field array for booleans */
size_t field_value_count; /* How many entries are there in the extracted field array */
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_TCP_STREAM].hf, field_uint, &field_value_count))
pkt_info->rrpd.stream_no = field_uint[0];
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_TCP_STREAM].hf, field_uint, &field_value_count)) {
if (field_value_count)
pkt_info->rrpd.stream_no = field_uint[0];
}
pkt_info->srcport = pinfo->srcport;
pkt_info->dstport = pinfo->destport;
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_TCP_LEN].hf, field_uint, &field_value_count))
pkt_info->len = field_uint[0];
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_TCP_LEN].hf, field_uint, &field_value_count)) {
if (field_value_count)
pkt_info->len = field_uint[0];
}
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_SYN].hf, field_bool, &field_value_count))
pkt_info->tcp_flags_syn = field_bool[0];
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_SYN].hf, field_bool, &field_value_count)) {
if (field_value_count)
pkt_info->tcp_flags_syn = field_bool[0];
}
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_ACK].hf, field_bool, &field_value_count))
pkt_info->tcp_flags_ack = field_bool[0];
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_ACK].hf, field_bool, &field_value_count)) {
if (field_value_count)
pkt_info->tcp_flags_ack = field_bool[0];
}
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_RESET].hf, field_bool, &field_value_count))
pkt_info->tcp_flags_reset = field_bool[0];
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_FLAGS_RESET].hf, field_bool, &field_value_count)) {
if (field_value_count)
pkt_info->tcp_flags_reset = field_bool[0];
}
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_RETRAN].hf, field_bool, &field_value_count))
pkt_info->tcp_retran = field_bool[0];
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_RETRAN].hf, field_bool, &field_value_count)) {
if (field_value_count)
pkt_info->tcp_retran = field_bool[0];
}
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_KEEP_ALIVE].hf, field_bool, &field_value_count))
pkt_info->tcp_keep_alive = field_bool[0];
if (!extract_bool(tree, hf_of_interest[HF_INTEREST_TCP_KEEP_ALIVE].hf, field_bool, &field_value_count)) {
if (field_value_count)
pkt_info->tcp_keep_alive = field_bool[0];
}
if (((wmem_map_lookup(preferences.tcp_svc_ports, GUINT_TO_POINTER(pkt_info->dstport)) != NULL) ||
(wmem_map_lookup(preferences.tcp_svc_ports, GUINT_TO_POINTER(pkt_info->srcport)) != NULL)) &&
@ -257,8 +269,10 @@ int decode_dns(packet_info *pinfo _U_, proto_tree *tree, PKT_INFO* pkt_info)
guint32 field_uint[MAX_RETURNED_ELEMENTS]; /* An extracted field array for unsigned integers */
size_t field_value_count; /* How many entries are there in the extracted field array */
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_DNS_ID].hf, field_uint, &field_value_count))
pkt_info->rrpd.msg_id = field_uint[0];
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_DNS_ID].hf, field_uint, &field_value_count)) {
if (field_value_count)
pkt_info->rrpd.msg_id = field_uint[0];
}
pkt_info->rrpd.session_id = 1;
pkt_info->rrpd.suffix = 1; /* need to do something tricky here as dns.id gets reused */
@ -278,11 +292,15 @@ int decode_gudp(packet_info *pinfo, proto_tree *tree, PKT_INFO* pkt_info)
pkt_info->srcport = pinfo->srcport;
pkt_info->dstport = pinfo->destport;
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_UDP_STREAM].hf, field_uint, &field_value_count))
pkt_info->rrpd.stream_no = field_uint[0];
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_UDP_STREAM].hf, field_uint, &field_value_count)) {
if (field_value_count)
pkt_info->rrpd.stream_no = field_uint[0];
}
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_UDP_LENGTH].hf, field_uint, &field_value_count))
pkt_info->len = field_uint[0];
if (!extract_uint(tree, hf_of_interest[HF_INTEREST_UDP_LENGTH].hf, field_uint, &field_value_count)) {
if (field_value_count)
pkt_info->len = field_uint[0];
}
if ((wmem_map_lookup(preferences.udp_svc_ports, GUINT_TO_POINTER(pkt_info->dstport)) != NULL) ||
(wmem_map_lookup(preferences.udp_svc_ports, GUINT_TO_POINTER(pkt_info->srcport)) != NULL))
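
Distilled, the guarded-fetch idiom the TRANSUM changes above converge on (tree, pkt_info, hf_of_interest and MAX_RETURNED_ELEMENTS come from the surrounding TRANSUM code; per the convention visible in the diff, a zero return from extract_uint() means the lookup itself succeeded and field_value_count reports how many instances were actually found):

    guint32 field_uint[MAX_RETURNED_ELEMENTS];
    size_t  field_value_count;

    /* Only read field_uint[0] if the fetch succeeded AND at least one
       instance of the field was present in this packet's tree. */
    if (!extract_uint(tree, hf_of_interest[HF_INTEREST_TCP_STREAM].hf,
                      field_uint, &field_value_count)) {
        if (field_value_count)
            pkt_info->rrpd.stream_no = field_uint[0];
    }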


@ -271,7 +271,7 @@ sharkd_epan_new(capture_file *cf)
}
static gboolean
process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
process_packet(capture_file *cf, epan_dissect_t *edt,
gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd)
{
@ -307,6 +307,10 @@ process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
if (cf->dfcode)
epan_dissect_prime_with_dfilter(edt, cf->dfcode);
/* This is the first and only pass, so prime the epan_dissect_t
with the fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
frame_data_set_before_dissect(&fdlocal, &cf->elapsed_time,
&ref, prev_dis);
if (ref == &fdlocal) {
@ -385,7 +389,7 @@ load_cap_file(capture_file *cf, int max_packet_count, gint64 max_byte_count)
}
while (wtap_read(cf->wth, &err, &err_info, &data_offset)) {
if (process_packet_first_pass(cf, edt, data_offset, wtap_phdr(cf->wth),
if (process_packet(cf, edt, data_offset, wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth))) {
/* Stop reading if we have the maximum number of packets;
* When the -c option has not been used, max_packet_count

tfshark.c

@ -137,8 +137,9 @@ static output_fields_t* output_fields = NULL;
static const char *separator = "";
static int load_cap_file(capture_file *, int, gint64);
static gboolean process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset,
struct wtap_pkthdr *whdr, const guchar *pd, guint tap_flags);
static gboolean process_packet_single_pass(capture_file *cf,
epan_dissect_t *edt, gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd, guint tap_flags);
static void show_print_file_io_error(int err);
static gboolean write_preamble(capture_file *cf);
static gboolean print_packet(capture_file *cf, epan_dissect_t *edt);
@ -661,18 +662,29 @@ main(int argc, char *argv[])
goto clean_exit;
break;
case 'l': /* "Line-buffer" standard output */
/* This isn't line-buffering, strictly speaking, it's just
flushing the standard output after the information for
each packet is printed; however, that should be good
enough for all the purposes to which "-l" is put (and
is probably actually better for "-V", as it does fewer
writes).
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
See the comment in "process_packet()" for an explanation of
why we do that, and why we don't just use "setvbuf()" to
make the standard output line-buffered (short version: in
Windows, "line-buffered" is the same as "fully-buffered",
and the output buffer is only flushed when it fills up). */
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
The whole reason for the "-l" flag in either tcpdump or TShark
is to allow the output of a live capture to be piped to a program
or script and to have that script see the information for the
packet as soon as it's printed, rather than having to wait until
a standard I/O buffer fills up.
So, if the "-l" flag is specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees - arguably even better, as it may do fewer
writes. */
line_buffered = TRUE;
break;
case 'o': /* Override preference from command line */
@ -1060,8 +1072,8 @@ tfshark_epan_new(capture_file *cf)
static gboolean
process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd)
gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd)
{
frame_data fdlocal;
guint32 framenum;
@ -1087,6 +1099,10 @@ process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
if (cf->rfcode)
epan_dissect_prime_with_dfilter(edt, cf->rfcode);
/* This is the first pass, so prime the epan_dissect_t with the
fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
frame_data_set_before_dissect(&fdlocal, &cf->elapsed_time,
&ref, prev_dis);
if (ref == &fdlocal) {
@ -1127,9 +1143,9 @@ process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
}
static gboolean
process_packet_second_pass(capture_file *cf, epan_dissect_t *edt, frame_data *fdata,
struct wtap_pkthdr *phdr, Buffer *buf,
guint tap_flags)
process_packet_second_pass(capture_file *cf, epan_dissect_t *edt,
frame_data *fdata, struct wtap_pkthdr *phdr,
Buffer *buf, guint tap_flags)
{
column_info *cinfo;
gboolean passed;
@ -1149,6 +1165,10 @@ process_packet_second_pass(capture_file *cf, epan_dissect_t *edt, frame_data *fd
if (cf->dfcode)
epan_dissect_prime_with_dfilter(edt, cf->dfcode);
/* This is the first and only pass, so prime the epan_dissect_t
with the fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
col_custom_prime_edt(edt, &cf->cinfo);
/* We only need the columns if either
@ -1184,26 +1204,9 @@ process_packet_second_pass(capture_file *cf, epan_dissect_t *edt, frame_data *fd
this packet. */
print_packet(cf, edt);
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
So, if the "-l" flag was specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees. (The whole reason for the "-l" flag in either
tcpdump or TShark is to allow the output of a live capture to
be piped to a program or script and to have that script see the
information for the packet as soon as it's printed, rather than
having to wait until a standard I/O buffer fills up. */
/* If we're doing "line-buffering", flush the standard output
after every packet. See the comment above, for the "-l"
option, for an explanation of why we do that. */
if (line_buffered)
fflush(stdout);
@ -1350,7 +1353,7 @@ load_cap_file(capture_file *cf, int max_packet_count, gint64 max_byte_count)
}
while (local_wtap_read(cf, &file_phdr, &err, &err_info, &data_offset, &raw_data)) {
if (process_packet_first_pass(cf, edt, data_offset, &file_phdr/*wtap_phdr(cf->wth)*/,
wtap_buf_ptr(cf->wth))) {
wtap_buf_ptr(cf->wth))) {
/* Stop reading if we have the maximum number of packets;
* When the -c option has not been used, max_packet_count
@ -1417,8 +1420,9 @@ load_cap_file(capture_file *cf, int max_packet_count, gint64 max_byte_count)
process_packet_second_pass(cf, edt, fdata, &cf->phdr, &buf, tap_flags);
}
#else
if (!process_packet_second_pass(cf, edt, fdata, &cf->phdr, &buf, tap_flags))
return 2;
if (!process_packet_second_pass(cf, edt, fdata, &cf->phdr, &buf,
tap_flags))
return 2;
#endif
}
@ -1470,8 +1474,9 @@ load_cap_file(capture_file *cf, int max_packet_count, gint64 max_byte_count)
framenum++;
if (!process_packet(cf, edt, data_offset, &file_phdr/*wtap_phdr(cf->wth)*/,
raw_data, tap_flags))
if (!process_packet_single_pass(cf, edt, data_offset,
&file_phdr/*wtap_phdr(cf->wth)*/,
raw_data, tap_flags))
return 2;
/* Stop reading if we have the maximum number of packets;
@ -1577,8 +1582,9 @@ out:
}
static gboolean
process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset,
struct wtap_pkthdr *whdr, const guchar *pd, guint tap_flags)
process_packet_single_pass(capture_file *cf, epan_dissect_t *edt, gint64 offset,
struct wtap_pkthdr *whdr, const guchar *pd,
guint tap_flags)
{
frame_data fdata;
column_info *cinfo;
@ -1640,26 +1646,9 @@ process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset,
this packet. */
print_packet(cf, edt);
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
So, if the "-l" flag was specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees. (The whole reason for the "-l" flag in either
tcpdump or TShark is to allow the output of a live capture to
be piped to a program or script and to have that script see the
information for the packet as soon as it's printed, rather than
having to wait until a standard I/O buffer fills up. */
/* If we're doing "line-buffering", flush the standard output
after every packet. See the comment above, for the "-l"
option, for an explanation of why we do that. */
if (line_buffered)
fflush(stdout);

tshark.c

@ -228,9 +228,9 @@ static char *output_file_name;
#endif /* HAVE_LIBPCAP */
static int load_cap_file(capture_file *, char *, int, gboolean, int, gint64);
static gboolean process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset,
struct wtap_pkthdr *whdr, const guchar *pd,
guint tap_flags);
static gboolean process_packet_single_pass(capture_file *cf,
epan_dissect_t *edt, gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd, guint tap_flags);
static void show_capture_file_io_error(const char *, int, gboolean);
static void show_print_file_io_error(int err);
static gboolean write_preamble(capture_file *cf);
@ -1166,18 +1166,29 @@ main(int argc, char *argv[])
goto clean_exit;
break;
case 'l': /* "Line-buffer" standard output */
/* This isn't line-buffering, strictly speaking, it's just
flushing the standard output after the information for
each packet is printed; however, that should be good
enough for all the purposes to which "-l" is put (and
is probably actually better for "-V", as it does fewer
writes).
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
See the comment in "process_packet()" for an explanation of
why we do that, and why we don't just use "setvbuf()" to
make the standard output line-buffered (short version: in
Windows, "line-buffered" is the same as "fully-buffered",
and the output buffer is only flushed when it fills up). */
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
The whole reason for the "-l" flag in either tcpdump or TShark
is to allow the output of a live capture to be piped to a program
or script and to have that script see the information for the
packet as soon as it's printed, rather than having to wait until
a standard I/O buffer fills up.
So, if the "-l" flag is specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees - arguably even better, as it may do fewer
writes. */
line_buffered = TRUE;
break;
case 'L': /* Print list of link-layer types and exit */
@ -2600,9 +2611,9 @@ capture_input_new_packets(capture_session *cap_session, int to_read)
wtap_close(cf->wth);
cf->wth = NULL;
} else {
ret = process_packet(cf, edt, data_offset, wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth),
tap_flags);
ret = process_packet_single_pass(cf, edt, data_offset,
wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth), tap_flags);
}
if (ret != FALSE) {
/* packet successfully read and gone through the "Read Filter" */
@ -2773,8 +2784,8 @@ capture_cleanup(int signum _U_)
static gboolean
process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd)
gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd)
{
frame_data fdlocal;
guint32 framenum;
@ -2810,6 +2821,10 @@ process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
if (cf->dfcode)
epan_dissect_prime_with_dfilter(edt, cf->dfcode);
/* This is the first pass, so prime the epan_dissect_t with the
fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
frame_data_set_before_dissect(&fdlocal, &cf->elapsed_time,
&ref, prev_dis);
if (ref == &fdlocal) {
@ -2854,9 +2869,9 @@ process_packet_first_pass(capture_file *cf, epan_dissect_t *edt,
}
static gboolean
process_packet_second_pass(capture_file *cf, epan_dissect_t *edt, frame_data *fdata,
struct wtap_pkthdr *phdr, Buffer *buf,
guint tap_flags)
process_packet_second_pass(capture_file *cf, epan_dissect_t *edt,
frame_data *fdata, struct wtap_pkthdr *phdr,
Buffer *buf, guint tap_flags)
{
column_info *cinfo;
gboolean passed;
@ -2917,26 +2932,9 @@ process_packet_second_pass(capture_file *cf, epan_dissect_t *edt, frame_data *fd
this packet. */
print_packet(cf, edt);
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
So, if the "-l" flag was specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees. (The whole reason for the "-l" flag in either
tcpdump or TShark is to allow the output of a live capture to
be piped to a program or script and to have that script see the
information for the packet as soon as it's printed, rather than
having to wait until a standard I/O buffer fills up. */
/* If we're doing "line-buffering", flush the standard output
after every packet. See the comment above, for the "-l"
option, for an explanation of why we do that. */
if (line_buffered)
fflush(stdout);
@ -3121,7 +3119,7 @@ load_cap_file(capture_file *cf, char *save_file, int out_file_type,
tshark_debug("tshark: reading records for first pass");
while (wtap_read(cf->wth, &err, &err_info, &data_offset)) {
if (process_packet_first_pass(cf, edt, data_offset, wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth))) {
wtap_buf_ptr(cf->wth))) {
/* Stop reading if we have the maximum number of packets;
* When the -c option has not been used, max_packet_count
* starts at 0, which practically means, never stop reading.
@ -3332,9 +3330,8 @@ load_cap_file(capture_file *cf, char *save_file, int out_file_type,
tshark_debug("tshark: processing packet #%d", framenum);
if (process_packet(cf, edt, data_offset, wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth),
tap_flags)) {
if (process_packet_single_pass(cf, edt, data_offset, wtap_phdr(cf->wth),
wtap_buf_ptr(cf->wth), tap_flags)) {
/* Either there's no read filtering or this packet passed the
filter, so, if we're writing to a capture file, write
this packet out. */
@ -3524,8 +3521,9 @@ out:
}
static gboolean
process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset, struct wtap_pkthdr *whdr,
const guchar *pd, guint tap_flags)
process_packet_single_pass(capture_file *cf, epan_dissect_t *edt, gint64 offset,
struct wtap_pkthdr *whdr, const guchar *pd,
guint tap_flags)
{
frame_data fdata;
column_info *cinfo;
@ -3557,6 +3555,10 @@ process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset, struct wtap
if (cf->dfcode)
epan_dissect_prime_with_dfilter(edt, cf->dfcode);
/* This is the first and only pass, so prime the epan_dissect_t
with the fields postdissectors want on the first pass. */
prime_epan_dissect_with_postdissector_wanted_fields(edt);
col_custom_prime_edt(edt, &cf->cinfo);
/* We only need the columns if either
@ -3594,26 +3596,9 @@ process_packet(capture_file *cf, epan_dissect_t *edt, gint64 offset, struct wtap
this packet. */
print_packet(cf, edt);
/* The ANSI C standard does not appear to *require* that a line-buffered
stream be flushed to the host environment whenever a newline is
written, it just says that, on such a stream, characters "are
intended to be transmitted to or from the host environment as a
block when a new-line character is encountered".
The Visual C++ 6.0 C implementation doesn't do what is intended;
even if you set a stream to be line-buffered, it still doesn't
flush the buffer at the end of every line.
So, if the "-l" flag was specified, we flush the standard output
at the end of a packet. This will do the right thing if we're
printing packet summary lines, and, as we print the entire protocol
tree for a single packet without waiting for anything to happen,
it should be as good as line-buffered mode if we're printing
protocol trees. (The whole reason for the "-l" flag in either
tcpdump or TShark is to allow the output of a live capture to
be piped to a program or script and to have that script see the
information for the packet as soon as it's printed, rather than
having to wait until a standard I/O buffer fills up. */
/* If we're doing "line-buffering", flush the standard output
after every packet. See the comment above, for the "-l"
option, for an explanation of why we do that. */
if (line_buffered)
fflush(stdout);