wireshark/epan/dfilter/scanner.l


%top {
/* Include this before everything else, for various large-file definitions */
#include "config.h"
#include <wireshark.h>
#include <stdlib.h>
#include <errno.h>
#include <wsutil/str_util.h>
#include "dfilter-int.h"
#include "syntax-tree.h"
#include "grammar.h"
#include "dfunctions.h"
}
/*
* We want a reentrant scanner.
*/
%option reentrant
/*
* We don't use input, so don't generate code for it.
*/
%option noinput
/*
* We don't use unput, so don't generate code for it.
*/
%option nounput
/*
* We don't read interactively from the terminal.
*/
%option never-interactive
/*
* Prefix scanner routines with "df_" rather than "yy", so this scanner
* can coexist with other scanners.
*/
%option prefix="df_"
/*
* We're reading from a string, so we don't need yywrap.
*/
%option noyywrap
/*
* The type for the state we keep for a scanner.
*/
%option extra-type="df_scanner_state_t *"
/*
* We have to override the memory allocators so that we don't get
* "unused argument" warnings from the yyscanner argument (which
* we don't use, as we have a global memory allocator).
*
* We provide, as macros, our own versions of the routines generated by Flex,
* which just call malloc()/realloc()/free() (as the Flex versions do),
* discarding the extra argument.
*/
%option noyyalloc
%option noyyrealloc
%option noyyfree
%{
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 2001 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
* Disable diagnostics in the code generated by Flex.
*/
DIAG_OFF_FLEX
df_lval_t *df_lval;
static int set_lval_str(int token, const char *token_value);
#define simple(token) set_lval_str(token, yytext)
static gboolean append_escaped_char(dfwork_t *dfw, GString *str, char c);
static gboolean parse_charconst(dfwork_t *dfw, const char *s, unsigned long *valuep);
/*
* Sleazy hack to suppress compiler warnings in yy_fatal_error().
*/
#define YY_EXIT_FAILURE ((void)yyscanner, 2)
/*
* Macros for the allocators, to discard the extra argument.
*/
#define df_alloc(size, yyscanner) (void *)malloc(size)
#define df_realloc(ptr, size, yyscanner) (void *)realloc((char *)(ptr), (size))
#define df_free(ptr, yyscanner) free((char *)ptr)
%}
WORD_CHAR [[:alnum:]_-]
hex2 [[:xdigit:]]{2}
MacAddress {hex2}:{hex2}:{hex2}:{hex2}:{hex2}:{hex2}|{hex2}-{hex2}-{hex2}-{hex2}-{hex2}-{hex2}|{hex2}\.{hex2}\.{hex2}\.{hex2}\.{hex2}\.{hex2}
hex4 [[:xdigit:]]{4}
QuadMacAddress {hex4}\.{hex4}\.{hex4}
dec-octet [0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]
IPv4address {dec-octet}\.{dec-octet}\.{dec-octet}\.{dec-octet}
h16 [0-9A-Fa-f]{1,4}
ls32 {h16}:{h16}|{IPv4address}
IPv6address ({h16}:){6}{ls32}|::({h16}:){5}{ls32}|({h16})?::({h16}:){4}{ls32}|(({h16}:){0,1}{h16})?::({h16}:){3}{ls32}|(({h16}:){0,2}{h16})?::({h16}:){2}{ls32}|(({h16}:){0,3}{h16})?::{h16}:{ls32}|(({h16}:){0,4}{h16})?::{ls32}|(({h16}:){0,5}{h16})?::{h16}|(({h16}:){0,6}{h16})?::
v4-cidr-prefix \/[[:digit:]]{1,2}
v6-cidr-prefix \/[[:digit:]]{1,3}
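/*
 * Illustrative matches for the address patterns above (example inputs are
 * assumed for documentation, not taken from this file):
 *   "11:22:33:44:55:66", "11-22-33-44-55-66", "11.22.33.44.55.66" -> {MacAddress}
 *   "1122.3344.5566"                                              -> {QuadMacAddress}
 *   "192.168.0.1", "10.0.0.0/8"        -> {IPv4address}{v4-cidr-prefix}?
 *   "::1", "2001:db8::1/64"            -> {IPv6address}{v6-cidr-prefix}?
 * The corresponding rules below return all of these as TOKEN_UNPARSED.
 */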
%x RANGE
%x DQUOTE
%x SQUOTE
%x REFERENCE
%%
[[:blank:]\n]+
"(" return simple(TOKEN_LPAREN);
")" return simple(TOKEN_RPAREN);
"," return simple(TOKEN_COMMA);
"{" return simple(TOKEN_LBRACE);
".." return simple(TOKEN_DOTDOT);
"}" return simple(TOKEN_RBRACE);
"+" return simple(TOKEN_PLUS);
"-" return simple(TOKEN_MINUS);
"*" return simple(TOKEN_STAR);
"/" return simple(TOKEN_RSLASH);
"%" return simple(TOKEN_PERCENT);
"==" return simple(TOKEN_TEST_ANY_EQ);
"eq" return simple(TOKEN_TEST_ANY_EQ);
"any_eq" return simple(TOKEN_TEST_ANY_EQ);
"!=" return simple(TOKEN_TEST_ALL_NE);
"ne" return simple(TOKEN_TEST_ALL_NE);
"all_ne" return simple(TOKEN_TEST_ALL_NE);
"===" return simple(TOKEN_TEST_ALL_EQ);
"all_eq" return simple(TOKEN_TEST_ALL_EQ);
"!==" return simple(TOKEN_TEST_ANY_NE);
"~=" {
add_deprecated_token(yyextra->dfw, "The operator \"~=\" is deprecated, use \"!==\" instead.");
return simple(TOKEN_TEST_ANY_NE);
}
"any_ne" return simple(TOKEN_TEST_ANY_NE);
">" return simple(TOKEN_TEST_GT);
"gt" return simple(TOKEN_TEST_GT);
">=" return simple(TOKEN_TEST_GE);
"ge" return simple(TOKEN_TEST_GE);
"<" return simple(TOKEN_TEST_LT);
"lt" return simple(TOKEN_TEST_LT);
"<=" return simple(TOKEN_TEST_LE);
"le" return simple(TOKEN_TEST_LE);
"contains" return simple(TOKEN_TEST_CONTAINS);
"~" return simple(TOKEN_TEST_MATCHES);
"matches" return simple(TOKEN_TEST_MATCHES);
"!" return simple(TOKEN_TEST_NOT);
"not" return simple(TOKEN_TEST_NOT);
"&&" return simple(TOKEN_TEST_AND);
"and" return simple(TOKEN_TEST_AND);
"||" return simple(TOKEN_TEST_OR);
"or" return simple(TOKEN_TEST_OR);
"in" return simple(TOKEN_TEST_IN);
"&" return simple(TOKEN_BITWISE_AND);
"bitwise_and" return simple(TOKEN_BITWISE_AND);
"${" {
BEGIN(REFERENCE);
return simple(TOKEN_REF_OPEN);
}
<REFERENCE>[^}]+ {
return set_lval_str(TOKEN_REFERENCE, yytext);
}
<REFERENCE>"}" {
BEGIN(INITIAL);
return simple(TOKEN_REF_CLOSE);
}
<REFERENCE><<EOF>> {
dfilter_fail(yyextra->dfw, "Right brace missing from field reference.");
return SCAN_FAILED;
}
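/*
 * Sketch of the token stream for a field reference, with an assumed example
 * input: "${ip.src}" is scanned as TOKEN_REF_OPEN, the REFERENCE start
 * condition returns "ip.src" as TOKEN_REFERENCE, and "}" yields
 * TOKEN_REF_CLOSE before returning to INITIAL.
 */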
"[" {
BEGIN(RANGE);
return simple(TOKEN_LBRACKET);
}
<RANGE>[^],]+ {
return set_lval_str(TOKEN_RANGE, yytext);
}
<RANGE>"," {
return simple(TOKEN_COMMA);
}
<RANGE>"]" {
BEGIN(INITIAL);
return simple(TOKEN_RBRACKET);
}
<RANGE><<EOF>> {
dfilter_fail(yyextra->dfw, "The right bracket was missing from a slice.");
return SCAN_FAILED;
}
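/*
 * Sketch of the token stream for a slice, with an assumed example input: in
 * "frame[1:4,6-7]" the "[" returns TOKEN_LBRACKET, "1:4" and "6-7" are each
 * returned verbatim as TOKEN_RANGE (the grammar interprets their contents),
 * "," is TOKEN_COMMA and "]" is TOKEN_RBRACKET.
 */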
[rR]{0,1}\042 {
/* start quote of a quoted string */
/*
* The example of how to scan for strings was taken from
* the flex manual, from the section "Start Conditions".
* See: https://westes.github.io/flex/manual/Start-Conditions.html
*/
BEGIN(DQUOTE);
yyextra->quoted_string = g_string_new("");
if (yytext[0] == 'r' || yytext[0] == 'R') {
/*
* This is a raw string (like in Python). Rules: 1) The two
* escape sequences are \\ and \". 2) Backslashes are
* preserved. 3) Double quotes in the string must be escaped.
* Corollary: Strings cannot end with an odd number of
* backslashes.
* Example: r"a\b\x12\"\\" is the string (including the implicit NUL terminator)
* {'a', '\\', 'b', '\\', 'x', '1', '2', '\\', '"', '\\', '\\', '\0'}
*/
yyextra->raw_string = TRUE;
}
else {
yyextra->raw_string = FALSE;
}
}
<DQUOTE><<EOF>> {
/* unterminated string */
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
dfilter_fail(yyextra->dfw, "The final quote was missing from a quoted string.");
return SCAN_FAILED;
}
<DQUOTE>\042 {
/* end quote */
BEGIN(INITIAL);
df_lval->value = g_string_free(yyextra->quoted_string, FALSE);
yyextra->quoted_string = NULL;
return TOKEN_STRING;
}
<DQUOTE>\\[0-7]{1,3} {
/* octal sequence */
if (yyextra->raw_string) {
g_string_append(yyextra->quoted_string, yytext);
}
else {
unsigned long result;
result = strtoul(yytext + 1, NULL, 8);
if (result == 0) {
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
dfilter_fail(yyextra->dfw, "%s (NUL byte) cannot be used with a regular string.", yytext);
return SCAN_FAILED;
}
if (result > 0xff) {
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
dfilter_fail(yyextra->dfw, "%s is larger than 255.", yytext);
return SCAN_FAILED;
}
g_string_append_c(yyextra->quoted_string, (gchar) result);
}
}
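/* For example (assumed input), in a non-raw string: "\101" appends 'A'
 * (octal 101 == 0x41); "\0" (NUL byte) and "\400" (greater than 255) are
 * rejected and the scan fails. */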
<DQUOTE>\\x[[:xdigit:]]{1,2} {
/* hex sequence */
/*
* The C standard does not place a limit on the number of hex
* digits after \x, but we do: \xNN can have one or two digits, not more.
*/
if (yyextra->raw_string) {
g_string_append(yyextra->quoted_string, yytext);
}
else {
unsigned long result;
result = strtoul(yytext + 2, NULL, 16);
if (result == 0) {
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
dfilter_fail(yyextra->dfw, "%s (NUL byte) cannot be used with a regular string.", yytext);
return SCAN_FAILED;
}
g_string_append_c(yyextra->quoted_string, (gchar) result);
}
}
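/* For example (assumed input), in a non-raw string: "\x41" appends 'A' and
 * "\xff" appends byte 0xFF; "\x0" is rejected because a NUL byte is not
 * allowed in a regular string. */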
<DQUOTE>\\. {
/* escaped character */
if (yyextra->raw_string) {
g_string_append(yyextra->quoted_string, yytext);
}
else if (!append_escaped_char(yyextra->dfw, yyextra->quoted_string, yytext[1])) {
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
return SCAN_FAILED;
}
}
<DQUOTE>[^\\\042]+ {
/* non-escaped string */
g_string_append(yyextra->quoted_string, yytext);
}
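/*
 * Sketch of the overall flow for a regular (non-raw) string, with an assumed
 * example input: scanning "ab\n" (as typed in a filter, surrounding double
 * quotes included) enters DQUOTE, appends "ab" via the non-escaped rule,
 * converts the escape \n into a real newline via append_escaped_char(), and
 * the closing quote returns TOKEN_STRING whose value is the bytes a, b, LF.
 */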
\047 {
/* start quote of a quoted character value */
BEGIN(SQUOTE);
yyextra->quoted_string = g_string_new("'");
}
<SQUOTE><<EOF>> {
/* unterminated character value */
g_string_free(yyextra->quoted_string, TRUE);
yyextra->quoted_string = NULL;
dfilter_fail(yyextra->dfw, "The final quote was missing from a character constant.");
return SCAN_FAILED;
}
<SQUOTE>\047 {
/* end quote */
BEGIN(INITIAL);
g_string_append_c(yyextra->quoted_string, '\'');
df_lval->value = g_string_free(yyextra->quoted_string, FALSE);
yyextra->quoted_string = NULL;
if (!parse_charconst(yyextra->dfw, df_lval->value, &df_lval->number)) {
return SCAN_FAILED;
}
return TOKEN_CHARCONST;
}
<SQUOTE>\\. {
/* escaped character */
g_string_append(yyextra->quoted_string, yytext);
}
<SQUOTE>[^\\\047]+ {
/* non-escaped string */
g_string_append(yyextra->quoted_string, yytext);
}
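/*
 * Sketch of the character-constant flow, with an assumed example input:
 * scanning 'A' accumulates the text "'A'", parse_charconst() below turns it
 * into the numeric value 0x41 stored in df_lval->number, and TOKEN_CHARCONST
 * is returned.
 */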
/* None of the patterns below can match ".." anywhere in the token string. */
{MacAddress}|{QuadMacAddress} {
/* MAC Address literal. */
return set_lval_str(TOKEN_UNPARSED, yytext);
}
{IPv4address}{v4-cidr-prefix}? {
/* IPv4 with or without prefix. */
return set_lval_str(TOKEN_UNPARSED, yytext);
}
{IPv6address}{v6-cidr-prefix}? {
/* IPv6 with or without prefix. */
return set_lval_str(TOKEN_UNPARSED, yytext);
}
[[:xdigit:]]+:[[:xdigit:]:]* {
/* Bytes. */
return set_lval_str(TOKEN_UNPARSED, yytext);
}
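/* For example (assumed input): "a1:b2:c3:d4" is colon-separated hex bytes
 * (not six pairs, so not a MAC address) and reaches this rule as
 * TOKEN_UNPARSED. */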
"<"[^>=]+">" {
/* Literal in-between angle brackets (cannot be parsed as a protocol field). */
return set_lval_str(TOKEN_LITERAL, yytext);
}
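/* For example (assumed input): "<fc>" or "<1.2.3>" is returned as
 * TOKEN_LITERAL (the angle brackets stay in the token text); the brackets
 * force a literal interpretation, never a protocol or field name. */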
[:.]?[[:alnum:]_]{WORD_CHAR}*(\.{WORD_CHAR}+)* {
/* Identifier or literal or unparsed. */
if (yytext[0] == '.')
return set_lval_str(TOKEN_IDENTIFIER, yytext);
if (yytext[0] == ':')
return set_lval_str(TOKEN_LITERAL, yytext);
return set_lval_str(TOKEN_UNPARSED, yytext);
}
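/*
 * For example (assumed inputs): ".fc" is always TOKEN_IDENTIFIER (a protocol
 * or field name), ":fc" is always TOKEN_LITERAL (a literal value, e.g. 0xFC),
 * and a bare "fc" is TOKEN_UNPARSED, leaving the choice between the two
 * readings to a later stage.
 */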
. {
/* Default */
if (isprint_string(yytext))
dfilter_fail(yyextra->dfw, "\"%s\" was unexpected in this context.", yytext);
else
dfilter_fail(yyextra->dfw, "Non-printable ASCII characters may only appear inside double-quotes.");
return SCAN_FAILED;
}
%%
/*
* Turn diagnostics back on, so we check the code that we've written.
*/
DIAG_ON_FLEX
static int
set_lval_str(int token, const char *token_value)
{
df_lval->value = g_strdup(token_value);
return token;
}
static gboolean
append_escaped_char(dfwork_t *dfw, GString *str, char c)
{
switch (c) {
case 'a':
c = '\a';
break;
case 'b':
c = '\b';
break;
case 'f':
c = '\f';
break;
case 'n':
c = '\n';
break;
case 'r':
c = '\r';
break;
case 't':
c = '\t';
break;
case 'v':
c = '\v';
break;
case '\\':
case '\'':
case '\"':
break;
default:
dfilter_fail(dfw, "\\%c is not a valid character escape sequence", c);
return FALSE;
}
g_string_append_c(str, c);
return TRUE;
}
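/*
 * Usage sketch (assumed example values): append_escaped_char(dfw, str, 'n')
 * appends a real newline to str and returns TRUE, while an unknown escape
 * such as '\q' reports "\q is not a valid character escape sequence" and
 * returns FALSE.
 */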
static gboolean
parse_charconst(dfwork_t *dfw, const char *s, unsigned long *valuep)
{
const char *cp;
unsigned long value;
cp = s + 1; /* skip the leading ' */
if (*cp == '\'') {
dfilter_fail(dfw, "Empty character constant.");
return FALSE;
}
if (*cp == '\\') {
/*
* C escape sequence.
* An escape sequence is an octal number \NNN,
* a hex number \xNN, or one of \' \" \\ \a \b \f \n \r \t \v
* that stands for the byte value of the equivalent
* C-escape in ASCII encoding.
*/
cp++;
switch (*cp) {
case '\0':
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
case 'a':
value = '\a';
break;
case 'b':
value = '\b';
break;
case 'f':
value = '\f';
break;
case 'n':
value = '\n';
break;
case 'r':
value = '\r';
break;
case 't':
value = '\t';
break;
case 'v':
value = '\v';
break;
case '\'':
value = '\'';
break;
case '\\':
value = '\\';
break;
case '"':
value = '"';
break;
case 'x':
cp++;
if (*cp >= '0' && *cp <= '9')
value = *cp - '0';
else if (*cp >= 'A' && *cp <= 'F')
value = 10 + (*cp - 'A');
else if (*cp >= 'a' && *cp <= 'f')
value = 10 + (*cp - 'a');
else {
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
}
cp++;
if (*cp != '\'') {
value <<= 4;
if (*cp >= '0' && *cp <= '9')
value |= *cp - '0';
else if (*cp >= 'A' && *cp <= 'F')
value |= 10 + (*cp - 'A');
else if (*cp >= 'a' && *cp <= 'f')
value |= 10 + (*cp - 'a');
else {
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
}
}
break;
default:
/* Octal */
if (*cp >= '0' && *cp <= '7')
value = *cp - '0';
else {
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
}
if (*(cp + 1) != '\'') {
cp++;
value <<= 3;
if (*cp >= '0' && *cp <= '7')
value |= *cp - '0';
else {
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
}
if (*(cp + 1) != '\'') {
cp++;
value <<= 3;
if (*cp >= '0' && *cp <= '7')
value |= *cp - '0';
else {
dfilter_fail(dfw, "%s isn't a valid character constant.", s);
return FALSE;
}
}
}
if (value > 0xFF) {
dfilter_fail(dfw, "%s is too large to be a valid character constant.", s);
return FALSE;
}
}
} else {
value = *cp;
if (!g_ascii_isprint(value)) {
dfilter_fail(dfw, "Non-printable value '0x%02lx' in character constant.", value);
return FALSE;
}
}
cp++;
if ((*cp != '\'') || (*(cp + 1) != '\0')) {
dfilter_fail(dfw, "%s is too long to be a valid character constant.", s);
return FALSE;
}
*valuep = value;
return TRUE;
}
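/*
 * Worked examples for parse_charconst() (assumed inputs, written as the
 * scanner accumulates them, i.e. including the surrounding single quotes):
 *   "'A'"    -> value 0x41 (printable ASCII taken as-is)
 *   "'\x41'" -> value 0x41 (one or two hex digits)
 *   "'\101'" -> value 0x41 (one to three octal digits)
 *   "'\n'"   -> value 0x0a
 *   "''" fails with "Empty character constant." and "'AB'" fails with
 *   "... is too long to be a valid character constant."
 */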