2016-12-02 19:18:50 +00:00
|
|
|
%top {
/* Include this before everything else, for various large-file definitions */
#include "config.h"

#include <wireshark.h>

#include <stdlib.h>
#include <errno.h>

#include "dfilter-int.h"
#include "syntax-tree.h"
#include "grammar.h"
#include "dfunctions.h"
}
|
|
|
|
|
2016-03-31 01:44:01 +00:00
|
|
|
/*
 * We want a reentrant scanner.
 */
%option reentrant

/*
 * We don't use input, so don't generate code for it.
 */
%option noinput

/*
 * We don't use unput, so don't generate code for it.
 */
%option nounput

/*
 * We don't read interactively from the terminal.
 */
%option never-interactive

/*
 * Prefix scanner routines with "df_" rather than "yy", so this scanner
 * can coexist with other scanners.
 */
%option prefix="df_"

/*
 * We're reading from a string, so we don't need yywrap.
 */
%option noyywrap

/*
 * The type for the state we keep for a scanner.
 */
%option extra-type="df_scanner_state_t *"

/*
 * We have to override the memory allocators so that we don't get
 * "unused argument" warnings from the yyscanner argument (which
 * we don't use, as we have a global memory allocator).
 *
 * We provide, as macros, our own versions of the routines generated by Flex,
 * which just call malloc()/realloc()/free() (as the Flex versions do),
 * discarding the extra argument.
 */
%option noyyalloc
%option noyyrealloc
%option noyyfree

%{
/*
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 2001 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * Disable diagnostics in the code generated by Flex.
 */
DIAG_OFF_FLEX

/* Shared slot through which each rule hands the token's text to the parser. */
df_lval_t *df_lval;

static int set_lval_str(int token, const char *token_value);

/* Store yytext as the token's value and return the token id. */
#define SIMPLE(token) set_lval_str(token, yytext)

/*
 * Sleazy hack to suppress compiler warnings in yy_fatal_error().
 */
#define YY_EXIT_FAILURE ((void)yyscanner, 2)

/*
 * Macros for the allocators, to discard the extra argument.
 */
#define df_alloc(size, yyscanner) (void *)malloc(size)
#define df_realloc(ptr, size, yyscanner) (void *)realloc((char *)(ptr), (size))
#define df_free(ptr, yyscanner) free((char *)ptr)

%}

/* Exclusive start conditions: slices ("[...]"), double-quoted strings,
 * and single-quoted character constants. */
%x RANGE
%x DQUOTE
%x SQUOTE

%%
|
|
|
|
|
2018-04-17 15:49:17 +00:00
|
|
|
[[:blank:]\n]+ {
	/* Ignore whitespace, unless set elements are being parsed. Perhaps it
	 * should have used commas from the beginning, but now we are stuck with
	 * whitespace as separators. */
	if (yyextra->in_set) {
		return set_lval_str(TOKEN_WHITESPACE, " ");
	}
}

"(" return SIMPLE(TOKEN_LPAREN);
")" return SIMPLE(TOKEN_RPAREN);

[[:blank:]\n]*","[[:blank:]\n]* {
	/* A comma absorbs any surrounding whitespace into a single token. */
	return set_lval_str(TOKEN_COMMA, ",");
}

"{"[[:blank:]\n]* {
	/* Entering a set: from here on whitespace is significant (see above). */
	yyextra->in_set = TRUE;
	return set_lval_str(TOKEN_LBRACE, "{");
}

[[:blank:]\n]*".."[[:blank:]\n]* {
	/* Range separator inside a set, e.g. {1 .. 10}. */
	return set_lval_str(TOKEN_DOTDOT, "..");
}

[[:blank:]\n]*"}" {
	/* Leaving the set: whitespace goes back to being a separator. */
	yyextra->in_set = FALSE;
	return set_lval_str(TOKEN_RBRACE, "}");
}
|
2001-02-01 20:21:25 +00:00
|
|
|
|
dfilter: Fix "!=" relation to be free of contradictions
Wireshark defines the relation of equality A == B as
A any_eq B <=> An == Bn for at least one An, Bn.
More accurately I think this is (formally) an equivalence
relation, not true equality.
Whichever definition for "==" we choose we must keep the
definition of "!=" as !(A == B), otherwise it will
lead to logical contradictions like (A == B) AND (A != B)
being true.
Fix the '!=' relation to match the definition of equality:
A != B <=> !(A == B) <=> A all_ne B <=> An != Bn, for
every n.
This has been the recommended way to write "not equal" for a
long time in the documentation, even to the point where != was
deprecated, but it just wasn't implemented consistently in the
language, which has understandably been a persistent source
of confusion. Even a field that is normally well-behaved
with "!=" like "ip.src" or "ip.dst" will produce unexpected
results with encapsulations like IP-over-IP.
The opcode ALL_NE could have been implemented in the compiler
instead using NOT and ANY_EQ but I chose to implement it in
bytecode. It just seemed more elegant and efficient
but the difference was not very significant.
Keep around "~=" for any_ne relation, in case someone depends
on that, and because we don't have an operator for true equality:
A strict_equal B <=> A all_eq B <=> !(A any_ne B).
If there is only one value then any_ne and all_ne are the same
comparison operation.
Implementing this change did not require fixing any tests so it
is unlikely the relation "~=" (any_ne) will be very useful.
Note that the behaviour of the '<' (less than) comparison relation
is a separate, more subtle issue. In the general case the definition
of '<' that is used is only a partial order.
2021-10-18 20:07:06 +00:00
|
|
|
"==" return SIMPLE(TOKEN_TEST_ANY_EQ);
|
|
|
|
"eq" return SIMPLE(TOKEN_TEST_ANY_EQ);
|
|
|
|
"!=" return SIMPLE(TOKEN_TEST_ALL_NE);
|
|
|
|
"ne" return SIMPLE(TOKEN_TEST_ALL_NE);
|
|
|
|
"~=" return SIMPLE(TOKEN_TEST_ANY_NE);
|
|
|
|
"any_ne" return SIMPLE(TOKEN_TEST_ANY_NE);
|
2021-10-01 10:31:20 +00:00
|
|
|
">" return SIMPLE(TOKEN_TEST_GT);
|
|
|
|
"gt" return SIMPLE(TOKEN_TEST_GT);
|
|
|
|
">=" return SIMPLE(TOKEN_TEST_GE);
|
|
|
|
"ge" return SIMPLE(TOKEN_TEST_GE);
|
|
|
|
"<" return SIMPLE(TOKEN_TEST_LT);
|
|
|
|
"lt" return SIMPLE(TOKEN_TEST_LT);
|
|
|
|
"<=" return SIMPLE(TOKEN_TEST_LE);
|
|
|
|
"le" return SIMPLE(TOKEN_TEST_LE);
|
|
|
|
"bitwise_and" return SIMPLE(TOKEN_TEST_BITWISE_AND);
|
|
|
|
"&" return SIMPLE(TOKEN_TEST_BITWISE_AND);
|
|
|
|
"contains" return SIMPLE(TOKEN_TEST_CONTAINS);
|
|
|
|
"~" return SIMPLE(TOKEN_TEST_MATCHES);
|
|
|
|
"matches" return SIMPLE(TOKEN_TEST_MATCHES);
|
|
|
|
"!" return SIMPLE(TOKEN_TEST_NOT);
|
|
|
|
"not" return SIMPLE(TOKEN_TEST_NOT);
|
|
|
|
"&&" return SIMPLE(TOKEN_TEST_AND);
|
|
|
|
"and" return SIMPLE(TOKEN_TEST_AND);
|
|
|
|
"||" return SIMPLE(TOKEN_TEST_OR);
|
|
|
|
"or" return SIMPLE(TOKEN_TEST_OR);
|
|
|
|
"in" return SIMPLE(TOKEN_TEST_IN);
|
2001-02-01 20:21:25 +00:00
|
|
|
|
2021-10-25 20:27:40 +00:00
|
|
|
"[" {
|
|
|
|
BEGIN(RANGE);
|
2021-10-01 10:31:20 +00:00
|
|
|
return SIMPLE(TOKEN_LBRACKET);
|
2001-02-01 20:21:25 +00:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:27:40 +00:00
|
|
|
<RANGE>[^],]+ {
|
|
|
|
return set_lval_str(TOKEN_RANGE, yytext);
|
2001-03-02 17:04:25 +00:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:27:40 +00:00
|
|
|
<RANGE>"," {
|
2021-10-01 10:31:20 +00:00
|
|
|
return SIMPLE(TOKEN_COMMA);
|
2001-03-02 17:04:25 +00:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:27:40 +00:00
|
|
|
<RANGE>"]" {
|
2001-02-01 20:21:25 +00:00
|
|
|
BEGIN(INITIAL);
|
2021-10-01 10:31:20 +00:00
|
|
|
return SIMPLE(TOKEN_RBRACKET);
|
2001-02-01 20:21:25 +00:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:27:40 +00:00
|
|
|
<RANGE><<EOF>> {
|
|
|
|
dfilter_fail(yyextra->dfw, "The right bracket was missing from a slice.");
|
|
|
|
return SCAN_FAILED;
|
2004-06-03 07:17:24 +00:00
|
|
|
}
|
|
|
|
|
2021-05-30 02:38:12 +00:00
|
|
|
[rR]{0,1}\042 {
	/* start quote of a quoted string, optionally prefixed with r/R (raw) */
	/* The example of how to scan for strings was taken from
	   the flex 2.5.4 manual, from the section "Start Conditions".
	   See:
	   http://www.gnu.org/software/flex/manual/html_node/flex_11.html */

	BEGIN(DQUOTE);
	/* A previous filter that failed to compile due to
	   a missing end quote will have left quoted_string set
	   to something. Clear it now that we are starting
	   a new quoted string. */
	if (yyextra->quoted_string) {
		g_string_free(yyextra->quoted_string, TRUE);
		/* Don't set quoted_string to NULL, as we
		   do in other quoted_string-cleanup code, as we're
		   about to set it in the next line. */
	}
	yyextra->quoted_string = g_string_new("");
	if (yytext[0] == 'r' || yytext[0] == 'R') {
		/*
		 * This is a raw string (like in Python). Rules: 1) The two
		 * escape sequences are \\ and \". 2) Backslashes are
		 * preserved. 3) Double quotes in the string must be escaped.
		 * Corollary: Strings cannot end with an odd number of
		 * backslashes.
		 * Example: r"a\b\x12\"\\" is the string (including the implicit NUL terminator)
		 * {'a', '\\', 'b', '\\', 'x', '1', '2', '\\', '"', '\\', '\\', '\0'}
		 */
		yyextra->raw_string = TRUE;
	}
	else {
		yyextra->raw_string = FALSE;
	}
}
|
|
|
|
|
2004-02-11 22:52:54 +00:00
|
|
|
<DQUOTE><<EOF>> {
	/* unterminated string */
	/* The example of how to handle unclosed strings was taken from
	   the flex 2.5.4 manual, from the section "End-of-file rules".
	   See:
	   http://www.gnu.org/software/flex/manual/html_node/flex_13.html */

	dfilter_fail(yyextra->dfw, "The final quote was missing from a quoted string.");
	return SCAN_FAILED;
}

<DQUOTE>\042 {
	/* end quote */
	BEGIN(INITIAL);
	/* Hand the accumulated buffer to the parser; FALSE transfers ownership
	 * of the character data to df_lval->value. */
	df_lval->value = g_string_free(yyextra->quoted_string, FALSE);
	yyextra->quoted_string = NULL;
	return TOKEN_STRING;
}
|
2001-02-01 20:21:25 +00:00
|
|
|
|
2003-07-25 03:44:05 +00:00
|
|
|
<DQUOTE>\\[0-7]{1,3} {
	/* octal sequence */
	if (yyextra->raw_string) {
		/* Raw string: keep the escape sequence verbatim. */
		g_string_append(yyextra->quoted_string, yytext);
	}
	else {
		unsigned long result;
		result = strtoul(yytext + 1, NULL, 8);
		/* Reject embedded NUL bytes in regular strings: a NUL would
		 * act as a C string terminator and cause false matches
		 * (e.g. "AB" comparing equal to "AB\000CD"). Raw strings can
		 * be used with "matches" to match NUL bytes instead. */
		if (result == 0) {
			g_string_free(yyextra->quoted_string, TRUE);
			yyextra->quoted_string = NULL;
			dfilter_fail(yyextra->dfw, "%s (NUL byte) cannot be used with a regular string.", yytext);
			return SCAN_FAILED;
		}
		if (result > 0xff) {
			g_string_free(yyextra->quoted_string, TRUE);
			yyextra->quoted_string = NULL;
			dfilter_fail(yyextra->dfw, "%s is larger than 255.", yytext);
			return SCAN_FAILED;
		}
		g_string_append_c(yyextra->quoted_string, (gchar) result);
	}
}
|
|
|
|
|
2003-07-25 03:44:05 +00:00
|
|
|
<DQUOTE>\\x[[:xdigit:]]{1,2} {
	/* hex sequence (at most two digits, so the value cannot exceed 0xff) */
	if (yyextra->raw_string) {
		/* Raw string: keep the escape sequence verbatim. */
		g_string_append(yyextra->quoted_string, yytext);
	}
	else {
		unsigned long result;
		result = strtoul(yytext + 2, NULL, 16);
		/* Reject embedded NUL bytes in regular strings: a NUL would
		 * act as a C string terminator and cause false matches
		 * (e.g. "AB" comparing equal to "AB\x00CD"). Raw strings can
		 * be used with "matches" to match NUL bytes instead. */
		if (result == 0) {
			g_string_free(yyextra->quoted_string, TRUE);
			yyextra->quoted_string = NULL;
			dfilter_fail(yyextra->dfw, "%s (NUL byte) cannot be used with a regular string.", yytext);
			return SCAN_FAILED;
		}
		g_string_append_c(yyextra->quoted_string, (gchar) result);
	}
}
|
2001-02-01 20:21:25 +00:00
|
|
|
|
|
|
|
|
2003-07-25 03:44:05 +00:00
|
|
|
<DQUOTE>\\. {
	/* escaped character */
	if (yyextra->raw_string) {
		/* Raw string: keep the backslash and the escaped character. */
		g_string_append(yyextra->quoted_string, yytext);
	}
	else {
		/* Append only the character following the backslash. */
		g_string_append_c(yyextra->quoted_string, yytext[1]);
	}
}

<DQUOTE>[^\\\042]+ {
	/* non-escaped string */
	g_string_append(yyextra->quoted_string, yytext);
}
|
|
|
|
|
|
|
|
|
2016-09-19 01:48:50 +00:00
|
|
|
\047 {
	/* start quote of a quoted character value */
	/* The example of how to scan for strings was taken from
	   the Flex manual, from the section "Start Conditions".
	   See:
	   http://flex.sourceforge.net/manual/Start-Conditions.html#Start-Conditions */

	BEGIN(SQUOTE);
	/* A previous filter that failed to compile due to
	   a missing end quote will have left quoted_string set
	   to something. Clear it now that we are starting
	   a new quoted string. */
	if (yyextra->quoted_string) {
		g_string_free(yyextra->quoted_string, TRUE);
		/* Don't set quoted_string to NULL, as we
		   do in other quoted_string-cleanup code, as we're
		   about to set it in the next line. */
	}
	/* Character constants keep their surrounding quotes in the value. */
	yyextra->quoted_string = g_string_new("'");
}

<SQUOTE><<EOF>> {
	/* unterminated character value */
	/* The example of how to handle unclosed strings was taken from
	   the Flex manual, from the section "End-of-file rules".
	   See:
	   http://flex.sourceforge.net/manual/EOF.html#EOF.html */

	dfilter_fail(yyextra->dfw, "The final quote was missing from a character constant.");
	return SCAN_FAILED;
}

<SQUOTE>\047 {
	/* end quote */
	BEGIN(INITIAL);
	g_string_append_c(yyextra->quoted_string, '\'');
	/* Hand the buffer to the parser; FALSE transfers ownership of the
	 * character data to df_lval->value. */
	df_lval->value = g_string_free(yyextra->quoted_string, FALSE);
	yyextra->quoted_string = NULL;
	return TOKEN_CHARCONST;
}

<SQUOTE>\\. {
	/* escaped character: kept verbatim, backslash included */
	g_string_append(yyextra->quoted_string, yytext);
}

<SQUOTE>[^\\\047]+ {
	/* non-escaped string */
	g_string_append(yyextra->quoted_string, yytext);
}
|
|
|
|
|
|
|
|
|
2021-10-27 18:19:36 +00:00
|
|
|
 /* None of the patterns below can match ".." anywhere in the token string. */

[-+[:alnum:]_:./]+ {
	char *s, *value;

	/* Hack (but lesser) to make sure that ".." is interpreted as a token on its own. */
	if ((s = strstr(yytext, "..")) != NULL) {
		/* If it starts with ".." it is its own token. */
		if (yytext[0] == '.' && yytext[1] == '.') {
			/* Push everything after ".." back into the input. */
			yyless(2);
			df_lval->value = g_strdup("..");
			return TOKEN_DOTDOT;
		}

		/* Match only the prefix before "..". */
		*s = '\0';
		value = g_strdup(yytext);
		*s = '.'; /* Restore */
		/* Rescan everything from ".." onward on the next call. */
		yyless(strlen(value));
		df_lval->value = value;
		return TOKEN_UNPARSED;
	}

	/* It is a variable or a literal value (CIDR, bytes, number, ...). */
	return set_lval_str(TOKEN_UNPARSED, yytext);
}

. {
	/* Default: any character not matched by a rule above is an error. */
	dfilter_fail(yyextra->dfw, "\"%s\" was unexpected in this context.", yytext);
	return SCAN_FAILED;
}
|
2001-02-01 20:21:25 +00:00
|
|
|
|
|
|
|
|
|
|
|
%%

/*
 * Turn diagnostics back on, so we check the code that we've written.
 */
DIAG_ON_FLEX
|
2017-08-18 19:11:47 +00:00
|
|
|
|
2001-02-27 19:23:30 +00:00
|
|
|
static int
|
2021-09-26 21:22:50 +00:00
|
|
|
set_lval_str(int token, const char *token_value)
|
2001-02-01 20:21:25 +00:00
|
|
|
{
|
2021-10-26 09:09:36 +00:00
|
|
|
df_lval->value = g_strdup(token_value);
|
2021-09-26 21:22:50 +00:00
|
|
|
return token;
|
|
|
|
}
|