dect
/
libpcap
Archived
13
0
Fork 0

For Linux, add to the pcap_md structure a pointer to a memory-mapped

region and the size of the region; use that pointer rather than the bp
or buffer member (that means we don't have to worry about
pcap_cleanup_live_common() attempting to free that buffer).  Use the
saved size when unmapping the memory-mapped region.

Use that for Linux USB memory-mapped access as well - and unmap the
memory-mapped region when we close the pcap_t, because we *do* have to
unmap it.
This commit is contained in:
Guy Harris 2009-07-11 11:59:04 -07:00
parent 6f7074d4b0
commit 3efa666174
3 changed files with 33 additions and 34 deletions

View File

@ -133,6 +133,8 @@ struct pcap_md {
u_int packets_read; /* count of packets read with recvfrom() */
bpf_u_int32 oldmode; /* mode to restore when turning monitor mode off */
char *mondevice; /* mac80211 monitor device we created */
u_char *mmapbuf; /* memory-mapped region pointer */
size_t mmapbuflen; /* size of region */
u_int tp_version; /* version of tpacket_hdr for mmaped ring */
u_int tp_hdrlen; /* hdrlen of tpacket_hdr for mmaped ring */
#endif /* linux */

View File

@ -2574,20 +2574,6 @@ prepare_tpacket_socket(pcap_t *handle)
return 1;
}
/*
 * Choose the smallest page-aligned block size (page size doubled as
 * needed) that can hold one frame of frame_size bytes, and report how
 * many whole frames fit in such a block.  The arch-dependent maximum
 * block size enforced by the kernel is not checked here.
 */
static void
compute_ring_block(int frame_size, unsigned *block_size, unsigned *frames_per_block)
{
	unsigned size = getpagesize();

	/* Double the block until a single frame fits. */
	while (size < (unsigned)frame_size)
		size <<= 1;

	*block_size = size;
	*frames_per_block = size / frame_size;
}
static int
create_ring(pcap_t *handle)
{
@ -2603,7 +2589,17 @@ create_ring(pcap_t *handle)
TPACKET_ALIGN(handle->md.tp_hdrlen) +
sizeof(struct sockaddr_ll));
req.tp_frame_nr = handle->opt.buffer_size/req.tp_frame_size;
compute_ring_block(req.tp_frame_size, &req.tp_block_size, &frames_per_block);
/* compute the minimum block size that will handle this frame.
* The block has to be page size aligned.
* The max block size allowed by the kernel is arch-dependent and
* it's not explicitly checked here. */
req.tp_block_size = getpagesize();
while (req.tp_block_size < req.tp_frame_size)
req.tp_block_size <<= 1;
frames_per_block = req.tp_block_size/req.tp_frame_size;
req.tp_block_nr = req.tp_frame_nr / frames_per_block;
/* req.tp_frame_nr is requested to match frames_per_block*req.tp_block_nr */
@ -2632,10 +2628,11 @@ retry:
}
/* memory map the rx ring */
ringsize = req.tp_block_nr * req.tp_block_size;
handle->bp = mmap(0, ringsize, PROT_READ| PROT_WRITE, MAP_SHARED,
handle->fd, 0);
if (handle->bp == MAP_FAILED) {
handle->md.mmapbuflen = req.tp_block_nr * req.tp_block_size;
ringsize = handle->md.mmapbuflen;
handle->md.mmapbuf = mmap(0, handle->md.mmapbuflen,
PROT_READ|PROT_WRITE, MAP_SHARED, handle->fd, 0);
if (handle->md.mmapbuf == MAP_FAILED) {
snprintf(handle->errbuf, PCAP_ERRBUF_SIZE,
"can't mmap rx ring: %s", pcap_strerror(errno));
@ -2659,7 +2656,7 @@ retry:
/* fill the header ring with proper frame ptr*/
handle->offset = 0;
for (i=0; i<req.tp_block_nr; ++i) {
void *base = &handle->bp[i*req.tp_block_size];
void *base = &handle->md.mmapbuf[i*req.tp_block_size];
for (j=0; j<frames_per_block; ++j, ++handle->offset) {
RING_GET_FRAME(handle) = base;
base += req.tp_frame_size;
@ -2682,14 +2679,10 @@ destroy_ring(pcap_t *handle)
(void *) &req, sizeof(req));
/* if ring is mapped, unmap it*/
if (handle->bp) {
/* need to re-compute the ring size */
unsigned frames_per_block, block_size;
compute_ring_block(handle->bufsize, &block_size, &frames_per_block);
/* do not perform sanity check here: we can't recover any error */
munmap(handle->bp, block_size * handle->cc / frames_per_block);
handle->bp = 0;
if (handle->md.mmapbuf) {
/* do not test for mmap failure, as we can't recover from any error */
munmap(handle->md.mmapbuf, handle->md.mmapbuflen);
handle->md.mmapbuf = NULL;
}
}

View File

@ -201,8 +201,10 @@ int usb_mmap(pcap_t* handle)
if (len < 0)
return 0;
handle->buffer = mmap(0, len, PROT_READ, MAP_SHARED, handle->fd, 0);
return handle->buffer != MAP_FAILED;
handle->md.mmapbuflen = len;
handle->md.mmapbuf = mmap(0, handle->md.mmapbuflen, PROT_READ,
MAP_SHARED, handle->fd, 0);
return handle->md.mmapbuf != MAP_FAILED;
}
#define CTRL_TIMEOUT (5*1000) /* milliseconds */
@ -799,7 +801,7 @@ usb_read_linux_mmap(pcap_t *handle, int max_packets, pcap_handler callback, u_ch
nflush = fetch.nfetch;
for (i=0; i<fetch.nfetch; ++i) {
/* discard filler */
hdr = (pcap_usb_header*) &handle->buffer[vec[i]];
hdr = (pcap_usb_header*) &handle->md.mmapbuf[vec[i]];
if (hdr->event_type == '@')
continue;
@ -833,8 +835,10 @@ usb_read_linux_mmap(pcap_t *handle, int max_packets, pcap_handler callback, u_ch
/*
 * Close-time cleanup for the memory-mapped Linux USB capture path.
 *
 * NOTE(review): this span is rendered from a diff, so it shows the old
 * removed lines and the new added lines run together; the surviving code
 * unmaps the region recorded in handle->md.mmapbuf/mmapbuflen.
 */
static void
usb_cleanup_linux_mmap(pcap_t* handle)
{
/* buffer must not be freed because it's memory mapped */
/* XXX - does it need to be unmapped? */
handle->buffer = NULL;
/* if we have a memory-mapped buffer, unmap it */
if (handle->md.mmapbuf != NULL) {
munmap(handle->md.mmapbuf, handle->md.mmapbuflen);
/* clear the pointer so a second cleanup can't double-unmap */
handle->md.mmapbuf = NULL;
}
/* common teardown; per the commit message it would try to free
 * handle->buffer, which is why the mmap'd region is tracked separately */
pcap_cleanup_live_common(handle);
}