
Staging: zram: Rename ramzswap to zram in code

Automated renames in code:
 - rzs* -> zram*
 - RZS* -> ZRAM*
 - ramzswap* -> zram*

Manual changes:
 - Edited comments/messages mentioning "swap"

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Nitin Gupta 2010-06-01 13:31:25 +05:30 committed by Greg Kroah-Hartman
parent 16a4bfb9e9
commit f1e3cfff4d
3 changed files with 256 additions and 262 deletions
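
For orientation, the renamed ioctl interface (ZRAMIO_*) introduced by this patch can be driven from userspace roughly as in the minimal sketch below. The sketch is not part of the commit: the /dev/zram0 node name and the 256 MB size are assumptions for illustration, and the two ioctl numbers are simply re-declared to match the ioctl header changed further down.

/* Hypothetical userspace sketch; not part of this commit. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Mirror the ZRAMIO_* definitions from the patched ioctl header. */
#define ZRAMIO_SET_DISKSIZE_KB	_IOW('z', 0, size_t)
#define ZRAMIO_INIT		_IO('z', 2)

int main(void)
{
	size_t disksize_kb = 256 * 1024;	/* assumed size: 256 MB, given in kB */
	int fd = open("/dev/zram0", O_RDWR);	/* assumed device node name */

	if (fd < 0) {
		perror("open /dev/zram0");
		return 1;
	}
	/* Size must be set before ZRAMIO_INIT; the driver returns -EBUSY afterwards. */
	if (ioctl(fd, ZRAMIO_SET_DISKSIZE_KB, &disksize_kb) < 0)
		perror("ZRAMIO_SET_DISKSIZE_KB");
	if (ioctl(fd, ZRAMIO_INIT) < 0)
		perror("ZRAMIO_INIT");
	close(fd);
	return 0;
}

A later ZRAMIO_RESET (once the device is no longer held open) returns it to the uninitialized state, as the reset path in the diff below shows.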


@@ -1,5 +1,5 @@
/*
* Compressed RAM based swap device
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
@@ -12,7 +12,7 @@
* Project home: http://compcache.googlecode.com
*/
#define KMSG_COMPONENT "ramzswap"
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
@@ -26,35 +26,33 @@
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/vmalloc.h>
#include "zram_drv.h"
/* Globals */
static int ramzswap_major;
static struct ramzswap *devices;
static int zram_major;
static struct zram *devices;
/* Module params (documentation at end) */
static unsigned int num_devices;
static int rzs_test_flag(struct ramzswap *rzs, u32 index,
enum rzs_pageflags flag)
static int zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
return rzs->table[index].flags & BIT(flag);
return zram->table[index].flags & BIT(flag);
}
static void rzs_set_flag(struct ramzswap *rzs, u32 index,
enum rzs_pageflags flag)
static void zram_set_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
rzs->table[index].flags |= BIT(flag);
zram->table[index].flags |= BIT(flag);
}
static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
enum rzs_pageflags flag)
static void zram_clear_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
rzs->table[index].flags &= ~BIT(flag);
zram->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
@@ -72,50 +70,50 @@ static int page_zero_filled(void *ptr)
return 1;
}
static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
if (!rzs->disksize) {
if (!zram->disksize) {
pr_info(
"disk size not provided. You can use disksize_kb module "
"param to specify size.\nUsing default: (%u%% of RAM).\n",
default_disksize_perc_ram
);
rzs->disksize = default_disksize_perc_ram *
zram->disksize = default_disksize_perc_ram *
(totalram_bytes / 100);
}
if (rzs->disksize > 2 * (totalram_bytes)) {
if (zram->disksize > 2 * (totalram_bytes)) {
pr_info(
"There is little point creating a ramzswap of greater than "
"There is little point creating a zram of greater than "
"twice the size of memory since we expect a 2:1 compression "
"ratio. Note that ramzswap uses about 0.1%% of the size of "
"the swap device when not in use so a huge ramzswap is "
"ratio. Note that zram uses about 0.1%% of the size of "
"the disk when not in use so a huge zram is "
"wasteful.\n"
"\tMemory Size: %zu kB\n"
"\tSize you selected: %zu kB\n"
"Continuing anyway ...\n",
totalram_bytes >> 10, rzs->disksize
totalram_bytes >> 10, zram->disksize
);
}
rzs->disksize &= PAGE_MASK;
zram->disksize &= PAGE_MASK;
}
static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
struct ramzswap_ioctl_stats *s)
static void zram_ioctl_get_stats(struct zram *zram,
struct zram_ioctl_stats *s)
{
s->disksize = rzs->disksize;
s->disksize = zram->disksize;
#if defined(CONFIG_RAMZSWAP_STATS)
#if defined(CONFIG_ZRAM_STATS)
{
struct ramzswap_stats *rs = &rzs->stats;
struct zram_stats *rs = &zram->stats;
size_t succ_writes, mem_used;
unsigned int good_compress_perc = 0, no_compress_perc = 0;
mem_used = xv_get_total_size_bytes(rzs->mem_pool)
mem_used = xv_get_total_size_bytes(zram->mem_pool)
+ (rs->pages_expand << PAGE_SHIFT);
succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
rzs_stat64_read(rzs, &rs->failed_writes);
succ_writes = zram_stat64_read(zram, &rs->num_writes) -
zram_stat64_read(zram, &rs->failed_writes);
if (succ_writes && rs->pages_stored) {
good_compress_perc = rs->good_compress * 100
@@ -124,12 +122,12 @@ static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
/ rs->pages_stored;
}
s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
s->num_reads = zram_stat64_read(zram, &rs->num_reads);
s->num_writes = zram_stat64_read(zram, &rs->num_writes);
s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
s->notify_free = zram_stat64_read(zram, &rs->notify_free);
s->pages_zero = rs->pages_zero;
s->good_compress_pct = good_compress_perc;
@@ -141,34 +139,34 @@ static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
s->compr_data_size = rs->compr_size;
s->mem_used_total = mem_used;
}
#endif /* CONFIG_RAMZSWAP_STATS */
#endif /* CONFIG_ZRAM_STATS */
}
static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
static void zram_free_page(struct zram *zram, size_t index)
{
u32 clen;
void *obj;
struct page *page = rzs->table[index].page;
u32 offset = rzs->table[index].offset;
struct page *page = zram->table[index].page;
u32 offset = zram->table[index].offset;
if (unlikely(!page)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
*/
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
rzs_clear_flag(rzs, index, RZS_ZERO);
rzs_stat_dec(&rzs->stats.pages_zero);
if (zram_test_flag(zram, index, ZRAM_ZERO)) {
zram_clear_flag(zram, index, ZRAM_ZERO);
zram_stat_dec(&zram->stats.pages_zero);
}
return;
}
if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
clen = PAGE_SIZE;
__free_page(page);
rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
rzs_stat_dec(&rzs->stats.pages_expand);
zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_dec(&zram->stats.pages_expand);
goto out;
}
@@ -176,16 +174,16 @@ static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
kunmap_atomic(obj, KM_USER0);
xv_free(rzs->mem_pool, page, offset);
xv_free(zram->mem_pool, page, offset);
if (clen <= PAGE_SIZE / 2)
rzs_stat_dec(&rzs->stats.good_compress);
zram_stat_dec(&zram->stats.good_compress);
out:
rzs->stats.compr_size -= clen;
rzs_stat_dec(&rzs->stats.pages_stored);
zram->stats.compr_size -= clen;
zram_stat_dec(&zram->stats.pages_stored);
rzs->table[index].page = NULL;
rzs->table[index].offset = 0;
zram->table[index].page = NULL;
zram->table[index].offset = 0;
}
static void handle_zero_page(struct page *page)
@@ -199,14 +197,14 @@ static void handle_zero_page(struct page *page)
flush_dcache_page(page);
}
static void handle_uncompressed_page(struct ramzswap *rzs,
static void handle_uncompressed_page(struct zram *zram,
struct page *page, u32 index)
{
unsigned char *user_mem, *cmem;
user_mem = kmap_atomic(page, KM_USER0);
cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
rzs->table[index].offset;
cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
zram->table[index].offset;
memcpy(user_mem, cmem, PAGE_SIZE);
kunmap_atomic(user_mem, KM_USER0);
@@ -215,14 +213,14 @@ static void handle_uncompressed_page(struct ramzswap *rzs,
flush_dcache_page(page);
}
static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
static int zram_read(struct zram *zram, struct bio *bio)
{
int i;
u32 index;
struct bio_vec *bvec;
rzs_stat64_inc(rzs, &rzs->stats.num_reads);
zram_stat64_inc(zram, &zram->stats.num_reads);
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
bio_for_each_segment(bvec, bio, i) {
@@ -234,13 +232,13 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
page = bvec->bv_page;
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
if (zram_test_flag(zram, index, ZRAM_ZERO)) {
handle_zero_page(page);
continue;
}
/* Requested page is not present in compressed area */
if (unlikely(!rzs->table[index].page)) {
if (unlikely(!zram->table[index].page)) {
pr_debug("Read before write: sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
/* Do nothing */
@@ -248,16 +246,16 @@
}
/* Page is stored uncompressed since it's incompressible */
if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
handle_uncompressed_page(rzs, page, index);
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
handle_uncompressed_page(zram, page, index);
continue;
}
user_mem = kmap_atomic(page, KM_USER0);
clen = PAGE_SIZE;
cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
rzs->table[index].offset;
cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
zram->table[index].offset;
ret = lzo1x_decompress_safe(
cmem + sizeof(*zheader),
@@ -271,7 +269,7 @@
if (unlikely(ret != LZO_E_OK)) {
pr_err("Decompression failed! err=%d, page=%u\n",
ret, index);
rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
zram_stat64_inc(zram, &zram->stats.failed_reads);
goto out;
}
@@ -288,13 +286,13 @@ out:
return 0;
}
static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
static int zram_write(struct zram *zram, struct bio *bio)
{
int i;
u32 index;
struct bio_vec *bvec;
rzs_stat64_inc(rzs, &rzs->stats.num_writes);
zram_stat64_inc(zram, &zram->stats.num_writes);
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
@@ -307,82 +305,83 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
unsigned char *user_mem, *cmem, *src;
page = bvec->bv_page;
src = rzs->compress_buffer;
src = zram->compress_buffer;
/*
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
if (rzs->table[index].page ||
rzs_test_flag(rzs, index, RZS_ZERO))
ramzswap_free_page(rzs, index);
if (zram->table[index].page ||
zram_test_flag(zram, index, ZRAM_ZERO))
zram_free_page(zram, index);
mutex_lock(&rzs->lock);
mutex_lock(&zram->lock);
user_mem = kmap_atomic(page, KM_USER0);
if (page_zero_filled(user_mem)) {
kunmap_atomic(user_mem, KM_USER0);
mutex_unlock(&rzs->lock);
rzs_stat_inc(&rzs->stats.pages_zero);
rzs_set_flag(rzs, index, RZS_ZERO);
mutex_unlock(&zram->lock);
zram_stat_inc(&zram->stats.pages_zero);
zram_set_flag(zram, index, ZRAM_ZERO);
continue;
}
ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
rzs->compress_workmem);
zram->compress_workmem);
kunmap_atomic(user_mem, KM_USER0);
if (unlikely(ret != LZO_E_OK)) {
mutex_unlock(&rzs->lock);
mutex_unlock(&zram->lock);
pr_err("Compression failed! err=%d\n", ret);
rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
zram_stat64_inc(zram, &zram->stats.failed_writes);
goto out;
}
/*
* Page is incompressible. Store it as-is (uncompressed)
* since we do not want to return too many swap write
* since we do not want to return too many disk write
* errors which has side effect of hanging the system.
*/
if (unlikely(clen > max_zpage_size)) {
clen = PAGE_SIZE;
page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (unlikely(!page_store)) {
mutex_unlock(&rzs->lock);
mutex_unlock(&zram->lock);
pr_info("Error allocating memory for "
"incompressible page: %u\n", index);
rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
zram_stat64_inc(zram,
&zram->stats.failed_writes);
goto out;
}
offset = 0;
rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
rzs_stat_inc(&rzs->stats.pages_expand);
rzs->table[index].page = page_store;
zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_inc(&zram->stats.pages_expand);
zram->table[index].page = page_store;
src = kmap_atomic(page, KM_USER0);
goto memstore;
}
if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
&rzs->table[index].page, &offset,
if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
&zram->table[index].page, &offset,
GFP_NOIO | __GFP_HIGHMEM)) {
mutex_unlock(&rzs->lock);
mutex_unlock(&zram->lock);
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
zram_stat64_inc(zram, &zram->stats.failed_writes);
goto out;
}
memstore:
rzs->table[index].offset = offset;
zram->table[index].offset = offset;
cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
rzs->table[index].offset;
cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
zram->table[index].offset;
#if 0
/* Back-reference needed for memory defragmentation */
if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
zheader = (struct zobj_header *)cmem;
zheader->table_idx = index;
cmem += sizeof(*zheader);
@@ -392,16 +391,16 @@ memstore:
memcpy(cmem, src, clen);
kunmap_atomic(cmem, KM_USER1);
if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
kunmap_atomic(src, KM_USER0);
/* Update stats */
rzs->stats.compr_size += clen;
rzs_stat_inc(&rzs->stats.pages_stored);
zram->stats.compr_size += clen;
zram_stat_inc(&zram->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
rzs_stat_inc(&rzs->stats.good_compress);
zram_stat_inc(&zram->stats.good_compress);
mutex_unlock(&rzs->lock);
mutex_unlock(&zram->lock);
index++;
}
@@ -417,10 +416,10 @@ out:
/*
* Check if request is within bounds and page aligned.
*/
static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
if (unlikely(
(bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
(bio->bi_size & (PAGE_SIZE - 1)))) {
@@ -432,160 +431,160 @@ static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
}
/*
* Handler function for all ramzswap I/O requests.
* Handler function for all zram I/O requests.
*/
static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
int ret = 0;
struct ramzswap *rzs = queue->queuedata;
struct zram *zram = queue->queuedata;
if (unlikely(!rzs->init_done)) {
if (unlikely(!zram->init_done)) {
bio_io_error(bio);
return 0;
}
if (!valid_io_request(rzs, bio)) {
rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
if (!valid_io_request(zram, bio)) {
zram_stat64_inc(zram, &zram->stats.invalid_io);
bio_io_error(bio);
return 0;
}
switch (bio_data_dir(bio)) {
case READ:
ret = ramzswap_read(rzs, bio);
ret = zram_read(zram, bio);
break;
case WRITE:
ret = ramzswap_write(rzs, bio);
ret = zram_write(zram, bio);
break;
}
return ret;
}
static void reset_device(struct ramzswap *rzs)
static void reset_device(struct zram *zram)
{
size_t index;
/* Do not accept any new I/O request */
rzs->init_done = 0;
zram->init_done = 0;
/* Free various per-device buffers */
kfree(rzs->compress_workmem);
free_pages((unsigned long)rzs->compress_buffer, 1);
kfree(zram->compress_workmem);
free_pages((unsigned long)zram->compress_buffer, 1);
rzs->compress_workmem = NULL;
rzs->compress_buffer = NULL;
zram->compress_workmem = NULL;
zram->compress_buffer = NULL;
/* Free all pages that are still in this ramzswap device */
for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
struct page *page;
u16 offset;
page = rzs->table[index].page;
offset = rzs->table[index].offset;
page = zram->table[index].page;
offset = zram->table[index].offset;
if (!page)
continue;
if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
__free_page(page);
else
xv_free(rzs->mem_pool, page, offset);
xv_free(zram->mem_pool, page, offset);
}
vfree(rzs->table);
rzs->table = NULL;
vfree(zram->table);
zram->table = NULL;
xv_destroy_pool(rzs->mem_pool);
rzs->mem_pool = NULL;
xv_destroy_pool(zram->mem_pool);
zram->mem_pool = NULL;
/* Reset stats */
memset(&rzs->stats, 0, sizeof(rzs->stats));
memset(&zram->stats, 0, sizeof(zram->stats));
rzs->disksize = 0;
zram->disksize = 0;
}
static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
static int zram_ioctl_init_device(struct zram *zram)
{
int ret;
size_t num_pages;
if (rzs->init_done) {
if (zram->init_done) {
pr_info("Device already initialized!\n");
return -EBUSY;
}
ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!rzs->compress_workmem) {
zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!zram->compress_workmem) {
pr_err("Error allocating compressor working memory!\n");
ret = -ENOMEM;
goto fail;
}
rzs->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
if (!rzs->compress_buffer) {
zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
goto fail;
}
num_pages = rzs->disksize >> PAGE_SHIFT;
rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
if (!rzs->table) {
pr_err("Error allocating ramzswap address table\n");
num_pages = zram->disksize >> PAGE_SHIFT;
zram->table = vmalloc(num_pages * sizeof(*zram->table));
if (!zram->table) {
pr_err("Error allocating zram address table\n");
/* To prevent accessing table entries during cleanup */
rzs->disksize = 0;
zram->disksize = 0;
ret = -ENOMEM;
goto fail;
}
memset(rzs->table, 0, num_pages * sizeof(*rzs->table));
memset(zram->table, 0, num_pages * sizeof(*zram->table));
set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
/* ramzswap devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
rzs->mem_pool = xv_create_pool();
if (!rzs->mem_pool) {
zram->mem_pool = xv_create_pool();
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
goto fail;
}
rzs->init_done = 1;
zram->init_done = 1;
pr_debug("Initialization done!\n");
return 0;
fail:
reset_device(rzs);
reset_device(zram);
pr_err("Initialization failed: err=%d\n", ret);
return ret;
}
static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
static int zram_ioctl_reset_device(struct zram *zram)
{
if (rzs->init_done)
reset_device(rzs);
if (zram->init_done)
reset_device(zram);
return 0;
}
static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
static int zram_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret = 0;
size_t disksize_kb;
struct ramzswap *rzs = bdev->bd_disk->private_data;
struct zram *zram = bdev->bd_disk->private_data;
switch (cmd) {
case RZSIO_SET_DISKSIZE_KB:
if (rzs->init_done) {
case ZRAMIO_SET_DISKSIZE_KB:
if (zram->init_done) {
ret = -EBUSY;
goto out;
}
@@ -594,14 +593,14 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
ret = -EFAULT;
goto out;
}
rzs->disksize = disksize_kb << 10;
zram->disksize = disksize_kb << 10;
pr_info("Disk size set to %zu kB\n", disksize_kb);
break;
case RZSIO_GET_STATS:
case ZRAMIO_GET_STATS:
{
struct ramzswap_ioctl_stats *stats;
if (!rzs->init_done) {
struct zram_ioctl_stats *stats;
if (!zram->init_done) {
ret = -ENOTTY;
goto out;
}
@@ -610,7 +609,7 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
ret = -ENOMEM;
goto out;
}
ramzswap_ioctl_get_stats(rzs, stats);
zram_ioctl_get_stats(zram, stats);
if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
kfree(stats);
ret = -EFAULT;
@@ -619,11 +618,11 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
kfree(stats);
break;
}
case RZSIO_INIT:
ret = ramzswap_ioctl_init_device(rzs);
case ZRAMIO_INIT:
ret = zram_ioctl_init_device(zram);
break;
case RZSIO_RESET:
case ZRAMIO_RESET:
/* Do not reset an active device! */
if (bdev->bd_holders) {
ret = -EBUSY;
@@ -634,7 +633,7 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
if (bdev)
fsync_bdev(bdev);
ret = ramzswap_ioctl_reset_device(rzs);
ret = zram_ioctl_reset_device(zram);
break;
default:
@@ -646,88 +645,88 @@ out:
return ret;
}
void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
struct ramzswap *rzs;
struct zram *zram;
rzs = bdev->bd_disk->private_data;
ramzswap_free_page(rzs, index);
rzs_stat64_inc(rzs, &rzs->stats.notify_free);
zram = bdev->bd_disk->private_data;
zram_free_page(zram, index);
zram_stat64_inc(zram, &zram->stats.notify_free);
}
static const struct block_device_operations ramzswap_devops = {
.ioctl = ramzswap_ioctl,
.swap_slot_free_notify = ramzswap_slot_free_notify,
static const struct block_device_operations zram_devops = {
.ioctl = zram_ioctl,
.swap_slot_free_notify = zram_slot_free_notify,
.owner = THIS_MODULE
};
static int create_device(struct ramzswap *rzs, int device_id)
static int create_device(struct zram *zram, int device_id)
{
int ret = 0;
mutex_init(&rzs->lock);
spin_lock_init(&rzs->stat64_lock);
mutex_init(&zram->lock);
spin_lock_init(&zram->stat64_lock);
rzs->queue = blk_alloc_queue(GFP_KERNEL);
if (!rzs->queue) {
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
ret = -ENOMEM;
goto out;
}
blk_queue_make_request(rzs->queue, ramzswap_make_request);
rzs->queue->queuedata = rzs;
blk_queue_make_request(zram->queue, zram_make_request);
zram->queue->queuedata = zram;
/* gendisk structure */
rzs->disk = alloc_disk(1);
if (!rzs->disk) {
blk_cleanup_queue(rzs->queue);
zram->disk = alloc_disk(1);
if (!zram->disk) {
blk_cleanup_queue(zram->queue);
pr_warning("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out;
}
rzs->disk->major = ramzswap_major;
rzs->disk->first_minor = device_id;
rzs->disk->fops = &ramzswap_devops;
rzs->disk->queue = rzs->queue;
rzs->disk->private_data = rzs;
snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->fops = &zram_devops;
zram->disk->queue = zram->queue;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
/* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
set_capacity(rzs->disk, 0);
/* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
set_capacity(zram->disk, 0);
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
*/
blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);
blk_queue_io_min(rzs->disk->queue, PAGE_SIZE);
blk_queue_io_opt(rzs->disk->queue, PAGE_SIZE);
blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
add_disk(rzs->disk);
add_disk(zram->disk);
rzs->init_done = 0;
zram->init_done = 0;
out:
return ret;
}
static void destroy_device(struct ramzswap *rzs)
static void destroy_device(struct zram *zram)
{
if (rzs->disk) {
del_gendisk(rzs->disk);
put_disk(rzs->disk);
if (zram->disk) {
del_gendisk(zram->disk);
put_disk(zram->disk);
}
if (rzs->queue)
blk_cleanup_queue(rzs->queue);
if (zram->queue)
blk_cleanup_queue(zram->queue);
}
static int __init ramzswap_init(void)
static int __init zram_init(void)
{
int ret, dev_id;
@@ -738,8 +737,8 @@ static int __init ramzswap_init(void)
goto out;
}
ramzswap_major = register_blkdev(0, "ramzswap");
if (ramzswap_major <= 0) {
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_warning("Unable to get major number\n");
ret = -EBUSY;
goto out;
@@ -752,7 +751,7 @@ static int __init ramzswap_init(void)
/* Allocate the device array and initialize each one */
pr_info("Creating %u devices ...\n", num_devices);
devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!devices) {
ret = -ENOMEM;
goto unregister;
@@ -770,36 +769,36 @@ free_devices:
while (dev_id)
destroy_device(&devices[--dev_id]);
unregister:
unregister_blkdev(ramzswap_major, "ramzswap");
unregister_blkdev(zram_major, "zram");
out:
return ret;
}
static void __exit ramzswap_exit(void)
static void __exit zram_exit(void)
{
int i;
struct ramzswap *rzs;
struct zram *zram;
for (i = 0; i < num_devices; i++) {
rzs = &devices[i];
zram = &devices[i];
destroy_device(rzs);
if (rzs->init_done)
reset_device(rzs);
destroy_device(zram);
if (zram->init_done)
reset_device(zram);
}
unregister_blkdev(ramzswap_major, "ramzswap");
unregister_blkdev(zram_major, "zram");
kfree(devices);
pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");
MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_init(ramzswap_init);
module_exit(ramzswap_exit);
module_init(zram_init);
module_exit(zram_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Based Swap Device");
MODULE_DESCRIPTION("Compressed RAM Block Device");


@@ -1,5 +1,5 @@
/*
* Compressed RAM based swap device
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
@@ -12,8 +12,8 @@
* Project home: http://compcache.googlecode.com
*/
#ifndef _RAMZSWAP_DRV_H_
#define _RAMZSWAP_DRV_H_
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
#include <linux/mutex.h>
@@ -41,7 +41,7 @@ struct zobj_header {
/*-- Configurable parameters */
/* Default ramzswap disk size: 25% of total RAM */
/* Default zram disk size: 25% of total RAM */
static const unsigned default_disksize_perc_ram = 25;
/*
@@ -63,23 +63,20 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
/* Flags for ramzswap pages (table[page_no].flags) */
enum rzs_pageflags {
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
/* Page is stored uncompressed */
RZS_UNCOMPRESSED,
ZRAM_UNCOMPRESSED,
/* Page consists entirely of zeros */
RZS_ZERO,
ZRAM_ZERO,
__NR_RZS_PAGEFLAGS,
__NR_ZRAM_PAGEFLAGS,
};
/*-- Data structures */
/*
* Allocated for each swap slot, indexed by page no.
* These table entries must fit exactly in a page.
*/
/* Allocated for each disk page */
struct table {
struct page *page;
u16 offset;
@@ -87,17 +84,17 @@ struct table {
u8 flags;
} __attribute__((aligned(4)));
struct ramzswap_stats {
struct zram_stats {
/* basic stats */
size_t compr_size; /* compressed size of pages stored -
* needed to enforce memlimit */
/* more stats */
#if defined(CONFIG_RAMZSWAP_STATS)
#if defined(CONFIG_ZRAM_STATS)
u64 num_reads; /* failed + successful */
u64 num_writes; /* --do-- */
u64 failed_reads; /* should NEVER! happen */
u64 failed_writes; /* can happen when memory is too low */
u64 invalid_io; /* non-swap I/O requests */
u64 invalid_io; /* non-page-aligned I/O requests */
u64 notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
@@ -106,7 +103,7 @@ struct ramzswap_stats {
#endif
};
struct ramzswap {
struct zram {
struct xv_pool *mem_pool;
void *compress_workmem;
void *compress_buffer;
@@ -118,51 +115,50 @@ struct ramzswap {
struct gendisk *disk;
int init_done;
/*
* This is limit on amount of *uncompressed* worth of data
* we can hold. When backing swap device is provided, it is
* set equal to device size.
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
*/
size_t disksize; /* bytes */
struct ramzswap_stats stats;
struct zram_stats stats;
};
/*-- */
/* Debugging and Stats */
#if defined(CONFIG_RAMZSWAP_STATS)
static void rzs_stat_inc(u32 *v)
#if defined(CONFIG_ZRAM_STATS)
static void zram_stat_inc(u32 *v)
{
*v = *v + 1;
}
static void rzs_stat_dec(u32 *v)
static void zram_stat_dec(u32 *v)
{
*v = *v - 1;
}
static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
static void zram_stat64_inc(struct zram *zram, u64 *v)
{
spin_lock(&rzs->stat64_lock);
spin_lock(&zram->stat64_lock);
*v = *v + 1;
spin_unlock(&rzs->stat64_lock);
spin_unlock(&zram->stat64_lock);
}
static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
static u64 zram_stat64_read(struct zram *zram, u64 *v)
{
u64 val;
spin_lock(&rzs->stat64_lock);
spin_lock(&zram->stat64_lock);
val = *v;
spin_unlock(&rzs->stat64_lock);
spin_unlock(&zram->stat64_lock);
return val;
}
#else
#define rzs_stat_inc(v)
#define rzs_stat_dec(v)
#define rzs_stat64_inc(r, v)
#define rzs_stat64_read(r, v)
#endif /* CONFIG_RAMZSWAP_STATS */
#define zram_stat_inc(v)
#define zram_stat_dec(v)
#define zram_stat64_inc(r, v)
#define zram_stat64_read(r, v)
#endif /* CONFIG_ZRAM_STATS */
#endif


@@ -1,5 +1,5 @@
/*
* Compressed RAM based swap device
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
@@ -12,17 +12,16 @@
* Project home: http://compcache.googlecode.com
*/
#ifndef _RAMZSWAP_IOCTL_H_
#define _RAMZSWAP_IOCTL_H_
#ifndef _ZRAM_IOCTL_H_
#define _ZRAM_IOCTL_H_
struct ramzswap_ioctl_stats {
u64 disksize; /* user specified or equal to backing swap
* size (if present) */
struct zram_ioctl_stats {
u64 disksize; /* disksize in bytes (user specifies in KB) */
u64 num_reads; /* failed + successful */
u64 num_writes; /* --do-- */
u64 failed_reads; /* should NEVER! happen */
u64 failed_writes; /* can happen when memory is too low */
u64 invalid_io; /* non-swap I/O requests */
u64 invalid_io; /* non-page-aligned I/O requests */
u64 notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 good_compress_pct; /* no. of pages with compression ratio<=50% */
@@ -34,9 +33,9 @@ struct ramzswap_ioctl_stats {
u64 mem_used_total;
} __attribute__ ((packed, aligned(4)));
#define RZSIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
#define RZSIO_GET_STATS _IOR('z', 1, struct ramzswap_ioctl_stats)
#define RZSIO_INIT _IO('z', 2)
#define RZSIO_RESET _IO('z', 3)
#define ZRAMIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
#define ZRAMIO_GET_STATS _IOR('z', 1, struct zram_ioctl_stats)
#define ZRAMIO_INIT _IO('z', 2)
#define ZRAMIO_RESET _IO('z', 3)
#endif