IB/srp: consolidate hot-path variables into cache lines

Put the variables accessed together in the hot-path into common
cachelines, and separate them by RW vs RO to avoid false dirtying.
We keep a local copy of the lkey and rkey in the target to avoid
traversing pointers (and associated cache lines) to find them.

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: David Dillow <dillowda@ornl.gov>
Author: David Dillow, 2010-11-26 15:34:46 -05:00
parent e968467822
commit 9af762719e
2 changed files with 26 additions and 17 deletions
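Before the diff itself, a minimal sketch of the layout idea the commit message describes. The struct and field names below are illustrative only (not part of the patch): fields written on every request stay in one group, and the first hot-path read-only field is pushed onto a fresh cache line with ____cacheline_aligned_in_smp, so stores to the RW group never invalidate the line the read-only fields share across CPUs.

/* Illustrative sketch, not driver code. */
#include <linux/cache.h>	/* ____cacheline_aligned_in_smp */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_port {
	/* RW in the hot path, commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* read-only in the hot path; starts a new cache line on SMP */
	u32			lkey ____cacheline_aligned_in_smp;
	u32			rkey;

	/* slow-path configuration and state follow */
	int			zero_req_lim;
};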

drivers/infiniband/ulp/srp/ib_srp.c

@@ -768,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(dev->mr->rkey);
+ buf->key = cpu_to_be32(target->rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
@@ -793,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->desc_list[i].va =
cpu_to_be64(ib_sg_dma_address(ibdev, sg));
buf->desc_list[i].key =
- cpu_to_be32(dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->desc_list[i].len = cpu_to_be32(dma_len);
datalen += dma_len;
}
@@ -806,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->table_desc.va =
cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
buf->table_desc.key =
- cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->table_desc.len =
cpu_to_be32(count * sizeof (struct srp_direct_buf));
@@ -883,7 +883,7 @@ static int srp_post_send(struct srp_target_port *target,
list.addr = iu->dma;
list.length = len;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
wr.wr_id = (uintptr_t) iu;
@@ -902,7 +902,7 @@ static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
list.addr = iu->dma;
list.length = iu->size;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
wr.wr_id = (uintptr_t) iu;
@@ -1955,6 +1955,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
+ target->lkey = host->srp_dev->mr->lkey;
+ target->rkey = host->srp_dev->mr->rkey;
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);

drivers/infiniband/ulp/srp/ib_srp.h

@@ -117,6 +117,24 @@ struct srp_request {
};
struct srp_target_port {
+ /* These are RW in the hot path, and commonly used together */
+ struct list_head free_tx;
+ struct list_head free_reqs;
+ spinlock_t lock;
+ s32 req_lim;
+
+ /* These are read-only in the hot path */
+ struct ib_cq *send_cq ____cacheline_aligned_in_smp;
+ struct ib_cq *recv_cq;
+ struct ib_qp *qp;
+ u32 lkey;
+ u32 rkey;
+ enum srp_target_state state;
+
+ /* Everything above this point is used in the hot path of
+  * command processing. Try to keep them packed into cachelines.
+  */
+
__be64 id_ext;
__be64 ioc_guid;
__be64 service_id;
@@ -133,23 +151,13 @@ struct srp_target_port {
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *recv_cq;
- struct ib_cq *send_cq;
- struct ib_qp *qp;
int max_ti_iu_len;
- s32 req_lim;
int zero_req_lim;
- struct srp_iu *rx_ring[SRP_RQ_SIZE];
- spinlock_t lock;
- struct list_head free_tx;
struct srp_iu *tx_ring[SRP_SQ_SIZE];
- struct list_head free_reqs;
+ struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct work_struct work;
@@ -157,7 +165,6 @@ struct srp_target_port {
struct list_head list;
struct completion done;
int status;
- enum srp_target_state state;
int qp_in_error;
struct completion tsk_mgmt_done;
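The .c hunks carry the second half of the message: the keys used on every send and data mapping previously required chasing target->srp_host->srp_dev->mr, three dependent pointer loads that can each miss in cache. A standalone sketch (the types below only mimic the driver's shape and are not driver code) of the access-path difference:

/* Standalone sketch, not driver code. */
struct mr       { unsigned int lkey, rkey; };
struct srp_dev  { struct mr *mr; };
struct srp_host { struct srp_dev *srp_dev; };

struct target {
	struct srp_host *srp_host;
	unsigned int lkey, rkey;	/* copied once at target creation */
};

static unsigned int lkey_before(struct target *t)
{
	/* three dependent pointer loads, each a potential cache miss */
	return t->srp_host->srp_dev->mr->lkey;
}

static unsigned int lkey_after(struct target *t)
{
	/* one load from a cache line the send path already touches */
	return t->lkey;
}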