ocf/0004-read_bf-add-per-queue-line-data-cache.patch
Kemeng Shi de38fbd7f8 OCF: enable xcache
This patchset enables xcache. More details can be found in the patch logs.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
2023-01-10 17:12:54 +08:00


From a09c79ba0cb154a0c0c1264bbb89248b7f8e956e Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 17:37:32 +0800
Subject: [PATCH 4/4] read_bf: add per-queue line data cache

Add a per-queue line data cache to reduce the cost of hugepage allocation
for read backfill.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
 src/engine/xcache_engine_common.c |  2 +-
 src/engine/xcache_engine_rd.c     | 81 +++++++++++++++----------------
 src/xcache_queue.c                | 24 ++++++++-
 src/xcache_queue.h                |  6 +++
 4 files changed, 68 insertions(+), 45 deletions(-)
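
The pattern the commit message describes, as a minimal standalone C sketch (illustration only, not part of the patch: line_data_cache, line_data_get(), line_data_put() and LINE_DATA_SIZE are made-up names, and malloc()/free() stand in for OCF's ctx_data_alloc()/ctx_data_free()). Each queue keeps a small stack of pre-sized buffers and recycles them across read-backfill requests:

#include <stdlib.h>

#define QUEUE_CACHE_SIZE 128
#define LINE_DATA_SIZE   (64 * 1024)    /* assumed cache-line size for the sketch */

struct line_data_cache {
        void *buf[QUEUE_CACHE_SIZE];    /* per-queue stack of idle line buffers */
        int num;
};

static void *line_data_get(struct line_data_cache *c)
{
        if (c->num > 0)
                return c->buf[--c->num];        /* fast path: reuse a cached buffer */
        return malloc(LINE_DATA_SIZE);          /* slow path: real allocation */
}

static void line_data_put(struct line_data_cache *c, void *data)
{
        if (c->num < QUEUE_CACHE_SIZE)
                c->buf[c->num++] = data;        /* keep it for the next backfill */
        else
                free(data);                     /* cache is full: release it */
}

Because the per-queue array is only touched from its owning queue (the same reasoning as the existing "only called by request in queue to avoid lock" comment in xcache_queue.c), no locking is required. With QUEUE_CACHE_SIZE at 128, a fully warmed cache pins at most 128 line buffers per queue, e.g. about 8 MiB for 64 KiB cache lines; the real allocation size is (ocf_line_size(cache) + PAGE_SIZE - 1) / PAGE_SIZE pages, i.e. the line size rounded up to whole pages.
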
diff --git a/src/engine/xcache_engine_common.c b/src/engine/xcache_engine_common.c
index eb1decb..9079cd0 100644
--- a/src/engine/xcache_engine_common.c
+++ b/src/engine/xcache_engine_common.c
@@ -355,7 +355,7 @@ static int xcache_wr_lb_common_end(struct xcache_backdev_io *backdev_io, struct
metadata_clear_valid_sec(cache, line, start_secotr, last_sector);
ocf_metadata_end_collision_shared_access(cache, line);
- ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
return 0;
}
diff --git a/src/engine/xcache_engine_rd.c b/src/engine/xcache_engine_rd.c
index 6ac3b7e..4b64975 100644
--- a/src/engine/xcache_engine_rd.c
+++ b/src/engine/xcache_engine_rd.c
@@ -10,10 +10,14 @@
#include "xcache_engine_common.h"
#include "../xcache_queue.h"
-static void xcache_read_bf_done(ocf_cache_t cache, struct xcache_backdev_io *base_io)
+static inline void xcache_read_bf_done(ocf_cache_t cache, struct xcache_backdev_io *base_io)
{
+ ocf_queue_t q = xcache_io_queue(base_io->xcache_io);
+
xcache_unlock_wr(ocf_cache_line_concurrency(cache), base_io->line);
- xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+ xcache_queue_free_line_data(q, cache, base_io->data);
+ base_io->data = NULL;
+ xcache_queue_free_backdev_io(q, base_io);
}
static int xcache_read_bf_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
@@ -26,10 +30,6 @@ static int xcache_read_bf_cb(struct xcache_backdev_io *base_io, struct backdev_i
ocf_cache_log(cache, log_err, "read bf failed\n");
ocf_core_stats_cache_error_update(core, OCF_WRITE);
}
- if (env_atomic_read(&bf_io->remaining) == 1) {
- ctx_data_free(cache->owner, base_io->data);
- }
- base_io->data = NULL;
xcache_read_bf_done(cache, base_io);
return 0;
}
@@ -69,7 +69,7 @@ static ctx_data_t *xcache_get_bf_data(struct xcache_io *io, uint64_t addr, uint6
uint64_t from = addr - start_byte;
ctx_data_t *dst;
- dst = ctx_data_alloc(cache->owner, ((size + PAGE_SIZE - 1) / PAGE_SIZE));
+ dst = xcache_queue_alloc_line_data(xcache_io_queue(io), cache);
if (dst == NULL) {
return NULL;
}
@@ -78,14 +78,7 @@ static ctx_data_t *xcache_get_bf_data(struct xcache_io *io, uint64_t addr, uint6
return dst;
}
-static void xcache_free_bf_data(struct xcache_io *io, ctx_data_t *data)
-{
- ocf_cache_t cache = xcache_io_cache(io);
-
- ctx_data_free(cache->owner, data);
-}
-
-static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size, uint64_t buf_offset)
+static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size)
{
struct xcache_io *bf_io = base_io->xcache_io;
ocf_cache_t cache = xcache_io_cache(bf_io);
@@ -101,7 +94,7 @@ static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, u
cache_addr = cache_line_to_addr(cache, line, xcache_addr_offset(cache, addr));
base_io->end = xcache_read_bf_cb;
- xcache_backdev_submit_io(base_io, true, cache_addr, size, buf_offset, OCF_WRITE);
+ xcache_backdev_submit_io(base_io, true, cache_addr, size, 0, OCF_WRITE);
return 0;
out:
@@ -135,7 +128,7 @@ static void bf_io_end(struct xcache_io *bf_io, int error)
xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
}
-static struct xcache_io *xcache_get_bf_io(struct xcache_io *ori_io)
+static struct xcache_io *xcache_get_bf_xcache_io(struct xcache_io *ori_io)
{
struct xcache_io *bf_io = xcache_queue_alloc_xcache_io(ori_io->io_queue);
@@ -152,41 +145,54 @@ static struct xcache_io *xcache_get_bf_io(struct xcache_io *ori_io)
return bf_io;
}
-static void xcache_free_bf_io(struct xcache_io *bf_io)
+static void xcache_free_bf_xcache_io(struct xcache_io *bf_io)
{
xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
}
+static int xcache_submit_read_bf_line(struct xcache_io *io, struct xcache_io *bf_io, uint64_t bf_addr, uint64_t bf_size, ocf_cache_line_t line)
+{
+ ocf_cache_t cache = xcache_io_cache(bf_io);
+ struct xcache_backdev_io *base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf base_io failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ return -1;
+ }
+
+ base_io->data = xcache_get_bf_data(io, bf_addr, bf_size);
+ if (base_io->data == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf_data failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return -1;
+ }
+ base_io->xcache_io = bf_io;
+ base_io->line = line;
+
+ return xcache_do_read_bf(base_io, bf_addr, bf_size);
+}
+
static void xcache_submit_read_bf(struct xcache_io *io, uint64_t addr, uint64_t size)
{
ocf_cache_t cache = xcache_io_cache(io);
ocf_core_t core = xcache_io_core(io);
ocf_core_id_t core_id = ocf_core_get_id(core);
uint64_t line_size = ocf_line_size(cache);
- bool bf_submit = false;
uint64_t core_line_first, core_line_last, core_line;
ocf_cache_line_t line;
uint64_t bf_addr, bf_size;
struct xcache_backdev_io *base_io;
struct xcache_io *bf_io;
- ctx_data_t *bf_data;
- bf_io = xcache_get_bf_io(io);
+ bf_io = xcache_get_bf_xcache_io(io);
if (bf_io == NULL) {
ocf_cache_log(cache, log_err, "alloc bf_io failed\n");
xcache_read_bf_error(io, addr, size);
return;
}
- bf_data = xcache_get_bf_data(io, addr, size);
- if (bf_data == NULL) {
- ocf_cache_log(cache, log_err, "alloc bf_data failed\n");
- xcache_free_bf_io(bf_io);
- xcache_read_bf_error(io, addr, size);
- return;
- }
-
xcache_io_get_line_range(cache, addr, size, &core_line_first, &core_line_last);
bf_addr = addr;
bf_size = xcache_line_to_addr(cache, core_line_first + 1) - bf_addr;
@@ -201,23 +207,12 @@ static void xcache_submit_read_bf(struct xcache_io *io, uint64_t addr, uint64_t
continue;
}
- base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
- if (base_io == NULL) {
- ocf_cache_log(cache, log_err, "alloc bf base_io failed\n");
- xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ if (xcache_submit_read_bf_line(io, bf_io, bf_addr, bf_size, line) != 0) {
+ ocf_cache_log(cache, log_err, "read bf line failed\n");
continue;
}
- base_io->xcache_io = bf_io;
- base_io->line = line;
- base_io->data = bf_data;
- if (xcache_do_read_bf(base_io, bf_addr, bf_size, bf_addr - addr) == 0) {
- bf_submit = true;
- }
}
- if (!bf_submit) {
- xcache_free_bf_data(io, bf_data);
- }
xcache_io_put(bf_io);
}
@@ -327,7 +322,7 @@ static int xcache_read_lb_cb(struct xcache_backdev_io *backdev_io, struct backde
struct xcache_io *io = backdev_io->xcache_io;
ocf_cache_t cache = xcache_io_cache(io);
- ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
return 0;
}
diff --git a/src/xcache_queue.c b/src/xcache_queue.c
index e2c3926..01e0445 100644
--- a/src/xcache_queue.c
+++ b/src/xcache_queue.c
@@ -27,6 +27,7 @@ int xcache_queue_ctx_init(ocf_queue_t queue)
INIT_LIST_HEAD(&queue_ctx->xcache_io_list);
queue_ctx->xcache_io_no = 0;
+ queue_ctx->bf_data_num = 0;
queue->priv1 = (void *)queue_ctx;
return 0;
}
@@ -204,7 +205,6 @@ void ocf_queue_run_single(ocf_queue_t q)
queue_entry_run(get_entry_type(entry), entry);
}
-#define QUEUE_CACHE_SIZE 128
// only called by request in queue to avoid lock
struct xcache_backdev_io *xcache_queue_alloc_backdev_io(ocf_queue_t q)
{
@@ -262,6 +262,28 @@ void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io)
queue_ctx->xcache_io_no++;
}
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num > 0) {
+ return queue_ctx->bf_data[--queue_ctx->bf_data_num];
+ } else {
+ return ctx_data_alloc(cache->owner, (ocf_line_size(cache) + PAGE_SIZE - 1) / PAGE_SIZE);
+ }
+}
+
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num < QUEUE_CACHE_SIZE) {
+ queue_ctx->bf_data[queue_ctx->bf_data_num++] = data;
+ } else {
+ ctx_data_free(cache->owner, data);
+ }
+}
+
static void xcache_queue_push_entry(ocf_queue_t q, struct queue_entry *entry, bool at_head, bool allow_sync, enum entry_type type)
{
ocf_cache_t cache = ocf_queue_get_cache(q);
diff --git a/src/xcache_queue.h b/src/xcache_queue.h
index 3412a2a..9a9fd23 100644
--- a/src/xcache_queue.h
+++ b/src/xcache_queue.h
@@ -3,6 +3,8 @@
#include "ocf/xcache.h"
+#define QUEUE_CACHE_SIZE 128
+
struct xcache_queue_ctx {
struct list_head backdev_io_list;
int backdev_io_no;
@@ -10,6 +12,8 @@ struct xcache_queue_ctx {
int flush_io_no;
struct list_head xcache_io_list;
int xcache_io_no;
+ void *bf_data[QUEUE_CACHE_SIZE];
+ int bf_data_num;
};
int xcache_queue_ctx_init(ocf_queue_t queue);
@@ -26,4 +30,6 @@ void xcache_queue_push_backdev_io_front(struct xcache_backdev_io *base_io, bool
struct xcache_io *xcache_queue_alloc_xcache_io(ocf_queue_t q);
void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io);
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache);
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data);
#endif
--
2.30.0
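
The hunks above cover allocation and recycling, but this excerpt does not show the matching release when a queue is torn down. Purely as a hypothetical complement, in the same illustrative terms as the sketch after the diffstat (made-up names, free() standing in for ctx_data_free()), a teardown drain could look like:

#include <stdlib.h>

#define QUEUE_CACHE_SIZE 128

struct line_data_cache {                /* same shape as the sketch above */
        void *buf[QUEUE_CACHE_SIZE];
        int num;
};

/* Release every buffer still parked in the per-queue cache so that
 * nothing kept for reuse is leaked once the queue goes away. */
static void line_data_cache_drain(struct line_data_cache *c)
{
        while (c->num > 0)
                free(c->buf[--c->num]);
}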