OCF: enable xcache

This patchset enables xcache. More details can be found in the individual patch logs.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
Kemeng Shi 2023-01-09 11:42:13 +08:00
parent ffbc94c2e4
commit de38fbd7f8
5 changed files with 5396 additions and 1 deletions


@@ -0,0 +1,621 @@
From 7ea35ad8a7ebb93579b2567dd494d1ac6969fcb0 Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 23:06:29 +0800
Subject: [PATCH 1/4] ocf: export function in OCF for further use
1. split ocf_cleaner_run and export its prepare/complete helpers (a usage sketch follows the diffstat)
2. export ocf_hb_id_naked_[un]lock
3. provide a more general metadata flush interface, .flush_do_asynch_common
4. export some ocf_lru_list operations
5. provide more general alock routines, ocf_io_alock_[un]lock_wr
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
src/cleaning/cleaning.c | 46 +++++---
src/cleaning/cleaning.h | 3 +
src/concurrency/ocf_metadata_concurrency.c | 4 +-
src/concurrency/ocf_metadata_concurrency.h | 7 ++
src/engine/engine_common.c | 4 +-
src/metadata/metadata_raw.c | 87 ++++++++++-----
src/metadata/metadata_raw.h | 40 +++++++
src/ocf_lru.c | 6 +-
src/ocf_lru.h | 7 ++
src/utils/utils_alock.c | 122 +++++++++++++++++++++
src/utils/utils_alock.h | 11 ++
11 files changed, 285 insertions(+), 52 deletions(-)
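
Note on item 1: the split lets an out-of-tree caller drive cleaning with its own policy while reusing OCF's state checks and locking. A minimal sketch of such a caller, assuming a hypothetical xcache_perform_cleaning() helper (not part of this patch; only the prepare/complete pair is):

static void xcache_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
{
	ocf_cache_t cache;

	/* Takes the management trylock and a queue reference on success; on
	 * failure it has already called cleaner->end(cleaner, SLEEP_TIME_MS). */
	if (ocf_cleaner_run_prepare(cleaner, queue) != 0)
		return;

	cache = ocf_cleaner_get_cache(cleaner);
	/* Custom cleaning policy runs here; its completion callback must
	 * release the lock and queue reference, which is exactly what the
	 * exported ocf_cleaner_run_complete() does. */
	xcache_perform_cleaning(cache, ocf_cleaner_run_complete);
}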
diff --git a/src/cleaning/cleaning.c b/src/cleaning/cleaning.c
index 94421f0..c7eef8f 100644
--- a/src/cleaning/cleaning.c
+++ b/src/cleaning/cleaning.c
@@ -70,47 +70,57 @@ static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
return 1;
}
-static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue)
{
ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
- ocf_mngt_cache_unlock(cache);
- ocf_queue_put(cleaner->io_queue);
- cleaner->end(cleaner, interval);
-}
-
-void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
-{
- ocf_cache_t cache;
-
- OCF_CHECK_NULL(cleaner);
- OCF_CHECK_NULL(queue);
-
- cache = ocf_cleaner_get_cache(cleaner);
-
/* Do not involve cleaning when cache is not running
* (error, etc.).
*/
if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
ocf_mngt_cache_is_locked(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
/* Sleep in case there is management operation in progress. */
if (ocf_mngt_cache_trylock(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
ocf_mngt_cache_unlock(cache);
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
ocf_queue_get(queue);
cleaner->io_queue = queue;
+ return 0;
+}
+
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
+
+ ocf_mngt_cache_unlock(cache);
+ ocf_queue_put(cleaner->io_queue);
+ cleaner->end(cleaner, interval);
+}
+
+void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
+{
+ ocf_cache_t cache;
+
+ OCF_CHECK_NULL(cleaner);
+ OCF_CHECK_NULL(queue);
+
+ if (ocf_cleaner_run_prepare(cleaner, queue) != 0) {
+ return;
+ }
+
+ cache = ocf_cleaner_get_cache(cleaner);
ocf_cleaning_perform_cleaning(cache, ocf_cleaner_run_complete);
}
diff --git a/src/cleaning/cleaning.h b/src/cleaning/cleaning.h
index 007dac0..f514393 100644
--- a/src/cleaning/cleaning.h
+++ b/src/cleaning/cleaning.h
@@ -53,4 +53,7 @@ void ocf_kick_cleaner(ocf_cache_t cache);
void ocf_stop_cleaner(ocf_cache_t cache);
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue);
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval);
+
#endif
diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c
index 52059a0..794e27f 100644
--- a/src/concurrency/ocf_metadata_concurrency.c
+++ b/src/concurrency/ocf_metadata_concurrency.c
@@ -212,7 +212,7 @@ void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock,
number. The preferred way to lock multiple hash buckets is to use the
request lock routines ocf_req_hash_(un)lock_(rd/wr).
*/
-static inline void ocf_hb_id_naked_lock(
+void ocf_hb_id_naked_lock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
@@ -226,7 +226,7 @@ static inline void ocf_hb_id_naked_lock(
ENV_BUG();
}
-static inline void ocf_hb_id_naked_unlock(
+void ocf_hb_id_naked_unlock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h
index 97262d8..43f4d1d 100644
--- a/src/concurrency/ocf_metadata_concurrency.h
+++ b/src/concurrency/ocf_metadata_concurrency.h
@@ -177,4 +177,11 @@ void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_loc
uint32_t page);
void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock,
uint32_t page);
+
+void ocf_hb_id_naked_lock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
+void ocf_hb_id_naked_unlock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
#endif
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index a789b13..5b30250 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -584,7 +584,7 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count);
}
-void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
@@ -614,7 +614,7 @@ void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
ocf_queue_kick(q, allow_sync);
}
-void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c
index 15287e7..efef69c 100644
--- a/src/metadata/metadata_raw.c
+++ b/src/metadata/metadata_raw.c
@@ -349,6 +349,8 @@ struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *req;
ocf_req_end_t complete;
+ void *io;
+ ocf_metadata_io_ctx_end_t io_end;
env_atomic flush_req_cnt;
int error;
};
@@ -369,8 +371,7 @@ static void _raw_ram_flush_do_asynch_io_complete(ocf_cache_t cache,
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
/* Call metadata flush completed call back */
- ctx->req->error |= ctx->error;
- ctx->complete(ctx->req, ctx->error);
+ ctx->io_end(ctx->io, ctx->error);
env_free(ctx);
}
@@ -429,17 +430,17 @@ int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
return 0;
}
-static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
+static void __raw_ram_flush_do_asynch_add_pages(struct ocf_metadata_line_getter *getter,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
- int *pages_to_flush) {
+ int *pages_to_flush)
+{
int i, j = 0;
- int line_no = req->core_line_count;
- struct ocf_map_info *map;
+ int line_no = line_getter_line_num(getter);
+ ocf_cache_line_t line;
for (i = 0; i < line_no; i++) {
- map = &req->map[i];
- if (map->flush) {
- pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
+ if (line_getter_flush_line(getter, i, &line)) {
+ pages_tab[j] = _RAW_RAM_PAGE(raw, line);
j++;
}
}
@@ -447,37 +448,31 @@ static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
*pages_to_flush = j;
}
-static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
- struct ocf_request *req, struct ocf_metadata_raw *raw,
- ocf_req_end_t complete)
+static int _raw_ram_flush_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
- int line_no = req->core_line_count;
+ int line_no = line_getter_line_num(line_getter);
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
struct _raw_ram_flush_ctx *ctx;
- ENV_BUG_ON(!complete);
+ ENV_BUG_ON(!io_ctx->io_end);
OCF_DEBUG_TRACE(cache);
- if (!req->info.flush_metadata) {
- /* Nothing to flush call flush callback */
- complete(req, 0);
- return 0;
- }
-
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
- ctx->req = req;
- ctx->complete = complete;
+ ctx->io = io_ctx->io;
+ ctx->io_end = io_ctx->io_end;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
@@ -487,7 +482,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
}
@@ -496,7 +491,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
* to prevent freeing of asynchronous context
*/
- __raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
+ __raw_ram_flush_do_asynch_add_pages(line_getter, pages_tab, raw,
&pages_to_flush);
env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
@@ -526,9 +521,9 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
env_atomic_inc(&ctx->flush_req_cnt);
- result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
+ result |= metadata_io_write_i_asynch(cache, io_ctx->queue, ctx,
raw->ssd_pages_offset + start_page, count,
- req->ioi.io.flags,
+ io_ctx->io_flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);
@@ -547,6 +542,43 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
return result;
}
+static int req_line_num(void *getter)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ return req->core_line_count;
+}
+
+static bool req_flush_line(void *getter, int index, ocf_cache_line_t *line)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ if (!req->map[index].flush) {
+ return false;
+ }
+
+ *line = req->map[index].coll_idx;
+ return true;
+}
+
+static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ struct ocf_metadata_io_context io_ctx = {
+ .io = (void *)req,
+ .io_flags = req->ioi.io.flags,
+ .io_end = (ocf_metadata_io_ctx_end_t)complete,
+ .queue = req->io_queue,
+ };
+ struct ocf_metadata_line_getter line_getter = {
+ .getter = (void *)req,
+ .get_line_num = req_line_num,
+ .get_flush_line = req_flush_line,
+ };
+ return _raw_ram_flush_asynch_common(cache, raw, &io_ctx, &line_getter);
+}
+
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
@@ -566,6 +598,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
.flush_do_asynch = _raw_ram_flush_do_asynch,
+ .flush_do_asynch_common = _raw_ram_flush_asynch_common
},
[metadata_raw_type_dynamic] = {
.init = raw_dynamic_init,
diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h
index 0357774..57f7b75 100644
--- a/src/metadata/metadata_raw.h
+++ b/src/metadata/metadata_raw.h
@@ -93,6 +93,35 @@ struct ocf_metadata_raw {
struct ocf_alock *mio_conc;
};
+struct ocf_metadata_line_getter {
+ void *getter;
+ int (*get_line_num)(void *getter);
+ bool (*get_flush_line)(void *getter, int index, ocf_cache_line_t *line);
+};
+
+static inline int line_getter_line_num(struct ocf_metadata_line_getter *getter)
+{
+ return getter->get_line_num(getter->getter);
+}
+
+static inline bool line_getter_flush_line(struct ocf_metadata_line_getter *getter,
+ int index, ocf_cache_line_t *line)
+{
+ return getter->get_flush_line(getter->getter, index, line);
+}
+
+typedef void (*ocf_metadata_io_ctx_end_t)(void *io, int error);
+struct ocf_metadata_io_context {
+ void *io;
+ int io_flags;
+ ocf_queue_t queue;
+ ocf_metadata_io_ctx_end_t io_end;
+};
+
+static inline void io_context_end(struct ocf_metadata_io_context *ctx, int error)
+{
+ ctx->io_end(ctx->io, error);
+}
/**
* RAW container interface
*/
@@ -137,8 +166,12 @@ struct raw_iface {
int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+
+ int (*flush_do_asynch_common)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ struct ocf_metadata_io_context *io_ctx, struct ocf_metadata_line_getter *line_getter);
};
+
/**
* @brief Initialize RAW instance
*
@@ -288,6 +321,13 @@ static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
return raw->iface->flush_do_asynch(cache, req, raw, complete);
}
+static inline int ocf_metadata_raw_flush_do_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
+{
+ return raw->iface->flush_do_asynch_common(cache, raw, io_ctx, line_getter);
+}
+
/*
* Check if line is valid for specified RAW descriptor
*/
diff --git a/src/ocf_lru.c b/src/ocf_lru.c
index e9c3882..8e323c0 100644
--- a/src/ocf_lru.c
+++ b/src/ocf_lru.c
@@ -221,7 +221,7 @@ void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
node->next = end_marker;
}
-static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean)
{
if (part->id == PARTITION_FREELIST)
@@ -261,7 +261,7 @@ static inline void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
add_lru_head(cache, dst_list, cline);
}
-static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_part *dst_part)
{
uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
@@ -658,7 +658,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_cleaner_fire(cache, &attribs);
}
-static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
ocf_core_t core;
diff --git a/src/ocf_lru.h b/src/ocf_lru.h
index a71b5fd..ae50b5e 100644
--- a/src/ocf_lru.h
+++ b/src/ocf_lru.h
@@ -33,4 +33,11 @@ void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+ struct ocf_part *src_part, struct ocf_part *dst_part);
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+ uint32_t lru_idx, bool clean);
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+ ocf_core_id_t core_id, ocf_part_id_t part_id);
+
#endif
diff --git a/src/utils/utils_alock.c b/src/utils/utils_alock.c
index 25f41a6..183682a 100644
--- a/src/utils/utils_alock.c
+++ b/src/utils/utils_alock.c
@@ -799,3 +799,125 @@ uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);
}
+
+int ocf_io_alock_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io)
+{
+ struct ocf_alock_waiter *waiter;
+ unsigned long flags = 0;
+ int ret = OCF_LOCK_NOT_ACQUIRED;
+
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ return OCF_LOCK_ACQUIRED;
+ }
+
+ ocf_alock_waitlist_lock(alock, entry, flags);
+
+ /* At the moment list is protected, double check if the cache entry is
+ * unlocked
+ */
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ ret = OCF_LOCK_ACQUIRED;
+ goto unlock;
+ }
+
+ waiter = env_allocator_new(alock->allocator);
+ if (!waiter) {
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+ /* Setup waiter fields */
+ waiter->entry = entry;
+ waiter->req = (struct ocf_request *)io;
+ waiter->cmpl = prepare_wake_fn;
+ waiter->rw = OCF_WRITE;
+ INIT_LIST_HEAD(&waiter->item);
+
+ prepare_wait_fn(io);
+ /* Add to waiters list */
+ ocf_alock_waitlist_add(alock, entry, waiter);
+
+unlock:
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+
+ return ret;
+}
+
+static inline void ocf_io_alock_unlock_wr_common(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ bool locked = false;
+ bool exchanged = true;
+
+ uint32_t idx = _WAITERS_LIST_ITEM(entry);
+ struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
+ struct ocf_alock_waiter *waiter;
+
+ struct list_head *iter, *next;
+
+ /*
+ * Lock exchange scenario
+ * 1. WR -> IDLE
+ * 2. WR -> RD
+ * 3. WR -> WR
+ */
+
+ /* Check if the requested entry is on the list */
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct ocf_alock_waiter, item);
+
+ if (entry != waiter->entry)
+ continue;
+
+ if (exchanged) {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr2wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_wr2rd(alock, entry);
+ else
+ ENV_BUG();
+ } else {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_rd(alock, entry);
+ else
+ ENV_BUG();
+ }
+
+ if (locked) {
+ exchanged = false;
+ list_del(iter);
+
+ waiter->cmpl(waiter->req);
+
+ env_allocator_del(alock->allocator, waiter);
+ } else {
+ break;
+ }
+ }
+
+ if (exchanged) {
+ /* No exchange, no waiters on the list, unlock and return
+ * WR -> IDLE
+ */
+ ocf_alock_unlock_entry_wr(alock, entry);
+ }
+}
+
+void ocf_io_alock_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ unsigned long flags = 0;
+
+ OCF_DEBUG_CACHE(alock->cache, "Cache entry unlock one wr = %u", entry);
+
+ /* Lock waiters list */
+ ocf_alock_waitlist_lock(alock, entry, flags);
+ ocf_io_alock_unlock_wr_common(alock, entry);
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+}
diff --git a/src/utils/utils_alock.h b/src/utils/utils_alock.h
index 2d3df97..3670c25 100644
--- a/src/utils/utils_alock.h
+++ b/src/utils/utils_alock.h
@@ -87,4 +87,15 @@ void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry);
+typedef void (*ocf_io_lock_prepare_wait)(void *io);
+typedef void (*ocf_io_lock_prepare_wake)(void *io);
+int ocf_io_alock_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io);
+
+void ocf_io_alock_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry);
+
#endif
--
2.30.0
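
The point of the line getter abstraction in the patch above is that metadata flushes no longer require an ocf_request: any object that can report a line count and map an index to a cache line can drive a flush. A hedged sketch, flushing an explicit array of lines (flush_lines(), struct line_array, and flush_done() are hypothetical; the structs and the flush call come from this patch, and OCF internal headers are assumed):

struct line_array {
	ocf_cache_line_t *lines;
	int count;
};

static int line_array_num(void *getter)
{
	return ((struct line_array *)getter)->count;
}

static bool line_array_flush_line(void *getter, int index, ocf_cache_line_t *line)
{
	*line = ((struct line_array *)getter)->lines[index];
	return true;	/* flush every line in the array */
}

static void flush_done(void *io, int error)
{
	/* complete the owning io here */
}

static int flush_lines(ocf_cache_t cache, struct ocf_metadata_raw *raw,
		ocf_queue_t queue, struct line_array *arr, void *io)
{
	struct ocf_metadata_io_context io_ctx = {
		.io = io,
		.io_flags = 0,
		.queue = queue,
		.io_end = flush_done,
	};
	struct ocf_metadata_line_getter getter = {
		.getter = arr,
		.get_line_num = line_array_num,
		.get_flush_line = line_array_flush_line,
	};

	return ocf_metadata_raw_flush_do_asynch_common(cache, raw, &io_ctx, &getter);
}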

Patch 2/4: file diff suppressed because it is too large.
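
One detail from patch 1 that matters here: ocf_engine_push_req_back/front were marked __attribute__((weak)), presumably so the suppressed patch can supply strong definitions that replace OCF's IO path without editing call sites. A standalone illustration of the linker mechanism (toy names, not OCF symbols):

/* engine.c -- the library's default, marked weak */
#include <stdio.h>

void __attribute__((weak)) push_req(int req)
{
	printf("default engine path: %d\n", req);
}

/* xcache.c -- a strong definition; when this object is linked in, the
 * linker silently drops the weak default in favour of it */
#include <stdio.h>

void push_req(int req)
{
	printf("xcache engine path: %d\n", req);
}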


@@ -0,0 +1,679 @@
From 4d7ec05fe2cf796d499328546ba1057d08b315c1 Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 17:52:04 +0800
Subject: [PATCH 3/4] qos: add load balance
Add load balancing to offload IO from the cache device to the backing (core)
device when the cache device is busy. A worked example of the balance decision
follows the diffstat.
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
inc/xcache_io.h | 22 ++++-
src/engine/xcache_engine_common.c | 82 +++++++++++++++--
src/engine/xcache_engine_common.h | 63 +++++++++++++
src/engine/xcache_engine_rd.c | 24 +++++
src/engine/xcache_engine_wb.c | 14 +++
src/engine/xcache_engine_wt.c | 8 ++
src/qos/qos.c | 6 ++
src/qos/qos.h | 27 ++++++
src/qos/qos_lb.h | 143 ++++++++++++++++++++++++++++++
src/xcache.c | 1 +
src/xcache.h | 3 +
src/xcache_ocf_core.c | 4 +
12 files changed, 386 insertions(+), 11 deletions(-)
create mode 100644 src/qos/qos.c
create mode 100644 src/qos/qos.h
create mode 100644 src/qos/qos_lb.h
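
The decision in qos_need_lb() compares weighted inflight byte counts: offload only while the cache device stays busier than the core device even after charging the incoming request's cost to the core side. A worked example with the default weights (cache = 1, core = 30) and made-up inflight values:

#include <stdbool.h>
#include <stdint.h>

#define CORE_DEFAULT_LOAD_WEIGHT	30
#define CACHE_DEFAULT_LOAD_WEIGHT	1

int main(void)
{
	uint64_t cache_inflight = 8192 * 1024;	/* 8 MiB queued on the cache device */
	uint64_t core_inflight = 64 * 1024;	/* 64 KiB queued on the core device */
	uint64_t incoming = 64 * 1024;		/* size of the request being placed */

	/* weighted loads, as in qos_dev_load_cal() */
	uint64_t cache_load = cache_inflight * CACHE_DEFAULT_LOAD_WEIGHT; /* 8 MiB */
	uint64_t core_load = core_inflight * CORE_DEFAULT_LOAD_WEIGHT;	/* 1.875 MiB */

	/* as in qos_need_lb(): 8 MiB > 1.875 MiB + 1.875 MiB, so offload */
	bool offload = cache_load > core_load +
			incoming * CORE_DEFAULT_LOAD_WEIGHT;
	return offload ? 0 : 1;
}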
diff --git a/inc/xcache_io.h b/inc/xcache_io.h
index a8acb67..833a73c 100644
--- a/inc/xcache_io.h
+++ b/inc/xcache_io.h
@@ -2,6 +2,7 @@
#define XCACHE_IO_H__
#include "ocf_env.h"
+#include "ocf/ocf.h"
enum entry_type {
XCACHE_IO_ENTRY,
@@ -49,6 +50,11 @@ struct backdev_io_end_arg {
struct xcache_backdev_io;
typedef int (*backdev_io_end_fn)(struct xcache_backdev_io *io, struct backdev_io_end_arg *arg);
typedef void (*backdev_io_res_fn)(struct xcache_backdev_io *io);
+enum xcache_dir {
+ XCACHE_RD = 0,
+ XCACHE_WR,
+ XCACHE_FLUSH,
+};
struct xcache_backdev_io {
// queue_entry
enum entry_type type;
@@ -60,9 +66,19 @@ struct xcache_backdev_io {
ocf_cache_line_t line;
ctx_data_t *data;
- backdev_io_res_fn io_res;
- uint64_t addr;
- uint64_t size;
+ union {
+ /* for xcache lock */
+ struct {
+ backdev_io_res_fn io_res;
+ uint64_t addr;
+ uint64_t size;
+ };
+ /* for io_end callback */
+ struct {
+ int dev;
+ enum xcache_dir dir;
+ };
+ };
void *priv;
};
diff --git a/src/engine/xcache_engine_common.c b/src/engine/xcache_engine_common.c
index f1bf022..eb1decb 100644
--- a/src/engine/xcache_engine_common.c
+++ b/src/engine/xcache_engine_common.c
@@ -4,6 +4,7 @@
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
+#include "../xcache.h"
#include "xcache_engine_common.h"
#include "../xcache_lru.h"
#include "../xcache_queue.h"
@@ -342,6 +343,55 @@ int xcache_foreach_line(struct xcache_io_context *ctx, xcache_line_handle_func f
return 0;
}
+static int xcache_wr_lb_common_end(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_cache_line_t line = backdev_io->line;
+ uint8_t start_sector, last_sector;
+
+ xcache_get_sectors_range(cache, cb_arg->addr, cb_arg->size, &start_sector, &last_sector);
+ ocf_metadata_start_collision_shared_access(cache, line);
+ metadata_clear_valid_sec(cache, line, start_sector, last_sector);
+ ocf_metadata_end_collision_shared_access(cache, line);
+
+ ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->line = line;
+ backdev_io->end = xcache_wr_lb_common_end;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_WRITE);
+}
+
+static inline bool xcache_engine_need_lb(struct xcache_io_handler *handler, struct xcache_line_range *line_range)
+{
+ struct xcache_io_context *ctx = handler->ctx;
+ struct xcache_io *io = ctx->io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ xcache_context_t *xcache_ctx = xcache_get_ctx(cache);
+
+ if (handler->lb_fn == NULL) {
+ return false;
+ }
+
+ if (handler->need_lb_fn != NULL &&
+ !handler->need_lb_fn(xcache_io_cache(io), line_range)) {
+ return false;
+ }
+
+ return xcache_qos_need_lb(&xcache_ctx->qos, xcache_io_dir(io), line_range->size);
+}
+
static int xcache_handle_line(void *priv,
uint64_t core_line, uint64_t addr, uint64_t size,
uint8_t start_sector, uint8_t last_sector)
@@ -350,7 +400,14 @@ static int xcache_handle_line(void *priv,
struct xcache_io_context *ctx = handler->ctx;
struct xcache_io *io = ctx->io;
ocf_cache_t cache = xcache_ctx_cache(ctx);
- struct xcache_backdev_io *base_io;
+ struct xcache_backdev_io *backdev_io;
+ struct xcache_line_range line_range = {
+ .addr = addr,
+ .size = size,
+ .start_sector = start_sector,
+ .last_sector = last_sector,
+ };
+
ocf_cache_line_t line;
int lock;
@@ -368,21 +425,30 @@ static int xcache_handle_line(void *priv,
}
xcache_submit_miss_line(ctx, core_line, handler->miss_fn);
- ctx->hit_no++;
- ctx->cache_bytes += size;
if (lock == OCF_LOCK_NOT_ACQUIRED) {
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
return 0;
}
- base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
- if (base_io == NULL) {
+ backdev_io = xcache_alloc_backdev_io(io);
+ if (backdev_io == NULL) {
ocf_cache_log(cache, log_err, "alloc base io failed\n");
return -ENOMEM;
}
- base_io->xcache_io = io;
- base_io->data = io->data;
- handler->hit_fn(base_io, line, SECTORS_TO_BYTES(start_sector), size, ctx->offset);
+
+ line_range.cache_line = line;
+ if (xcache_engine_need_lb(handler, &line_range)) {
+ ctx->core_bytes += size;
+ handler->lb_fn(backdev_io, line, addr, size);
+ return 0;
+ }
+
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
+ handler->hit_fn(backdev_io, line, SECTORS_TO_BYTES(start_sector), size, ctx->offset);
return 0;
}
diff --git a/src/engine/xcache_engine_common.h b/src/engine/xcache_engine_common.h
index 3fc168d..fd73519 100644
--- a/src/engine/xcache_engine_common.h
+++ b/src/engine/xcache_engine_common.h
@@ -3,8 +3,10 @@
#include "../ocf_cache_priv.h"
#include "../utils/utils_cache_line.h"
+#include "../ocf_def_priv.h"
#include "../xcache.h"
+#include "../xcache_queue.h"
#define INVALID_CORE_LINE ((uint64_t)-1)
#define INVALID_LINE ((ocf_cache_line_t)-1)
@@ -36,6 +38,13 @@ static inline uint8_t xcache_sector_offset(ocf_cache_t cache, uint64_t sector)
return sector & (ocf_line_sectors(cache) - 1);
}
+static inline void xcache_get_sectors_range(ocf_cache_t cache, uint64_t addr, uint64_t size, uint8_t *start_sector, uint8_t *last_sector)
+{
+ uint64_t offset = xcache_addr_offset(cache, addr);
+ *start_sector = BYTES_TO_SECTORS(offset);
+ *last_sector = BYTES_TO_SECTORS(offset + size - 1);
+}
+
static inline void xcache_io_get_line_range(ocf_cache_t cache, uint64_t addr, uint64_t size,
uint64_t *line_first, uint64_t *line_last)
{
@@ -55,6 +64,15 @@ void xcache_map_cache_line(struct xcache_io_context *ctx,
uint64_t cache_line_to_addr(ocf_cache_t cache, ocf_cache_line_t line, uint64_t line_offset);
ocf_cache_line_t addr_to_cache_line(ocf_cache_t cache, uint64_t addr);
+struct xcache_line_range {
+ ocf_cache_line_t cache_line;
+ uint64_t core_line;
+ uint64_t addr;
+ uint64_t size;
+ uint64_t start_sector;
+ uint64_t last_sector;
+};
+
typedef int (*xcache_line_handle_func)(void *priv,
uint64_t core_line, uint64_t addr, uint64_t size,
uint8_t start_sector, uint8_t last_sector);
@@ -93,6 +111,11 @@ static inline ocf_queue_t xcache_io_queue(struct xcache_io *io)
return io->io_queue;
}
+static inline int xcache_io_dir(struct xcache_io *io)
+{
+ return io->rw;
+}
+
static inline ocf_core_t xcache_ctx_core(struct xcache_io_context *ctx)
{
return xcache_io_core(ctx->io);
@@ -108,12 +131,28 @@ static inline ocf_queue_t xcache_ctx_queue(struct xcache_io_context *ctx)
return xcache_io_queue(ctx->io);
}
+static inline struct xcache_backdev_io *xcache_alloc_backdev_io(struct xcache_io *io)
+{
+ struct xcache_backdev_io *backdev_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+
+ if (backdev_io == NULL) {
+ return NULL;
+ }
+
+ backdev_io->xcache_io = io;
+ backdev_io->data = io->data;
+ return backdev_io;
+}
+
typedef int (*xcache_line_valid_fn)(ocf_cache_t cache, ocf_cache_line_t line,
uint8_t start_sector, uint8_t last_sector);
typedef int (*xcache_line_hit_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
uint64_t offset, uint64_t size, uint64_t buf_offset);
typedef int (*xcache_line_miss_fn)(struct xcache_backdev_io *base_io, uint64_t addr,
uint64_t size, uint64_t buf_offset);
+typedef int (*xcache_line_need_lb_fn)(ocf_cache_t cache, struct xcache_line_range *line_range);
+typedef void (*xcache_line_lb_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);
struct xcache_io_handler {
struct xcache_io_context *ctx;
@@ -121,9 +160,14 @@ struct xcache_io_handler {
xcache_line_hit_fn hit_fn;
xcache_line_miss_fn miss_fn;
backdev_io_res_fn res_fn;
+ xcache_line_need_lb_fn need_lb_fn;
+ xcache_line_lb_fn lb_fn;
};
int xcache_handle_io(struct xcache_io_handler *handler);
+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);
+
static inline void xcache_io_get(struct xcache_io *io)
{
env_atomic_inc_return(&io->remaining);
@@ -144,9 +188,28 @@ static inline void xcache_io_put(struct xcache_io *io)
xcache_io_end(io, io->error);
}
+static inline xcache_context_t *backdev_io_to_xcache_ctx(struct xcache_backdev_io *io_base)
+{
+ struct xcache_io *io = io_base->xcache_io;
+ ocf_queue_t q = io->io_queue;
+ ocf_cache_t cache = q->cache;
+ return xcache_get_ctx(cache);
+}
+
static inline void xcache_backdev_submit_io(struct xcache_backdev_io *io_base, bool cached, uint64_t addr, uint64_t size, uint64_t buf_offset, uint8_t dir)
{
struct xcache_io *io = io_base->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(io_base);
+
+ io_base->dir = dir;
+ if (cached) {
+ io_base->dev = CACHE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CACHE_DEV, dir, size);
+ } else {
+ io_base->dev = CORE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CORE_DEV, dir, size);
+ }
+
xcache_io_get(io);
spdk_backdev_submit_io(io_base, cached, addr, size, buf_offset, dir);
}
diff --git a/src/engine/xcache_engine_rd.c b/src/engine/xcache_engine_rd.c
index ffe06d2..6ac3b7e 100644
--- a/src/engine/xcache_engine_rd.c
+++ b/src/engine/xcache_engine_rd.c
@@ -322,6 +322,28 @@ static int xcache_read_line_valid(ocf_cache_t cache, ocf_cache_line_t line,
return !metadata_test_valid_sec(cache, line, start_sector, last_sector);
}
+static int xcache_read_lb_cb(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+
+ ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+static void xcache_read_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->end = xcache_read_lb_cb;
+ backdev_io->line = line;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_READ);
+}
+
int xcache_read_generic(struct xcache_io *io)
{
int ret;
@@ -332,6 +354,8 @@ int xcache_read_generic(struct xcache_io *io)
.valid_fn = xcache_read_line_valid,
.miss_fn = xcache_read_miss,
.hit_fn = xcache_read_hit,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_read_lb,
};
xcache_init_io_ctx(&ctx, io);
diff --git a/src/engine/xcache_engine_wb.c b/src/engine/xcache_engine_wb.c
index 064f650..e4f8212 100644
--- a/src/engine/xcache_engine_wb.c
+++ b/src/engine/xcache_engine_wb.c
@@ -170,6 +170,18 @@ static void xcache_wb_res(struct xcache_backdev_io *base_io)
xcache_wb_hit(base_io, base_io->line, offset, base_io->size, buf_offset);
}
+/* Bypassing dirty sectors to the core would cost an extra cache IO to update valid bits, so only load-balance lines with no dirty sectors */
+static int xcache_wb_need_lb(ocf_cache_t cache, struct xcache_line_range *range)
+{
+ return !metadata_test_dirty_sec(cache, range->cache_line, range->start_sector, range->last_sector);
+}
+
+static void xcache_wb_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
int xcache_wb(struct xcache_io *io)
{
int ret;
@@ -180,6 +192,8 @@ int xcache_wb(struct xcache_io *io)
.valid_fn = NULL,
.miss_fn = xcache_wb_miss,
.hit_fn = xcache_wb_hit,
+ .need_lb_fn = xcache_wb_need_lb,
+ .lb_fn = xcache_wb_lb,
};
mark_flush();
diff --git a/src/engine/xcache_engine_wt.c b/src/engine/xcache_engine_wt.c
index 234608d..e3d4c99 100644
--- a/src/engine/xcache_engine_wt.c
+++ b/src/engine/xcache_engine_wt.c
@@ -167,6 +167,12 @@ static int xcache_wt_core(struct xcache_io_context *ctx)
return 0;
}
+static void xcache_wt_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
int xcache_wt(struct xcache_io *io)
{
struct xcache_io_context ctx;
@@ -176,6 +182,8 @@ int xcache_wt(struct xcache_io *io)
.valid_fn = NULL,
.miss_fn = NULL,
.hit_fn = xcache_wt_hit_cache,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_wt_lb,
};
int ret;
diff --git a/src/qos/qos.c b/src/qos/qos.c
new file mode 100644
index 0000000..6ea2da9
--- /dev/null
+++ b/src/qos/qos.c
@@ -0,0 +1,6 @@
+#include "qos.h"
+
+void xcache_qos_init(struct xcache_qos *qos)
+{
+ qos_lb_init(&qos->qos_lb);
+}
diff --git a/src/qos/qos.h b/src/qos/qos.h
new file mode 100644
index 0000000..3b6a691
--- /dev/null
+++ b/src/qos/qos.h
@@ -0,0 +1,27 @@
+#ifndef __QOS_H__
+#define __QOS_H__
+
+#include "qos_lb.h"
+
+struct xcache_qos {
+ struct qos_lb qos_lb;
+};
+
+static inline void xcache_qos_load_add(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_add(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline void xcache_qos_load_sub(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_sub(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline bool xcache_qos_need_lb(struct xcache_qos *qos, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_need_lb(&qos->qos_lb, dir, bytes);
+}
+
+void xcache_qos_init(struct xcache_qos *qos);
+
+#endif
diff --git a/src/qos/qos_lb.h b/src/qos/qos_lb.h
new file mode 100644
index 0000000..bb3bfe4
--- /dev/null
+++ b/src/qos/qos_lb.h
@@ -0,0 +1,143 @@
+#ifndef __QOS_LB_H__
+#define __QOS_LB_H__
+
+#define CORE_DEFAULT_LOAD_WEIGHT 30
+#define CACHE_DEFAULT_LOAD_WEIGHT 1
+
+#define CORE_DEV 0
+#define CACHE_DEV 1
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "ocf/xcache.h"
+
+struct qos_dev_load {
+ env_atomic64 read_inflight_bytes;
+ env_atomic64 write_inflight_bytes;
+ uint32_t read_weight;
+ uint32_t write_weight;
+};
+
+static inline void qos_dev_load_init(struct qos_dev_load *load)
+{
+ env_atomic64_set(&load->read_inflight_bytes, 0);
+ env_atomic64_set(&load->write_inflight_bytes, 0);
+}
+
+static inline void qos_dev_load_add(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_add(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_add(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_dev_load_sub(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_sub(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_sub(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline uint64_t qos_dev_load_read(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return env_atomic64_read(&load->read_inflight_bytes);
+ case XCACHE_WR:
+ return env_atomic64_read(&load->write_inflight_bytes);
+ default:
+ return 0;
+ }
+}
+
+static inline uint32_t qos_dev_load_weight(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return load->read_weight;
+ case XCACHE_WR:
+ return load->write_weight;
+ default:
+ return 0;
+ }
+}
+
+static inline uint64_t do_cal_load(uint64_t bytes, uint32_t weight)
+{
+ return bytes * weight;
+}
+
+static inline uint64_t qos_dev_load_cal(struct qos_dev_load *load)
+{
+ uint64_t read_inflight_bytes = qos_dev_load_read(load, XCACHE_RD);
+ uint64_t write_inflight_bytes = qos_dev_load_read(load, XCACHE_WR);
+
+ return do_cal_load(read_inflight_bytes, load->read_weight) +
+ do_cal_load(write_inflight_bytes, load->write_weight);
+}
+
+struct qos_lb {
+ struct qos_dev_load cache_load;
+ struct qos_dev_load core_load;
+};
+
+static inline void qos_lb_init(struct qos_lb *qos_lb)
+{
+ qos_dev_load_init(&qos_lb->cache_load);
+ qos_dev_load_init(&qos_lb->core_load);
+ qos_lb->cache_load.read_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->cache_load.write_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.read_weight = CORE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.write_weight = CORE_DEFAULT_LOAD_WEIGHT;
+}
+
+static inline void qos_lb_load_add(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_add(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_add(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_lb_load_sub(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_sub(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_sub(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline bool qos_need_lb(struct qos_lb *qos_lb, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_dev_load_cal(&qos_lb->cache_load) > qos_dev_load_cal(&qos_lb->core_load) +
+ do_cal_load(bytes, qos_dev_load_weight(&qos_lb->core_load, dir));
+}
+
+#endif
diff --git a/src/xcache.c b/src/xcache.c
index e8d1f2d..0c6a2b8 100644
--- a/src/xcache.c
+++ b/src/xcache.c
@@ -17,6 +17,7 @@ int xcache_init(ocf_cache_t cache)
set_deadline_policy();
evicting_init(ctx);
ctx->line_size_shift = __builtin_ffsll(ocf_line_size(cache)) - 1;
+ xcache_qos_init(&ctx->qos);
return 0;
}
diff --git a/src/xcache.h b/src/xcache.h
index f31ec15..4fd7277 100644
--- a/src/xcache.h
+++ b/src/xcache.h
@@ -5,7 +5,9 @@
#include "ocf/ocf_types.h"
#include "./ocf_cache_priv.h"
+#include "ocf/xcache.h"
#include "xcache_cleaner.h"
+#include "qos/qos.h"
typedef ocf_cache_line_t xcache_line_t;
@@ -14,6 +16,7 @@ typedef struct xcache_context {
void *xcache_evicting;
ocf_cache_t cache;
struct xcache_cleaning_ctx cleaning_ctx;
+ struct xcache_qos qos;
} xcache_context_t;
static inline xcache_context_t *xcache_get_ctx(ocf_cache_t cache)
diff --git a/src/xcache_ocf_core.c b/src/xcache_ocf_core.c
index a3d5c1c..bfd6619 100644
--- a/src/xcache_ocf_core.c
+++ b/src/xcache_ocf_core.c
@@ -6,6 +6,7 @@
#include "ocf/xcache.h"
#include "xcache_queue.h"
+#include "qos/qos.h"
void xcache_submit_io(struct xcache_io *io)
{
@@ -34,6 +35,9 @@ void xcache_submit_io(struct xcache_io *io)
void xcache_backdev_io_end(struct xcache_backdev_io *bd_io, struct backdev_io_end_arg *arg)
{
struct xcache_io *io = bd_io->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(bd_io);
+
+ xcache_qos_load_sub(&xcache_ctx->qos, bd_io->dev, bd_io->dir, arg->size);
io->error |= arg->error;
bd_io->end(bd_io, arg);
--
2.30.0
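
Engines opt into load balancing per line range: xcache_engine_need_lb() first consults the engine's optional need_lb_fn, then the global qos_need_lb() comparison, and lb_fn performs the offload. A hypothetical additional policy in the same shape as xcache_wb_need_lb() above (the 32 KiB cutoff is invented for illustration):

/* Hypothetical: skip offloading small accesses, which gain little from
 * bypassing the cache device. Not part of the patch. */
static int my_need_lb(ocf_cache_t cache, struct xcache_line_range *range)
{
	return range->size >= 32 * 1024;
}

/* Wired into an engine's handler the same way the write engines do it:
 *	.need_lb_fn = my_need_lb,
 *	.lb_fn = xcache_wr_lb_common,
 */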


@@ -0,0 +1,287 @@
From a09c79ba0cb154a0c0c1264bbb89248b7f8e956e Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 17:37:32 +0800
Subject: [PATCH 4/4] read_bf: add per-queue line data cache
Add a per-queue cache of line-sized data buffers to reduce the cost of
hugepage allocation for read backfill. A generic sketch of the pattern
follows the diffstat.
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
src/engine/xcache_engine_common.c | 2 +-
src/engine/xcache_engine_rd.c | 81 +++++++++++++++----------------
src/xcache_queue.c | 24 ++++++++-
src/xcache_queue.h | 6 +++
4 files changed, 68 insertions(+), 45 deletions(-)
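
The pattern is a small per-queue free list: allocation pops a cached buffer when one is available, and free pushes it back up to a fixed capacity. Because the list is only touched from its queue's context, no locking is needed. A generic sketch with plain malloc/free standing in for ctx_data_alloc/ctx_data_free, assuming all buffers share one size, as the line-sized buffers here do:

#include <stdlib.h>

#define QUEUE_CACHE_SIZE 128

struct buf_cache {
	void *slot[QUEUE_CACHE_SIZE];
	int num;	/* buffers currently cached */
};

static void *buf_cache_alloc(struct buf_cache *c, size_t size)
{
	if (c->num > 0)
		return c->slot[--c->num];	/* fast path: reuse */
	return malloc(size);			/* slow path: fresh allocation */
}

static void buf_cache_free(struct buf_cache *c, void *buf)
{
	if (c->num < QUEUE_CACHE_SIZE)
		c->slot[c->num++] = buf;	/* keep for the next request */
	else
		free(buf);
}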
diff --git a/src/engine/xcache_engine_common.c b/src/engine/xcache_engine_common.c
index eb1decb..9079cd0 100644
--- a/src/engine/xcache_engine_common.c
+++ b/src/engine/xcache_engine_common.c
@@ -355,7 +355,7 @@ static int xcache_wr_lb_common_end(struct xcache_backdev_io *backdev_io, struct
metadata_clear_valid_sec(cache, line, start_sector, last_sector);
ocf_metadata_end_collision_shared_access(cache, line);
- ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
return 0;
}
diff --git a/src/engine/xcache_engine_rd.c b/src/engine/xcache_engine_rd.c
index 6ac3b7e..4b64975 100644
--- a/src/engine/xcache_engine_rd.c
+++ b/src/engine/xcache_engine_rd.c
@@ -10,10 +10,14 @@
#include "xcache_engine_common.h"
#include "../xcache_queue.h"
-static void xcache_read_bf_done(ocf_cache_t cache, struct xcache_backdev_io *base_io)
+static inline void xcache_read_bf_done(ocf_cache_t cache, struct xcache_backdev_io *base_io)
{
+ ocf_queue_t q = xcache_io_queue(base_io->xcache_io);
+
xcache_unlock_wr(ocf_cache_line_concurrency(cache), base_io->line);
- xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+ xcache_queue_free_line_data(q, cache, base_io->data);
+ base_io->data = NULL;
+ xcache_queue_free_backdev_io(q, base_io);
}
static int xcache_read_bf_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
@@ -26,10 +30,6 @@ static int xcache_read_bf_cb(struct xcache_backdev_io *base_io, struct backdev_i
ocf_cache_log(cache, log_err, "read bf failed\n");
ocf_core_stats_cache_error_update(core, OCF_WRITE);
}
- if (env_atomic_read(&bf_io->remaining) == 1) {
- ctx_data_free(cache->owner, base_io->data);
- }
- base_io->data = NULL;
xcache_read_bf_done(cache, base_io);
return 0;
}
@@ -69,7 +69,7 @@ static ctx_data_t *xcache_get_bf_data(struct xcache_io *io, uint64_t addr, uint6
uint64_t from = addr - start_byte;
ctx_data_t *dst;
- dst = ctx_data_alloc(cache->owner, ((size + PAGE_SIZE - 1) / PAGE_SIZE));
+ dst = xcache_queue_alloc_line_data(xcache_io_queue(io), cache);
if (dst == NULL) {
return NULL;
}
@@ -78,14 +78,7 @@ static ctx_data_t *xcache_get_bf_data(struct xcache_io *io, uint64_t addr, uint6
return dst;
}
-static void xcache_free_bf_data(struct xcache_io *io, ctx_data_t *data)
-{
- ocf_cache_t cache = xcache_io_cache(io);
-
- ctx_data_free(cache->owner, data);
-}
-
-static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size, uint64_t buf_offset)
+static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size)
{
struct xcache_io *bf_io = base_io->xcache_io;
ocf_cache_t cache = xcache_io_cache(bf_io);
@@ -101,7 +94,7 @@ static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, u
cache_addr = cache_line_to_addr(cache, line, xcache_addr_offset(cache, addr));
base_io->end = xcache_read_bf_cb;
- xcache_backdev_submit_io(base_io, true, cache_addr, size, buf_offset, OCF_WRITE);
+ xcache_backdev_submit_io(base_io, true, cache_addr, size, 0, OCF_WRITE);
return 0;
out:
@@ -135,7 +128,7 @@ static void bf_io_end(struct xcache_io *bf_io, int error)
xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
}
-static struct xcache_io *xcache_get_bf_io(struct xcache_io *ori_io)
+static struct xcache_io *xcache_get_bf_xcache_io(struct xcache_io *ori_io)
{
struct xcache_io *bf_io = xcache_queue_alloc_xcache_io(ori_io->io_queue);
@@ -152,41 +145,54 @@ static struct xcache_io *xcache_get_bf_io(struct xcache_io *ori_io)
return bf_io;
}
-static void xcache_free_bf_io(struct xcache_io *bf_io)
+static void xcache_free_bf_xcache_io(struct xcache_io *bf_io)
{
xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
}
+static int xcache_submit_read_bf_line(struct xcache_io *io, struct xcache_io *bf_io, uint64_t bf_addr, uint64_t bf_size, ocf_cache_line_t line)
+{
+ ocf_cache_t cache = xcache_io_cache(bf_io);
+ struct xcache_backdev_io *base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf base_io failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ return -1;
+ }
+
+ base_io->data = xcache_get_bf_data(io, bf_addr, bf_size);
+ if (base_io->data == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf_data failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return -1;
+ }
+ base_io->xcache_io = bf_io;
+ base_io->line = line;
+
+ return xcache_do_read_bf(base_io, bf_addr, bf_size);
+}
+
static void xcache_submit_read_bf(struct xcache_io *io, uint64_t addr, uint64_t size)
{
ocf_cache_t cache = xcache_io_cache(io);
ocf_core_t core = xcache_io_core(io);
ocf_core_id_t core_id = ocf_core_get_id(core);
uint64_t line_size = ocf_line_size(cache);
- bool bf_submit = false;
uint64_t core_line_first, core_line_last, core_line;
ocf_cache_line_t line;
uint64_t bf_addr, bf_size;
struct xcache_backdev_io *base_io;
struct xcache_io *bf_io;
- ctx_data_t *bf_data;
- bf_io = xcache_get_bf_io(io);
+ bf_io = xcache_get_bf_xcache_io(io);
if (bf_io == NULL) {
ocf_cache_log(cache, log_err, "alloc bf_io failed\n");
xcache_read_bf_error(io, addr, size);
return;
}
- bf_data = xcache_get_bf_data(io, addr, size);
- if (bf_data == NULL) {
- ocf_cache_log(cache, log_err, "alloc bf_data failed\n");
- xcache_free_bf_io(bf_io);
- xcache_read_bf_error(io, addr, size);
- return;
- }
-
xcache_io_get_line_range(cache, addr, size, &core_line_first, &core_line_last);
bf_addr = addr;
bf_size = xcache_line_to_addr(cache, core_line_first + 1) - bf_addr;
@@ -201,23 +207,12 @@ static void xcache_submit_read_bf(struct xcache_io *io, uint64_t addr, uint64_t
continue;
}
- base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
- if (base_io == NULL) {
- ocf_cache_log(cache, log_err, "alloc bf base_io failed\n");
- xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ if (xcache_submit_read_bf_line(io, bf_io, bf_addr, bf_size, line) != 0) {
+ ocf_cache_log(cache, log_err, "read bf line failed\n");
continue;
}
- base_io->xcache_io = bf_io;
- base_io->line = line;
- base_io->data = bf_data;
- if (xcache_do_read_bf(base_io, bf_addr, bf_size, bf_addr - addr) == 0) {
- bf_submit = true;
- }
}
- if (!bf_submit) {
- xcache_free_bf_data(io, bf_data);
- }
xcache_io_put(bf_io);
}
@@ -327,7 +322,7 @@ static int xcache_read_lb_cb(struct xcache_backdev_io *backdev_io, struct backde
struct xcache_io *io = backdev_io->xcache_io;
ocf_cache_t cache = xcache_io_cache(io);
- ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
return 0;
}
diff --git a/src/xcache_queue.c b/src/xcache_queue.c
index e2c3926..01e0445 100644
--- a/src/xcache_queue.c
+++ b/src/xcache_queue.c
@@ -27,6 +27,7 @@ int xcache_queue_ctx_init(ocf_queue_t queue)
INIT_LIST_HEAD(&queue_ctx->xcache_io_list);
queue_ctx->xcache_io_no = 0;
+ queue_ctx->bf_data_num = 0;
queue->priv1 = (void *)queue_ctx;
return 0;
}
@@ -204,7 +205,6 @@ void ocf_queue_run_single(ocf_queue_t q)
queue_entry_run(get_entry_type(entry), entry);
}
-#define QUEUE_CACHE_SIZE 128
// only called by request in queue to avoid lock
struct xcache_backdev_io *xcache_queue_alloc_backdev_io(ocf_queue_t q)
{
@@ -262,6 +262,28 @@ void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io)
queue_ctx->xcache_io_no++;
}
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num > 0) {
+ return queue_ctx->bf_data[--queue_ctx->bf_data_num];
+ } else {
+ return ctx_data_alloc(cache->owner, (ocf_line_size(cache) + PAGE_SIZE - 1) / PAGE_SIZE);
+ }
+}
+
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num < QUEUE_CACHE_SIZE) {
+ queue_ctx->bf_data[queue_ctx->bf_data_num++] = data;
+ } else {
+ ctx_data_free(cache->owner, data);
+ }
+}
+
static void xcache_queue_push_entry(ocf_queue_t q, struct queue_entry *entry, bool at_head, bool allow_sync, enum entry_type type)
{
ocf_cache_t cache = ocf_queue_get_cache(q);
diff --git a/src/xcache_queue.h b/src/xcache_queue.h
index 3412a2a..9a9fd23 100644
--- a/src/xcache_queue.h
+++ b/src/xcache_queue.h
@@ -3,6 +3,8 @@
#include "ocf/xcache.h"
+#define QUEUE_CACHE_SIZE 128
+
struct xcache_queue_ctx {
struct list_head backdev_io_list;
int backdev_io_no;
@@ -10,6 +12,8 @@ struct xcache_queue_ctx {
int flush_io_no;
struct list_head xcache_io_list;
int xcache_io_no;
+ void *bf_data[QUEUE_CACHE_SIZE];
+ int bf_data_num;
};
int xcache_queue_ctx_init(ocf_queue_t queue);
@@ -26,4 +30,6 @@ void xcache_queue_push_backdev_io_front(struct xcache_backdev_io *base_io, bool
struct xcache_io *xcache_queue_alloc_xcache_io(ocf_queue_t q);
void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io);
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache);
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data);
#endif
--
2.30.0


@@ -1,11 +1,15 @@
 Name: ocf
 Version: 21.6.3.1
-Release: 1
+Release: 2
 Summary: high performance block storage caching meta-library written in C
 License: BSD-3-Clause
 URL: https://github.com/Open-CAS/ocf
 Source0: https://github.com/Open-CAS/ocf/archive/refs/tags/v21.6.3.1.tar.gz
+Patch1: 0001-ocf-export-function-in-OCF-for-further-use.patch
+Patch2: 0002-ocf-overwrite-IO-path-of-OCF-while-reusing-metadata-.patch
+Patch3: 0003-qos-add-load-balance.patch
+Patch4: 0004-read_bf-add-per-queue-line-data-cache.patch

 %description
 Open CAS Framework (OCF) is high performance block storage caching
@@ -35,5 +39,8 @@ cp -a * $RPM_BUILD_ROOT/usr/src/%{name}-%{version}
 /usr/src/%{name}-%{version}/

 %changelog
+* Mon Jan 09 2023 shikemeng <shikemeng@huawei.com> - 21.6.3.1-2
+- Enable xcache
+
 * Thu Dec 29 2022 shikemeng <shikemeng@huawei.com> - 21.6.3.1-1
 - Initialize OCF to 21.6.3.1