This patchset enables xcache. More details can be found in the logs of the individual patches.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
From 4d7ec05fe2cf796d499328546ba1057d08b315c1 Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 17:52:04 +0800
Subject: [PATCH 3/4] qos: add load balance

Add load balancing to offload IO from the cache device to the backdev when
the cache device is busy.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
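Reviewer note (kept below the "---" cut line, so it is not part of the commit
message): the sketch below is a minimal, standalone illustration of the load
balance decision introduced in src/qos/qos_lb.h. It uses plain uint64_t counters
and a single weight per device, while the patch itself keeps separate read and
write weights in env_atomic64 counters; the names dev_load and need_lb are
illustrative only and do not exist in the tree.

/* Standalone sketch of the qos_need_lb() decision rule; not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CORE_DEFAULT_LOAD_WEIGHT  30
#define CACHE_DEFAULT_LOAD_WEIGHT 1

struct dev_load {
    uint64_t read_inflight_bytes;
    uint64_t write_inflight_bytes;
    uint32_t weight;
};

/* Weighted load: inflight bytes scaled by how expensive IO on the device is. */
static uint64_t dev_load(const struct dev_load *l)
{
    return (l->read_inflight_bytes + l->write_inflight_bytes) * l->weight;
}

/*
 * Offload a hit of 'bytes' to the core device only when the cache device is
 * already busier than the core device would be after taking this request.
 */
static bool need_lb(const struct dev_load *cache, const struct dev_load *core,
        uint64_t bytes)
{
    return dev_load(cache) > dev_load(core) + bytes * core->weight;
}

int main(void)
{
    struct dev_load cache = { 64 << 20, 32 << 20, CACHE_DEFAULT_LOAD_WEIGHT };
    struct dev_load core = { 1 << 20, 0, CORE_DEFAULT_LOAD_WEIGHT };
    uint64_t req = 1 << 20; /* a 1 MiB hit that either device could serve */

    /* 96 MiB of weighted cache load vs 30 MiB + 30 MiB on the core side. */
    printf("offload to core: %s\n", need_lb(&cache, &core, req) ? "yes" : "no");
    return 0;
}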
 inc/xcache_io.h | 22 ++++-
 src/engine/xcache_engine_common.c | 82 +++++++++++++++--
 src/engine/xcache_engine_common.h | 63 +++++++++++++
 src/engine/xcache_engine_rd.c | 24 +++++
 src/engine/xcache_engine_wb.c | 14 +++
 src/engine/xcache_engine_wt.c | 8 ++
 src/qos/qos.c | 6 ++
 src/qos/qos.h | 27 ++++++
 src/qos/qos_lb.h | 143 ++++++++++++++++++++++++++++++
 src/xcache.c | 1 +
 src/xcache.h | 3 +
 src/xcache_ocf_core.c | 4 +
 12 files changed, 386 insertions(+), 11 deletions(-)
 create mode 100644 src/qos/qos.c
 create mode 100644 src/qos/qos.h
 create mode 100644 src/qos/qos_lb.h

diff --git a/inc/xcache_io.h b/inc/xcache_io.h
index a8acb67..833a73c 100644
--- a/inc/xcache_io.h
+++ b/inc/xcache_io.h
@@ -2,6 +2,7 @@
#define XCACHE_IO_H__

#include "ocf_env.h"
+#include "ocf/ocf.h"

enum entry_type {
XCACHE_IO_ENTRY,
@@ -49,6 +50,11 @@ struct backdev_io_end_arg {
struct xcache_backdev_io;
typedef int (*backdev_io_end_fn)(struct xcache_backdev_io *io, struct backdev_io_end_arg *arg);
typedef void (*backdev_io_res_fn)(struct xcache_backdev_io *io);
+enum xcache_dir {
+ XCACHE_RD = 0,
+ XCACHE_WR,
+ XCACHE_FLUSH,
+};
struct xcache_backdev_io {
// queue_entry
enum entry_type type;
@@ -60,9 +66,19 @@ struct xcache_backdev_io {
ocf_cache_line_t line;
ctx_data_t *data;

- backdev_io_res_fn io_res;
- uint64_t addr;
- uint64_t size;
+ union {
+ /* for xcache lock */
+ struct {
+ backdev_io_res_fn io_res;
+ uint64_t addr;
+ uint64_t size;
+ };
+ /* for io_end callback */
+ struct {
+ int dev;
+ enum xcache_dir dir;
+ };
+ };

void *priv;
};
diff --git a/src/engine/xcache_engine_common.c b/src/engine/xcache_engine_common.c
index f1bf022..eb1decb 100644
--- a/src/engine/xcache_engine_common.c
+++ b/src/engine/xcache_engine_common.c
@@ -4,6 +4,7 @@
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"

+#include "../xcache.h"
#include "xcache_engine_common.h"
#include "../xcache_lru.h"
#include "../xcache_queue.h"
@@ -342,6 +343,55 @@ int xcache_foreach_line(struct xcache_io_context *ctx, xcache_line_handle_func f
return 0;
}

+static int xcache_wr_lb_common_end(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_cache_line_t line = backdev_io->line;
+ uint8_t start_sector, last_sector;
+
+ xcache_get_sectors_range(cache, cb_arg->addr, cb_arg->size, &start_sector, &last_sector);
+ ocf_metadata_start_collision_shared_access(cache, line);
+ metadata_clear_valid_sec(cache, line, start_sector, last_sector);
+ ocf_metadata_end_collision_shared_access(cache, line);
+
+ ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->line = line;
+ backdev_io->end = xcache_wr_lb_common_end;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_WRITE);
+}
+
+static inline bool xcache_engine_need_lb(struct xcache_io_handler *handler, struct xcache_line_range *line_range)
+{
+ struct xcache_io_context *ctx = handler->ctx;
+ struct xcache_io *io = ctx->io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ xcache_context_t *xcache_ctx = xcache_get_ctx(cache);
+ struct backdev_io *backdev_io = NULL;
+
+ if (handler->lb_fn == NULL) {
+ return false;
+ }
+
+ if (handler->need_lb_fn != NULL &&
+ !handler->need_lb_fn(xcache_io_cache(io), line_range)) {
+ return false;
+ }
+
+ return xcache_qos_need_lb(&xcache_ctx->qos, xcache_io_dir(io), line_range->size);
+}
+
static int xcache_handle_line(void *priv,
uint64_t core_line, uint64_t addr, uint64_t size,
uint8_t start_sector, uint8_t last_sector)
@@ -350,7 +400,14 @@ static int xcache_handle_line(void *priv,
struct xcache_io_context *ctx = handler->ctx;
struct xcache_io *io = ctx->io;
ocf_cache_t cache = xcache_ctx_cache(ctx);
- struct xcache_backdev_io *base_io;
+ struct xcache_backdev_io *backdev_io;
+ struct xcache_line_range line_range = {
+ .addr = addr,
+ .size = size,
+ .start_sector = start_sector,
+ .last_sector = last_sector,
+ };
+
ocf_cache_line_t line;
int lock;

@@ -368,21 +425,30 @@ static int xcache_handle_line(void *priv,
}

xcache_submit_miss_line(ctx, core_line, handler->miss_fn);
- ctx->hit_no++;
- ctx->cache_bytes += size;

if (lock == OCF_LOCK_NOT_ACQUIRED) {
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
return 0;
}

- base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
- if (base_io == NULL) {
+ backdev_io = xcache_alloc_backdev_io(io);
+ if (backdev_io == NULL) {
ocf_cache_log(cache, log_err, "alloc base io failed\n");
return -ENOMEM;
}
- base_io->xcache_io = io;
- base_io->data = io->data;
- handler->hit_fn(base_io, line, SECTORS_TO_BYTES(start_sector), size, ctx->offset);
+
+ line_range.cache_line = line;
+ if (xcache_engine_need_lb(handler, &line_range))
+ {
+ ctx->core_bytes += size;
+ handler->lb_fn(backdev_io, line, addr, size);
+ return 0;
+ }
+
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
+ handler->hit_fn(backdev_io, line, SECTORS_TO_BYTES(start_sector), size, ctx->offset);
return 0;
}

diff --git a/src/engine/xcache_engine_common.h b/src/engine/xcache_engine_common.h
index 3fc168d..fd73519 100644
--- a/src/engine/xcache_engine_common.h
+++ b/src/engine/xcache_engine_common.h
@@ -3,8 +3,10 @@

#include "../ocf_cache_priv.h"
#include "../utils/utils_cache_line.h"
+#include "../ocf_def_priv.h"

#include "../xcache.h"
+#include "../xcache_queue.h"

#define INVALID_CORE_LINE ((uint64_t)-1)
#define INVALID_LINE ((ocf_cache_line_t)-1)
@@ -36,6 +38,13 @@ static inline uint8_t xcache_sector_offset(ocf_cache_t cache, uint64_t sector)
return sector & (ocf_line_sectors(cache) - 1);
}

+static inline void xcache_get_sectors_range(ocf_cache_t cache, uint64_t addr, uint64_t size, uint8_t *start_sector, uint8_t *last_sector)
+{
+ uint64_t offset = xcache_addr_offset(cache, addr);
+ *start_sector = BYTES_TO_SECTORS(offset);
+ *last_sector = BYTES_TO_SECTORS(offset + size - 1);
+}
+
static inline void xcache_io_get_line_range(ocf_cache_t cache, uint64_t addr, uint64_t size,
uint64_t *line_first, uint64_t *line_last)
{
@@ -55,6 +64,15 @@ void xcache_map_cache_line(struct xcache_io_context *ctx,
uint64_t cache_line_to_addr(ocf_cache_t cache, ocf_cache_line_t line, uint64_t line_offset);
ocf_cache_line_t addr_to_cache_line(ocf_cache_t cache, uint64_t addr);

+struct xcache_line_range {
+ ocf_cache_line_t cache_line;
+ uint64_t core_line;
+ uint64_t addr;
+ uint64_t size;
+ uint64_t start_sector;
+ uint64_t last_sector;
+};
+
typedef int (*xcache_line_handle_func)(void *priv,
uint64_t core_line, uint64_t addr, uint64_t size,
uint8_t start_sector, uint8_t last_sector);
@@ -93,6 +111,11 @@ static inline ocf_queue_t xcache_io_queue(struct xcache_io *io)
return io->io_queue;
}

+static inline int xcache_io_dir(struct xcache_io *io)
+{
+ return io->rw;
+}
+
static inline ocf_core_t xcache_ctx_core(struct xcache_io_context *ctx)
{
return xcache_io_core(ctx->io);
@@ -108,12 +131,28 @@ static inline ocf_queue_t xcache_ctx_queue(struct xcache_io_context *ctx)
return xcache_io_queue(ctx->io);
}

+static inline struct xcache_backdev_io *xcache_alloc_backdev_io(struct xcache_io *io)
+{
+ struct xcache_backdev_io *backdev_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+
+ if (backdev_io == NULL) {
+ return NULL;
+ }
+
+ backdev_io->xcache_io = io;
+ backdev_io->data = io->data;
+ return backdev_io;
+}
+
typedef int (*xcache_line_valid_fn)(ocf_cache_t cache, ocf_cache_line_t line,
uint8_t start_sector, uint8_t last_sector);
typedef int (*xcache_line_hit_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
uint64_t offset, uint64_t size, uint64_t buf_offset);
typedef int (*xcache_line_miss_fn)(struct xcache_backdev_io *base_io, uint64_t addr,
uint64_t size, uint64_t buf_offset);
+typedef int (*xcache_line_need_lb_fn)(ocf_cache_t cache, struct xcache_line_range *line_range);
+typedef void (*xcache_line_lb_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);

struct xcache_io_handler {
struct xcache_io_context *ctx;
@@ -121,9 +160,14 @@ struct xcache_io_handler {
xcache_line_hit_fn hit_fn;
xcache_line_miss_fn miss_fn;
backdev_io_res_fn res_fn;
+ xcache_line_need_lb_fn need_lb_fn;
+ xcache_line_lb_fn lb_fn;
};
int xcache_handle_io(struct xcache_io_handler *handler);

+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);
+
static inline void xcache_io_get(struct xcache_io *io)
{
env_atomic_inc_return(&io->remaining);
@@ -144,9 +188,28 @@ static inline void xcache_io_put(struct xcache_io *io)
xcache_io_end(io, io->error);
}

+static inline xcache_context_t *backdev_io_to_xcache_ctx(struct xcache_backdev_io *io_base)
+{
+ struct xcache_io *io = io_base->xcache_io;
+ ocf_queue_t q = io->io_queue;
+ ocf_cache_t cache = q->cache;
+ return xcache_get_ctx(cache);
+}
+
static inline void xcache_backdev_submit_io(struct xcache_backdev_io *io_base, bool cached, uint64_t addr, uint64_t size, uint64_t buf_offset, uint8_t dir)
{
struct xcache_io *io = io_base->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(io_base);
+
+ io_base->dir = dir;
+ if (cached) {
+ io_base->dev = CACHE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CACHE_DEV, dir, size);
+ } else {
+ io_base->dev = CORE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CORE_DEV, dir, size);
+ }
+
xcache_io_get(io);
spdk_backdev_submit_io(io_base, cached, addr, size, buf_offset, dir);
}
diff --git a/src/engine/xcache_engine_rd.c b/src/engine/xcache_engine_rd.c
index ffe06d2..6ac3b7e 100644
--- a/src/engine/xcache_engine_rd.c
+++ b/src/engine/xcache_engine_rd.c
@@ -322,6 +322,28 @@ static int xcache_read_line_valid(ocf_cache_t cache, ocf_cache_line_t line,
return !metadata_test_valid_sec(cache, line, start_sector, last_sector);
}

+static int xcache_read_lb_cb(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+
+ ocf_io_alock_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+static void xcache_read_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->end = xcache_read_lb_cb;
+ backdev_io->line = line;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_READ);
+}
+
int xcache_read_generic(struct xcache_io *io)
{
int ret;
@@ -332,6 +354,8 @@ int xcache_read_generic(struct xcache_io *io)
.valid_fn = xcache_read_line_valid,
.miss_fn = xcache_read_miss,
.hit_fn = xcache_read_hit,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_read_lb,
};

xcache_init_io_ctx(&ctx, io);
diff --git a/src/engine/xcache_engine_wb.c b/src/engine/xcache_engine_wb.c
index 064f650..e4f8212 100644
--- a/src/engine/xcache_engine_wb.c
+++ b/src/engine/xcache_engine_wb.c
@@ -170,6 +170,18 @@ static void xcache_wb_res(struct xcache_backdev_io *base_io)
xcache_wb_hit(base_io, base_io->line, offset, base_io->size, buf_offset);
}

+/* Bypassing dirty sectors to the core would require extra cache IO to update the valid bits, so only clean sectors are load balanced. */
+static int xcache_wb_need_lb(ocf_cache_t cache, struct xcache_line_range *range)
+{
+ return !metadata_test_dirty_sec(cache, range->cache_line, range->start_sector, range->last_sector);
+}
+
+static void xcache_wb_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
int xcache_wb(struct xcache_io *io)
{
int ret;
@@ -180,6 +192,8 @@ int xcache_wb(struct xcache_io *io)
.valid_fn = NULL,
.miss_fn = xcache_wb_miss,
.hit_fn = xcache_wb_hit,
+ .need_lb_fn = xcache_wb_need_lb,
+ .lb_fn = xcache_wb_lb,
};

mark_flush();
diff --git a/src/engine/xcache_engine_wt.c b/src/engine/xcache_engine_wt.c
index 234608d..e3d4c99 100644
--- a/src/engine/xcache_engine_wt.c
+++ b/src/engine/xcache_engine_wt.c
@@ -167,6 +167,12 @@ static int xcache_wt_core(struct xcache_io_context *ctx)
return 0;
}

+static void xcache_wt_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
int xcache_wt(struct xcache_io *io)
{
struct xcache_io_context ctx;
@@ -176,6 +182,8 @@ int xcache_wt(struct xcache_io *io)
.valid_fn = NULL,
.miss_fn = NULL,
.hit_fn = xcache_wt_hit_cache,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_wt_lb,
};
int ret;

diff --git a/src/qos/qos.c b/src/qos/qos.c
new file mode 100644
index 0000000..6ea2da9
--- /dev/null
+++ b/src/qos/qos.c
@@ -0,0 +1,6 @@
+#include "qos.h"
+
+void xcache_qos_init(struct xcache_qos *qos)
+{
+ qos_lb_init(&qos->qos_lb);
+}
diff --git a/src/qos/qos.h b/src/qos/qos.h
new file mode 100644
index 0000000..3b6a691
--- /dev/null
+++ b/src/qos/qos.h
@@ -0,0 +1,27 @@
+#ifndef __QOS_H__
+#define __QOS_H__
+
+#include "qos_lb.h"
+
+struct xcache_qos {
+ struct qos_lb qos_lb;
+};
+
+static inline void xcache_qos_load_add(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_add(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline void xcache_qos_load_sub(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_sub(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline bool xcache_qos_need_lb(struct xcache_qos *qos, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_need_lb(&qos->qos_lb, dir, bytes);
+}
+
+void xcache_qos_init(struct xcache_qos *qos);
+
+#endif
diff --git a/src/qos/qos_lb.h b/src/qos/qos_lb.h
new file mode 100644
index 0000000..bb3bfe4
--- /dev/null
+++ b/src/qos/qos_lb.h
@@ -0,0 +1,143 @@
+#ifndef __QOS_LB_H__
+#define __QOS_LB_H__
+
+#define CORE_DEFAULT_LOAD_WEIGHT 30
+#define CACHE_DEFAULT_LOAD_WEIGHT 1
+
+#define CORE_DEV 0
+#define CACHE_DEV 1
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "ocf/xcache.h"
+
+struct qos_dev_load {
+ env_atomic64 read_inflight_bytes;
+ env_atomic64 write_inflight_bytes;
+ uint32_t read_weight;
+ uint32_t write_weight;
+};
+
+static inline void qos_dev_load_init(struct qos_dev_load *load)
+{
+ env_atomic64_set(&load->read_inflight_bytes, 0);
+ env_atomic64_set(&load->write_inflight_bytes, 0);
+}
+
+static inline void qos_dev_load_add(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_add(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_add(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_dev_load_sub(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_sub(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_sub(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline uint64_t qos_dev_load_read(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return env_atomic64_read(&load->read_inflight_bytes);
+ case XCACHE_WR:
+ return env_atomic64_read(&load->write_inflight_bytes);
+ default:
+ return 0;
+ }
+}
+
+static inline uint32_t qos_dev_load_weight(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return load->read_weight;
+ case XCACHE_WR:
+ return load->write_weight;
+ default:
+ return 0;
+ }
+}
+
+static inline uint64_t do_cal_load(uint64_t bytes, uint32_t weight)
+{
+ return bytes * weight;
+}
+
+static inline uint64_t qos_dev_load_cal(struct qos_dev_load *load)
+{
+ uint64_t read_inflight_bytes = qos_dev_load_read(load, XCACHE_RD);
+ uint64_t write_inflight_bytes = qos_dev_load_read(load, XCACHE_WR);
+
+ return do_cal_load(read_inflight_bytes, load->read_weight) +
+ do_cal_load(write_inflight_bytes, load->write_weight);
+}
+
+struct qos_lb {
+ struct qos_dev_load cache_load;
+ struct qos_dev_load core_load;
+};
+
+static inline void qos_lb_init(struct qos_lb *qos_lb)
+{
+ qos_dev_load_init(&qos_lb->cache_load);
+ qos_dev_load_init(&qos_lb->core_load);
+ qos_lb->cache_load.read_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->cache_load.write_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.read_weight = CORE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.write_weight = CORE_DEFAULT_LOAD_WEIGHT;
+}
+
+static inline void qos_lb_load_add(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_add(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_add(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_lb_load_sub(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_sub(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_sub(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline bool qos_need_lb(struct qos_lb *qos_lb, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_dev_load_cal(&qos_lb->cache_load) > qos_dev_load_cal(&qos_lb->core_load) +
+ do_cal_load(bytes, qos_dev_load_weight(&qos_lb->core_load, dir));
+}
+
+#endif
diff --git a/src/xcache.c b/src/xcache.c
index e8d1f2d..0c6a2b8 100644
--- a/src/xcache.c
+++ b/src/xcache.c
@@ -17,6 +17,7 @@ int xcache_init(ocf_cache_t cache)
set_deadline_policy();
evicting_init(ctx);
ctx->line_size_shift = __builtin_ffsll(ocf_line_size(cache)) - 1;
+ xcache_qos_init(&ctx->qos);

return 0;
}
diff --git a/src/xcache.h b/src/xcache.h
index f31ec15..4fd7277 100644
--- a/src/xcache.h
+++ b/src/xcache.h
@@ -5,7 +5,9 @@
#include "ocf/ocf_types.h"
#include "./ocf_cache_priv.h"

+#include "ocf/xcache.h"
#include "xcache_cleaner.h"
+#include "qos/qos.h"

typedef ocf_cache_line_t xcache_line_t;

@@ -14,6 +16,7 @@ typedef struct xcache_context {
void *xcache_evicting;
ocf_cache_t cache;
struct xcache_cleaning_ctx cleaning_ctx;
+ struct xcache_qos qos;
} xcache_context_t;

static inline xcache_context_t *xcache_get_ctx(ocf_cache_t cache)
diff --git a/src/xcache_ocf_core.c b/src/xcache_ocf_core.c
index a3d5c1c..bfd6619 100644
--- a/src/xcache_ocf_core.c
+++ b/src/xcache_ocf_core.c
@@ -6,6 +6,7 @@

#include "ocf/xcache.h"
#include "xcache_queue.h"
+#include "qos/qos.h"

void xcache_submit_io(struct xcache_io *io)
{
@@ -34,6 +35,9 @@ void xcache_submit_io(struct xcache_io *io)
void xcache_backdev_io_end(struct xcache_backdev_io *bd_io, struct backdev_io_end_arg *arg)
{
struct xcache_io *io = bd_io->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(bd_io);
+
+ xcache_qos_load_sub(&xcache_ctx->qos, bd_io->dev, bd_io->dir, arg->size);

io->error |= arg->error;
bd_io->end(bd_io, arg);
--
2.30.0
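Reviewer note (appended after the patch trailer): a standalone sketch of the
inflight accounting the patch pairs across xcache_backdev_submit_io(), which
calls xcache_qos_load_add() before an IO is issued, and xcache_backdev_io_end(),
which calls xcache_qos_load_sub() on completion, so qos_need_lb() always compares
the bytes currently queued on each device. C11 atomics stand in for env_atomic64;
dev_inflight, submit_io and complete_io below are illustrative names only.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct dev_inflight {
    _Atomic uint64_t bytes; /* bytes currently queued on the device */
};

static void submit_io(struct dev_inflight *dev, uint64_t size)
{
    /* account for the request before it is issued */
    atomic_fetch_add(&dev->bytes, size);
}

static void complete_io(struct dev_inflight *dev, uint64_t size)
{
    /* drop the accounting once the request finishes */
    atomic_fetch_sub(&dev->bytes, size);
}

int main(void)
{
    struct dev_inflight cache_dev = { 0 };

    submit_io(&cache_dev, 4096);
    printf("inflight after submit: %llu bytes\n",
           (unsigned long long)atomic_load(&cache_dev.bytes));
    complete_io(&cache_dev, 4096);
    printf("inflight after completion: %llu bytes\n",
           (unsigned long long)atomic_load(&cache_dev.bytes));
    return 0;
}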