ocf/0001-ocf-export-function-in-OCF-for-further-use.patch
Kemeng Shi de38fbd7f8 OCF: enable xcache
This patchset enables xcache. More details can be found in the patch logs.

Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
2023-01-10 17:12:54 +08:00

622 lines
19 KiB
Diff

From 7ea35ad8a7ebb93579b2567dd494d1ac6969fcb0 Mon Sep 17 00:00:00 2001
From: Kemeng Shi <shikemeng@huawei.com>
Date: Tue, 10 Jan 2023 23:06:29 +0800
Subject: [PATCH 1/4] ocf: export function in OCF for further use
1. export ocf_cleaner_run
2. export ocf_hb_id_naked_[un]lock
3. provide more general metadata flush interface .flush_do_asynch_common
4. export some ocf_lru_list operations
5. provide more general alock ocf_io_alock_[un]lock_wr
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
---
src/cleaning/cleaning.c | 46 +++++---
src/cleaning/cleaning.h | 3 +
src/concurrency/ocf_metadata_concurrency.c | 4 +-
src/concurrency/ocf_metadata_concurrency.h | 7 ++
src/engine/engine_common.c | 4 +-
src/metadata/metadata_raw.c | 87 ++++++++++-----
src/metadata/metadata_raw.h | 40 +++++++
src/ocf_lru.c | 6 +-
src/ocf_lru.h | 7 ++
src/utils/utils_alock.c | 122 +++++++++++++++++++++
src/utils/utils_alock.h | 11 ++
11 files changed, 285 insertions(+), 52 deletions(-)
diff --git a/src/cleaning/cleaning.c b/src/cleaning/cleaning.c
index 94421f0..c7eef8f 100644
--- a/src/cleaning/cleaning.c
+++ b/src/cleaning/cleaning.c
@@ -70,47 +70,57 @@ static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
return 1;
}
-static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue)
{
ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
- ocf_mngt_cache_unlock(cache);
- ocf_queue_put(cleaner->io_queue);
- cleaner->end(cleaner, interval);
-}
-
-void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
-{
- ocf_cache_t cache;
-
- OCF_CHECK_NULL(cleaner);
- OCF_CHECK_NULL(queue);
-
- cache = ocf_cleaner_get_cache(cleaner);
-
/* Do not involve cleaning when cache is not running
* (error, etc.).
*/
if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
ocf_mngt_cache_is_locked(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
/* Sleep in case there is management operation in progress. */
if (ocf_mngt_cache_trylock(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
ocf_mngt_cache_unlock(cache);
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
ocf_queue_get(queue);
cleaner->io_queue = queue;
+ return 0;
+}
+
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
+
+ ocf_mngt_cache_unlock(cache);
+ ocf_queue_put(cleaner->io_queue);
+ cleaner->end(cleaner, interval);
+}
+
+void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
+{
+ ocf_cache_t cache;
+
+ OCF_CHECK_NULL(cleaner);
+ OCF_CHECK_NULL(queue);
+
+ if (ocf_cleaner_run_prepare(cleaner, queue) != 0) {
+ return;
+ }
+
+ cache = ocf_cleaner_get_cache(cleaner);
ocf_cleaning_perform_cleaning(cache, ocf_cleaner_run_complete);
}
diff --git a/src/cleaning/cleaning.h b/src/cleaning/cleaning.h
index 007dac0..f514393 100644
--- a/src/cleaning/cleaning.h
+++ b/src/cleaning/cleaning.h
@@ -53,4 +53,7 @@ void ocf_kick_cleaner(ocf_cache_t cache);
void ocf_stop_cleaner(ocf_cache_t cache);
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue);
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval);
+
#endif
diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c
index 52059a0..794e27f 100644
--- a/src/concurrency/ocf_metadata_concurrency.c
+++ b/src/concurrency/ocf_metadata_concurrency.c
@@ -212,7 +212,7 @@ void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock,
number. Preffered way to lock multiple hash buckets is to use
request lock rountines ocf_req_hash_(un)lock_(rd/wr).
*/
-static inline void ocf_hb_id_naked_lock(
+void ocf_hb_id_naked_lock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
@@ -226,7 +226,7 @@ static inline void ocf_hb_id_naked_lock(
ENV_BUG();
}
-static inline void ocf_hb_id_naked_unlock(
+void ocf_hb_id_naked_unlock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h
index 97262d8..43f4d1d 100644
--- a/src/concurrency/ocf_metadata_concurrency.h
+++ b/src/concurrency/ocf_metadata_concurrency.h
@@ -177,4 +177,11 @@ void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_loc
uint32_t page);
void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock,
uint32_t page);
+
+void ocf_hb_id_naked_lock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
+void ocf_hb_id_naked_unlock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
#endif
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index a789b13..5b30250 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -584,7 +584,7 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count);
}
-void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
@@ -614,7 +614,7 @@ void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
ocf_queue_kick(q, allow_sync);
}
-void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c
index 15287e7..efef69c 100644
--- a/src/metadata/metadata_raw.c
+++ b/src/metadata/metadata_raw.c
@@ -349,6 +349,8 @@ struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *req;
ocf_req_end_t complete;
+ void *io;
+ ocf_metadata_io_ctx_end_t io_end;
env_atomic flush_req_cnt;
int error;
};
@@ -369,8 +371,7 @@ static void _raw_ram_flush_do_asynch_io_complete(ocf_cache_t cache,
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
/* Call metadata flush completed call back */
- ctx->req->error |= ctx->error;
- ctx->complete(ctx->req, ctx->error);
+ ctx->io_end(ctx->io, ctx->error);
env_free(ctx);
}
@@ -429,17 +430,17 @@ int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
return 0;
}
-static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
+static void __raw_ram_flush_do_asynch_add_pages(struct ocf_metadata_line_getter *getter,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
- int *pages_to_flush) {
+ int *pages_to_flush)
+{
int i, j = 0;
- int line_no = req->core_line_count;
- struct ocf_map_info *map;
+ int line_no = line_getter_line_num(getter);
+ ocf_cache_line_t line;
for (i = 0; i < line_no; i++) {
- map = &req->map[i];
- if (map->flush) {
- pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
+ if (line_getter_flush_line(getter, i, &line)) {
+ pages_tab[j] = _RAW_RAM_PAGE(raw, line);
j++;
}
}
@@ -447,37 +448,31 @@ static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
*pages_to_flush = j;
}
-static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
- struct ocf_request *req, struct ocf_metadata_raw *raw,
- ocf_req_end_t complete)
+static int _raw_ram_flush_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
- int line_no = req->core_line_count;
+ int line_no = line_getter_line_num(line_getter);
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
struct _raw_ram_flush_ctx *ctx;
- ENV_BUG_ON(!complete);
+ ENV_BUG_ON(!io_ctx->io_end);
OCF_DEBUG_TRACE(cache);
- if (!req->info.flush_metadata) {
- /* Nothing to flush call flush callback */
- complete(req, 0);
- return 0;
- }
-
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
- ctx->req = req;
- ctx->complete = complete;
+ ctx->io = io_ctx->io;
+ ctx->io_end = io_ctx->io_end;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
@@ -487,7 +482,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
}
@@ -496,7 +491,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
* to prevent freeing of asynchronous context
*/
- __raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
+ __raw_ram_flush_do_asynch_add_pages(line_getter, pages_tab, raw,
&pages_to_flush);
env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
@@ -526,9 +521,9 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
env_atomic_inc(&ctx->flush_req_cnt);
- result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
+ result |= metadata_io_write_i_asynch(cache, io_ctx->queue, ctx,
raw->ssd_pages_offset + start_page, count,
- req->ioi.io.flags,
+ io_ctx->io_flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);
@@ -547,6 +542,43 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
return result;
}
+static int req_line_num(void *getter)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ return req->core_line_count;
+}
+
+static bool req_flush_line(void *getter, int index, ocf_cache_line_t *line)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ if (!req->map[index].flush) {
+ return false;
+ }
+
+ *line = req->map[index].coll_idx;
+ return true;
+}
+
+static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ struct ocf_metadata_io_context io_ctx = {
+ .io = (void *)req,
+ .io_flags = req->ioi.io.flags,
+ .io_end = (ocf_metadata_io_ctx_end_t)complete,
+ .queue = req->io_queue,
+ };
+ struct ocf_metadata_line_getter line_getter = {
+ .getter = (void *)req,
+ .get_line_num = req_line_num,
+ .get_flush_line = req_flush_line,
+ };
+ return _raw_ram_flush_asynch_common(cache, raw, &io_ctx, &line_getter);
+}
+
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
@@ -566,6 +598,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
.flush_do_asynch = _raw_ram_flush_do_asynch,
+ .flush_do_asynch_common = _raw_ram_flush_asynch_common
},
[metadata_raw_type_dynamic] = {
.init = raw_dynamic_init,
diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h
index 0357774..57f7b75 100644
--- a/src/metadata/metadata_raw.h
+++ b/src/metadata/metadata_raw.h
@@ -93,6 +93,35 @@ struct ocf_metadata_raw {
struct ocf_alock *mio_conc;
};
+struct ocf_metadata_line_getter {
+ void *getter;
+ int (*get_line_num)(void *getter);
+ bool (*get_flush_line)(void *getter, int index, ocf_cache_line_t *line);
+};
+
+static inline int line_getter_line_num(struct ocf_metadata_line_getter *getter)
+{
+ return getter->get_line_num(getter->getter);
+}
+
+static inline bool line_getter_flush_line(struct ocf_metadata_line_getter *getter,
+ int index, ocf_cache_line_t *line)
+{
+ return getter->get_flush_line(getter->getter, index, line);
+}
+
+typedef void (*ocf_metadata_io_ctx_end_t)(void *io, int error);
+struct ocf_metadata_io_context {
+ void *io;
+ int io_flags;
+ ocf_queue_t queue;
+ ocf_metadata_io_ctx_end_t io_end;
+};
+
+static inline void io_context_end(struct ocf_metadata_io_context *ctx, int error)
+{
+ ctx->io_end(ctx->io, error);
+}
/**
* RAW container interface
*/
@@ -137,8 +166,12 @@ struct raw_iface {
int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+
+ int (*flush_do_asynch_common)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ struct ocf_metadata_io_context *io_ctx, struct ocf_metadata_line_getter *line_getter);
};
+
/**
* @brief Initialize RAW instance
*
@@ -288,6 +321,13 @@ static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
return raw->iface->flush_do_asynch(cache, req, raw, complete);
}
+static inline int ocf_metadata_raw_flush_do_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
+{
+ return raw->iface->flush_do_asynch_common(cache, raw, io_ctx, line_getter);
+}
+
/*
* Check if line is valid for specified RAW descriptor
*/
diff --git a/src/ocf_lru.c b/src/ocf_lru.c
index e9c3882..8e323c0 100644
--- a/src/ocf_lru.c
+++ b/src/ocf_lru.c
@@ -221,7 +221,7 @@ void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
node->next = end_marker;
}
-static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean)
{
if (part->id == PARTITION_FREELIST)
@@ -261,7 +261,7 @@ static inline void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
add_lru_head(cache, dst_list, cline);
}
-static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_part *dst_part)
{
uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
@@ -658,7 +658,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_cleaner_fire(cache, &attribs);
}
-static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
ocf_core_t core;
diff --git a/src/ocf_lru.h b/src/ocf_lru.h
index a71b5fd..ae50b5e 100644
--- a/src/ocf_lru.h
+++ b/src/ocf_lru.h
@@ -33,4 +33,11 @@ void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+ struct ocf_part *src_part, struct ocf_part *dst_part);
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+ uint32_t lru_idx, bool clean);
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+ ocf_core_id_t core_id, ocf_part_id_t part_id);
+
#endif
diff --git a/src/utils/utils_alock.c b/src/utils/utils_alock.c
index 25f41a6..183682a 100644
--- a/src/utils/utils_alock.c
+++ b/src/utils/utils_alock.c
@@ -799,3 +799,125 @@ uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);
}
+
+int ocf_io_alock_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io)
+{
+ struct ocf_alock_waiter *waiter;
+ unsigned long flags = 0;
+ int ret = OCF_LOCK_NOT_ACQUIRED;
+
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ return OCF_LOCK_ACQUIRED;
+ }
+
+ ocf_alock_waitlist_lock(alock, entry, flags);
+
+ /* At the moment list is protected, double check if the cache entry is
+ * unlocked
+ */
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ ret = OCF_LOCK_ACQUIRED;
+ goto unlock;
+ }
+
+ waiter = env_allocator_new(alock->allocator);
+ if (!waiter) {
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+ /* Setup waiter's fields */
+ waiter->entry = entry;
+ waiter->req = (struct ocf_request *)io;
+ waiter->cmpl = prepare_wake_fn;
+ waiter->rw = OCF_WRITE;
+ INIT_LIST_HEAD(&waiter->item);
+
+ prepare_wait_fn(io);
+ /* Add to waiters list */
+ ocf_alock_waitlist_add(alock, entry, waiter);
+
+unlock:
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+
+ return ret;
+}
+
+static inline void ocf_io_alock_unlock_wr_common(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ bool locked = false;
+ bool exchanged = true;
+
+ uint32_t idx = _WAITERS_LIST_ITEM(entry);
+ struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
+ struct ocf_alock_waiter *waiter;
+
+ struct list_head *iter, *next;
+
+ /*
+ * Lock exchange scenario
+ * 1. WR -> IDLE
+ * 2. WR -> RD
+ * 3. WR -> WR
+ */
+
+ /* Check if the requested page is on the list */
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct ocf_alock_waiter, item);
+
+ if (entry != waiter->entry)
+ continue;
+
+ if (exchanged) {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr2wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_wr2rd(alock, entry);
+ else
+ ENV_BUG();
+ } else {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_rd(alock, entry);
+ else
+ ENV_BUG();
+ }
+
+ if (locked) {
+ exchanged = false;
+ list_del(iter);
+
+ waiter->cmpl(waiter->req);
+
+ env_allocator_del(alock->allocator, waiter);
+ } else {
+ break;
+ }
+ }
+
+ if (exchanged) {
+ /* No exchange, no waiters on the list, unlock and return
+ * WR -> IDLE
+ */
+ ocf_alock_unlock_entry_wr(alock, entry);
+ }
+}
+
+void ocf_io_alock_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ unsigned long flags = 0;
+
+ OCF_DEBUG_CACHE(alock->cache, "Cache entry unlock one wr = %u", entry);
+
+ /* Lock waiters list */
+ ocf_alock_waitlist_lock(alock, entry, flags);
+ ocf_io_alock_unlock_wr_common(alock, entry);
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+}
diff --git a/src/utils/utils_alock.h b/src/utils/utils_alock.h
index 2d3df97..3670c25 100644
--- a/src/utils/utils_alock.h
+++ b/src/utils/utils_alock.h
@@ -87,4 +87,15 @@ void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry);
+typedef void (*ocf_io_lock_prepare_wait)(void *io);
+typedef void (*ocf_io_lock_prepare_wake)(void *io);
+int ocf_io_alock_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io);
+
+void ocf_io_alock_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry);
+
#endif
--
2.30.0