From 35d7e10580712aabc48ee4904e69d177cb38594c Mon Sep 17 00:00:00 2001
From: Xiongchuan Zhou
Date: Wed, 29 Oct 2025 16:39:38 +0800
Subject: [PATCH 001/243] ub: ubase: support debugfs for active dev stats.

commit 6b3b919054909ca3d7bdfb1c9c2fe48c098ff1cb openEuler

Add debugfs support for active dev stats. This commit uses debugfs to
collect statistics on the number of times the device activate and
deactivate interfaces are invoked. In addition, the latest 10
activate/deactivate records can be queried.

Signed-off-by: Xiaobo Zhang
Signed-off-by: Xiongchuan Zhou
Signed-off-by: huwentao
---
 drivers/ub/ubase/debugfs/ubase_debugfs.c | 57 ++++++++++++++++++++++++
 drivers/ub/ubase/ubase_dev.c             | 10 ++++-
 drivers/ub/ubase/ubase_stats.c           | 22 +++++++++
 drivers/ub/ubase/ubase_stats.h           |  3 ++
 4 files changed, 90 insertions(+), 2 deletions(-)

diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c
index d7495d8b4ef7..ef0b162e1188 100644
--- a/drivers/ub/ubase/debugfs/ubase_debugfs.c
+++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c
@@ -30,6 +30,55 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data)
 	return 0;
 }
 
+static int ubase_dbg_dump_activate_record(struct seq_file *s, void *data)
+{
+	struct ubase_dev *udev = dev_get_drvdata(s->private);
+	struct ubase_activate_dev_stats *record;
+	u8 cnt = 1, stats_cnt;
+	u64 total, idx;
+
+	if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits) ||
+	    test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits))
+		return -EBUSY;
+
+	record = &udev->stats.activate_record;
+
+	mutex_lock(&record->lock);
+
+	seq_puts(s, "current time : ");
+	ubase_dbg_format_time(ktime_get_real_seconds(), s);
+	seq_puts(s, "\n");
+	seq_printf(s, "activate dev count : %llu\n", record->act_cnt);
+	seq_printf(s, "deactivate dev count : %llu\n", record->deact_cnt);
+
+	total = record->act_cnt + record->deact_cnt;
+	if (!total) {
+		seq_puts(s, "activate dev change records : NA\n");
+		mutex_unlock(&record->lock);
+		return 0;
+	}
+
+	seq_puts(s, "activate dev change records :\n");
+	seq_puts(s, "\tNo.\tTIME\t\t\t\tSTATUS\t\tRESULT\n");
+
+	stats_cnt = min(total, UBASE_ACT_STAT_MAX_NUM);
+	while (cnt <= stats_cnt) {
+		total--;
+		idx = total % UBASE_ACT_STAT_MAX_NUM;
+		seq_printf(s, "\t%-2d\t", cnt);
+		ubase_dbg_format_time(record->stats[idx].time, s);
+		seq_printf(s, "\t%s", record->stats[idx].activate ?
+ "activate" : "deactivate"); + seq_printf(s, "\t%d", record->stats[idx].result); + seq_puts(s, "\n"); + cnt++; + } + + mutex_unlock(&record->lock); + + return 0; +} + static void ubase_dbg_fill_single_port(struct seq_file *s, struct ubase_perf_stats_result *stats) { @@ -193,6 +242,14 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_rst_info, }, + { + .name = "activate_record", + .dentry_index = UBASE_DBG_DENTRY_ROOT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_activate_record, + }, { .name = "sl_vl_map", .dentry_index = UBASE_DBG_DENTRY_QOS, diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index abdb3232edea..3921cd0ff824 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -16,6 +16,7 @@ #include "ubase_mailbox.h" #include "ubase_pmem.h" #include "ubase_reset.h" +#include "ubase_stats.h" #include "ubase_dev.h" #define UBASE_PERIOD_100MS 100 @@ -1317,12 +1318,15 @@ int ubase_activate_dev(struct auxiliary_device *adev) if (ret) { ubase_err(udev, "failed to activate ubase dev, ret = %d.\n", ret); - return ret; + goto activate_dev_err; } ubase_activate_notify(udev, adev, true); - return 0; +activate_dev_err: + ubase_update_activate_stats(udev, true, ret); + + return ret; } EXPORT_SYMBOL(ubase_activate_dev); @@ -1352,6 +1356,8 @@ int ubase_deactivate_dev(struct auxiliary_device *adev) ubase_activate_notify(udev, adev, true); } + ubase_update_activate_stats(udev, false, ret); + return ret; } EXPORT_SYMBOL(ubase_deactivate_dev); diff --git a/drivers/ub/ubase/ubase_stats.c b/drivers/ub/ubase/ubase_stats.c index b30e839ebb0b..7f536e0cd537 100644 --- a/drivers/ub/ubase/ubase_stats.c +++ b/drivers/ub/ubase/ubase_stats.c @@ -65,3 +65,25 @@ int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, sizeof(*data) / sizeof(u64), false); } EXPORT_SYMBOL(ubase_get_ub_port_stats); + +void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, + int result) +{ + struct ubase_activate_dev_stats *record = &udev->stats.activate_record; + u64 idx, total; + + mutex_lock(&record->lock); + + if (activate) + record->act_cnt++; + else + record->deact_cnt++; + + total = record->act_cnt + record->deact_cnt; + idx = (total - 1) % UBASE_ACT_STAT_MAX_NUM; + record->stats[idx].activate = activate; + record->stats[idx].time = ktime_get_real_seconds(); + record->stats[idx].result = result; + + mutex_unlock(&record->lock); +} diff --git a/drivers/ub/ubase/ubase_stats.h b/drivers/ub/ubase/ubase_stats.h index b3f6e5d788cc..a6826dd461c7 100644 --- a/drivers/ub/ubase/ubase_stats.h +++ b/drivers/ub/ubase/ubase_stats.h @@ -15,4 +15,7 @@ struct ubase_query_mac_stats_cmd { __le64 stats_val[]; }; +void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, + int result); + #endif /* _UBASE_STATS_H */ -- Gitee From 0445b641aca4d3e315fe287a617113c1e6c12697 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Tue, 23 Sep 2025 15:18:35 +0800 Subject: [PATCH 002/243] ub: ubase: Added debug information query function for FST/FVT/RQMT entries commit 0bbe314ab1feb4918f0643fd752f319fb2cfe310 openEuler This patch enhances the debugfs of the ubase driver by adding support for querying information on FST (Flow Steering Table), FVT (Flow Virtualization Table), and RQMT (Requirement) entries, thereby improving QoS debugging capabilities. Main Features: 1. 
Entry Information Display: - FST Table: Displays configuration details such as the number of service level queue virtual channels and the starting queue ID. - FVT Table: Shows information on virtual channel size and requirement offsets. - FST_REVERT Table: Provides mapping relationships between FST index, UE index, queue index, and the number of virtual channels. - RQMT Table: Displays requirement information, including FST index, starting queue index, and queue number offset. 2. Multi-User Entity Support: - Supports querying entry information for the current UE and all registered UEs. - Uses a UE list lock to ensure thread-safe access. This feature provides network administrators with an in-depth visualization tool for traffic steering and virtualization configurations, aiding in the diagnosis and optimization of complex QoS policy configurations. Signed-off-by: Zihao Sheng Signed-off-by: Xiongchuan Zhou Signed-off-by: huwentao --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 8 ++ drivers/ub/ubase/debugfs/ubase_qos_debugfs.c | 102 +++++++++++++++++++ drivers/ub/ubase/debugfs/ubase_qos_debugfs.h | 1 + 3 files changed, 111 insertions(+) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index ef0b162e1188..56ce970411bd 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -314,6 +314,14 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_adev_qos_info, }, + { + .name = "fst_fvt_rqmt_info", + .dentry_index = UBASE_DBG_DENTRY_QOS, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_fsv_fvt_rqmt, + }, { .name = "tm_queue", .dentry_index = UBASE_DBG_DENTRY_QOS, diff --git a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c index 9b8221e422da..91e05df180bb 100644 --- a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c @@ -252,6 +252,108 @@ int ubase_dbg_dump_adev_qos_info(struct seq_file *s, void *data) return 0; } +static void ubase_dbg_fill_fst_fvt(struct seq_file *s, + struct ubase_query_fst_fvt_rqmt_cmd *resp) +{ + seq_puts(s, "\tFST:\n"); + seq_printf(s, "\t\tsl_queue_vl_num: %u\n", + le16_to_cpu(resp->sl_queue_vl_num)); + seq_printf(s, "\t\tsl_queue_start_qid: %u\n", + le16_to_cpu(resp->sl_queue_start_qid)); + seq_puts(s, "\tFVT:\n"); + seq_printf(s, "\t\tfvt_vl_size: %u\n", le16_to_cpu(resp->fvt_vl_size)); + seq_printf(s, "\t\tfvt_rqmt_offset: %u\n", + le16_to_cpu(resp->fvt_rqmt_offset)); +} + +static void ubase_dbg_fill_fst_revert(struct seq_file *s, + struct ubase_query_fst_fvt_rqmt_cmd *resp) +{ + u16 vl_num = min(UBASE_MAX_VL_NUM, le16_to_cpu(resp->sl_queue_vl_num)); + u16 j; + + seq_puts(s, "\tFST_REVERT:\n"); + seq_puts(s, "\t\tFST_IDX UE_IDX QUE_IDX VL_NUM\n"); + + for (j = 0; j < vl_num; j++) { + seq_puts(s, "\t\t"); + seq_printf(s, "%-9u", le16_to_cpu(resp->fstr_info[j].fst_idx)); + seq_printf(s, "%-10u", resp->fstr_info[j].queue_ue_num); + seq_printf(s, "%-10u", resp->fstr_info[j].queue_que_num); + seq_printf(s, "%-9u", le16_to_cpu(resp->fstr_info[j].queue_vl_num)); + seq_puts(s, "\n"); + } +} + +static void ubase_dbg_fill_rqmt(struct seq_file *s, + struct ubase_query_fst_fvt_rqmt_cmd *resp) +{ + u16 vl_size = min(UBASE_MAX_VL_NUM, le16_to_cpu(resp->fvt_vl_size)); + u16 j; + + seq_puts(s, "\tRQMT:\n"); + 
seq_puts(s, "\t\tFST_IDX QUE_IDX QUE_SHIFT\n"); + + for (j = 0; j < vl_size; j++) { + seq_puts(s, "\t\t"); + seq_printf(s, "%-9u", le16_to_cpu(resp->rqmt_info[j].fst_idx)); + seq_printf(s, "%-10u", + le16_to_cpu(resp->rqmt_info[j].start_queue_idx)); + seq_printf(s, "%-12u", + le16_to_cpu(resp->rqmt_info[j].queue_quantity_shift)); + seq_puts(s, "\n"); + } + + seq_puts(s, "\n"); +} + +static void ubase_dbg_fill_tbl_content(struct seq_file *s, + struct ubase_query_fst_fvt_rqmt_cmd *resp) +{ + ubase_dbg_fill_fst_fvt(s, resp); + + ubase_dbg_fill_fst_revert(s, resp); + + ubase_dbg_fill_rqmt(s, resp); +} + +int ubase_dbg_dump_fsv_fvt_rqmt(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + struct ubase_query_fst_fvt_rqmt_cmd resp = {0}; + struct ubase_ue_node *ue_node; + u16 ue_id; + int ret; + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits) || + test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + return -EBUSY; + + seq_puts(s, "current ue:\n"); + ret = ubase_query_fst_fvt_rqmt(udev, &resp, 0); + if (ret) + return ret; + + ubase_dbg_fill_tbl_content(s, &resp); + + mutex_lock(&udev->ue_list_lock); + list_for_each_entry(ue_node, &udev->ue_list, list) { + ue_id = ue_node->bus_ue_id; + memset(&resp, 0, sizeof(resp)); + + seq_printf(s, "ue%u:\n", ue_id); + + ret = ubase_query_fst_fvt_rqmt(udev, &resp, ue_id); + if (ret) + goto out; + ubase_dbg_fill_tbl_content(s, &resp); + } + +out: + mutex_unlock(&udev->ue_list_lock); + return ret; +} + static void ubase_dbg_fill_tm_queue_seq(struct seq_file *s, struct ubase_query_tm_queue_cmd *resp) { diff --git a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h index 48f45a2dbec0..e44b4cacd21e 100644 --- a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h +++ b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h @@ -16,6 +16,7 @@ int ubase_dbg_dump_ets_tcg_info(struct seq_file *s, void *data); int ubase_dbg_dump_ets_port_info(struct seq_file *s, void *data); int ubase_dbg_dump_rack_vl_bitmap(struct seq_file *s, void *data); int ubase_dbg_dump_adev_qos_info(struct seq_file *s, void *data); +int ubase_dbg_dump_fsv_fvt_rqmt(struct seq_file *s, void *data); int ubase_dbg_dump_tm_queue_info(struct seq_file *s, void *data); int ubase_dbg_dump_tm_qset_info(struct seq_file *s, void *data); int ubase_dbg_dump_tm_pri_info(struct seq_file *s, void *data); -- Gitee From 23a1a3e3dfbe905e7874803144b0e4be83dd95a1 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Wed, 29 Oct 2025 16:50:32 +0800 Subject: [PATCH 003/243] ub: ubase: add function that query aeq/ceq/tp/tpg context commit f817c5c6db98ed97a926202808d90fa395048dc2 openEuler This patch adds hardware context debugging functionality to the ubase driver, providing visual query capabilities for key hardware contexts such as AEQ, CEQ, TPG, and TP through the debugfs interface. 1. Main Features: - Supports querying AEQ/CEQ event queue contexts (status, size, interrupt configuration, etc.) - Provides display of TPG (Transmission Port Group) and TP (Transmission Port) context information - Implements hardware register context dump functionality with desensitization of key fields - Adds a new context debugging directory, integrating various context query interfaces 2. 
Technical Features: - Uses a mailbox mechanism to query hardware context registers - Masks sensitive addresses and token information - Supports multi-context group and port bitmap filtering - Thread-safe lock mechanism protects data structure access This implementation provides developers with an in-depth hardware state diagnostic tool, facilitating the debugging of complex hardware interaction issues. Signed-off-by: Fengyan Mu Signed-off-by: Xiongchuan Zhou Signed-off-by: huwentao --- drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c | 369 +++++++++++++++++++ drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h | 20 + drivers/ub/ubase/debugfs/ubase_debugfs.c | 62 ++++ drivers/ub/ubase/ubase_tp.h | 26 ++ 4 files changed, 477 insertions(+) create mode 100644 drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c index 1004033c1581..33221a90edd9 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c @@ -7,6 +7,237 @@ #include #include "ubase_debugfs.h" +#include "ubase_hw.h" +#include "ubase_mailbox.h" +#include "ubase_tp.h" +#include "ubase_ctx_debugfs.h" + +#define UBASE_DEFAULT_CTXGN 0 + +static void ubase_dump_eq_ctx(struct seq_file *s, struct ubase_eq *eq) +{ + seq_printf(s, "%-5u", eq->eqn); + seq_printf(s, "%-13u", eq->entries_num); + seq_printf(s, "%-7u", eq->state); + seq_printf(s, "%-8u", eq->arm_st); + seq_printf(s, "%-10u", eq->eqe_size); + seq_printf(s, "%-11u", eq->eq_period); + seq_printf(s, "%-14u", eq->coalesce_cnt); + seq_printf(s, "%-6u", eq->irqn); + seq_printf(s, "%-10u", eq->eqc_irqn); + seq_printf(s, "%-12u", eq->cons_index); + seq_puts(s, "\n"); +} + +static void ubase_eq_ctx_titles_print(struct seq_file *s) +{ + seq_puts(s, "EQN ENTRIES_NUM STATE ARM_ST EQE_SIZE EQ_PERIOD "); + seq_puts(s, "COALESCE_CNT IRQN EQC_IRQN CONS_INDEX\n"); +} + +static void ubase_dump_aeq_ctx(struct seq_file *s, struct ubase_dev *udev, u32 idx) +{ + struct ubase_aeq *aeq = &udev->irq_table.aeq; + struct ubase_eq *eq = &aeq->eq; + + ubase_dump_eq_ctx(s, eq); +} + +static void ubase_dump_ceq_ctx(struct seq_file *s, struct ubase_dev *udev, u32 idx) +{ + struct ubase_ceq *ceq = &udev->irq_table.ceqs.ceq[idx]; + struct ubase_eq *eq = &ceq->eq; + + ubase_dump_eq_ctx(s, eq); +} + +static void ubase_tpg_ctx_titles_print(struct seq_file *s) +{ + seq_puts(s, "CHANNEL_ID TPGN TP_SHIFT VALID_TP "); + seq_puts(s, "START_TPN TPG_STATE TP_CNT\n"); +} + +static void ubase_dump_tpg_ctx(struct seq_file *s, struct ubase_dev *udev, u32 idx) +{ + struct ubase_tpg *tpg = &udev->tp_ctx.tpg[idx]; + + seq_printf(s, "%-12u", idx); + seq_printf(s, "%-9u", tpg->mb_tpgn); + seq_printf(s, "%-10u", tpg->tp_shift); + seq_printf(s, "%-10lu", tpg->valid_tp); + seq_printf(s, "%-11u", tpg->start_tpn); + seq_printf(s, "%-11u", tpg->tpg_state); + seq_printf(s, "%-8u", tpg->tp_cnt); + seq_puts(s, "\n"); +} + +enum ubase_dbg_ctx_type { + UBASE_DBG_AEQ_CTX = 0, + UBASE_DBG_CEQ_CTX, + UBASE_DBG_TPG_CTX, + UBASE_DBG_TP_CTX, +}; + +static u32 ubase_get_ctx_num(struct ubase_dev *udev, + enum ubase_dbg_ctx_type ctx_type, u32 ctxgn) +{ + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + u32 ctx_num = 0; + + switch (ctx_type) { + case UBASE_DBG_AEQ_CTX: + ctx_num = udev->caps.dev_caps.num_aeq_vectors; + break; + case UBASE_DBG_CEQ_CTX: + ctx_num = udev->irq_table.ceqs.num; + break; + case UBASE_DBG_TPG_CTX: + ctx_num = unic_caps->tpg.max_cnt; + break; + case UBASE_DBG_TP_CTX: + 
spin_lock(&udev->tp_ctx.tpg_lock); + if (udev->tp_ctx.tpg) + ctx_num = udev->tp_ctx.tpg[ctxgn].tp_cnt; + spin_unlock(&udev->tp_ctx.tpg_lock); + break; + default: + ubase_err(udev, "failed to get ctx num, ctx_type = %u.\n", + ctx_type); + break; + } + + return ctx_num; +} + +static int ubase_dbg_dump_context(struct seq_file *s, + enum ubase_dbg_ctx_type ctx_type) +{ + struct ubase_dbg_ctx { + void (*print_ctx_titles)(struct seq_file *s); + void (*get_ctx)(struct seq_file *s, struct ubase_dev *udev, u32 idx); + } dbg_ctx[] = { + {ubase_eq_ctx_titles_print, ubase_dump_aeq_ctx}, + {ubase_eq_ctx_titles_print, ubase_dump_ceq_ctx}, + {ubase_tpg_ctx_titles_print, ubase_dump_tpg_ctx}, + }; + struct ubase_dev *udev = dev_get_drvdata(s->private); + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + unsigned long port_bitmap; + u32 tp_pos, i; + + dbg_ctx[ctx_type].print_ctx_titles(s); + + port_bitmap = unic_caps->utp_port_bitmap; + for (i = 0; i < ubase_get_ctx_num(udev, ctx_type, UBASE_DEFAULT_CTXGN); i++) { + if (ctx_type != UBASE_DBG_TP_CTX) { + dbg_ctx[ctx_type].get_ctx(s, udev, i); + continue; + } + + tp_pos = (i % unic_caps->tpg.depth) * UBASE_TP_PORT_BITMAP_STEP; + if (test_bit(tp_pos, &port_bitmap)) + dbg_ctx[ctx_type].get_ctx(s, udev, i); + } + + return 0; +} + +struct ubase_ctx_info { + u32 start_idx; + u32 ctx_size; + u8 op; + const char *ctx_name; +}; + +static inline u32 ubase_get_ctx_group_num(struct ubase_dev *udev, + enum ubase_dbg_ctx_type ctx_type) +{ + if (ctx_type == UBASE_DBG_TP_CTX) + return udev->caps.unic_caps.tpg.max_cnt; + + return 1; +} + +static void ubase_get_ctx_info(struct ubase_dev *udev, + enum ubase_dbg_ctx_type ctx_type, + struct ubase_ctx_info *ctx_info, u32 ctxgn) +{ + switch (ctx_type) { + case UBASE_DBG_AEQ_CTX: + ctx_info->start_idx = 0; + ctx_info->ctx_size = UBASE_AEQ_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_AEQ_CONTEXT; + ctx_info->ctx_name = "aeq"; + break; + case UBASE_DBG_CEQ_CTX: + ctx_info->start_idx = 0; + ctx_info->ctx_size = UBASE_CEQ_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_CEQ_CONTEXT; + ctx_info->ctx_name = "ceq"; + break; + case UBASE_DBG_TPG_CTX: + ctx_info->start_idx = udev->caps.unic_caps.tpg.start_idx; + ctx_info->ctx_size = udev->ctx_buf.tpg.entry_size; + ctx_info->op = UBASE_MB_QUERY_TPG_CONTEXT; + ctx_info->ctx_name = "tpg"; + break; + case UBASE_DBG_TP_CTX: + spin_lock(&udev->tp_ctx.tpg_lock); + ctx_info->start_idx = udev->tp_ctx.tpg ? 
+ udev->tp_ctx.tpg[ctxgn].start_tpn : 0; + spin_unlock(&udev->tp_ctx.tpg_lock); + + ctx_info->ctx_size = udev->ctx_buf.tp.entry_size; + ctx_info->op = UBASE_MB_QUERY_TP_CONTEXT; + ctx_info->ctx_name = "tp"; + break; + default: + ubase_err(udev, "failed to get ctx info, ctx_type = %u.\n", + ctx_type); + break; + } +} + +static void ubase_mask_eq_ctx_key_words(void *buf) +{ + struct ubase_eq_ctx *eq = (struct ubase_eq_ctx *)buf; + + eq->eqe_base_addr_l = 0; + eq->eqe_base_addr_h = 0; + eq->eqe_token_id = 0; + eq->eqe_token_value = 0; +} + +static void ubase_mask_tp_ctx_key_words(void *buf) +{ + struct ubase_tp_ctx *tp = (struct ubase_tp_ctx *)buf; + + tp->wqe_ba_l = 0; + tp->wqe_ba_h = 0; + tp->tp_wqe_token_id = 0; + tp->reorder_q_addr_l = 0; + tp->reorder_q_addr_h = 0; + tp->scc_token = 0; + tp->scc_token_1 = 0; +} + +static void ubase_mask_ctx_key_words(void *buf, + enum ubase_dbg_ctx_type ctx_type) +{ + switch (ctx_type) { + case UBASE_DBG_AEQ_CTX: + case UBASE_DBG_CEQ_CTX: + ubase_mask_eq_ctx_key_words(buf); + break; + case UBASE_DBG_TPG_CTX: + break; + case UBASE_DBG_TP_CTX: + ubase_mask_tp_ctx_key_words(buf); + break; + default: + break; + } +} static void __ubase_print_context_hw(struct seq_file *s, void *ctx_addr, u32 ctx_len) @@ -29,3 +260,141 @@ void ubase_print_context_hw(struct seq_file *s, void *ctx_addr, u32 ctx_len) __ubase_print_context_hw(s, ctx_addr, ctx_len); } EXPORT_SYMBOL(ubase_print_context_hw); + +static int ubase_dbg_dump_ctx_hw(struct seq_file *s, void *data, + enum ubase_dbg_ctx_type ctx_type) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + struct ubase_ctx_info ctx_info = {0}; + struct ubase_cmd_mailbox *mailbox; + u32 max_ctxgn, ctxn, ctxgn; + struct ubase_mbx_attr attr; + int ret = 0; + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits) || + test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + return -EBUSY; + + mailbox = __ubase_alloc_cmd_mailbox(udev); + if (IS_ERR_OR_NULL(mailbox)) { + ubase_err(udev, + "failed to alloc mailbox for dump hw context.\n"); + return -ENOMEM; + } + + max_ctxgn = ubase_get_ctx_group_num(udev, ctx_type); + for (ctxgn = 0; ctxgn < max_ctxgn; ctxgn++) { + ubase_get_ctx_info(udev, ctx_type, &ctx_info, ctxgn); + + for (ctxn = 0; ctxn < ubase_get_ctx_num(udev, ctx_type, ctxgn); ctxn++) { + ubase_fill_mbx_attr(&attr, ctxn + ctx_info.start_idx, + ctx_info.op, 0); + ret = __ubase_hw_upgrade_ctx_ex(udev, &attr, mailbox); + if (ret) { + ubase_err(udev, + "failed to post query %s ctx mbx, ret = %d.\n", + ctx_info.ctx_name, ret); + goto upgrade_ctx_err; + } + + seq_printf(s, "offset\t%s%u\n", ctx_info.ctx_name, + ctxn + ctx_info.start_idx); + ubase_mask_ctx_key_words(mailbox->buf, ctx_type); + __ubase_print_context_hw(s, mailbox->buf, ctx_info.ctx_size); + seq_puts(s, "\n"); + } + } + +upgrade_ctx_err: + __ubase_free_cmd_mailbox(udev, mailbox); + + return ret; +} + +int ubase_dbg_dump_aeq_context(struct seq_file *s, void *data) +{ + return ubase_dbg_dump_context(s, UBASE_DBG_AEQ_CTX); +} + +int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + int ret; + + if (!mutex_trylock(&udev->irq_table.ceq_lock)) + return -EBUSY; + + if (!udev->irq_table.ceqs.ceq) { + mutex_unlock(&udev->irq_table.ceq_lock); + return -EBUSY; + } + + ret = ubase_dbg_dump_context(s, UBASE_DBG_CEQ_CTX); + mutex_unlock(&udev->irq_table.ceq_lock); + + return ret; +} + +int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = 
dev_get_drvdata(s->private); + int ret; + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) + return -EBUSY; + + if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) + return -EOPNOTSUPP; + + if (!spin_trylock(&udev->tp_ctx.tpg_lock)) + return -EBUSY; + + if (!udev->tp_ctx.tpg) { + spin_unlock(&udev->tp_ctx.tpg_lock); + return -EBUSY; + } + + ret = ubase_dbg_dump_context(s, UBASE_DBG_TPG_CTX); + spin_unlock(&udev->tp_ctx.tpg_lock); + + return ret; +} + +int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) + return -EBUSY; + + if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) + return -EOPNOTSUPP; + + return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_TPG_CTX); +} + +int ubase_dbg_dump_tp_ctx_hw(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) + return -EBUSY; + + if (!ubase_get_ctx_num(udev, UBASE_DBG_TP_CTX, UBASE_DEFAULT_CTXGN)) + return -EOPNOTSUPP; + + if (!ubase_get_ctx_group_num(udev, UBASE_DBG_TP_CTX)) + return -EOPNOTSUPP; + + return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_TP_CTX); +} + +int ubase_dbg_dump_aeq_ctx_hw(struct seq_file *s, void *data) +{ + return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_AEQ_CTX); +} + +int ubase_dbg_dump_ceq_ctx_hw(struct seq_file *s, void *data) +{ + return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_CEQ_CTX); +} diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h new file mode 100644 index 000000000000..532665141fc8 --- /dev/null +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef __UBASE_CTX_DEBUGFS_H__ +#define __UBASE_CTX_DEBUGFS_H__ + +struct device; + +int ubase_dbg_dump_aeq_context(struct seq_file *s, void *data); +int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data); +int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data); +int ubase_dbg_dump_tp_ctx_hw(struct seq_file *s, void *data); +int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data); +int ubase_dbg_dump_aeq_ctx_hw(struct seq_file *s, void *data); +int ubase_dbg_dump_ceq_ctx_hw(struct seq_file *s, void *data); + +#endif diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 56ce970411bd..ab3e1a88ced8 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -8,6 +8,7 @@ #include #include +#include "ubase_ctx_debugfs.h" #include "ubase_dev.h" #include "ubase_hw.h" #include "ubase_qos_debugfs.h" @@ -216,6 +217,11 @@ int ubase_dbg_seq_file_init(struct device *dev, EXPORT_SYMBOL(ubase_dbg_seq_file_init); static struct ubase_dbg_dentry_info ubase_dbg_dentry[] = { + { + .name = "context", + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + }, { .name = "qos", .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, @@ -242,6 +248,22 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_rst_info, }, + { + .name = "aeq_context", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_aeq_context, + }, + { + .name = "ceq_context", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_ceq_context, + }, { .name = "activate_record", .dentry_index = UBASE_DBG_DENTRY_ROOT, @@ -250,6 +272,46 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_activate_record, }, + { + .name = "tpg_context", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_tpg_ctx, + }, + { + .name = "tp_context_hw", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_tp_ctx_hw, + }, + { + .name = "tpg_context_hw", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_tpg_ctx_hw, + }, + { + .name = "aeq_context_hw", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_aeq_ctx_hw, + }, + { + .name = "ceq_context_hw", + .dentry_index = UBASE_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_ceq_ctx_hw, + }, { .name = "sl_vl_map", .dentry_index = 
UBASE_DBG_DENTRY_QOS, diff --git a/drivers/ub/ubase/ubase_tp.h b/drivers/ub/ubase/ubase_tp.h index 965535a18f1a..0506e77c98f0 100644 --- a/drivers/ub/ubase/ubase_tp.h +++ b/drivers/ub/ubase/ubase_tp.h @@ -11,8 +11,34 @@ #include "ubase_dev.h" +#define UBASE_TP_PORT_BITMAP_STEP 2 + #define UBASE_WAIT_TP_FLUSH_TOTAL_STEPS 12 +struct ubase_tp_ctx { + u32 rsvd0; + u32 wqe_ba_l; + u32 wqe_ba_h : 20; + u32 rsvd1 : 12; + u32 rsvd2[5]; + u32 rsvd3_0 : 4; + u32 tp_wqe_token_id : 20; + u32 rsvd3_1 : 8; + u32 rsvd4[5]; + u32 rsvd5 : 4; + u32 reorder_q_addr_l : 28; + u32 reorder_q_addr_h : 24; + u32 rsvd6 : 8; + u32 rsvd7[5]; + u32 scc_token : 19; + u32 rsvd8 : 13; + u32 rsvd9[4]; + u32 rsvd10_0 : 24; + u32 scc_token_1 : 4; + u32 rsvd10_1 : 4; + u32 rsvd11[37]; +}; + struct ubase_tpg { u32 mb_tpgn; u8 tpg_state; -- Gitee From a82a88a5180ab2f30333b11a636081c06112c05f Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Wed, 29 Oct 2025 16:53:51 +0800 Subject: [PATCH 004/243] ub: ubase: Supports the query of UBCL config. commit 8a2d8b74eb75df90052e3baad58e645aa0706b7f openEuler UBCL config file is a binary file. The file content determines the configuration of the UB system and deeply affects the running of it. If the file can be read conveniently and effectively, the maintenance capability of the system will be greatly improved. This patch allows the UBASE driver to read the content of UBCL config through debugfs. Users can use debugfs to read the file. Signed-off-by: Yixi Shen Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou Signed-off-by: huwentao --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 88 ++++++++++++++++++++++++ drivers/ub/ubase/ubase_cmd.h | 10 +++ include/ub/ubase/ubase_comm_cmd.h | 1 + 3 files changed, 99 insertions(+) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index ab3e1a88ced8..76175d604366 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -8,6 +8,7 @@ #include #include +#include "ubase_cmd.h" #include "ubase_ctx_debugfs.h" #include "ubase_dev.h" #include "ubase_hw.h" @@ -31,6 +32,85 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data) return 0; } +static int ubase_query_ubcl_config(struct ubase_dev *udev, u16 offset, + u16 is_query, u16 size, + struct ubase_ubcl_config_cmd *resp) +{ + struct ubase_ubcl_config_cmd req; + struct ubase_cmd_buf in, out; + int ret; + + memset(resp, 0, sizeof(*resp)); + memset(&req, 0, sizeof(req)); + req.offset = cpu_to_le16(offset); + req.size = cpu_to_le16(size); + req.is_query_size = cpu_to_le16(is_query); + + __ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_UBCL_CONFIG, true, + sizeof(req), &req); + __ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_UBCL_CONFIG, true, + sizeof(*resp), resp); + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret && ret != -EPERM) + ubase_err(udev, "failed to query UBCL_config, ret = %d.\n", ret); + + if (ret == -EPERM) + return -EOPNOTSUPP; + + return ret; +} + +static void ubase_dbg_fill_ubcl_content(struct ubase_ubcl_config_cmd *resp, + u32 *addr, struct seq_file *s) +{ + int i, j; + + for (i = 0; i < UBASE_UBCL_CFG_DATA_NUM; i += UBASE_UBCL_CFG_DATA_ALIGN) { + seq_printf(s, "%08X: ", (*addr * UBASE_UBCL_CFG_DATA_ALIGN)); + for (j = 0; j < UBASE_UBCL_CFG_DATA_ALIGN; j++) + seq_printf(s, "%08X ", resp->data[i + j]); + seq_puts(s, "\n"); + + *addr += UBASE_UBCL_CFG_DATA_ALIGN; + if ((i * sizeof(u32)) >= resp->size) + break; + } +} + +static int ubase_dbg_dump_ubcl_config(struct seq_file *s, void 
*data)
+{
+	struct ubase_dev *udev = dev_get_drvdata(s->private);
+	struct ubase_ubcl_config_cmd resp = {0};
+	u16 read_size = sizeof(resp.data);
+	u16 offset = 0;
+	u16 total_size;
+	u32 addr = 0;
+	int ret;
+
+	if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits) ||
+	    test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits))
+		return -EBUSY;
+
+	ret = ubase_query_ubcl_config(udev, offset, 1, 0, &resp);
+	if (ret)
+		return ret;
+	total_size = le16_to_cpu(resp.size);
+
+	seq_puts(s, "UBCL_config:\n");
+	seq_printf(s, "total_size: %u\n", total_size);
+	while (offset < total_size) {
+		read_size = min(read_size, total_size - offset);
+		ret = ubase_query_ubcl_config(udev, offset, 0, read_size, &resp);
+		if (ret)
+			return ret;
+		offset += le16_to_cpu(resp.size);
+
+		ubase_dbg_fill_ubcl_content(&resp, &addr, s);
+	}
+
+	return 0;
+}
+
 static int ubase_dbg_dump_activate_record(struct seq_file *s, void *data)
 {
 	struct ubase_dev *udev = dev_get_drvdata(s->private);
@@ -264,6 +344,14 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = {
 		.init = __ubase_dbg_seq_file_init,
 		.read_func = ubase_dbg_dump_ceq_context,
 	},
+	{
+		.name = "UBCL_config",
+		.dentry_index = UBASE_DBG_DENTRY_ROOT,
+		.property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL_ETH,
+		.support = __ubase_dbg_dentry_support,
+		.init = __ubase_dbg_seq_file_init,
+		.read_func = ubase_dbg_dump_ubcl_config,
+	},
 	{
 		.name = "activate_record",
 		.dentry_index = UBASE_DBG_DENTRY_ROOT,
diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h
index 0187597493b7..63b67179f2fb 100644
--- a/drivers/ub/ubase/ubase_cmd.h
+++ b/drivers/ub/ubase/ubase_cmd.h
@@ -49,6 +49,16 @@ struct ubase_query_version_cmd {
 	__le32 caps[UBASE_CAP_LEN];
 };
 
+#define UBASE_UBCL_CFG_DATA_ALIGN 4
+#define UBASE_UBCL_CFG_DATA_NUM 60
+struct ubase_ubcl_config_cmd {
+	__le16 is_query_size;
+	__le16 offset;
+	__le16 size;
+	__le16 rsv;
+	__le32 data[UBASE_UBCL_CFG_DATA_NUM];
+};
+
 enum ubase_ue2ue_sub_cmd {
 	UBASE_UE2UE_CTRLQ_MSG = 3,
 };
diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h
index 7e78860d7778..311a309d10bd 100644
--- a/include/ub/ubase/ubase_comm_cmd.h
+++ b/include/ub/ubase/ubase_comm_cmd.h
@@ -37,6 +37,7 @@ enum ubase_opcode_type {
 	UBASE_OPC_QUERY_NET_GUID = 0x0035,
 	UBASE_OPC_STATS_MAC_ALL = 0x0038,
 	UBASE_OPC_QUERY_BUS_EID = 0x0047,
+	UBASE_OPC_QUERY_UBCL_CONFIG = 0x0050,
 
 	/* NL commands */
 	UBASE_OPC_CFG_VL_MAP = 0x2206,
-- 
Gitee

From 1976d974ef39c2ec9719245c91142fac826a0f9b Mon Sep 17 00:00:00 2001
From: Xiongchuan Zhou
Date: Wed, 29 Oct 2025 16:58:25 +0800
Subject: [PATCH 005/243] ub: ubase: Support querying dev caps.

commit 7bb0989aa20b20df97785020ba318a0fc5915d37 openEuler

The UB driver runs one set of code in different business contexts and
uses dev caps to distinguish between those contexts, so a convenient
way to inspect dev caps is important for debugging. This patch allows
the UBASE driver to print the dev caps to users through debugfs.
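
A sketch of the expected usage follows. The debugfs path here is an
assumption (it depends on where debugfs is mounted and on the device
directory name), and all values are illustrative:

  $ cat /sys/kernel/debug/<ubase-dev>/caps_info
  CAP_BITS:
          support_ub_link: 1
          support_ctrlq: 1
          ...
  COMMON_CAPS:
          num_ceq_vectors: 2
          num_aeq_vectors: 1
          ...
  UNIC_CAPS:
          jfs_max_cnt: 1024
          ...

On PMU devices the dump stops after COMMON_CAPS; otherwise the
UNIC_CAPS section is followed by CDMA_CAPS or UDMA_CAPS, depending on
which capability the device reports.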
Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou Signed-off-by: huwentao --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 157 +++++++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 76175d604366..ad97d7a58188 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -32,6 +32,154 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data) return 0; } +static void ubase_dbg_dump_caps_bits(struct seq_file *s, struct ubase_dev *udev) +{ +#define CAP_FMT(name) "\tsupport_" #name ": %d\n" +#define PTRINT_CAP(name, func) seq_printf(s, CAP_FMT(name), func(udev)) + + PTRINT_CAP(ub_link, ubase_dev_ubl_supported); + PTRINT_CAP(ta_extdb_buffer_config, ubase_dev_ta_extdb_buf_supported); + PTRINT_CAP(ta_timer_buffer_config, ubase_dev_ta_timer_buf_supported); + PTRINT_CAP(err_handle, ubase_dev_err_handle_supported); + PTRINT_CAP(ctrlq, ubase_dev_ctrlq_supported); + PTRINT_CAP(eth_mac, ubase_dev_eth_mac_supported); + PTRINT_CAP(mac_stats, ubase_dev_mac_stats_supported); + PTRINT_CAP(prealloc, __ubase_dev_prealloc_supported); + PTRINT_CAP(udma, ubase_dev_udma_supported); + PTRINT_CAP(unic, ubase_dev_unic_supported); + PTRINT_CAP(uvb, ubase_dev_uvb_supported); + PTRINT_CAP(ip_over_urma, ubase_ip_over_urma_supported); + if (ubase_ip_over_urma_supported(udev)) + PTRINT_CAP(ip_over_urma_utp, ubase_ip_over_urma_utp_supported); + PTRINT_CAP(activate_proxy, ubase_activate_proxy_supported); + PTRINT_CAP(utp, ubase_utp_supported); +} + +static void ubase_dbg_dump_caps_info(struct seq_file *s, struct ubase_dev *udev) +{ + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + struct ubase_dbg_common_caps_info { + const char *format; + u64 caps_info; + } ubase_common_caps_info[] = { + {"\tnum_ceq_vectors: %u\n", dev_caps->num_ceq_vectors}, + {"\tnum_aeq_vectors: %u\n", dev_caps->num_aeq_vectors}, + {"\tnum_misc_vectors: %u\n", dev_caps->num_misc_vectors}, + {"\taeqe_size: %u\n", dev_caps->aeqe_size}, + {"\tceqe_size: %u\n", dev_caps->ceqe_size}, + {"\taeqe_depth: %u\n", dev_caps->aeqe_depth}, + {"\tceqe_depth: %u\n", dev_caps->ceqe_depth}, + {"\ttotal_ue_num: %u\n", dev_caps->total_ue_num}, + {"\tta_extdb_buf_size: %llu\n", udev->ta_ctx.extdb_buf.size}, + {"\tta_timer_buf_size: %llu\n", udev->ta_ctx.timer_buf.size}, + {"\tpublic_jetty_cnt: %u\n", dev_caps->public_jetty_cnt}, + {"\tvl_num: %hhu\n", dev_caps->vl_num}, + {"\trsvd_jetty_cnt: %hu\n", dev_caps->rsvd_jetty_cnt}, + {"\tpacket_pattern_mode: %u\n", dev_caps->packet_pattern_mode}, + {"\tack_queue_num: %u\n", dev_caps->ack_queue_num}, + {"\toor_en: %u\n", dev_caps->oor_en}, + {"\treorder_queue_en: %u\n", dev_caps->reorder_queue_en}, + {"\ton_flight_size: %u\n", dev_caps->on_flight_size}, + {"\treorder_cap: %u\n", dev_caps->reorder_cap}, + {"\treorder_queue_shift: %u\n", dev_caps->reorder_queue_shift}, + {"\tat_times: %u\n", dev_caps->at_times}, + {"\tue_num: %u\n", dev_caps->ue_num}, + {"\tmac_stats_num: %u\n", dev_caps->mac_stats_num}, + {"\tlogic_port_bitmap: 0x%x\n", dev_caps->logic_port_bitmap}, + {"\tub_port_logic_id: %u\n", dev_caps->ub_port_logic_id}, + {"\tio_port_logic_id: %u\n", dev_caps->io_port_logic_id}, + {"\tio_port_id: %u\n", dev_caps->io_port_id}, + {"\tnl_port_id: %u\n", dev_caps->nl_port_id}, + {"\tchip_id: %u\n", dev_caps->chip_id}, + {"\tdie_id: %u\n", dev_caps->die_id}, + {"\tue_id: %u\n", dev_caps->ue_id}, + {"\tnl_id: %u\n", 
dev_caps->nl_id}, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ubase_common_caps_info); i++) + seq_printf(s, ubase_common_caps_info[i].format, + ubase_common_caps_info[i].caps_info); +} + +static void ubase_dbg_dump_common_caps(struct seq_file *s, struct ubase_dev *udev) +{ + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + + ubase_dbg_dump_caps_info(s, udev); + + seq_puts(s, "\treq_vl:"); + ubase_dbg_dump_arr_info(s, dev_caps->req_vl, dev_caps->vl_num); + + seq_puts(s, "\tresp_vl:"); + ubase_dbg_dump_arr_info(s, dev_caps->resp_vl, dev_caps->vl_num); +} + +static void ubase_dbg_dump_adev_caps(struct seq_file *s, + struct ubase_adev_caps *caps) +{ + struct ubase_dbg_adev_caps_info { + const char *format; + u32 caps_info; + } ubase_adev_caps_info[] = { + {"\tjfs_max_cnt: %u\n", caps->jfs.max_cnt}, + {"\tjfs_reserved_cnt: %u\n", caps->jfs.reserved_cnt}, + {"\tjfs_depth: %u\n", caps->jfs.depth}, + {"\tjfr_max_cnt: %u\n", caps->jfr.max_cnt}, + {"\tjfr_reserved_cnt: %u\n", caps->jfr.reserved_cnt}, + {"\tjfr_depth: %u\n", caps->jfr.depth}, + {"\tjfc_max_cnt: %u\n", caps->jfc.max_cnt}, + {"\tjfc_reserved_cnt: %u\n", caps->jfc.reserved_cnt}, + {"\tjfc_depth: %u\n", caps->jfc.depth}, + {"\ttp_max_cnt: %u\n", caps->tp.max_cnt}, + {"\ttp_reserved_cnt: %u\n", caps->tp.reserved_cnt}, + {"\ttp_depth: %u\n", caps->tp.depth}, + {"\ttpg_max_cnt: %u\n", caps->tpg.max_cnt}, + {"\ttpg_reserved_cnt: %u\n", caps->tpg.reserved_cnt}, + {"\ttpg_depth: %u\n", caps->tpg.depth}, + {"\tcqe_size: %hu\n", caps->cqe_size}, + {"\tutp_port_bitmap: 0x%x\n", caps->utp_port_bitmap}, + {"\tjtg_max_cnt: %u\n", caps->jtg_max_cnt}, + {"\trc_max_cnt: %u\n", caps->rc_max_cnt}, + {"\trc_depth: %u\n", caps->rc_que_depth}, + {"\tccc_max_cnt: %u\n", caps->ccc_max_cnt}, + {"\tdest_addr_max_cnt: %u\n", caps->dest_addr_max_cnt}, + {"\tseid_upi_max_cnt: %u\n", caps->seid_upi_max_cnt}, + {"\ttpm_max_cnt: %u\n", caps->tpm_max_cnt}, + {"\tprealloc_mem_dma_len: %llu\n", caps->pmem.dma_len}, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ubase_adev_caps_info); i++) + seq_printf(s, ubase_adev_caps_info[i].format, + ubase_adev_caps_info[i].caps_info); +} + +static int ubase_dbg_dump_dev_caps(struct seq_file *s, void *data) +{ + struct ubase_dev *udev = dev_get_drvdata(s->private); + struct ubase_dev_caps *udev_caps = &udev->caps; + + seq_puts(s, "CAP_BITS:\n"); + ubase_dbg_dump_caps_bits(s, udev); + seq_puts(s, "\nCOMMON_CAPS:\n"); + ubase_dbg_dump_common_caps(s, udev); + + if (ubase_dev_pmu_supported(udev)) + return 0; + + seq_puts(s, "\nUNIC_CAPS:\n"); + ubase_dbg_dump_adev_caps(s, &udev_caps->unic_caps); + + if (ubase_dev_cdma_supported(udev)) + seq_puts(s, "\nCDMA_CAPS:\n"); + else + seq_puts(s, "\nUDMA_CAPS:\n"); + ubase_dbg_dump_adev_caps(s, &udev_caps->udma_caps); + + return 0; +} + static int ubase_query_ubcl_config(struct ubase_dev *udev, u16 offset, u16 is_query, u16 size, struct ubase_ubcl_config_cmd *resp) @@ -344,6 +492,15 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_ceq_context, }, + { + .name = "caps_info", + .dentry_index = UBASE_DBG_DENTRY_ROOT, + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_PMU | + UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + .init = __ubase_dbg_seq_file_init, + .read_func = ubase_dbg_dump_dev_caps, + }, { .name = "UBCL_config", .dentry_index = UBASE_DBG_DENTRY_ROOT, -- Gitee From 218499157ff747c9e3b2e2bb2b832ae4b8159bbf Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Thu, 30 Oct 2025 21:30:51 +0800 Subject: 
[PATCH 006/243] ub: cdma: support the probe and remove processes commit 24c71ca3842ca57c683837dd4fc18ae0637dde9d openEuler This patch implements the probe and remove processes for auxiliary bus driver devices in the CDMA driver. The implementation includes device creation and initialization, adding the device to the device list, querying the device's eid and upi, and the open, close, and ioctl processes for the device file. Signed-off-by: Zhipeng Lu Signed-off-by: Hongwu Wang Signed-off-by: zhaolichang <943677312@qq.com> --- arch/arm64/configs/tencent.config | 3 + drivers/ub/Kconfig | 1 + drivers/ub/Makefile | 1 + drivers/ub/cdma/Kconfig | 10 ++ drivers/ub/cdma/Makefile | 5 + drivers/ub/cdma/cdma.h | 113 ++++++++++++++++++ drivers/ub/cdma/cdma_chardev.c | 188 +++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_chardev.h | 19 +++ drivers/ub/cdma/cdma_cmd.c | 190 ++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_cmd.h | 79 +++++++++++++ drivers/ub/cdma/cdma_dev.c | 186 +++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_dev.h | 19 +++ drivers/ub/cdma/cdma_ioctl.c | 31 +++++ drivers/ub/cdma/cdma_ioctl.h | 12 ++ drivers/ub/cdma/cdma_main.c | 146 +++++++++++++++++++++++ drivers/ub/cdma/cdma_tid.c | 89 ++++++++++++++ drivers/ub/cdma/cdma_tid.h | 16 +++ drivers/ub/cdma/cdma_types.h | 19 +++ include/uapi/ub/cdma/cdma_abi.h | 61 ++++++++++ include/ub/cdma/cdma_api.h | 16 +++ 20 files changed, 1204 insertions(+) create mode 100644 drivers/ub/cdma/Kconfig create mode 100644 drivers/ub/cdma/Makefile create mode 100644 drivers/ub/cdma/cdma.h create mode 100644 drivers/ub/cdma/cdma_chardev.c create mode 100644 drivers/ub/cdma/cdma_chardev.h create mode 100644 drivers/ub/cdma/cdma_cmd.c create mode 100644 drivers/ub/cdma/cdma_cmd.h create mode 100644 drivers/ub/cdma/cdma_dev.c create mode 100644 drivers/ub/cdma/cdma_dev.h create mode 100644 drivers/ub/cdma/cdma_ioctl.c create mode 100644 drivers/ub/cdma/cdma_ioctl.h create mode 100644 drivers/ub/cdma/cdma_main.c create mode 100644 drivers/ub/cdma/cdma_tid.c create mode 100644 drivers/ub/cdma/cdma_tid.h create mode 100644 drivers/ub/cdma/cdma_types.h create mode 100644 include/uapi/ub/cdma/cdma_abi.h create mode 100644 include/ub/cdma/cdma_api.h diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index a8f6f34cdee6..b806c4dc225c 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1834,6 +1834,9 @@ CONFIG_UB_UBL=m CONFIG_UB_UNIC=m CONFIG_UB_UNIC_UBL=y CONFIG_UB_UNIC_DCB=y + +# UB CDMA driver +CONFIG_UB_CDMA=m # end of unified bus # UMMU diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig index 0b7cb0ef16cf..6197483bd71e 100644 --- a/drivers/ub/Kconfig +++ b/drivers/ub/Kconfig @@ -16,6 +16,7 @@ if UB source "drivers/ub/ubus/Kconfig" source "drivers/ub/ubfi/Kconfig" source "drivers/ub/ubase/Kconfig" +source "drivers/ub/cdma/Kconfig" source "drivers/ub/obmm/Kconfig" source "drivers/ub/sentry/Kconfig" config UB_URMA diff --git a/drivers/ub/Makefile b/drivers/ub/Makefile index d1dd2267abe0..2a40689dafac 100644 --- a/drivers/ub/Makefile +++ b/drivers/ub/Makefile @@ -4,5 +4,6 @@ obj-y += ubus/ obj-y += ubfi/ obj-$(CONFIG_UB_URMA) += urma/ obj-$(CONFIG_UB_UBASE) += ubase/ +obj-$(CONFIG_UB_CDMA) += cdma/ obj-y += obmm/ obj-$(CONFIG_UB_SENTRY) += sentry/ diff --git a/drivers/ub/cdma/Kconfig b/drivers/ub/cdma/Kconfig new file mode 100644 index 000000000000..b09e13215852 --- /dev/null +++ b/drivers/ub/cdma/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0+ +menuconfig UB_CDMA + default n 
+ tristate "cdma driver" + depends on UB_UBASE && UB_UMMU_CORE + help + This option enables support for CDMA drivers. The CDMA driver facilitates + the creation and destruction of CDMA devices, as well as the creation of + resources within CDMA devices to perform DMA read/write tasks and retrieve + the completion status of executed tasks. diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile new file mode 100644 index 000000000000..7375c6a08738 --- /dev/null +++ b/drivers/ub/cdma/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0+ + +cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o + +obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h new file mode 100644 index 000000000000..af45096026ee --- /dev/null +++ b/drivers/ub/cdma/cdma.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_H__ +#define __CDMA_H__ + +#include +#include +#include +#include + +#include + +#define CDMA_RESET_WAIT_TIME 3000 +#define CDMA_MAX_SL_NUM 16 + +#define CDMA_UPI_MASK 0x7FFF + +struct cdma_res { + u32 max_cnt; + u32 start_idx; + u32 depth; +}; + +struct cdma_oor_caps { + bool oor_en; + bool reorder_queue_en; + u8 reorder_cap; + u8 reorder_queue_shift; + u8 at_times; + u16 on_flight_size; +}; + +struct cdma_tbl { + u32 max_cnt; + u32 size; +}; + +struct cdma_caps { + struct cdma_res jfs; + struct cdma_res jfce; + struct cdma_res jfc; + struct cdma_res queue; + u32 jfs_sge; + u32 jfr_sge; + u32 jfs_rsge; + u32 jfs_inline_sz; + u32 comp_vector_cnt; + u32 eid_num; + u16 ue_cnt; + u8 ue_id; + u32 rc_outstd_cnt; + u32 utp_cnt; + u32 trans_mode; + u32 ta_version; + u32 tp_version; + u32 max_msg_len; + u32 feature; + u32 public_jetty_cnt; + u32 rsvd_jetty_cnt; + u16 cons_ctrl_alg; + u16 rc_queue_num; + u16 rc_queue_depth; + u8 rc_entry_size; + u8 packet_pattern_mode; + u8 ack_queue_num; + u8 port_num; + u8 cqe_size; + u8 cc_priority_cnt; + bool virtualization; + struct cdma_oor_caps oor_caps; + struct cdma_tbl src_addr; + struct cdma_tbl seid; +}; + +struct cdma_chardev { + struct device *dev; + +#define CDMA_NAME_LEN 16 + char name[CDMA_NAME_LEN]; + struct cdev cdev; + int dev_num; + dev_t devno; +}; + +struct cdma_dev { + struct dma_device base; + struct device *dev; + struct auxiliary_device *adev; + struct cdma_chardev chardev; + struct cdma_caps caps; + + u32 eid; + u32 upi; + u32 tid; + u32 ummu_tid; + u32 status; + u8 sl_num; + u8 sl[CDMA_MAX_SL_NUM]; + + void __iomem *k_db_base; + resource_size_t db_base; + struct iommu_sva *ksva; + struct mutex eu_mutex; + struct mutex db_mutex; + struct list_head db_page; + + struct mutex file_mutex; + struct list_head file_list; + struct page *arm_db_page; +}; + +#endif /* _CDMA_H_ */ diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c new file mode 100644 index 000000000000..094a71cbc531 --- /dev/null +++ b/drivers/ub/cdma/cdma_chardev.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define pr_fmt(fmt) "CDMA: " fmt +#define dev_fmt pr_fmt + +#include +#include +#include "cdma_ioctl.h" +#include "cdma_chardev.h" +#include "cdma_types.h" +#include "cdma.h" + +#define CDMA_DEVICE_NAME "cdma/dev" + +struct cdma_num_manager { + struct idr idr; + spinlock_t lock; +}; + +static struct cdma_num_manager cdma_num_mg = { + .idr = IDR_INIT(cdma_num_mg.idr), + .lock = __SPIN_LOCK_UNLOCKED(cdma_num_mg.lock), +}; + +static void cdma_num_free(struct cdma_dev *cdev) +{ + spin_lock(&cdma_num_mg.lock); + idr_remove(&cdma_num_mg.idr, cdev->chardev.dev_num); + spin_unlock(&cdma_num_mg.lock); +} + +static int cdma_num_alloc(struct cdma_dev *cdev) +{ +#define CDMA_START 0 +#define CDMA_END 0xffff + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&cdma_num_mg.lock); + id = idr_alloc(&cdma_num_mg.idr, cdev, CDMA_START, CDMA_END, GFP_NOWAIT); + spin_unlock(&cdma_num_mg.lock); + idr_preload_end(); + + return id; +} + +static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ +#define CDMA_MAX_CMD_SIZE 8192 + struct cdma_file *cfile = (struct cdma_file *)file->private_data; + struct cdma_ioctl_hdr hdr = { 0 }; + int ret; + + if (cmd == CDMA_SYNC) { + ret = copy_from_user(&hdr, (void *)arg, sizeof(hdr)); + if (ret || hdr.args_len > CDMA_MAX_CMD_SIZE) { + pr_err("copy user ret = %d, input parameter len = %u.\n", + ret, hdr.args_len); + return -EINVAL; + } + ret = cdma_cmd_parse(cfile, &hdr); + return ret; + } + + pr_err("invalid ioctl command, command = %u.\n", cmd); + return -ENOIOCTLCMD; +} + +static int cdma_open(struct inode *inode, struct file *file) +{ + struct cdma_chardev *chardev; + struct cdma_file *cfile; + struct cdma_dev *cdev; + + chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); + cdev = container_of(chardev, struct cdma_dev, chardev); + + cfile = kzalloc(sizeof(struct cdma_file), GFP_KERNEL); + if (!cfile) + return -ENOMEM; + + mutex_lock(&cdev->file_mutex); + cfile->cdev = cdev; + kref_init(&cfile->ref); + file->private_data = cfile; + list_add_tail(&cfile->list, &cdev->file_list); + nonseekable_open(inode, file); + mutex_unlock(&cdev->file_mutex); + + return 0; +} + +static int cdma_close(struct inode *inode, struct file *file) +{ + struct cdma_file *cfile = (struct cdma_file *)file->private_data; + struct cdma_dev *cdev; + + cdev = cfile->cdev; + + mutex_lock(&cdev->file_mutex); + list_del(&cfile->list); + mutex_unlock(&cdev->file_mutex); + + kref_put(&cfile->ref, cdma_release_file); + pr_debug("cdma close success.\n"); + + return 0; +} + +static const struct file_operations cdma_ops = { + .owner = THIS_MODULE, + .unlocked_ioctl = cdma_ioctl, + .open = cdma_open, + .release = cdma_close, +}; + +void cdma_destroy_chardev(struct cdma_dev *cdev) +{ + struct cdma_chardev *chardev = &cdev->chardev; + + if (!chardev->dev) + return; + + device_destroy(cdma_cdev_class, chardev->devno); + cdev_del(&chardev->cdev); + unregister_chrdev_region(chardev->devno, CDMA_MAX_DEVICES); + cdma_num_free(cdev); +} + +int cdma_create_chardev(struct cdma_dev *cdev) +{ + struct cdma_chardev *chardev = &cdev->chardev; + int ret; + + chardev->dev_num = cdma_num_alloc(cdev); + if (chardev->dev_num < 0) { + dev_err(cdev->dev, "alloc dev_num failed, ret = %d\n", chardev->dev_num); + return -ENOMEM; + } + + ret = snprintf(chardev->name, sizeof(chardev->name), + "%s.%d", CDMA_DEVICE_NAME, chardev->dev_num); + if (ret < 0) { + dev_err(cdev->dev, "sprintf failed in create cdma chardev\n"); + goto num_free; + } + + ret = alloc_chrdev_region(&chardev->devno, 0, 
CDMA_MAX_DEVICES, + chardev->name); + if (ret) { + dev_err(cdev->dev, "alloc chrdev region failed, ret = %d\n", ret); + goto num_free; + } + + cdev_init(&chardev->cdev, &cdma_ops); + ret = cdev_add(&chardev->cdev, chardev->devno, CDMA_MAX_DEVICES); + if (ret) { + dev_err(cdev->dev, "cdev add failed, ret = %d\n", ret); + goto chrdev_unregister; + } + + chardev->dev = device_create(cdma_cdev_class, NULL, chardev->devno, + NULL, chardev->name); + if (IS_ERR(chardev->dev)) { + ret = PTR_ERR(chardev->dev); + dev_err(cdev->dev, "create device failed, ret = %d\n", ret); + goto cdev_delete; + } + + dev_dbg(cdev->dev, "create chardev: %s succeeded\n", chardev->name); + return 0; + +cdev_delete: + cdev_del(&chardev->cdev); +chrdev_unregister: + unregister_chrdev_region(chardev->devno, CDMA_MAX_DEVICES); +num_free: + cdma_num_free(cdev); + return ret; +} + +void cdma_release_file(struct kref *ref) +{ + struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + + kfree(cfile); +} diff --git a/drivers/ub/cdma/cdma_chardev.h b/drivers/ub/cdma/cdma_chardev.h new file mode 100644 index 000000000000..5366dd77ea54 --- /dev/null +++ b/drivers/ub/cdma/cdma_chardev.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_CHARDEV_H__ +#define __CDMA_CHARDEV_H__ + +#define CDMA_TEST_NAME "cdma_dev" +#define CDMA_MAX_DEVICES 1 +#define CDMA_JETTY_DSQE_OFFSET 0x1000 + +extern struct class *cdma_cdev_class; + +struct cdma_dev; + +void cdma_destroy_chardev(struct cdma_dev *cdev); +int cdma_create_chardev(struct cdma_dev *cdev); +void cdma_release_file(struct kref *ref); + +#endif /* _CDMA_CHARDEV_H_ */ diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c new file mode 100644 index 000000000000..80ab8791b018 --- /dev/null +++ b/drivers/ub/cdma/cdma_cmd.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include + +#include "cdma.h" +#include +#include +#include +#include "cdma_cmd.h" + +static int cdma_cmd_query_fw_resource(struct cdma_dev *cdev, struct cdma_ue_info *out_addr) +{ +#define CDMA_QUERY_UE_RES 0x0004 + struct ubase_cmd_buf out = { 0 }; + struct ubase_cmd_buf in = { 0 }; + + ubase_fill_inout_buf(&in, CDMA_QUERY_UE_RES, true, 0, NULL); + ubase_fill_inout_buf(&out, CDMA_QUERY_UE_RES, true, + sizeof(*out_addr), out_addr); + + return ubase_cmd_send_inout(cdev->adev, &in, &out); +} + +static int cdma_query_caps_from_firmware(struct cdma_dev *cdev) +{ + struct cdma_caps *caps = &cdev->caps; + struct cdma_ue_info cmd = { 0 }; + int ret; + + ret = cdma_cmd_query_fw_resource(cdev, &cmd); + if (ret) + return dev_err_probe(cdev->dev, ret, "query fw resource failed\n"); + + caps->jfs_sge = cmd.jfs_sge; + caps->trans_mode = cmd.trans_mode; + caps->seid.max_cnt = cmd.seid_upi_tbl_num; + caps->feature = cmd.cap_info; + caps->ue_cnt = cmd.ue_cnt; + caps->ue_id = cmd.ue_id; + + dev_dbg(cdev->dev, "jfs_sge = 0x%x, trans_mode = 0x%x, seid.max_cnt = 0x%x\n", + caps->jfs_sge, caps->trans_mode, caps->seid.max_cnt); + dev_dbg(cdev->dev, "feature = 0x%x, ue_cnt = 0x%x, ue_id = 0x%x\n", + caps->feature, caps->ue_cnt, caps->ue_id); + + return 0; +} + +static int cdma_set_caps_from_adev_caps(struct cdma_dev *cdev) +{ +#define MAX_WQEBB_IN_SQE 4 + struct cdma_caps *caps = &cdev->caps; + struct ubase_adev_caps *adev_caps; + + adev_caps = ubase_get_cdma_caps(cdev->adev); + if (!adev_caps) { + dev_err(cdev->dev, "get cdma adev caps failed\n"); + return -EINVAL; + } + + caps->jfs.max_cnt = adev_caps->jfs.max_cnt; + caps->jfs.depth = adev_caps->jfs.depth / MAX_WQEBB_IN_SQE; + caps->jfs.start_idx = adev_caps->jfs.start_idx; + caps->jfc.max_cnt = adev_caps->jfc.max_cnt; + caps->jfc.depth = adev_caps->jfc.depth; + caps->jfc.start_idx = adev_caps->jfc.start_idx; + caps->cqe_size = adev_caps->cqe_size; + + return 0; +} + +static int cdma_set_caps_from_ubase_caps(struct cdma_dev *cdev) +{ + struct cdma_caps *caps = &cdev->caps; + struct ubase_caps *ubase_caps; + + ubase_caps = ubase_get_dev_caps(cdev->adev); + if (!ubase_caps) { + dev_err(cdev->dev, "get cdma ubase caps failed\n"); + return -EINVAL; + } + + caps->comp_vector_cnt = ubase_caps->num_ceq_vectors; + caps->public_jetty_cnt = ubase_caps->public_jetty_cnt; + cdev->eid = ubase_caps->eid; + cdev->upi = ubase_caps->upi; + + return 0; +} + +int cdma_init_dev_caps(struct cdma_dev *cdev) +{ + struct cdma_caps *caps = &cdev->caps; + int ret; + u8 i; + + ret = cdma_query_caps_from_firmware(cdev); + if (ret) + return ret; + + ret = cdma_set_caps_from_adev_caps(cdev); + if (ret) + return ret; + + ret = cdma_set_caps_from_ubase_caps(cdev); + if (ret) + return ret; + + caps->queue.max_cnt = min(caps->jfs.max_cnt, caps->jfc.max_cnt); + caps->queue.start_idx = 0; + caps->jfce.max_cnt = caps->jfc.max_cnt; + caps->jfce.start_idx = 0; + + dev_info(cdev->dev, "query cdev eid = 0x%x, cdev upi = 0x%x\n", cdev->eid, + cdev->upi); + dev_info(cdev->dev, "queue:max_cnt = 0x%x, start_idx = 0x%x\n", + caps->queue.max_cnt, caps->queue.start_idx); + dev_info(cdev->dev, "jfs:max_cnt = 0x%x, depth = 0x%x, start_idx = 0x%x\n", + caps->jfs.max_cnt, caps->jfs.depth, caps->jfs.start_idx); + dev_info(cdev->dev, "jfce:max_cnt = 0x%x, depth = 0x%x, start_idx = 0x%x\n", + caps->jfce.max_cnt, caps->jfce.depth, caps->jfce.start_idx); + dev_info(cdev->dev, "jfc:max_cnt = 0x%x, depth = 0x%x, start_idx = 0x%x\n", + caps->jfc.max_cnt, 
caps->jfc.depth, caps->jfc.start_idx); + dev_info(cdev->dev, "comp_vector_cnt = 0x%x, public_jetty_cnt = 0x%x\n", + caps->comp_vector_cnt, caps->public_jetty_cnt); + dev_info(cdev->dev, "sl_num = 0x%x\n", cdev->sl_num); + for (i = 0; i < cdev->sl_num; i++) + dev_info(cdev->dev, "sl[%u] = 0x%x\n", i, cdev->sl[i]); + + return 0; +} + +int cdma_ctrlq_query_eu(struct cdma_dev *cdev) +{ +#define CDMA_CTRLQ_QUERY_SEID_UPI 0x1 +#define CDMA_CTRLQ_CMD_SEID_UPI 0xB5 + struct cdma_device_attr *attr = &cdev->base.attr; + struct eu_query_out out_query = { 0 }; + struct eu_query_in in_query = { 0 }; + struct ubase_ctrlq_msg msg = { 0 }; + struct eu_info *eus = attr->eus; + int ret; + u8 i; + + in_query.cmd = CDMA_CTRLQ_CMD_SEID_UPI; + + msg = (struct ubase_ctrlq_msg) { + .service_ver = UBASE_CTRLQ_SER_VER_01, + .service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + .opcode = CDMA_CTRLQ_QUERY_SEID_UPI, + .need_resp = 1, + .is_resp = 0, + .resv = 0, + .resp_seq = 0, + .in_size = sizeof(in_query), + .in = &in_query, + .out_size = sizeof(out_query), + .out = &out_query, + }; + + ret = ubase_ctrlq_send_msg(cdev->adev, &msg); + if (ret) { + dev_err(cdev->dev, + "query seid upi from ctrl cpu failed, ret = %d.\n", ret); + return ret; + } + + if (!out_query.seid_num || out_query.seid_num > CDMA_MAX_EU_NUM) { + dev_err(cdev->dev, + "query seid upi num is invalid, num = %u.\n", + out_query.seid_num); + return -EINVAL; + } + + mutex_lock(&cdev->eu_mutex); + memcpy(eus, out_query.eus, sizeof(struct eu_info) * out_query.seid_num); + attr->eu_num = out_query.seid_num; + + for (i = 0; i < attr->eu_num; i++) + dev_dbg(cdev->dev, + "cdma init eus[%u], upi = 0x%x, eid = 0x%x, eid_idx = 0x%x.\n", + i, eus[i].upi, eus[i].eid.dw0, eus[i].eid_idx); + mutex_unlock(&cdev->eu_mutex); + + return 0; +} diff --git a/drivers/ub/cdma/cdma_cmd.h b/drivers/ub/cdma/cdma_cmd.h new file mode 100644 index 000000000000..550f60640b36 --- /dev/null +++ b/drivers/ub/cdma/cdma_cmd.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_CMD_H__ +#define __CDMA_CMD_H__ + +#include + +struct cdma_dev; + +struct cdma_ue_info { + /* BD0 */ + u16 jfs_num_shift : 4; + u16 jfr_num_shift : 4; + u16 jfc_num_shift : 4; + u16 jetty_num_shift : 4; + + u16 jetty_grp_num; + + u16 jfs_depth_shift : 4; + u16 jfr_depth_shift : 4; + u16 jfc_depth_shift : 4; + u16 cqe_size_shift : 4; + + u16 jfs_sge : 5; + u16 jfr_sge : 5; + u16 jfs_rsge : 6; + + u16 max_jfs_inline_sz; + u16 max_jfc_inline_sz; + u32 cap_info; + + u16 trans_mode : 5; + u16 ue_num : 8; + u16 virtualization : 1; + u16 rsvd0 : 2; + + u16 ue_cnt; + u8 ue_id; + u8 default_cong_alg; + u8 cons_ctrl_alg; + u8 cc_priority_cnt; + /* BD1 */ + u16 src_addr_tbl_sz; + u16 src_addr_tbl_num; + u16 dest_addr_tbl_sz; + u16 dest_addr_tbl_num; + u16 seid_upi_tbl_sz; + u16 seid_upi_tbl_num; + u16 tpm_tbl_sz; + u16 tpm_tbl_num; + u32 tp_range; + u8 port_num; + u8 port_id; + u8 rsvd1[2]; + u16 rc_queue_num; + u16 rc_depth; + u8 rc_entry; + u8 rsvd2[3]; + /* BD2 */ + u32 rsvd3[8]; + /* BD3 */ + u32 rsvd4[8]; +}; + +struct eu_query_in { + u32 cmd : 8; + u32 rsv : 24; +}; + +struct eu_query_out { + u32 seid_num : 8; + u32 rsv : 24; + struct eu_info eus[CDMA_MAX_EU_NUM]; +}; + +int cdma_init_dev_caps(struct cdma_dev *cdev); +int cdma_ctrlq_query_eu(struct cdma_dev *cdev); +#endif diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c new file mode 100644 index 000000000000..4c883e728362 --- /dev/null +++ b/drivers/ub/cdma/cdma_dev.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include +#include +#include + +#include "cdma.h" +#include "cdma_cmd.h" +#include "cdma_tid.h" +#include +#include +#include "cdma_dev.h" + +static DEFINE_XARRAY(cdma_devs_tbl); +static atomic_t cdma_devs_num = ATOMIC_INIT(0); + +/* Add the device to the device list for user query. 
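Entries are keyed by the auxiliary device id, which must stay below CDMA_UE_MAX_NUM; cdma_devs_num mirrors the number of stored devices.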
*/ +static int cdma_add_device_to_list(struct cdma_dev *cdev) +{ + struct auxiliary_device *adev = cdev->adev; + int ret; + + if (adev->id >= CDMA_UE_MAX_NUM) { + dev_err(cdev->dev, "invalid ue id %u.\n", adev->id); + return -EINVAL; + } + + ret = xa_err(xa_store(&cdma_devs_tbl, adev->id, cdev, GFP_KERNEL)); + if (ret) { + dev_err(cdev->dev, + "store cdma device to table failed, adev id = %u.\n", + adev->id); + return ret; + } + + atomic_inc(&cdma_devs_num); + + return 0; +} + +static void cdma_del_device_from_list(struct cdma_dev *cdev) +{ + struct auxiliary_device *adev = cdev->adev; + + if (adev->id >= CDMA_UE_MAX_NUM) { + dev_err(cdev->dev, "invalid ue id %u.\n", adev->id); + return; + } + + atomic_dec(&cdma_devs_num); + xa_erase(&cdma_devs_tbl, adev->id); +} + +static void cdma_init_base_dev(struct cdma_dev *cdev) +{ + struct cdma_device_attr *attr = &cdev->base.attr; + struct cdma_device_cap *dev_cap = &attr->dev_cap; + struct cdma_caps *caps = &cdev->caps; + + attr->eid.dw0 = cdev->eid; + dev_cap->max_jfc = caps->jfc.max_cnt; + dev_cap->max_jfs = caps->jfs.max_cnt; + dev_cap->max_jfc_depth = caps->jfc.depth; + dev_cap->max_jfs_depth = caps->jfs.depth; + dev_cap->trans_mode = caps->trans_mode; + dev_cap->max_jfs_sge = caps->jfs_sge; + dev_cap->max_jfs_rsge = caps->jfs_rsge; + dev_cap->max_msg_size = caps->max_msg_len; + dev_cap->ceq_cnt = caps->comp_vector_cnt; + dev_cap->max_jfs_inline_len = caps->jfs_inline_sz; +} + +static int cdma_init_dev_param(struct cdma_dev *cdev, + struct auxiliary_device *adev) +{ + struct ubase_resource_space *mem_base; + int ret; + + mem_base = ubase_get_mem_base(adev); + if (!mem_base) + return -EINVAL; + + cdev->adev = adev; + cdev->dev = adev->dev.parent; + cdev->k_db_base = mem_base->addr; + cdev->db_base = mem_base->addr_unmapped; + + ret = cdma_init_dev_caps(cdev); + if (ret) + return ret; + + cdma_init_base_dev(cdev); + + dev_set_drvdata(&adev->dev, cdev); + + mutex_init(&cdev->db_mutex); + mutex_init(&cdev->eu_mutex); + INIT_LIST_HEAD(&cdev->db_page); + mutex_init(&cdev->file_mutex); + INIT_LIST_HEAD(&cdev->file_list); + + return 0; +} + +static void cdma_uninit_dev_param(struct cdma_dev *cdev) +{ + mutex_destroy(&cdev->db_mutex); + mutex_destroy(&cdev->eu_mutex); + mutex_destroy(&cdev->file_mutex); + dev_set_drvdata(&cdev->adev->dev, NULL); +} + +int cdma_create_arm_db_page(struct cdma_dev *cdev) +{ + cdev->arm_db_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!cdev->arm_db_page) { + dev_err(cdev->dev, "alloc dev arm db page failed.\n"); + return -ENOMEM; + } + return 0; +} + +void cdma_destroy_arm_db_page(struct cdma_dev *cdev) +{ + if (!cdev->arm_db_page) + return; + + put_page(cdev->arm_db_page); + cdev->arm_db_page = NULL; +} + +struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev; + + cdev = kzalloc((sizeof(*cdev)), GFP_KERNEL); + if (!cdev) + return NULL; + + if (cdma_init_dev_param(cdev, adev)) + goto free; + + if (cdma_add_device_to_list(cdev)) + goto free_param; + + if (cdma_alloc_dev_tid(cdev)) + goto del_list; + + if (cdma_create_arm_db_page(cdev)) + goto free_tid; + + dev_dbg(&adev->dev, "cdma.%u init succeeded.\n", adev->id); + + return cdev; + +free_tid: + cdma_free_dev_tid(cdev); +del_list: + cdma_del_device_from_list(cdev); +free_param: + cdma_uninit_dev_param(cdev); +free: + kfree(cdev); + return NULL; +} + +void cdma_destroy_dev(struct cdma_dev *cdev) +{ + if (!cdev) + return; + + cdma_destroy_arm_db_page(cdev); + ubase_ctrlq_unregister_crq_event(cdev->adev, + 
UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + CDMA_CTRLQ_EU_UPDATE); + cdma_free_dev_tid(cdev); + + cdma_del_device_from_list(cdev); + cdma_uninit_dev_param(cdev); + kfree(cdev); +} diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h new file mode 100644 index 000000000000..41222a67e12a --- /dev/null +++ b/drivers/ub/cdma/cdma_dev.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_DEV_H__ +#define __CDMA_DEV_H__ + +#include + +#define CDMA_CTRLQ_EU_UPDATE 0x2 +#define CDMA_UE_MAX_NUM 64 + +struct cdma_dev; + +struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); +void cdma_destroy_dev(struct cdma_dev *cdev); +int cdma_create_arm_db_page(struct cdma_dev *cdev); +void cdma_destroy_arm_db_page(struct cdma_dev *cdev); + +#endif /* _CDMA_DEV_H_ */ diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c new file mode 100644 index 000000000000..28d21ca61ea6 --- /dev/null +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include + +#include +#include "cdma.h" +#include "cdma_types.h" +#include "cdma_ioctl.h" + +typedef int (*cdma_cmd_handler)(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile); + +static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { +}; + +int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) +{ + struct cdma_dev *cdev = cfile->cdev; + + if (hdr->command >= CDMA_CMD_MAX || !g_cdma_cmd_handler[hdr->command]) { + dev_err(cdev->dev, + "invalid cdma user command or no handler, command = %u\n", + hdr->command); + return -EINVAL; + } + + return g_cdma_cmd_handler[hdr->command](hdr, cfile); +} diff --git a/drivers/ub/cdma/cdma_ioctl.h b/drivers/ub/cdma/cdma_ioctl.h new file mode 100644 index 000000000000..a5b20c99117e --- /dev/null +++ b/drivers/ub/cdma/cdma_ioctl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_IOCTL_H__ +#define __CDMA_IOCTL_H__ + +struct cdma_file; +struct cdma_ioctl_hdr; + +int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr); + +#endif /* _CDMA_IOCTL_H_ */ diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c new file mode 100644 index 000000000000..d04b4b43c989 --- /dev/null +++ b/drivers/ub/cdma/cdma_main.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define pr_fmt(fmt) "CDMA: " fmt +#define dev_fmt pr_fmt + +#include + +#include "cdma.h" +#include "cdma_dev.h" +#include "cdma_chardev.h" +#include +#include "cdma_cmd.h" + +/* Enabling jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. */ +uint jfc_arm_mode; +module_param(jfc_arm_mode, uint, 0444); +MODULE_PARM_DESC(jfc_arm_mode, + "Set the ARM mode of the JFC, default: 0(0:Always ARM, others: NO ARM)"); + +struct class *cdma_cdev_class; + +static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) +{ + int ret; + + /* query eu failure does not affect driver loading, as eu can be updated. 
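A warning is logged and probing continues; the eu table can be brought up to date later through ctrlq updates.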
*/ + ret = cdma_ctrlq_query_eu(cdev); + if (ret) + dev_warn(&auxdev->dev, "query eu failed, ret = %d.\n", ret); + + return 0; +} + +static int cdma_init_dev(struct auxiliary_device *auxdev) +{ + struct cdma_dev *cdev; + int ret; + + dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", + __func__, auxdev->name, auxdev->id); + + cdev = cdma_create_dev(auxdev); + if (!cdev) + return -ENOMEM; + + ret = cdma_create_chardev(cdev); + if (ret) { + cdma_destroy_dev(cdev); + return ret; + } + + ret = cdma_init_dev_info(auxdev, cdev); + if (ret) { + cdma_destroy_chardev(cdev); + cdma_destroy_dev(cdev); + return ret; + } + + return ret; +} + +static void cdma_uninit_dev(struct auxiliary_device *auxdev) +{ + struct cdma_dev *cdev; + + dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", + __func__, auxdev->name, auxdev->id); + + cdev = dev_get_drvdata(&auxdev->dev); + if (!cdev) { + dev_err(&auxdev->dev, "get drvdata from ubase failed.\n"); + return; + } + + cdma_destroy_chardev(cdev); + cdma_destroy_dev(cdev); +} + +static int cdma_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *auxdev_id) +{ + int ret; + + ret = cdma_init_dev(auxdev); + if (ret) + return ret; + + return 0; +} + +static void cdma_remove(struct auxiliary_device *auxdev) +{ + cdma_uninit_dev(auxdev); +} + +static const struct auxiliary_device_id cdma_id_table[] = { + { + .name = UBASE_ADEV_NAME ".cdma", + }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, cdma_id_table); + +static struct auxiliary_driver cdma_driver = { + .probe = cdma_probe, + .remove = cdma_remove, + .name = "cdma", + .id_table = cdma_id_table, +}; + +static int __init cdma_init(void) +{ + int ret; + + cdma_cdev_class = class_create("cdma"); + if (IS_ERR(cdma_cdev_class)) { + pr_err("create cdma class failed.\n"); + return PTR_ERR(cdma_cdev_class); + } + + ret = auxiliary_driver_register(&cdma_driver); + if (ret) { + pr_err("auxiliary register failed.\n"); + goto free_class; + } + + return 0; + +free_class: + class_destroy(cdma_cdev_class); + + return ret; +} + +static void __exit cdma_exit(void) +{ + auxiliary_driver_unregister(&cdma_driver); + class_destroy(cdma_cdev_class); +} + +module_init(cdma_init); +module_exit(cdma_exit); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("Hisilicon UBus Crystal DMA Driver"); diff --git a/drivers/ub/cdma/cdma_tid.c b/drivers/ub/cdma/cdma_tid.c new file mode 100644 index 000000000000..6e38b72bc8e0 --- /dev/null +++ b/drivers/ub/cdma/cdma_tid.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
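This file binds the device to the UMMU via (K)SVA, obtains the TID used for address translation, and grants the device read/write access over the configured address range.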
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include + +#include "cdma.h" +#include "cdma_tid.h" + +int cdma_alloc_dev_tid(struct cdma_dev *cdev) +{ + struct ummu_seg_attr seg_attr = { + .token = NULL, + .e_bit = UMMU_EBIT_ON, + }; + struct ummu_param drvdata = { + .mode = MAPT_MODE_TABLE, + }; + int ret; + + ret = iommu_dev_enable_feature(cdev->dev, IOMMU_DEV_FEAT_KSVA); + if (ret) { + dev_err(cdev->dev, "enable ksva failed, ret = %d.\n", ret); + return ret; + } + + ret = iommu_dev_enable_feature(cdev->dev, IOMMU_DEV_FEAT_SVA); + if (ret) { + dev_err(cdev->dev, "enable sva failed, ret = %d.\n", ret); + goto err_sva_enable_dev; + } + + cdev->ksva = ummu_ksva_bind_device(cdev->dev, &drvdata); + if (!cdev->ksva) { + dev_err(cdev->dev, "ksva bind device failed.\n"); + ret = -EINVAL; + goto err_ksva_bind_device; + } + + ret = ummu_get_tid(cdev->dev, cdev->ksva, &cdev->tid); + if (ret) { + dev_err(cdev->dev, "get tid for cdma device failed.\n"); + goto err_get_tid; + } + + ret = ummu_sva_grant_range(cdev->ksva, 0, CDMA_MAX_GRANT_SIZE, + UMMU_DEV_WRITE | UMMU_DEV_READ, + &seg_attr); + if (ret) { + dev_err(cdev->dev, "sva grant range for cdma device failed.\n"); + goto err_get_tid; + } + + return 0; + +err_get_tid: + ummu_ksva_unbind_device(cdev->ksva); +err_ksva_bind_device: + if (iommu_dev_disable_feature(cdev->dev, IOMMU_DEV_FEAT_SVA)) + dev_warn(cdev->dev, "disable sva failed, ret = %d.\n", ret); +err_sva_enable_dev: + if (iommu_dev_disable_feature(cdev->dev, IOMMU_DEV_FEAT_KSVA)) + dev_warn(cdev->dev, "disable ksva failed, ret = %d.\n", ret); + + return ret; +} + +void cdma_free_dev_tid(struct cdma_dev *cdev) +{ + int ret; + + ret = ummu_sva_ungrant_range(cdev->ksva, 0, CDMA_MAX_GRANT_SIZE, NULL); + if (ret) + dev_warn(cdev->dev, + "sva ungrant range for cdma device failed, ret = %d.\n", + ret); + + ummu_ksva_unbind_device(cdev->ksva); + + ret = iommu_dev_disable_feature(cdev->dev, IOMMU_DEV_FEAT_SVA); + if (ret) + dev_warn(cdev->dev, "disable sva failed, ret = %d.\n", ret); + + ret = iommu_dev_disable_feature(cdev->dev, IOMMU_DEV_FEAT_KSVA); + if (ret) + dev_warn(cdev->dev, "disable ksva failed, ret = %d.\n", ret); +} diff --git a/drivers/ub/cdma/cdma_tid.h b/drivers/ub/cdma/cdma_tid.h new file mode 100644 index 000000000000..8bbd8c0c979a --- /dev/null +++ b/drivers/ub/cdma/cdma_tid.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_TID_H__ +#define __CDMA_TID_H__ + +#include + +#define CDMA_MAX_GRANT_SIZE GENMASK(47, 12) + +struct cdma_dev; + +int cdma_alloc_dev_tid(struct cdma_dev *cdev); +void cdma_free_dev_tid(struct cdma_dev *cdev); + +#endif diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h new file mode 100644 index 000000000000..4ef38c23e22c --- /dev/null +++ b/drivers/ub/cdma/cdma_types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_TYPES_H__ +#define __CDMA_TYPES_H__ + +#include +#include +#include + +struct cdma_dev; + +struct cdma_file { + struct cdma_dev *cdev; + struct list_head list; + struct kref ref; +}; + +#endif diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h new file mode 100644 index 000000000000..ae83d18dfd8d --- /dev/null +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef _UAPI_UB_CDMA_CDMA_ABI_H_ +#define _UAPI_UB_CDMA_CDMA_ABI_H_ + +#include + +/* cdma ioctl cmd */ +#define CDMA_IOC_MAGIC 'C' +#define CDMA_SYNC _IOWR(CDMA_IOC_MAGIC, 0, struct cdma_ioctl_hdr) + +enum cdma_cmd { + CDMA_CMD_MAX +}; + +struct cdma_ioctl_hdr { + __u32 command; + __u32 args_len; + __u64 args_addr; +}; + +struct dev_eid { + __u32 dw0; + __u32 dw1; + __u32 dw2; + __u32 dw3; +}; + +struct eu_info { + __u32 eid_idx; + struct dev_eid eid; + __u32 upi; +}; + +struct cdma_device_cap { + __u32 max_jfc; + __u32 max_jfs; + __u32 max_jfc_depth; + __u32 max_jfs_depth; + __u32 max_jfs_inline_len; + __u32 max_jfs_sge; + __u32 max_jfs_rsge; + __u64 max_msg_size; + __u32 max_atomic_size; + __u16 trans_mode; + __u32 ceq_cnt; + __u32 max_eid_cnt; + __u64 page_size_cap; +}; + +struct cdma_device_attr { +#define CDMA_MAX_EU_NUM 64 + __u8 eu_num; + struct dev_eid eid; + struct eu_info eu; + struct eu_info eus[CDMA_MAX_EU_NUM]; + struct cdma_device_cap dev_cap; +}; + +#endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h new file mode 100644 index 000000000000..8b223fe6f5ab --- /dev/null +++ b/include/ub/cdma/cdma_api.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef _UB_CDMA_CDMA_API_H_ +#define _UB_CDMA_CDMA_API_H_ + +#include +#include + +struct dma_device { + struct cdma_device_attr attr; + atomic_t ref_cnt; + void *private_data; +}; + +#endif -- Gitee From 8a5bdd3bc9385bad255af72c2439538b749c953d Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 16:14:36 +0800 Subject: [PATCH 007/243] ub: cdma: support querying sl information and updating eu commit ca1562136e1414353235fc99165b6c4ac7660ca3 openEuler This patch implements the querying of sl information during device initialization in the CDMA driver and the process of dynamically refreshing eu (eid/upi) through the ctrlq.
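On receipt of a CDMA_CTRLQ_EU_UPDATE message, the registered crq handler validates the payload length and, under eu_mutex, either adds or refreshes an entry (CDMA_CTRLQ_EU_ADD) or removes it (CDMA_CTRLQ_EU_DEL) in the per-device eu table.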
Signed-off-by: Zhipeng Lu Signed-off-by: Hongwu Wang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_cmd.c | 26 +++++++ drivers/ub/cdma/cdma_dev.c | 141 ++++++++++++++++++++++++++++++++++++- drivers/ub/cdma/cdma_dev.h | 13 ++++ 3 files changed, 179 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c index 80ab8791b018..74e6b32a58c7 100644 --- a/drivers/ub/cdma/cdma_cmd.c +++ b/drivers/ub/cdma/cdma_cmd.c @@ -92,6 +92,28 @@ static int cdma_set_caps_from_ubase_caps(struct cdma_dev *cdev) return 0; } +static int cdma_set_caps_from_adev_qos(struct cdma_dev *cdev) +{ + struct ubase_adev_qos *qos; + + qos = ubase_get_adev_qos(cdev->adev); + if (!qos) { + dev_err(cdev->dev, "get cdma adev qos failed\n"); + return -EINVAL; + } + + if (!qos->ctp_sl_num || qos->ctp_sl_num > CDMA_MAX_SL_NUM) { + dev_err(cdev->dev, "sl num %u is invalid\n", + qos->ctp_sl_num); + return -EINVAL; + } + + cdev->sl_num = qos->ctp_sl_num; + memcpy(cdev->sl, qos->ctp_sl, qos->ctp_sl_num); + + return 0; +} + int cdma_init_dev_caps(struct cdma_dev *cdev) { struct cdma_caps *caps = &cdev->caps; @@ -110,6 +132,10 @@ int cdma_init_dev_caps(struct cdma_dev *cdev) if (ret) return ret; + ret = cdma_set_caps_from_adev_qos(cdev); + if (ret) + return ret; + caps->queue.max_cnt = min(caps->jfs.max_cnt, caps->jfc.max_cnt); caps->queue.start_idx = 0; caps->jfce.max_cnt = caps->jfc.max_cnt; diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 4c883e728362..9d7142214a45 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -115,6 +115,108 @@ static void cdma_uninit_dev_param(struct cdma_dev *cdev) dev_set_drvdata(&cdev->adev->dev, NULL); } +static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) +{ + struct cdma_device_attr *attr = &cdev->base.attr; + struct eu_info *eus = cdev->base.attr.eus; + u8 i; + + for (i = 0; i < attr->eu_num; i++) { + if (eu->eid_idx != eus[i].eid_idx) + continue; + + dev_dbg(cdev->dev, + "cdma.%u: eid_idx[0x%x] eid[0x%x->0x%x] upi[0x%x->0x%x] update success.\n", + cdev->adev->id, eu->eid_idx, eus[i].eid.dw0, + eu->eid.dw0, eus[i].upi, eu->upi & CDMA_UPI_MASK); + + eus[i].eid = eu->eid; + eus[i].upi = eu->upi & CDMA_UPI_MASK; + + if (attr->eu.eid_idx == eu->eid_idx) { + attr->eu.eid = eu->eid; + attr->eu.upi = eu->upi & CDMA_UPI_MASK; + } + return 0; + } + + if (attr->eu_num >= CDMA_MAX_EU_NUM) { + dev_err(cdev->dev, "cdma.%u: eu table is full.\n", + cdev->adev->id); + return -EINVAL; + } + + eus[attr->eu_num++] = *eu; + dev_dbg(cdev->dev, + "cdma.%u: eid_idx[0x%x] eid[0x%x] upi[0x%x] add success.\n", + cdev->adev->id, eu->eid_idx, eu->eid.dw0, + eu->upi & CDMA_UPI_MASK); + + return 0; +} + +static int cdma_ctrlq_eu_del(struct cdma_dev *cdev, struct eu_info *eu) +{ + struct cdma_device_attr *attr = &cdev->base.attr; + struct eu_info *eus = cdev->base.attr.eus; + int ret = -EINVAL; + u8 i, j; + + if (!attr->eu_num) { + dev_err(cdev->dev, "cdma.%u: eu table is empty.\n", + cdev->adev->id); + return -EINVAL; + } + + for (i = 0; i < attr->eu_num; i++) { + if (eu->eid_idx != eus[i].eid_idx) + continue; + + for (j = i; j < attr->eu_num - 1; j++) + eus[j] = eus[j + 1]; + memset(&eus[j], 0, sizeof(*eus)); + + if (attr->eu.eid_idx == eu->eid_idx) + attr->eu = eus[0]; + attr->eu_num--; + ret = 0; + break; + } + + dev_info(cdev->dev, + "cdma.%u: eid_idx[0x%x] eid[0x%x] upi[0x%x] delete %s.\n", + cdev->adev->id, eu->eid_idx, eu->eid.dw0, + eu->upi & CDMA_UPI_MASK, ret ? 
"failed" : "success"); + + return ret; +} + +static int cdma_ctrlq_eu_update(struct auxiliary_device *adev, u8 service_ver, + void *data, u16 len, u16 seq) +{ + struct cdma_dev *cdev = dev_get_drvdata(&adev->dev); + struct cdma_ctrlq_eu_info *ctrlq_eu; + int ret = -EINVAL; + + if (len < sizeof(*ctrlq_eu)) { + dev_err(cdev->dev, "ctrlq data len is invalid.\n"); + return -EINVAL; + } + + ctrlq_eu = (struct cdma_ctrlq_eu_info *)data; + + mutex_lock(&cdev->eu_mutex); + if (ctrlq_eu->op == CDMA_CTRLQ_EU_ADD) + ret = cdma_ctrlq_eu_add(cdev, &ctrlq_eu->eu); + else if (ctrlq_eu->op == CDMA_CTRLQ_EU_DEL) + ret = cdma_ctrlq_eu_del(cdev, &ctrlq_eu->eu); + else + dev_err(cdev->dev, "ctrlq eu op is invalid.\n"); + mutex_unlock(&cdev->eu_mutex); + + return ret; +} + int cdma_create_arm_db_page(struct cdma_dev *cdev) { cdev->arm_db_page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -134,6 +236,36 @@ void cdma_destroy_arm_db_page(struct cdma_dev *cdev) cdev->arm_db_page = NULL; } +int cdma_register_crq_event(struct auxiliary_device *adev) +{ + struct ubase_ctrlq_event_nb nb = { + .service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + .opcode = CDMA_CTRLQ_EU_UPDATE, + .back = adev, + .crq_handler = cdma_ctrlq_eu_update, + }; + int ret; + + if (!adev) + return -EINVAL; + + ret = ubase_ctrlq_register_crq_event(adev, &nb); + if (ret) { + dev_err(&adev->dev, "register crq event failed, id = %u, ret = %d.\n", + adev->id, ret); + return ret; + } + + return 0; +} + +void cdma_unregister_crq_event(struct auxiliary_device *adev) +{ + ubase_ctrlq_unregister_crq_event(adev, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + CDMA_CTRLQ_EU_UPDATE); +} + struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) { struct cdma_dev *cdev; @@ -151,13 +283,18 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) if (cdma_alloc_dev_tid(cdev)) goto del_list; - if (cdma_create_arm_db_page(cdev)) + if (cdma_register_crq_event(adev)) goto free_tid; + if (cdma_create_arm_db_page(cdev)) + goto unregister_crq; + dev_dbg(&adev->dev, "cdma.%u init succeeded.\n", adev->id); return cdev; +unregister_crq: + cdma_unregister_crq_event(adev); free_tid: cdma_free_dev_tid(cdev); del_list: @@ -174,6 +311,8 @@ void cdma_destroy_dev(struct cdma_dev *cdev) if (!cdev) return; + ubase_virt_unregister(cdev->adev); + cdma_destroy_arm_db_page(cdev); ubase_ctrlq_unregister_crq_event(cdev->adev, UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 41222a67e12a..e79dc2608e24 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -11,8 +11,21 @@ struct cdma_dev; +struct cdma_ctrlq_eu_info { + struct eu_info eu; + u32 op : 4; + u32 rsvd : 28; +}; + +enum cdma_ctrlq_eu_op { + CDMA_CTRLQ_EU_ADD = 0, + CDMA_CTRLQ_EU_DEL = 1, +}; + struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); void cdma_destroy_dev(struct cdma_dev *cdev); +int cdma_register_crq_event(struct auxiliary_device *adev); +void cdma_unregister_crq_event(struct auxiliary_device *adev); int cdma_create_arm_db_page(struct cdma_dev *cdev); void cdma_destroy_arm_db_page(struct cdma_dev *cdev); -- Gitee From 13154b3e44c47b2b65748d4ca6238469fe0acb01 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 16:18:34 +0800 Subject: [PATCH 008/243] ub: cdma: support for getting device list commit 1291f9445da2401526178bb343177b8a8a887df8 openEuler This patch implements device list-related APIs and functionalities within the CDMA driver. 
The implementation includes support for the interfaces dma_get_device_list and dma_free_device_list. Signed-off-by: Zhipeng Lu Signed-off-by: Hongwu Wang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 3 +- drivers/ub/cdma/cdma.h | 5 ++ drivers/ub/cdma/cdma_api.c | 88 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_context.h | 29 +++++++++++ drivers/ub/cdma/cdma_dev.c | 7 +++ drivers/ub/cdma/cdma_dev.h | 2 + drivers/ub/cdma/cdma_ioctl.c | 41 +++++++++++++++ include/uapi/ub/cdma/cdma_abi.h | 7 +++ include/ub/cdma/cdma_api.h | 9 ++++ 9 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/cdma/cdma_api.c create mode 100644 drivers/ub/cdma/cdma_context.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 7375c6a08738..332700d90004 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ -cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o +cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ + cdma_api.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index af45096026ee..4a9720af8681 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -16,6 +16,11 @@ #define CDMA_UPI_MASK 0x7FFF +enum cdma_status { + CDMA_NORMAL, + CDMA_SUSPEND, +}; + struct cdma_res { u32 max_cnt; u32 start_idx; diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c new file mode 100644 index 000000000000..1f83307ea9bc --- /dev/null +++ b/drivers/ub/cdma/cdma_api.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
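 *
 * A minimal usage sketch for these exports (hypothetical kernel caller;
 * error handling elided, and nothing below takes a device reference, so
 * the list can be freed immediately):
 *
 *	u32 num = 0;
 *	struct dma_device *devs = dma_get_device_list(&num);
 *
 *	if (devs)
 *		dma_free_device_list(devs, num);
 *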
*/ + +#define pr_fmt(fmt) "CDMA: " fmt +#define dev_fmt pr_fmt + +#include "cdma_dev.h" +#include "cdma_cmd.h" +#include "cdma_context.h" +#include "cdma.h" +#include + +struct dma_device *dma_get_device_list(u32 *num_devices) +{ + struct cdma_device_attr *attr; + struct xarray *cdma_devs_tbl; + struct cdma_dev *cdev = NULL; + struct dma_device *ret_list; + unsigned long index; + u32 count = 0; + u32 devs_num; + + if (!num_devices) + return NULL; + + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + if (devs_num == 0) { + pr_err("cdma device table is empty.\n"); + return NULL; + } + + ret_list = kcalloc(devs_num, sizeof(struct dma_device), GFP_KERNEL); + if (!ret_list) { + *num_devices = 0; + return NULL; + } + + xa_for_each(cdma_devs_tbl, index, cdev) { + attr = &cdev->base.attr; + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", + attr->eid.dw0); + continue; + } + + if (!attr->eu_num) { + pr_warn("no eu in cdma dev eid = 0x%x.\n", cdev->eid); + continue; + } + + memcpy(&attr->eu, &attr->eus[0], sizeof(attr->eu)); + attr->eid.dw0 = cdev->eid; + memcpy(&ret_list[count], &cdev->base, sizeof(*ret_list)); + ret_list[count].private_data = kzalloc( + sizeof(struct cdma_ctx_res), GFP_KERNEL); + if (!ret_list[count].private_data) + break; + count++; + } + *num_devices = count; + + return ret_list; +} +EXPORT_SYMBOL_GPL(dma_get_device_list); + +void dma_free_device_list(struct dma_device *dev_list, u32 num_devices) +{ + int ref_cnt; + u32 i; + + if (!dev_list) + return; + + for (i = 0; i < num_devices; i++) { + ref_cnt = atomic_read(&dev_list[i].ref_cnt); + if (ref_cnt > 0) { + pr_warn("the device resource is still in use, eid = 0x%x, cnt = %d.\n", + dev_list[i].attr.eid.dw0, ref_cnt); + return; + } + } + + for (i = 0; i < num_devices; i++) + kfree(dev_list[i].private_data); + + kfree(dev_list); +} +EXPORT_SYMBOL_GPL(dma_free_device_list); diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h new file mode 100644 index 000000000000..3614be608534 --- /dev/null +++ b/drivers/ub/cdma/cdma_context.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_CONTEXT_H__ +#define __CDMA_CONTEXT_H__ + +#include +#include +#include +#include + +struct cdma_context { + struct dma_context base_ctx; + struct cdma_dev *cdev; + struct iommu_sva *sva; + struct list_head pgdir_list; + struct mutex pgdir_mutex; + spinlock_t lock; + int handle; + u32 tid; + bool is_kernel; + atomic_t ref_cnt; +}; + +struct cdma_ctx_res { + struct cdma_context *ctx; +}; + +#endif /* CDMA_CONTEXT_H */ diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 9d7142214a45..4fc025a6628e 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -19,6 +19,13 @@ static DEFINE_XARRAY(cdma_devs_tbl); static atomic_t cdma_devs_num = ATOMIC_INIT(0); +struct xarray *get_cdma_dev_tbl(u32 *devs_num) +{ + *devs_num = atomic_read(&cdma_devs_num); + + return &cdma_devs_tbl; +} + /* Add the device to the device list for user query. 
*/ static int cdma_add_device_to_list(struct cdma_dev *cdev) { diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index e79dc2608e24..623ae8d1f43e 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -5,6 +5,7 @@ #define __CDMA_DEV_H__ #include +#include #define CDMA_CTRLQ_EU_UPDATE 0x2 #define CDMA_UE_MAX_NUM 64 @@ -24,6 +25,7 @@ enum cdma_ctrlq_eu_op { struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); void cdma_destroy_dev(struct cdma_dev *cdev); +struct xarray *get_cdma_dev_tbl(u32 *devices_num); int cdma_register_crq_event(struct auxiliary_device *adev); void cdma_unregister_crq_event(struct auxiliary_device *adev); int cdma_create_arm_db_page(struct cdma_dev *cdev); diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 28d21ca61ea6..518b5802f7e1 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -13,7 +13,48 @@ typedef int (*cdma_cmd_handler)(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile); +static void cdma_fill_device_attr(struct cdma_dev *cdev, + struct cdma_device_cap *dev_cap) +{ + dev_cap->max_jfc = cdev->caps.jfc.max_cnt; + dev_cap->max_jfs = cdev->caps.jfs.max_cnt; + dev_cap->max_jfc_depth = cdev->caps.jfc.depth; + dev_cap->max_jfs_depth = cdev->caps.jfs.depth; + dev_cap->max_jfs_sge = cdev->caps.jfs_sge; + dev_cap->max_jfs_rsge = cdev->caps.jfs_rsge; + dev_cap->max_msg_size = cdev->caps.max_msg_len; + dev_cap->trans_mode = cdev->caps.trans_mode; + dev_cap->ceq_cnt = cdev->caps.comp_vector_cnt; +} + +static int cdma_query_dev(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) +{ + struct cdma_cmd_query_device_attr_args args = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + unsigned long ret; + + if (!hdr->args_addr || hdr->args_len < sizeof(args)) + return -EINVAL; + + args.out.attr.eid.dw0 = cdev->eid; + args.out.attr.eu_num = cdev->base.attr.eu_num; + memcpy(args.out.attr.eus, cdev->base.attr.eus, + sizeof(struct eu_info) * cdev->base.attr.eu_num); + cdma_fill_device_attr(cdev, &args.out.attr.dev_cap); + + ret = copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &args, + (u32)sizeof(args)); + if (ret) { + dev_err(cdev->dev, "query dev copy to user failed, ret = %lu\n", + ret); + return -EFAULT; + } + + return 0; +} + static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { + [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, }; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index ae83d18dfd8d..c72268f4460a 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -11,6 +11,7 @@ #define CDMA_SYNC _IOWR(CDMA_IOC_MAGIC, 0, struct cdma_ioctl_hdr) enum cdma_cmd { + CDMA_CMD_QUERY_DEV_INFO, CDMA_CMD_MAX }; @@ -58,4 +59,10 @@ struct cdma_device_attr { struct cdma_device_cap dev_cap; }; +struct cdma_cmd_query_device_attr_args { + struct { + struct cdma_device_attr attr; + } out; +}; + #endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 8b223fe6f5ab..c5e43bb600e3 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -13,4 +13,13 @@ struct dma_device { void *private_data; }; +struct dma_context { + struct dma_device *dma_dev; + u32 tid; /* data valid only in bit 0-19 */ +}; + +struct dma_device *dma_get_device_list(u32 *num_devices); + +void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); + #endif -- Gitee From 85f2c774b2db4746f6be9b5bddbab5c3f03b45a2 Mon Sep 17 
00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 16:22:18 +0800 Subject: [PATCH 009/243] ub: cdma: support for getting device by eid commit a26341f5c33ecca2a6c0ca1570a80be47ad413f5 openEuler This patch implements the get device by eid API in the CDMA driver and realizes the related functionality. The implementation includes support for the dma_get_device_by_eid interface. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 51 +++++++++++++++++++++++++++++++++++++- drivers/ub/cdma/cdma_dev.c | 15 +++++++++++ drivers/ub/cdma/cdma_dev.h | 4 +++ include/ub/cdma/cdma_api.h | 2 ++ 4 files changed, 71 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 1f83307ea9bc..e918f9899494 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -24,7 +24,7 @@ struct dma_device *dma_get_device_list(u32 *num_devices) return NULL; cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); - if (devs_num == 0) { + if (!devs_num) { pr_err("cdma device table is empty.\n"); return NULL; } @@ -86,3 +86,52 @@ void dma_free_device_list(struct dma_device *dev_list, u32 num_devices) kfree(dev_list); } EXPORT_SYMBOL_GPL(dma_free_device_list); + +struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) +{ + struct cdma_device_attr *attr; + struct xarray *cdma_devs_tbl; + struct cdma_dev *cdev = NULL; + struct dma_device *ret_dev; + unsigned long index; + u32 devs_num; + + if (!eid) + return NULL; + + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + if (!devs_num) { + pr_err("cdma device table is empty.\n"); + return NULL; + } + + ret_dev = kzalloc(sizeof(struct dma_device), GFP_KERNEL); + if (!ret_dev) + return NULL; + + xa_for_each(cdma_devs_tbl, index, cdev) { + attr = &cdev->base.attr; + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", + attr->eid.dw0); + continue; + } + + if (!cdma_find_seid_in_eus(attr->eus, attr->eu_num, eid, + &attr->eu)) + continue; + + memcpy(ret_dev, &cdev->base, sizeof(*ret_dev)); + ret_dev->private_data = kzalloc( + sizeof(struct cdma_ctx_res), GFP_KERNEL); + if (!ret_dev->private_data) { + kfree(ret_dev); + return NULL; + } + return ret_dev; + } + kfree(ret_dev); + + return NULL; +} +EXPORT_SYMBOL_GPL(dma_get_device_by_eid); diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 4fc025a6628e..10a4baa61e78 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -330,3 +330,18 @@ void cdma_destroy_dev(struct cdma_dev *cdev) cdma_uninit_dev_param(cdev); kfree(cdev); } + +bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, + struct eu_info *eu_out) +{ + u32 i; + + for (i = 0; i < eu_num; i++) + if (eus[i].eid.dw0 == eid->dw0 && eus[i].eid.dw1 == eid->dw1 && + eus[i].eid.dw2 == eid->dw2 && eus[i].eid.dw3 == eid->dw3) { + *eu_out = eus[i]; + return true; + } + + return false; +} diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 623ae8d1f43e..73dc4077a74e 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -11,6 +11,8 @@ #define CDMA_UE_MAX_NUM 64 struct cdma_dev; +struct eu_info; +struct dev_eid; struct cdma_ctrlq_eu_info { struct eu_info eu; @@ -26,6 +28,8 @@ enum cdma_ctrlq_eu_op { struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); void cdma_destroy_dev(struct cdma_dev *cdev); struct xarray *get_cdma_dev_tbl(u32 *devices_num); +bool cdma_find_seid_in_eus(struct eu_info *eus, u8 
eu_num, struct dev_eid *eid, + struct eu_info *eu_out); int cdma_register_crq_event(struct auxiliary_device *adev); void cdma_unregister_crq_event(struct auxiliary_device *adev); int cdma_create_arm_db_page(struct cdma_dev *cdev); diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index c5e43bb600e3..759f455581a9 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -22,4 +22,6 @@ struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); +struct dma_device *dma_get_device_by_eid(struct dev_eid *eid); + #endif -- Gitee From 830861325507362c320c71e3398236e944e86d18 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Thu, 28 Aug 2025 21:42:43 +0800 Subject: [PATCH 010/243] ub: cdma: support for allocating queue commit eecfd8889f72f8c03c8af731e508dc3cb46b4b14 openEuler This patch implements the creation of user context and the application of queue APIs in the CDMA driver. The implementation includes support for the dma_create_context and dma_alloc_queue interfaces. Signed-off-by: Zhipeng Lu Signed-off-by: Xinchi Ma Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 24 ++++++ drivers/ub/cdma/cdma_api.c | 102 ++++++++++++++++++++++++ drivers/ub/cdma/cdma_chardev.c | 12 +++ drivers/ub/cdma/cdma_context.c | 134 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_context.h | 7 ++ drivers/ub/cdma/cdma_dev.c | 71 +++++++++++++++++ drivers/ub/cdma/cdma_dev.h | 1 + drivers/ub/cdma/cdma_ioctl.c | 121 +++++++++++++++++++++++++++- drivers/ub/cdma/cdma_queue.c | 103 ++++++++++++++++++++++++ drivers/ub/cdma/cdma_queue.h | 26 +++++++ drivers/ub/cdma/cdma_types.h | 4 + drivers/ub/cdma/cdma_uobj.c | 121 ++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_uobj.h | 34 ++++++++ include/uapi/ub/cdma/cdma_abi.h | 25 ++++++ include/ub/cdma/cdma_api.h | 14 ++++ 16 files changed, 799 insertions(+), 2 deletions(-) create mode 100644 drivers/ub/cdma/cdma_context.c create mode 100644 drivers/ub/cdma/cdma_queue.c create mode 100644 drivers/ub/cdma/cdma_queue.h create mode 100644 drivers/ub/cdma/cdma_uobj.c create mode 100644 drivers/ub/cdma/cdma_uobj.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 332700d90004..8c536d7a26c7 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ - cdma_api.o + cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index 4a9720af8681..c527a1bee1af 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -21,6 +21,13 @@ enum cdma_status { CDMA_SUSPEND, }; +enum { + CDMA_CAP_FEATURE_AR = BIT(0), + CDMA_CAP_FEATURE_JFC_INLINE = BIT(4), + CDMA_CAP_FEATURE_DIRECT_WQE = BIT(11), + CDMA_CAP_FEATURE_CONG_CTRL = BIT(16), +}; + struct cdma_res { u32 max_cnt; u32 start_idx; @@ -78,6 +85,18 @@ struct cdma_caps { struct cdma_tbl seid; }; +struct cdma_idr { + struct idr idr; + u32 min; + u32 max; + u32 next; +}; + +struct cdma_table { + spinlock_t lock; + struct cdma_idr idr_tbl; +}; + struct cdma_chardev { struct device *dev; @@ -106,10 +125,15 @@ struct cdma_dev { void __iomem *k_db_base; resource_size_t db_base; struct iommu_sva *ksva; + /* ctx manager */ + struct list_head ctx_list; + struct idr ctx_idr; + spinlock_t ctx_lock; struct mutex eu_mutex; struct mutex 
db_mutex; struct list_head db_page; + struct cdma_table queue_table; struct mutex file_mutex; struct list_head file_list; struct page *arm_db_page; diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index e918f9899494..29053d5b0255 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -7,6 +7,7 @@ #include "cdma_dev.h" #include "cdma_cmd.h" #include "cdma_context.h" +#include "cdma_queue.h" #include "cdma.h" #include @@ -135,3 +136,104 @@ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) return NULL; } EXPORT_SYMBOL_GPL(dma_get_device_by_eid); + +int dma_create_context(struct dma_device *dma_dev) +{ + struct cdma_ctx_res *ctx_res; + struct cdma_context *ctx; + struct cdma_dev *cdev; + + if (!dma_dev || !dma_dev->private_data) { + pr_err("the dma_dev does not exist.\n"); + return -EINVAL; + } + + cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0); + if (!cdev) { + pr_err("can't find cdev by eid, eid = 0x%x\n", + dma_dev->attr.eid.dw0); + return -EINVAL; + } + + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", + dma_dev->attr.eid.dw0); + return -EINVAL; + } + + ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; + if (ctx_res->ctx) { + pr_err("ctx has been created.\n"); + return -EEXIST; + } + + atomic_inc(&dma_dev->ref_cnt); + ctx = cdma_alloc_context(cdev, true); + if (IS_ERR(ctx)) { + pr_err("alloc context failed, ret = %ld\n", PTR_ERR(ctx)); + atomic_dec(&dma_dev->ref_cnt); + return PTR_ERR(ctx); + } + ctx_res->ctx = ctx; + + return ctx->handle; +} +EXPORT_SYMBOL_GPL(dma_create_context); + +int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cfg) +{ + struct cdma_ctx_res *ctx_res; + struct cdma_queue *queue; + struct cdma_context *ctx; + struct cdma_dev *cdev; + int ret; + + if (!cfg || !dma_dev || !dma_dev->private_data) + return -EINVAL; + + cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0); + if (!cdev) { + pr_err("can't find cdev by eid, eid = 0x%x.\n", + dma_dev->attr.eid.dw0); + return -EINVAL; + } + + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", + dma_dev->attr.eid.dw0); + return -EINVAL; + } + + ctx = cdma_find_ctx_by_handle(cdev, ctx_id); + if (!ctx) { + dev_err(cdev->dev, "invalid ctx_id = %d.\n", ctx_id); + return -EINVAL; + } + atomic_inc(&ctx->ref_cnt); + + queue = cdma_create_queue(cdev, ctx, cfg, dma_dev->attr.eu.eid_idx, + true); + if (!queue) { + dev_err(cdev->dev, "create queue failed.\n"); + ret = -EINVAL; + goto decrease_cnt; + } + + ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; + ret = xa_err( + xa_store(&ctx_res->queue_xa, queue->id, queue, GFP_KERNEL)); + if (ret) { + dev_err(cdev->dev, "store queue to ctx_res failed, ret = %d\n", + ret); + goto free_queue; + } + + return queue->id; + +free_queue: + cdma_delete_queue(cdev, queue->id); +decrease_cnt: + atomic_dec(&ctx->ref_cnt); + return ret; +} +EXPORT_SYMBOL_GPL(dma_alloc_queue); diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 094a71cbc531..9cd785cc6d41 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -7,8 +7,10 @@ #include #include #include "cdma_ioctl.h" +#include "cdma_context.h" #include "cdma_chardev.h" #include "cdma_types.h" +#include "cdma_uobj.h" #include "cdma.h" #define CDMA_DEVICE_NAME "cdma/dev" @@ -80,10 +82,13 @@ static int cdma_open(struct inode *inode, struct file *file) if (!cfile) return -ENOMEM; + cdma_init_uobj_idr(cfile); 
mutex_lock(&cdev->file_mutex); cfile->cdev = cdev; + cfile->uctx = NULL; kref_init(&cfile->ref); file->private_data = cfile; + mutex_init(&cfile->ctx_mutex); list_add_tail(&cfile->list, &cdev->file_list); nonseekable_open(inode, file); mutex_unlock(&cdev->file_mutex); @@ -102,6 +107,11 @@ static int cdma_close(struct inode *inode, struct file *file) list_del(&cfile->list); mutex_unlock(&cdev->file_mutex); + mutex_lock(&cfile->ctx_mutex); + cdma_cleanup_context_uobj(cfile); + cfile->uctx = NULL; + + mutex_unlock(&cfile->ctx_mutex); kref_put(&cfile->ref, cdma_release_file); pr_debug("cdma close success.\n"); @@ -184,5 +194,7 @@ void cdma_release_file(struct kref *ref) { struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + mutex_destroy(&cfile->ctx_mutex); + idr_destroy(&cfile->idr); kfree(cfile); } diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c new file mode 100644 index 000000000000..f13dcf8ccdbd --- /dev/null +++ b/drivers/ub/cdma/cdma_context.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include "cdma.h" +#include "cdma_context.h" + +static void cdma_ctx_handle_free(struct cdma_dev *cdev, + struct cdma_context *ctx) +{ + spin_lock(&cdev->ctx_lock); + idr_remove(&cdev->ctx_idr, ctx->handle); + spin_unlock(&cdev->ctx_lock); +} + +static int cdma_ctx_handle_alloc(struct cdma_dev *cdev, + struct cdma_context *ctx) +{ +#define CDMA_CTX_START 0 +#define CDMA_CTX_END 0xff + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&cdev->ctx_lock); + id = idr_alloc(&cdev->ctx_idr, ctx, CDMA_CTX_START, CDMA_CTX_END, + GFP_NOWAIT); + spin_unlock(&cdev->ctx_lock); + idr_preload_end(); + + return id; +} + +struct cdma_context *cdma_find_ctx_by_handle(struct cdma_dev *cdev, int handle) +{ + struct cdma_context *ctx; + + spin_lock(&cdev->ctx_lock); + ctx = idr_find(&cdev->ctx_idr, handle); + spin_unlock(&cdev->ctx_lock); + + return ctx; +} + +static int cdma_ctx_alloc_tid(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + struct ummu_param drvdata = { .mode = MAPT_MODE_TABLE }; + int ret; + + if (ctx->is_kernel) + ctx->sva = ummu_ksva_bind_device(cdev->dev, &drvdata); + else + ctx->sva = ummu_sva_bind_device(cdev->dev, current->mm, NULL); + + if (!ctx->sva) { + dev_err(cdev->dev, "%s bind device failed.\n", + ctx->is_kernel ? 
"KSVA" : "SVA"); + return -EFAULT; + } + + ret = ummu_get_tid(cdev->dev, ctx->sva, &ctx->tid); + if (ret) { + dev_err(cdev->dev, "get tid failed, ret = %d.\n", ret); + if (ctx->is_kernel) + ummu_ksva_unbind_device(ctx->sva); + else + ummu_sva_unbind_device(ctx->sva); + } + + return ret; +} + +static void cdma_ctx_free_tid(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + if (ctx->is_kernel) + ummu_ksva_unbind_device(ctx->sva); + else + ummu_sva_unbind_device(ctx->sva); +} + +struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel) +{ + struct cdma_context *ctx; + int ret; + + if (!cdev) + return ERR_PTR(-EINVAL); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->handle = cdma_ctx_handle_alloc(cdev, ctx); + if (ctx->handle < 0) { + dev_err(cdev->dev, + "Alloc context handle failed, ret = %d.\n", ctx->handle); + ret = ctx->handle; + goto free_ctx; + } + + ctx->cdev = cdev; + ctx->is_kernel = is_kernel; + ret = cdma_ctx_alloc_tid(cdev, ctx); + if (ret) { + dev_err(cdev->dev, "alloc ctx tid failed, ret = %d.\n", ret); + goto free_handle; + } + + spin_lock_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->pgdir_list); + mutex_init(&ctx->pgdir_mutex); + INIT_LIST_HEAD(&ctx->queue_list); + + return ctx; + +free_handle: + cdma_ctx_handle_free(cdev, ctx); +free_ctx: + kfree(ctx); + return ERR_PTR(ret); +} + +void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + if (!cdev || !ctx) + return; + + cdma_ctx_free_tid(cdev, ctx); + cdma_ctx_handle_free(cdev, ctx); + mutex_destroy(&ctx->pgdir_mutex); + kfree(ctx); +} diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 3614be608534..8cbc980dc726 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -7,6 +7,7 @@ #include #include #include +#include #include struct cdma_context { @@ -20,10 +21,16 @@ struct cdma_context { u32 tid; bool is_kernel; atomic_t ref_cnt; + struct list_head queue_list; }; struct cdma_ctx_res { struct cdma_context *ctx; + struct xarray queue_xa; }; +struct cdma_context *cdma_find_ctx_by_handle(struct cdma_dev *cdev, int handle); +struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel); +void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx); + #endif /* CDMA_CONTEXT_H */ diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 10a4baa61e78..da730589ffd5 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -12,13 +12,27 @@ #include "cdma.h" #include "cdma_cmd.h" #include "cdma_tid.h" +#include "cdma_context.h" #include #include +#include "cdma_queue.h" #include "cdma_dev.h" static DEFINE_XARRAY(cdma_devs_tbl); static atomic_t cdma_devs_num = ATOMIC_INIT(0); +struct cdma_dev *get_cdma_dev_by_eid(u32 eid) +{ + struct cdma_dev *cdev = NULL; + unsigned long index = 0; + + xa_for_each(&cdma_devs_tbl, index, cdev) + if (cdev->eid == eid) + return cdev; + + return NULL; +} + struct xarray *get_cdma_dev_tbl(u32 *devs_num) { *devs_num = atomic_read(&cdma_devs_num); @@ -63,6 +77,40 @@ static void cdma_del_device_from_list(struct cdma_dev *cdev) xa_erase(&cdma_devs_tbl, adev->id); } +static void cdma_tbl_init(struct cdma_table *table, u32 max, u32 min) +{ + if (!max || max < min) + return; + + spin_lock_init(&table->lock); + idr_init(&table->idr_tbl.idr); + table->idr_tbl.max = max; + table->idr_tbl.min = min; + table->idr_tbl.next = min; +} + +static void cdma_tbl_destroy(struct cdma_dev *cdev, struct cdma_table *table, + 
const char *table_name) +{ + if (!idr_is_empty(&table->idr_tbl.idr)) + dev_err(cdev->dev, "IDR not empty when cleaning up %s table.\n", + table_name); + idr_destroy(&table->idr_tbl.idr); +} + +static void cdma_init_tables(struct cdma_dev *cdev) +{ + struct cdma_res *queue = &cdev->caps.queue; + + cdma_tbl_init(&cdev->queue_table, queue->start_idx + queue->max_cnt - 1, + queue->start_idx); +} + +static void cdma_destroy_tables(struct cdma_dev *cdev) +{ + cdma_tbl_destroy(cdev, &cdev->queue_table, "QUEUE"); +} + static void cdma_init_base_dev(struct cdma_dev *cdev) { struct cdma_device_attr *attr = &cdev->base.attr; @@ -102,6 +150,7 @@ static int cdma_init_dev_param(struct cdma_dev *cdev, return ret; cdma_init_base_dev(cdev); + cdma_init_tables(cdev); dev_set_drvdata(&adev->dev, cdev); @@ -120,6 +169,16 @@ static void cdma_uninit_dev_param(struct cdma_dev *cdev) mutex_destroy(&cdev->eu_mutex); mutex_destroy(&cdev->file_mutex); dev_set_drvdata(&cdev->adev->dev, NULL); + cdma_destroy_tables(cdev); +} + +static void cdma_release_table_res(struct cdma_dev *cdev) +{ + struct cdma_queue *queue; + int id; + + idr_for_each_entry(&cdev->queue_table.idr_tbl.idr, queue, id) + cdma_delete_queue(cdev, queue->id); } static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) @@ -296,6 +355,9 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) if (cdma_create_arm_db_page(cdev)) goto unregister_crq; + idr_init(&cdev->ctx_idr); + spin_lock_init(&cdev->ctx_lock); + dev_dbg(&adev->dev, "cdma.%u init succeeded.\n", adev->id); return cdev; @@ -315,11 +377,20 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) void cdma_destroy_dev(struct cdma_dev *cdev) { + struct cdma_context *tmp; + int id; + if (!cdev) return; ubase_virt_unregister(cdev->adev); + cdma_release_table_res(cdev); + + idr_for_each_entry(&cdev->ctx_idr, tmp, id) + cdma_free_context(cdev, tmp); + idr_destroy(&cdev->ctx_idr); + cdma_destroy_arm_db_page(cdev); ubase_ctrlq_unregister_crq_event(cdev->adev, UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 73dc4077a74e..75aa96b092c7 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -27,6 +27,7 @@ enum cdma_ctrlq_eu_op { struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); void cdma_destroy_dev(struct cdma_dev *cdev); +struct cdma_dev *get_cdma_dev_by_eid(u32 eid); struct xarray *get_cdma_dev_tbl(u32 *devices_num); bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, struct eu_info *eu_out); diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 518b5802f7e1..ba49d353408c 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -7,7 +7,10 @@ #include #include "cdma.h" +#include "cdma_context.h" #include "cdma_types.h" +#include "cdma_queue.h" +#include "cdma_uobj.h" #include "cdma_ioctl.h" typedef int (*cdma_cmd_handler)(struct cdma_ioctl_hdr *hdr, @@ -53,13 +56,125 @@ static int cdma_query_dev(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) return 0; } +static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_create_context_args args = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_context *ctx; + int ret; + + if (cfile->uctx) { + dev_err(cdev->dev, "create context failed, ctx handle = %d.\n", + cfile->uctx->handle); + return -EEXIST; + } + + if (!hdr->args_addr || hdr->args_len < sizeof(args)) + return -EINVAL; + + ret = 
(int)copy_from_user(&args, (void *)hdr->args_addr, + (u32)sizeof(args)); + if (ret) { + dev_err(cdev->dev, "get user data failed, ret = %d.\n", ret); + return ret; + } + + ctx = cdma_alloc_context(cdev, false); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + args.out.cqe_size = cdev->caps.cqe_size; + args.out.dwqe_enable = + !!(cdev->caps.feature & CDMA_CAP_FEATURE_DIRECT_WQE); + cfile->uctx = ctx; + + ret = (int)copy_to_user((void *)hdr->args_addr, &args, + (u32)sizeof(args)); + if (ret) { + dev_err(cdev->dev, "copy ctx to user failed, ret = %d.\n", ret); + goto free_context; + } + + return ret; + +free_context: + cfile->uctx = NULL; + cdma_free_context(cdev, ctx); + + return ret; +} + +static int cdma_cmd_create_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) +{ + struct cdma_cmd_create_queue_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct queue_cfg cfg; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != sizeof(arg) || !cfile->uctx) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, "create queue get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + cfg = (struct queue_cfg) { + .queue_depth = arg.in.queue_depth, + .dcna = arg.in.dcna, + .priority = arg.in.priority, + .rmt_eid.dw0 = arg.in.rmt_eid, + .user_ctx = arg.in.user_ctx, + .trans_mode = arg.in.trans_mode, + }; + + uobj = cdma_uobj_create(cfile, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "create queue uobj failed.\n"); + return -ENOMEM; + } + + queue = cdma_create_queue(cdev, cfile->uctx, &cfg, 0, false); + if (!queue) { + dev_err(cdev->dev, "create queue failed.\n"); + ret = -EINVAL; + goto err_create_queue; + } + + uobj->object = queue; + arg.out.queue_id = queue->id; + arg.out.handle = uobj->id; + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, "create queue copy to user failed, ret = %d.\n", ret); + ret = -EFAULT; + goto err_copy_to_user; + } + list_add_tail(&queue->list, &cfile->uctx->queue_list); + + return 0; +err_copy_to_user: + cdma_delete_queue(cdev, queue->id); +err_create_queue: + cdma_uobj_delete(uobj); + return ret; +} + static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, + [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, + [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, }; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) { struct cdma_dev *cdev = cfile->cdev; + int ret; if (hdr->command >= CDMA_CMD_MAX || !g_cdma_cmd_handler[hdr->command]) { dev_err(cdev->dev, @@ -68,5 +183,9 @@ int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) return -EINVAL; } - return g_cdma_cmd_handler[hdr->command](hdr, cfile); + mutex_lock(&cfile->ctx_mutex); + ret = g_cdma_cmd_handler[hdr->command](hdr, cfile); + mutex_unlock(&cfile->ctx_mutex); + + return ret; } diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c new file mode 100644 index 000000000000..93ffcab5ecc6 --- /dev/null +++ b/drivers/ub/cdma/cdma_queue.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
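Queue objects live in the per-device IDR table; ids are allocated between the min and max derived from the queue caps.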
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include "cdma_context.h" +#include "cdma_queue.h" +#include "cdma.h" + +struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) +{ + struct cdma_queue *queue; + + spin_lock(&cdev->queue_table.lock); + queue = (struct cdma_queue *)idr_find(&cdev->queue_table.idr_tbl.idr, + queue_id); + spin_unlock(&cdev->queue_table.lock); + + return queue; +} + +static int cdma_alloc_queue_id(struct cdma_dev *cdev, struct cdma_queue *queue) +{ + struct cdma_table *queue_tbl = &cdev->queue_table; + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&queue_tbl->lock); + id = idr_alloc(&queue_tbl->idr_tbl.idr, queue, queue_tbl->idr_tbl.min, + queue_tbl->idr_tbl.max, GFP_NOWAIT); + if (id < 0) + dev_err(cdev->dev, "alloc queue id failed.\n"); + spin_unlock(&queue_tbl->lock); + idr_preload_end(); + + return id; +} + +static void cdma_delete_queue_id(struct cdma_dev *cdev, int queue_id) +{ + struct cdma_table *queue_tbl = &cdev->queue_table; + + spin_lock(&queue_tbl->lock); + idr_remove(&queue_tbl->idr_tbl.idr, queue_id); + spin_unlock(&queue_tbl->lock); +} + +struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, + struct cdma_context *uctx, + struct queue_cfg *cfg, u32 eid_index, + bool is_kernel) +{ + struct cdma_queue *queue; + int id; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return NULL; + + id = cdma_alloc_queue_id(cdev, queue); + if (id < 0) { + kfree(queue); + return NULL; + } + + queue->ctx = uctx; + queue->id = id; + queue->cfg = *cfg; + + if (is_kernel) + queue->is_kernel = true; + + return queue; +} + +int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id) +{ + struct cdma_queue *queue; + + if (!cdev) + return -EINVAL; + + if (queue_id >= cdev->caps.queue.start_idx + cdev->caps.queue.max_cnt) { + dev_err(cdev->dev, + "queue id invalid, queue_id = %u, start_idx = %u, max_cnt = %u.\n", + queue_id, cdev->caps.queue.start_idx, + cdev->caps.queue.max_cnt); + return -EINVAL; + } + + queue = cdma_find_queue(cdev, queue_id); + if (!queue) { + dev_err(cdev->dev, "get queue from table failed, id = %u.\n", + queue_id); + return -EINVAL; + } + + cdma_delete_queue_id(cdev, queue_id); + + kfree(queue); + + return 0; +} diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h new file mode 100644 index 000000000000..0299a026a14b --- /dev/null +++ b/drivers/ub/cdma/cdma_queue.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_QUEUE_H__ +#define __CDMA_QUEUE_H__ + +struct cdma_dev; +struct cdma_context; +struct queue_cfg; + +struct cdma_queue { + struct cdma_context *ctx; + u32 id; + struct queue_cfg cfg; + bool is_kernel; + struct list_head list; +}; + +struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id); +struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, + struct cdma_context *uctx, + struct queue_cfg *cfg, u32 eid_index, + bool is_kernel); +int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id); + +#endif diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 4ef38c23e22c..9f3af5c06bbe 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -13,6 +13,10 @@ struct cdma_dev; struct cdma_file { struct cdma_dev *cdev; struct list_head list; + struct mutex ctx_mutex; + struct cdma_context *uctx; + struct idr idr; + spinlock_t idr_lock; struct kref ref; }; diff --git a/drivers/ub/cdma/cdma_uobj.c b/drivers/ub/cdma/cdma_uobj.c new file mode 100644 index 000000000000..3e6e1f9ad1b6 --- /dev/null +++ b/drivers/ub/cdma/cdma_uobj.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#include +#include "cdma_uobj.h" +#include "cdma_chardev.h" + +static int cdma_uobj_alloc_idr(struct cdma_uobj *uobj) +{ + int ret; + + idr_preload(GFP_KERNEL); + spin_lock(&uobj->cfile->idr_lock); + + ret = idr_alloc(&uobj->cfile->idr, uobj, 0, U32_MAX, GFP_NOWAIT); + if (ret >= 0) + uobj->id = ret; + + spin_unlock(&uobj->cfile->idr_lock); + idr_preload_end(); + + return ret < 0 ? ret : 0; +} + +static inline void cdma_uobj_remove_idr(struct cdma_uobj *uobj) +{ + spin_lock(&uobj->cfile->idr_lock); + idr_remove(&uobj->cfile->idr, uobj->id); + spin_unlock(&uobj->cfile->idr_lock); +} + +static struct cdma_uobj *cdma_uobj_alloc(struct cdma_file *cfile, + enum UOBJ_TYPE obj_type) +{ + struct cdma_uobj *uobj; + + uobj = kzalloc(sizeof(*uobj), GFP_KERNEL); + if (uobj == NULL) + return ERR_PTR(-ENOMEM); + + atomic_set(&uobj->rcnt, 0); + uobj->cfile = cfile; + uobj->type = obj_type; + + return uobj; +} + +static inline void cdma_uobj_free(struct cdma_uobj *uobj) +{ + kfree(uobj); +} + +static inline void cdma_uobj_remove(struct cdma_uobj *uobj) +{ + idr_remove(&uobj->cfile->idr, uobj->id); + cdma_uobj_free(uobj); +} + +void cdma_init_uobj_idr(struct cdma_file *cfile) +{ + idr_init(&cfile->idr); + spin_lock_init(&cfile->idr_lock); +} + +struct cdma_uobj *cdma_uobj_create(struct cdma_file *cfile, + enum UOBJ_TYPE obj_type) +{ + struct cdma_uobj *uobj; + int ret; + + uobj = cdma_uobj_alloc(cfile, obj_type); + if (IS_ERR(uobj)) + return uobj; + + ret = cdma_uobj_alloc_idr(uobj); + if (ret) + goto err_free_uobj; + + return uobj; + +err_free_uobj: + cdma_uobj_free(uobj); + + return ERR_PTR(ret); +} + +void cdma_uobj_delete(struct cdma_uobj *uobj) +{ + cdma_uobj_remove_idr(uobj); + cdma_uobj_free(uobj); +} + +struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, + enum UOBJ_TYPE obj_type) +{ + struct cdma_uobj *uobj; + + spin_lock(&cfile->idr_lock); + uobj = idr_find(&cfile->idr, id); + if (uobj == NULL || uobj->type != obj_type) + uobj = ERR_PTR(-ENOENT); + spin_unlock(&cfile->idr_lock); + + return uobj; +} + +void cdma_cleanup_context_uobj(struct cdma_file *cfile) +{ + struct cdma_uobj *uobj; + int id; + + spin_lock(&cfile->idr_lock); + idr_for_each_entry(&cfile->idr, uobj, id) + cdma_uobj_remove(uobj); + spin_unlock(&cfile->idr_lock); +} + +void 
cdma_close_uobj_fd(struct cdma_file *cfile)
+{
+	kref_put(&cfile->ref, cdma_release_file);
+}
diff --git a/drivers/ub/cdma/cdma_uobj.h b/drivers/ub/cdma/cdma_uobj.h
new file mode 100644
index 000000000000..505a66911960
--- /dev/null
+++ b/drivers/ub/cdma/cdma_uobj.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */
+
+#ifndef __CDMA_UOBJ_H__
+#define __CDMA_UOBJ_H__
+#include "cdma_types.h"
+
+enum UOBJ_TYPE {
+	UOBJ_TYPE_JFCE,
+	UOBJ_TYPE_JFC,
+	UOBJ_TYPE_CTP,
+	UOBJ_TYPE_JFS,
+	UOBJ_TYPE_QUEUE,
+	UOBJ_TYPE_SEGMENT
+};
+
+struct cdma_uobj {
+	struct cdma_file *cfile;
+	enum UOBJ_TYPE type;
+	int id;
+	void *object;
+	atomic_t rcnt;
+};
+
+void cdma_init_uobj_idr(struct cdma_file *cfile);
+struct cdma_uobj *cdma_uobj_create(struct cdma_file *cfile,
+				   enum UOBJ_TYPE obj_type);
+void cdma_uobj_delete(struct cdma_uobj *uobj);
+struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id,
+				enum UOBJ_TYPE type);
+void cdma_cleanup_context_uobj(struct cdma_file *cfile);
+void cdma_close_uobj_fd(struct cdma_file *cfile);
+
+#endif
diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h
index c72268f4460a..a5142e0700ab 100644
--- a/include/uapi/ub/cdma/cdma_abi.h
+++ b/include/uapi/ub/cdma/cdma_abi.h
@@ -12,6 +12,8 @@
 enum cdma_cmd {
 	CDMA_CMD_QUERY_DEV_INFO,
+	CDMA_CMD_CREATE_CTX,
+	CDMA_CMD_CREATE_QUEUE,
 	CDMA_CMD_MAX
 };
 
@@ -65,4 +67,27 @@ struct cdma_cmd_query_device_attr_args {
 	} out;
 };
 
+struct cdma_create_context_args {
+	struct {
+		__u8 cqe_size;
+		__u8 dwqe_enable;
+		int async_fd;
+	} out;
+};
+
+struct cdma_cmd_create_queue_args {
+	struct {
+		__u32 queue_depth;
+		__u32 dcna;
+		__u32 rmt_eid;
+		__u8 priority;
+		__u64 user_ctx;
+		__u32 trans_mode;
+	} in;
+	struct {
+		int queue_id;
+		__u64 handle;
+	} out;
+};
+
 #endif
diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h
index 759f455581a9..5b64d1498a48 100644
--- a/include/ub/cdma/cdma_api.h
+++ b/include/ub/cdma/cdma_api.h
@@ -13,6 +13,15 @@ struct dma_device {
 	void *private_data;
 };
 
+struct queue_cfg {
+	u32 queue_depth;
+	u8 priority;
+	u64 user_ctx;
+	u32 dcna;
+	struct dev_eid rmt_eid;
+	u32 trans_mode;
+};
+
 struct dma_context {
 	struct dma_device *dma_dev;
 	u32 tid; /* data valid only in bit 0-19 */
@@ -24,4 +33,9 @@ void dma_free_device_list(struct dma_device *dev_list, u32 num_devices);
 
 struct dma_device *dma_get_device_by_eid(struct dev_eid *eid);
 
+int dma_create_context(struct dma_device *dma_dev);
+
+int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id,
+		    struct queue_cfg *cfg);
+
 #endif
--
Gitee

From a91b2cf5c6b0043f063bd2a08c0ed0ff60922d87 Mon Sep 17 00:00:00 2001
From: Zhipeng Lu
Date: Thu, 28 Aug 2025 21:47:12 +0800
Subject: [PATCH 011/243] ub: cdma: support for releasing queue

commit f72102db2ef32d4d05890e6d9827cd3e955655f9 openEuler

This patch implements user context removal and queue deletion in the
CDMA driver, adding support for the dma_delete_context and
dma_free_queue kernel interfaces.
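For reviewers, a minimal usage sketch of how these kernel-side interfaces
are meant to pair up (illustrative only, not part of the patch; it assumes
dma_create_context() and dma_alloc_queue() return a non-negative context id
and queue id on success and a negative errno on failure, which the headers
do not spell out):

	static int cdma_api_usage_sketch(struct dev_eid *eid)
	{
		struct queue_cfg cfg = {
			.queue_depth = 256,	/* illustrative value */
		};
		struct dma_device *dma_dev;
		int ctx_id, queue_id;

		dma_dev = dma_get_device_by_eid(eid);
		if (!dma_dev)
			return -ENODEV;

		ctx_id = dma_create_context(dma_dev);
		if (ctx_id < 0)
			return ctx_id;

		queue_id = dma_alloc_queue(dma_dev, ctx_id, &cfg);
		if (queue_id < 0) {
			dma_delete_context(dma_dev, ctx_id);
			return queue_id;
		}

		/* ... submit work on the queue ... */

		/* Queues must be freed before their owning context. */
		dma_free_queue(dma_dev, queue_id);
		dma_delete_context(dma_dev, ctx_id);

		return 0;
	}

dma_delete_context() refuses to free a context whose reference count is
still raised by live queues, so callers are expected to release every
queue first.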
Signed-off-by: Zhipeng Lu
Signed-off-by: Xinchi Ma
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/cdma/cdma_api.c      | 71 +++++++++++++++++++++++++++++++++
 drivers/ub/cdma/cdma_ioctl.c    | 60 ++++++++++++++++++++++++++++
 include/uapi/ub/cdma/cdma_abi.h |  9 +++++
 include/ub/cdma/cdma_api.h      |  4 ++
 4 files changed, 144 insertions(+)

diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c
index 29053d5b0255..34a2d96f7c3c 100644
--- a/drivers/ub/cdma/cdma_api.c
+++ b/drivers/ub/cdma/cdma_api.c
@@ -180,6 +180,44 @@ int dma_create_context(struct dma_device *dma_dev)
 }
 EXPORT_SYMBOL_GPL(dma_create_context);
 
+void dma_delete_context(struct dma_device *dma_dev, int handle)
+{
+	struct cdma_ctx_res *ctx_res;
+	struct cdma_context *ctx;
+	struct cdma_dev *cdev;
+	int ref_cnt;
+
+	if (!dma_dev || !dma_dev->private_data)
+		return;
+
+	cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0);
+	if (!cdev) {
+		pr_err("can't find cdev by eid, eid = 0x%x\n",
+		       dma_dev->attr.eid.dw0);
+		return;
+	}
+
+	ctx_res = (struct cdma_ctx_res *)dma_dev->private_data;
+	ctx = ctx_res->ctx;
+	if (!ctx) {
+		dev_err(cdev->dev, "no context needs to be freed\n");
+		return;
+	}
+
+	ref_cnt = atomic_read(&ctx->ref_cnt);
+	if (ref_cnt > 0) {
+		dev_warn(cdev->dev,
+			 "context resource is still in use, cnt = %d.\n",
+			 ref_cnt);
+		return;
+	}
+
+	cdma_free_context(cdev, ctx);
+	ctx_res->ctx = NULL;
+	atomic_dec(&dma_dev->ref_cnt);
+}
+EXPORT_SYMBOL_GPL(dma_delete_context);
+
 int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cfg)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -237,3 +275,36 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cf
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_queue);
+
+void dma_free_queue(struct dma_device *dma_dev, int queue_id)
+{
+	struct cdma_ctx_res *ctx_res;
+	struct cdma_context *ctx;
+	struct cdma_queue *queue;
+	struct cdma_dev *cdev;
+
+	if (!dma_dev || !dma_dev->private_data)
+		return;
+
+	cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0);
+	if (!cdev) {
+		pr_err("can't find cdev by eid, eid = 0x%x\n",
+		       dma_dev->attr.eid.dw0);
+		return;
+	}
+
+	ctx_res = (struct cdma_ctx_res *)dma_dev->private_data;
+	queue = (struct cdma_queue *)xa_load(&ctx_res->queue_xa, queue_id);
+	if (!queue) {
+		dev_err(cdev->dev, "no queue found in this device, id = %d\n",
+			queue_id);
+		return;
+	}
+	xa_erase(&ctx_res->queue_xa, queue_id);
+	ctx = queue->ctx;
+
+	cdma_delete_queue(cdev, queue_id);
+
+	atomic_dec(&ctx->ref_cnt);
+}
+EXPORT_SYMBOL_GPL(dma_free_queue);
diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c
index ba49d353408c..89094aaebc1f 100644
--- a/drivers/ub/cdma/cdma_ioctl.c
+++ b/drivers/ub/cdma/cdma_ioctl.c
@@ -105,6 +105,28 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr,
 	return -EFAULT;
 }
 
+static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr,
+				struct cdma_file *cfile)
+{
+	struct cdma_dev *cdev = cfile->cdev;
+
+	if (!cfile->uctx) {
+		dev_err(cdev->dev, "cdma context has not been created.\n");
+		return -ENOENT;
+	}
+	if (!list_empty(&cfile->uctx->queue_list)) {
+		dev_err(cdev->dev,
+			"queue/segment is still in use, ctx handle = %d.\n",
+			cfile->uctx->handle);
+		return -EBUSY;
+	}
+
+	cdma_free_context(cdev, cfile->uctx);
+	cfile->uctx = NULL;
+
+	return 0;
+}
+
 static int cdma_cmd_create_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile)
 {
 	struct cdma_cmd_create_queue_args arg = { 0 };
@@ -165,10 +187,48 @@ static int cdma_cmd_create_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c
 	return 
ret; } +static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) +{ + struct cdma_cmd_delete_queue_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != sizeof(arg)) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, "delete queue get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.handle, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "get queue uobj failed, handle = %llu.\n", + arg.in.handle); + return -EINVAL; + } + + queue = (struct cdma_queue *)uobj->object; + + cdma_uobj_delete(uobj); + list_del(&queue->list); + ret = cdma_delete_queue(cdev, queue->id); + if (ret) + dev_err(cdev->dev, "delete queue failed, ret = %d.\n", ret); + + return ret; +} + static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, + [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, + [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, }; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index a5142e0700ab..50bca1fab02b 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -13,7 +13,9 @@ enum cdma_cmd { CDMA_CMD_QUERY_DEV_INFO, CDMA_CMD_CREATE_CTX, + CDMA_CMD_DELETE_CTX, CDMA_CMD_CREATE_QUEUE, + CDMA_CMD_DELETE_QUEUE, CDMA_CMD_MAX }; @@ -90,4 +92,11 @@ struct cdma_cmd_create_queue_args { } out; }; +struct cdma_cmd_delete_queue_args { + struct { + __u32 queue_id; + __u64 handle; + } in; +}; + #endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 5b64d1498a48..5ebe4feebd1c 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -35,7 +35,11 @@ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid); int dma_create_context(struct dma_device *dma_dev); +void dma_delete_context(struct dma_device *dma_dev, int handle); + int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cfg); +void dma_free_queue(struct dma_device *dma_dev, int queue_id); + #endif -- Gitee From 21f36cc94c335a59b8fe9dd2fe0bb68d04d0e5d8 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 09:04:22 +0800 Subject: [PATCH 012/243] ub: cdma: support the deletion of jfc commit 9d00a496c9121ebddeb9d8278da2669952db3d4d openEuler This patch implements the deletion functionality of jfc in the CDMA driver. The implementation involves deleting the jfc corresponding to the queue during the queue release process. 
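For reviewers, a note on the flush sequence this patch introduces: the
delete path first posts CDMA_CMD_DESTROY_JFC_CONTEXT, then polls
CDMA_CMD_QUERY_JFC_CONTEXT until the context reports pi == wr_cqe_idx,
backing off exponentially. A minimal sketch of the wait pattern
(illustrative only; query_done() stands in for the mailbox query and is
not a real driver symbol):

	static int cdma_flush_wait_sketch(struct cdma_dev *cdev, u32 jfcn)
	{
		u32 wait_times = 0;

		while (true) {
			if (query_done(cdev, jfcn))	/* pi caught up with wr_cqe_idx */
				return 0;
			if (wait_times > 5)		/* QUERY_MAX_TIMES */
				return -ETIMEDOUT;
			msleep(1 << wait_times);	/* 1, 2, 4, 8, 16, 32 ms */
			wait_times++;
		}
	}

With QUERY_MAX_TIMES = 5 the loop sleeps for at most 1 + 2 + 4 + 8 + 16 +
32 = 63 ms and issues up to seven queries before giving up with
-ETIMEDOUT.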
Signed-off-by: Zhipeng Lu Signed-off-by: Bangwei Zhang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 3 +- drivers/ub/cdma/cdma.h | 41 +++++++++ drivers/ub/cdma/cdma_common.c | 51 +++++++++++ drivers/ub/cdma/cdma_common.h | 20 +++++ drivers/ub/cdma/cdma_db.c | 45 ++++++++++ drivers/ub/cdma/cdma_db.h | 39 +++++++++ drivers/ub/cdma/cdma_dev.c | 6 ++ drivers/ub/cdma/cdma_ioctl.c | 62 ++++++++++++++ drivers/ub/cdma/cdma_jfc.c | 145 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfc.h | 104 +++++++++++++++++++++++ drivers/ub/cdma/cdma_mbox.c | 65 ++++++++++++++ drivers/ub/cdma/cdma_mbox.h | 44 ++++++++++ drivers/ub/cdma/cdma_queue.c | 31 +++++++ drivers/ub/cdma/cdma_queue.h | 9 +- drivers/ub/cdma/cdma_types.h | 15 ++++ include/uapi/ub/cdma/cdma_abi.h | 58 +++++++++++++ 16 files changed, 736 insertions(+), 2 deletions(-) create mode 100644 drivers/ub/cdma/cdma_common.c create mode 100644 drivers/ub/cdma/cdma_common.h create mode 100644 drivers/ub/cdma/cdma_db.c create mode 100644 drivers/ub/cdma/cdma_db.h create mode 100644 drivers/ub/cdma/cdma_jfc.c create mode 100644 drivers/ub/cdma/cdma_jfc.h create mode 100644 drivers/ub/cdma/cdma_mbox.c create mode 100644 drivers/ub/cdma/cdma_mbox.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 8c536d7a26c7..5bb71587ed6d 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0+ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ - cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o + cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ + cdma_db.o cdma_mbox.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index c527a1bee1af..d43fc7be17d1 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -11,6 +11,8 @@ #include +extern u32 jfc_arm_mode; + #define CDMA_RESET_WAIT_TIME 3000 #define CDMA_MAX_SL_NUM 16 @@ -107,6 +109,44 @@ struct cdma_chardev { dev_t devno; }; +union cdma_umem_flag { + struct { + u32 non_pin : 1; /* 0: pinned to physical memory. 1: non pin. */ + u32 writable : 1; /* 0: read-only. 1: writable. */ + u32 reserved : 30; + } bs; + u32 value; +}; + +struct cdma_umem { + struct mm_struct *owning_mm; + union cdma_umem_flag flag; + struct sg_table sg_head; + struct cdma_dev *dev; + + u64 length; + u32 nmap; + u64 va; +}; + +struct cdma_buf { + dma_addr_t addr; /* pass to hw */ + union { + void *kva; /* used for kernel mode */ + struct iova_slot *slot; + void *kva_or_slot; + }; + void *aligned_va; + struct cdma_umem *umem; + u32 entry_cnt_mask; + u32 entry_cnt_mask_ilog2; + u32 entry_size; + u32 entry_cnt; + u32 cnt_per_page_shift; + struct xarray id_table_xa; + struct mutex id_table_mutex; +}; + struct cdma_dev { struct dma_device base; struct device *dev; @@ -134,6 +174,7 @@ struct cdma_dev { struct list_head db_page; struct cdma_table queue_table; + struct cdma_table jfc_table; struct mutex file_mutex; struct list_head file_list; struct page *arm_db_page; diff --git a/drivers/ub/cdma/cdma_common.c b/drivers/ub/cdma/cdma_common.c new file mode 100644 index 000000000000..291231eab627 --- /dev/null +++ b/drivers/ub/cdma/cdma_common.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include +#include "cdma_common.h" +#include "cdma.h" + +static void cdma_unpin_pages(struct cdma_umem *umem, u64 nents, bool is_kernel) +{ + struct scatterlist *sg; + struct page *page; + u32 i; + + for_each_sg(umem->sg_head.sgl, sg, nents, i) { + page = sg_page(sg); + + if (is_kernel) + put_page(page); + else + unpin_user_page(page); + } +} + +void cdma_umem_release(struct cdma_umem *umem, bool is_kernel) +{ + if (IS_ERR_OR_NULL(umem)) + return; + + cdma_unpin_pages(umem, umem->sg_head.nents, is_kernel); + sg_free_table(&umem->sg_head); + kfree(umem); +} + +void cdma_k_free_buf(struct cdma_dev *cdev, size_t memory_size, + struct cdma_buf *buf) +{ + cdma_umem_release(buf->umem, true); + vfree(buf->aligned_va); + buf->aligned_va = NULL; + buf->kva = NULL; + buf->addr = 0; +} + +void cdma_unpin_queue_addr(struct cdma_umem *umem) +{ + cdma_umem_release(umem, false); +} diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h new file mode 100644 index 000000000000..644868418bf5 --- /dev/null +++ b/drivers/ub/cdma/cdma_common.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_COMMON_H__ +#define __CDMA_COMMON_H__ + +#include + +struct cdma_umem; +struct cdma_dev; +struct cdma_buf; + +void cdma_umem_release(struct cdma_umem *umem, bool is_kernel); + +void cdma_k_free_buf(struct cdma_dev *cdev, size_t memory_size, + struct cdma_buf *buf); + +void cdma_unpin_queue_addr(struct cdma_umem *umem); + +#endif diff --git a/drivers/ub/cdma/cdma_db.c b/drivers/ub/cdma/cdma_db.c new file mode 100644 index 000000000000..e1c39e612bf4 --- /dev/null +++ b/drivers/ub/cdma/cdma_db.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include "cdma_common.h" +#include "cdma_context.h" +#include "cdma_db.h" + +static void cdma_free_db_page(struct cdma_dev *cdev, struct cdma_sw_db *db) +{ + cdma_k_free_buf(cdev, PAGE_SIZE, &db->kpage->db_buf); + bitmap_free(db->kpage->bitmap); + kfree(db->kpage); + db->kpage = NULL; +} + +void cdma_unpin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db) +{ + mutex_lock(&ctx->pgdir_mutex); + + if (refcount_dec_and_test(&db->page->refcount)) { + list_del(&db->page->list); + cdma_umem_release(db->page->umem, false); + kfree(db->page); + db->page = NULL; + } + + mutex_unlock(&ctx->pgdir_mutex); +} + +void cdma_free_sw_db(struct cdma_dev *cdev, struct cdma_sw_db *db) +{ + mutex_lock(&cdev->db_mutex); + + set_bit(db->index, db->kpage->bitmap); + + if (bitmap_full(db->kpage->bitmap, db->kpage->num_db)) { + list_del(&db->kpage->list); + cdma_free_db_page(cdev, db); + } + + mutex_unlock(&cdev->db_mutex); +} diff --git a/drivers/ub/cdma/cdma_db.h b/drivers/ub/cdma/cdma_db.h new file mode 100644 index 000000000000..5337b41d4a78 --- /dev/null +++ b/drivers/ub/cdma/cdma_db.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/
+
+#ifndef __CDMA_DB_H__
+#define __CDMA_DB_H__
+
+#include "cdma.h"
+
+struct cdma_context;
+
+struct cdma_sw_db_page {
+	struct list_head list;
+	struct cdma_umem *umem;
+	u64 user_virt;
+	refcount_t refcount;
+};
+
+struct cdma_k_sw_db_page {
+	struct list_head list;
+	u32 num_db;
+	unsigned long *bitmap;
+	struct cdma_buf db_buf;
+};
+
+struct cdma_sw_db {
+	union {
+		struct cdma_sw_db_page *page;
+		struct cdma_k_sw_db_page *kpage;
+	};
+	u32 index;
+	u64 db_addr;
+	u32 *db_record;
+};
+
+void cdma_unpin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db);
+
+void cdma_free_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db);
+
+#endif /* CDMA_DB_H */
diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c
index da730589ffd5..69eb70d47ae6 100644
--- a/drivers/ub/cdma/cdma_dev.c
+++ b/drivers/ub/cdma/cdma_dev.c
@@ -15,6 +15,7 @@
 #include "cdma_context.h"
 #include 
 #include 
+#include "cdma_jfc.h"
 #include "cdma_queue.h"
 #include "cdma_dev.h"
 
@@ -108,6 +109,7 @@ static void cdma_destroy_tables(struct cdma_dev *cdev)
 {
+	cdma_tbl_destroy(cdev, &cdev->jfc_table, "JFC");
 	cdma_tbl_destroy(cdev, &cdev->queue_table, "QUEUE");
 }
 
@@ -175,8 +177,12 @@ static void cdma_uninit_dev_param(struct cdma_dev *cdev)
 static void cdma_release_table_res(struct cdma_dev *cdev)
 {
 	struct cdma_queue *queue;
+	struct cdma_jfc *jfc;
 	int id;
 
+	idr_for_each_entry(&cdev->jfc_table.idr_tbl.idr, jfc, id)
+		cdma_delete_jfc(cdev, jfc->jfcn, NULL);
+
 	idr_for_each_entry(&cdev->queue_table.idr_tbl.idr, queue, id)
 		cdma_delete_queue(cdev, queue->id);
 }
diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c
index 89094aaebc1f..310d0fb6add4 100644
--- a/drivers/ub/cdma/cdma_ioctl.c
+++ b/drivers/ub/cdma/cdma_ioctl.c
@@ -10,6 +10,7 @@
 #include "cdma_context.h"
 #include "cdma_types.h"
 #include "cdma_queue.h"
+#include "cdma_jfc.h"
 #include "cdma_uobj.h"
 #include "cdma_ioctl.h"
 
@@ -213,6 +214,10 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c
 	}
 
 	queue = (struct cdma_queue *)uobj->object;
+	if (queue->jfc) {
+		dev_err(cdev->dev, "jfc is still in use.\n");
+		return -EBUSY;
+	}
 
 	cdma_uobj_delete(uobj);
 	list_del(&queue->list);
@@ -223,12 +228,69 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c
 	return ret;
 }
 
+static int cdma_cmd_delete_jfc(struct cdma_ioctl_hdr *hdr,
+			       struct cdma_file *cfile)
+{
+	struct cdma_cmd_delete_jfc_args arg = { 0 };
+	struct cdma_dev *cdev = cfile->cdev;
+	struct cdma_base_jfc *base_jfc;
+	struct cdma_queue *queue;
+	struct cdma_uobj *uobj;
+	int ret;
+
+	if (!hdr->args_addr || hdr->args_len != (u32)sizeof(arg))
+		return -EINVAL;
+
+	ret = (int)copy_from_user(&arg, (void *)hdr->args_addr,
+				  (u32)sizeof(arg));
+	if (ret) {
+		dev_err(cdev->dev, "get user data failed, ret = %d.\n", ret);
+		return -EFAULT;
+	}
+
+	uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE);
+	if (IS_ERR(uobj)) {
+		dev_err(cdev->dev,
+			"delete jfc, get queue uobj failed, queue id = %u.\n",
+			arg.in.queue_id);
+		return -EINVAL;
+	}
+	queue = (struct cdma_queue *)uobj->object;
+
+	uobj = cdma_uobj_get(cfile, arg.in.handle, UOBJ_TYPE_JFC);
+	if (IS_ERR(uobj)) {
+		dev_err(cdev->dev, "get jfc uobj failed.\n");
+		return -EINVAL;
+	}
+
+	base_jfc = (struct cdma_base_jfc *)uobj->object;
+	ret = cdma_delete_jfc(cdev, base_jfc->id, &arg);
+	if (ret) {
+		dev_err(cdev->dev, "cdma delete jfc failed, ret = %d.\n", ret);
+		return -EFAULT;
+	}
+
+	cdma_set_queue_res(cdev, queue, QUEUE_RES_JFC, 
NULL); + cdma_uobj_delete(uobj); + + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, + "delete jfc copy to user data failed, ret = %d.\n", + ret); + return -EFAULT; + } + + return 0; +} + static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, + [CDMA_CMD_DELETE_JFC] = cdma_cmd_delete_jfc, }; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c new file mode 100644 index 000000000000..83f55462f297 --- /dev/null +++ b/drivers/ub/cdma/cdma_jfc.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include "cdma_cmd.h" +#include "cdma_mbox.h" +#include "cdma_common.h" +#include "cdma_db.h" +#include "cdma_jfc.h" + +static void cdma_jfc_id_free(struct cdma_dev *cdev, u32 jfcn) +{ + struct cdma_table *jfc_tbl = &cdev->jfc_table; + unsigned long flags; + + spin_lock_irqsave(&jfc_tbl->lock, flags); + idr_remove(&jfc_tbl->idr_tbl.idr, jfcn); + spin_unlock_irqrestore(&jfc_tbl->lock, flags); +} + +static struct cdma_jfc *cdma_id_find_jfc(struct cdma_dev *cdev, u32 jfcn) +{ + struct cdma_table *jfc_tbl = &cdev->jfc_table; + struct cdma_jfc *jfc; + unsigned long flags; + + spin_lock_irqsave(&jfc_tbl->lock, flags); + jfc = idr_find(&jfc_tbl->idr_tbl.idr, jfcn); + if (!jfc) + dev_err(cdev->dev, "find jfc failed, id = %u.\n", jfcn); + spin_unlock_irqrestore(&jfc_tbl->lock, flags); + + return jfc; +} + +static void cdma_free_jfc_buf(struct cdma_dev *cdev, struct cdma_jfc *jfc) +{ + u32 size; + + if (!jfc->buf.kva) { + cdma_unpin_sw_db(jfc->base.ctx, &jfc->db); + cdma_unpin_queue_addr(jfc->buf.umem); + } else { + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + cdma_k_free_buf(cdev, size, &jfc->buf); + cdma_free_sw_db(cdev, &jfc->db); + } +} + +static int cdma_query_jfc_destroy_done(struct cdma_dev *cdev, uint32_t jfcn) +{ + struct ubase_mbx_attr attr = { 0 }; + struct ubase_cmd_mailbox *mailbox; + struct cdma_jfc_ctx *jfc_ctx; + int ret; + + cdma_fill_mbx_attr(&attr, jfcn, CDMA_CMD_QUERY_JFC_CONTEXT, 0); + mailbox = cdma_mailbox_query_ctx(cdev, &attr); + if (!mailbox) + return -ENOMEM; + + jfc_ctx = mailbox->buf; + ret = jfc_ctx->pi == jfc_ctx->wr_cqe_idx ? 
0 : -EAGAIN; + + cdma_free_cmd_mailbox(cdev, mailbox); + + return ret; +} + +static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) +{ +#define QUERY_MAX_TIMES 5 + u32 wait_times = 0; + int ret; + + ret = cdma_post_destroy_jfc_mbox(cdev, jfcn, CDMA_JFC_STATE_INVALID); + if (ret) { + dev_err(cdev->dev, "post mbox to destroy jfc failed, id: %u.\n", jfcn); + return ret; + } + + while (true) { + if (!cdma_query_jfc_destroy_done(cdev, jfcn)) + return 0; + if (wait_times > QUERY_MAX_TIMES) + break; + msleep(1 << wait_times); + wait_times++; + } + dev_err(cdev->dev, "jfc flush time out, id = %u.\n", jfcn); + + return -ETIMEDOUT; +} + +int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, + enum cdma_jfc_state state) +{ + struct ubase_mbx_attr attr = { 0 }; + struct cdma_jfc_ctx ctx = { 0 }; + + ctx.state = state; + cdma_fill_mbx_attr(&attr, jfcn, CDMA_CMD_DESTROY_JFC_CONTEXT, 0); + + return cdma_post_mailbox_ctx(cdev, (void *)&ctx, sizeof(ctx), &attr); +} + +int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, + struct cdma_cmd_delete_jfc_args *arg) +{ + struct cdma_jfc *jfc; + int ret; + + if (!cdev) + return -EINVAL; + + if (jfcn >= cdev->caps.jfc.max_cnt + cdev->caps.jfc.start_idx || + jfcn < cdev->caps.jfc.start_idx) { + dev_err(cdev->dev, + "jfc id invalid, jfcn = %u, start_idx = %u, max_cnt = %u.\n", + jfcn, cdev->caps.jfc.start_idx, + cdev->caps.jfc.max_cnt); + return -EINVAL; + } + + jfc = cdma_id_find_jfc(cdev, jfcn); + if (!jfc) { + dev_err(cdev->dev, "find jfc failed, jfcn = %u.\n", jfcn); + return -EINVAL; + } + + ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); + if (ret) + dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + + cdma_free_jfc_buf(cdev, jfc); + cdma_jfc_id_free(cdev, jfc->jfcn); + + pr_debug("Leave %s, jfcn: %u.\n", __func__, jfc->jfcn); + + kfree(jfc); + + return 0; +} diff --git a/drivers/ub/cdma/cdma_jfc.h b/drivers/ub/cdma/cdma_jfc.h new file mode 100644 index 000000000000..28144b317774 --- /dev/null +++ b/drivers/ub/cdma/cdma_jfc.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_JFC_H__ +#define __CDMA_JFC_H__ + +#include "cdma_types.h" +#include "cdma_db.h" + +enum cdma_jfc_state { + CDMA_JFC_STATE_INVALID, + CDMA_JFC_STATE_VALID, + CDMA_JFC_STATE_ERROR +}; + +struct cdma_jfc { + struct cdma_base_jfc base; + u32 jfcn; + u32 ceqn; + u32 tid; + struct cdma_buf buf; + struct cdma_sw_db db; + u32 ci; + u32 arm_sn; + spinlock_t lock; + u32 mode; +}; + +struct cdma_jfc_ctx { + /* DW0 */ + u32 state : 2; + u32 arm_st : 2; + u32 shift : 4; + u32 cqe_size : 1; + u32 record_db_en : 1; + u32 jfc_type : 1; + u32 inline_en : 1; + u32 cqe_va_l : 20; + /* DW1 */ + u32 cqe_va_h; + /* DW2 */ + u32 cqe_token_id : 20; + u32 cq_cnt_mode : 1; + u32 rsv0 : 3; + u32 ceqn : 8; + /* DW3 */ + u32 cqe_token_value : 24; + u32 rsv1 : 8; + /* DW4 */ + u32 pi : 22; + u32 cqe_coalesce_cnt : 10; + /* DW5 */ + u32 ci : 22; + u32 cqe_coalesce_period : 3; + u32 rsv2 : 7; + /* DW6 */ + u32 record_db_addr_l; + /* DW7 */ + u32 record_db_addr_h : 26; + u32 rsv3 : 6; + /* DW8 */ + u32 push_usi_en : 1; + u32 push_cqe_en : 1; + u32 token_en : 1; + u32 rsv4 : 9; + u32 tpn : 20; + /* DW9 ~ DW12 */ + u32 rmt_eid[4]; + /* DW13 */ + u32 seid_idx : 10; + u32 rmt_token_id : 20; + u32 rsv5 : 2; + /* DW14 */ + u32 remote_token_value; + /* DW15 */ + u32 int_vector : 16; + u32 stars_en : 1; + u32 rsv6 : 15; + /* DW16 */ + u32 poll : 1; + u32 cqe_report_timer : 24; + u32 se : 1; + u32 arm_sn : 2; + u32 rsv7 : 4; + /* DW17 */ + u32 se_cqe_idx : 24; + u32 rsv8 : 8; + /* DW18 */ + u32 wr_cqe_idx : 22; + u32 rsv9 : 10; + /* DW19 */ + u32 cqe_cnt : 24; + u32 rsv10 : 8; + /* DW20 ~ DW31 */ + u32 rsv11[12]; +}; + +int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, + enum cdma_jfc_state state); + +int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, + struct cdma_cmd_delete_jfc_args *arg); + +#endif /* CDMA_JFC_H */ diff --git a/drivers/ub/cdma/cdma_mbox.c b/drivers/ub/cdma/cdma_mbox.c new file mode 100644 index 000000000000..194eba8a920d --- /dev/null +++ b/drivers/ub/cdma/cdma_mbox.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include "cdma_mbox.h" + +static int cdma_post_mailbox(struct cdma_dev *cdev, struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + int ret; + + ret = ubase_hw_upgrade_ctx_ex(cdev->adev, attr, mailbox); + if (ret) + dev_err(cdev->dev, + "send mailbox err, tag = 0x%x, op = %u, mbx_ue_id = %u.\n", + attr->tag, attr->op, attr->mbx_ue_id); + + return ret; +} + +int cdma_post_mailbox_ctx(struct cdma_dev *cdev, void *ctx, u32 size, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = cdma_alloc_cmd_mailbox(cdev); + if (!mailbox) { + dev_err(cdev->dev, "alloc mailbox failed, opcode = %u.\n", + attr->op); + return -ENOMEM; + } + + if (ctx && size) + memcpy(mailbox->buf, ctx, size); + + ret = cdma_post_mailbox(cdev, attr, mailbox); + + cdma_free_cmd_mailbox(cdev, mailbox); + + return ret; +} + +struct ubase_cmd_mailbox *cdma_mailbox_query_ctx(struct cdma_dev *cdev, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = cdma_alloc_cmd_mailbox(cdev); + if (!mailbox) { + dev_err(cdev->dev, "alloc mailbox failed, opcode = %u.\n", + attr->op); + return NULL; + } + + ret = cdma_post_mailbox(cdev, attr, mailbox); + if (ret) { + cdma_free_cmd_mailbox(cdev, mailbox); + return NULL; + } + + return mailbox; +} diff --git a/drivers/ub/cdma/cdma_mbox.h b/drivers/ub/cdma/cdma_mbox.h new file mode 100644 index 000000000000..0841ea606bbd --- /dev/null +++ b/drivers/ub/cdma/cdma_mbox.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_MBOX_H__ +#define __CDMA_MBOX_H__ + +#include "cdma.h" +#include + +enum { + /* JFC CMDS */ + CDMA_CMD_WRITE_JFC_CONTEXT_VA = 0x20, + CDMA_CMD_READ_JFC_CONTEXT_VA = 0x21, + CDMA_CMD_DESTROY_JFC_CONTEXT_VA = 0x22, + CDMA_CMD_CREATE_JFC_CONTEXT = 0x24, + CDMA_CMD_MODIFY_JFC_CONTEXT = 0x25, + CDMA_CMD_QUERY_JFC_CONTEXT = 0x26, + CDMA_CMD_DESTROY_JFC_CONTEXT = 0x27, +}; + +/* The mailbox operation is as follows: */ +static inline void cdma_fill_mbx_attr(struct ubase_mbx_attr *attr, u32 tag, + u8 op, u8 mbx_ue_id) +{ + ubase_fill_mbx_attr(attr, tag, op, mbx_ue_id); +} + +static inline struct ubase_cmd_mailbox *cdma_alloc_cmd_mailbox(struct cdma_dev *cdev) +{ + return ubase_alloc_cmd_mailbox(cdev->adev); +} + +static inline void cdma_free_cmd_mailbox(struct cdma_dev *cdev, + struct ubase_cmd_mailbox *mailbox) +{ + ubase_free_cmd_mailbox(cdev->adev, mailbox); +} + +int cdma_post_mailbox_ctx(struct cdma_dev *cdev, void *ctx, u32 size, + struct ubase_mbx_attr *attr); +struct ubase_cmd_mailbox *cdma_mailbox_query_ctx(struct cdma_dev *cdev, + struct ubase_mbx_attr *attr); + +#endif /* CDMA_MBOX_H */ diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 93ffcab5ecc6..a25de76cdb4e 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -3,7 +3,9 @@ #define dev_fmt(fmt) "CDMA: " fmt +#include "cdma_common.h" #include "cdma_context.h" +#include "cdma_jfc.h" #include "cdma_queue.h" #include "cdma.h" @@ -19,6 +21,13 @@ struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) return queue; } +static void cdma_delete_queue_res(struct cdma_dev *cdev, + struct cdma_queue *queue) +{ + cdma_delete_jfc(cdev, queue->jfc->id, NULL); + queue->jfc = NULL; +} + static int cdma_alloc_queue_id(struct cdma_dev *cdev, struct cdma_queue *queue) { struct cdma_table *queue_tbl = &cdev->queue_table; @@ -97,7 
+106,29 @@ int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id) cdma_delete_queue_id(cdev, queue_id); + if (queue->is_kernel) + cdma_delete_queue_res(cdev, queue); kfree(queue); return 0; } + +void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, + enum cdma_queue_res_type type, void *res) +{ + dev_dbg(cdev->dev, + "set queue %u resource type = %u, null flag = %u.\n", + queue->id, type, res == NULL); + + spin_lock(&cdev->queue_table.lock); + switch (type) { + case QUEUE_RES_JFC: + queue->jfc = res; + if (queue->jfc) + queue->jfc_id = queue->jfc->id; + break; + default: + break; + } + spin_unlock(&cdev->queue_table.lock); +} diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 0299a026a14b..3808c23a1934 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -8,12 +8,18 @@ struct cdma_dev; struct cdma_context; struct queue_cfg; +enum cdma_queue_res_type { + QUEUE_RES_JFC +}; + struct cdma_queue { + struct cdma_base_jfc *jfc; struct cdma_context *ctx; u32 id; struct queue_cfg cfg; bool is_kernel; struct list_head list; + u32 jfc_id; }; struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id); @@ -22,5 +28,6 @@ struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, struct queue_cfg *cfg, u32 eid_index, bool is_kernel); int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id); - +void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, + enum cdma_queue_res_type type, void *res); #endif diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 9f3af5c06bbe..f8f97a9f9f6d 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -10,6 +10,21 @@ struct cdma_dev; +struct cdma_jfc_cfg { + u32 depth; + u32 ceqn; + u32 queue_id; +}; + +struct cdma_base_jfc { + struct cdma_dev *dev; + struct cdma_context *ctx; + struct cdma_jfc_cfg jfc_cfg; + u32 id; + struct hlist_node hnode; + atomic_t use_cnt; +}; + struct cdma_file { struct cdma_dev *cdev; struct list_head list; diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 50bca1fab02b..da4aefa119d8 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -10,12 +10,21 @@ #define CDMA_IOC_MAGIC 'C' #define CDMA_SYNC _IOWR(CDMA_IOC_MAGIC, 0, struct cdma_ioctl_hdr) +#define MAP_COMMAND_MASK 0xff + +enum db_mmap_type { + CDMA_MMAP_JFC_PAGE, + CDMA_MMAP_JETTY_DSQE +}; + enum cdma_cmd { CDMA_CMD_QUERY_DEV_INFO, CDMA_CMD_CREATE_CTX, CDMA_CMD_DELETE_CTX, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, + CDMA_CMD_CREATE_JFC, + CDMA_CMD_DELETE_JFC, CDMA_CMD_MAX }; @@ -25,6 +34,37 @@ struct cdma_ioctl_hdr { __u64 args_addr; }; +struct cdma_cmd_udrv_priv { + __u64 in_addr; + __u32 in_len; + __u64 out_addr; + __u32 out_len; +}; + +struct cdma_cmd_create_jfc_args { + struct { + __u32 depth; /* in terms of CQEBB */ + int jfce_fd; + int jfce_id; + __u32 ceqn; + __u32 queue_id; + } in; + struct { + __u32 id; + __u32 depth; + __u64 handle; /* handle of the allocated jfc obj in kernel */ + } out; + struct cdma_cmd_udrv_priv udata; +}; + +struct cdma_cmd_delete_jfc_args { + struct { + __u32 jfcn; + __u64 handle; /* handle of jfc */ + __u32 queue_id; + } in; +}; + struct dev_eid { __u32 dw0; __u32 dw1; @@ -77,6 +117,24 @@ struct cdma_create_context_args { } out; }; +struct cdma_jfc_db { + __u32 ci : 24; + __u32 notify : 1; + __u32 arm_sn : 2; + __u32 type : 1; + __u32 rsv1 : 4; + __u32 jfcn : 20; + __u32 rsv2 : 12; +}; + +struct cdma_create_jfc_ucmd { + __u64 
buf_addr; + __u32 buf_len; + __u64 db_addr; + __u32 mode; + __u32 tid; +}; + struct cdma_cmd_create_queue_args { struct { __u32 queue_depth; -- Gitee From dea3b64bfab29aa296ec378acf33b08f014618e2 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 09:54:00 +0800 Subject: [PATCH 013/243] ub: cdma: support the creation of jfc commit 681171cd155e62f4a64ef5e8612819a049d88478 openEuler This patch implements the creation functionality of jfc in the CDMA driver. The implementation involves creating the jfc corresponding to the queue during the queue creation process. Signed-off-by: Zhipeng Lu Signed-off-by: Bangwei Zhang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma.h | 10 ++ drivers/ub/cdma/cdma_chardev.c | 58 ++++++++ drivers/ub/cdma/cdma_common.c | 236 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 26 +++- drivers/ub/cdma/cdma_db.c | 123 +++++++++++++++++ drivers/ub/cdma/cdma_db.h | 7 +- drivers/ub/cdma/cdma_dev.c | 3 + drivers/ub/cdma/cdma_dev.h | 2 - drivers/ub/cdma/cdma_ioctl.c | 73 ++++++++++ drivers/ub/cdma/cdma_jfc.c | 239 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfc.h | 39 ++++++ drivers/ub/cdma/cdma_main.c | 4 + drivers/ub/cdma/cdma_mbox.h | 4 - drivers/ub/cdma/cdma_queue.c | 40 +++++- drivers/ub/cdma/cdma_types.h | 12 ++ 15 files changed, 863 insertions(+), 13 deletions(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index d43fc7be17d1..b379a3f43884 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -12,12 +12,22 @@ #include extern u32 jfc_arm_mode; +extern bool cqe_mode; +#define CDMA_HW_PAGE_SHIFT 12 +#define CDMA_HW_PAGE_SIZE (1 << CDMA_HW_PAGE_SHIFT) + +#define CDMA_DEFAULT_CQE_SIZE 128 #define CDMA_RESET_WAIT_TIME 3000 #define CDMA_MAX_SL_NUM 16 #define CDMA_UPI_MASK 0x7FFF +enum cdma_cqe_size { + CDMA_64_CQE_SIZE, + CDMA_128_CQE_SIZE, +}; + enum cdma_status { CDMA_NORMAL, CDMA_SUSPEND, diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 9cd785cc6d41..b3a8b75b019e 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -32,6 +32,11 @@ static void cdma_num_free(struct cdma_dev *cdev) spin_unlock(&cdma_num_mg.lock); } +static inline int cdma_get_mmap_cmd(struct vm_area_struct *vma) +{ + return (vma->vm_pgoff & MAP_COMMAND_MASK); +} + static int cdma_num_alloc(struct cdma_dev *cdev) { #define CDMA_START 0 @@ -69,6 +74,58 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -ENOIOCTLCMD; } +static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct *vma) +{ +#define JFC_DB_UNMAP_BOUND 1 + struct cdma_dev *cdev = cfile->cdev; + resource_size_t db_addr; + u32 cmd; + + db_addr = cdev->db_base; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + cmd = cdma_get_mmap_cmd(vma); + switch (cmd) { + case CDMA_MMAP_JFC_PAGE: + if (io_remap_pfn_range(vma, vma->vm_start, + jfc_arm_mode > JFC_DB_UNMAP_BOUND ? 
+ (uint64_t)db_addr >> PAGE_SHIFT : + page_to_pfn(cdev->arm_db_page), + PAGE_SIZE, vma->vm_page_prot)) { + dev_err(cdev->dev, "remap jfc page fail.\n"); + return -EAGAIN; + } + break; + default: + dev_err(cdev->dev, + "mmap failed, cmd(%u) is not supported.\n", cmd); + return -EINVAL; + } + + return 0; +} + +static int cdma_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct cdma_file *cfile = (struct cdma_file *)file->private_data; + int ret; + + if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { + pr_err("mmap failed, expect vm area size to be an integer multiple of page size.\n"); + return -EINVAL; + } + + mutex_lock(&cfile->ctx_mutex); + ret = cdma_remap_pfn_range(cfile, vma); + if (ret) { + mutex_unlock(&cfile->ctx_mutex); + return ret; + } + mutex_unlock(&cfile->ctx_mutex); + + return 0; +} + static int cdma_open(struct inode *inode, struct file *file) { struct cdma_chardev *chardev; @@ -121,6 +178,7 @@ static int cdma_close(struct inode *inode, struct file *file) static const struct file_operations cdma_ops = { .owner = THIS_MODULE, .unlocked_ioctl = cdma_ioctl, + .mmap = cdma_mmap, .open = cdma_open, .release = cdma_close, }; diff --git a/drivers/ub/cdma/cdma_common.c b/drivers/ub/cdma/cdma_common.c index 291231eab627..62ae5f8be48c 100644 --- a/drivers/ub/cdma/cdma_common.c +++ b/drivers/ub/cdma/cdma_common.c @@ -9,6 +9,97 @@ #include "cdma_common.h" #include "cdma.h" +static inline void cdma_fill_umem(struct cdma_umem *umem, + struct cdma_umem_param *param) +{ + umem->dev = param->dev; + umem->va = param->va; + umem->length = param->len; + umem->flag = param->flag; +} + +static int cdma_pin_part_of_pages(u64 cur_base, u64 npages, u32 gup_flags, + struct page **page_list) +{ + /* + * page_list size is 4kB, the nr_pages should not larger than + * PAGE_SIZE / sizeof(struct page *) + */ + return pin_user_pages_fast(cur_base, + min_t(unsigned long, (unsigned long)npages, + PAGE_SIZE / sizeof(struct page *)), + gup_flags | FOLL_LONGTERM, page_list); +} + +static struct scatterlist *cdma_sg_set_page(struct scatterlist *sg_start, + int pinned, struct page **page_list) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sg_start, sg, pinned, i) + sg_set_page(sg, page_list[i], PAGE_SIZE, 0); + + return sg; +} + +static u64 cdma_pin_pages(struct cdma_dev *cdev, struct cdma_umem *umem, + u64 npages, u32 gup_flags, struct page **pages) +{ + struct scatterlist *sg_list_start = umem->sg_head.sgl; + u64 cur_base = umem->va & PAGE_MASK; + u64 page_count = npages; + int pinned; + + while (page_count > 0) { + cond_resched(); + + pinned = cdma_pin_part_of_pages(cur_base, page_count, gup_flags, + pages); + if (pinned <= 0) { + dev_err(cdev->dev, + "pin pages failed, page_count = 0x%llx, pinned = %d.\n", + page_count, pinned); + return npages - page_count; + } + cur_base += (u64)pinned * PAGE_SIZE; + page_count -= (u64)pinned; + sg_list_start = cdma_sg_set_page(sg_list_start, pinned, pages); + } + + return npages; +} + +static u64 cdma_k_pin_pages(struct cdma_dev *cdev, struct cdma_umem *umem, + u64 npages) +{ + struct scatterlist *sg_cur = umem->sg_head.sgl; + u64 cur_base = umem->va & PAGE_MASK; + struct page *pg; + u64 n; + + for (n = 0; n < npages; n++) { + if (is_vmalloc_addr((void *)(uintptr_t)cur_base)) + pg = vmalloc_to_page((void *)(uintptr_t)cur_base); + else + pg = kmap_to_page((void *)(uintptr_t)cur_base); + + if (!pg) { + dev_err(cdev->dev, "vmalloc or kmap to page failed.\n"); + break; + } + + get_page(pg); + + cur_base += PAGE_SIZE; + + sg_set_page(sg_cur, pg, 
PAGE_SIZE, 0); + sg_cur = sg_next(sg_cur); + } + + return n; +} + static void cdma_unpin_pages(struct cdma_umem *umem, u64 nents, bool is_kernel) { struct scatterlist *sg; @@ -25,6 +116,99 @@ static void cdma_unpin_pages(struct cdma_umem *umem, u64 nents, bool is_kernel) } } +static struct cdma_umem *cdma_get_target_umem(struct cdma_umem_param *param, + struct page **page_list) +{ + struct cdma_dev *cdev = param->dev; + struct cdma_umem *umem; + u64 npages, pinned; + u32 gup_flags; + int ret = 0; + + umem = kzalloc(sizeof(*umem), GFP_KERNEL); + if (!umem) { + ret = -ENOMEM; + goto out; + } + + cdma_fill_umem(umem, param); + + npages = cdma_cal_npages(umem->va, umem->length); + if (!npages || npages > UINT_MAX) { + dev_err(cdev->dev, + "invalid npages %llu in getting target umem process.\n", npages); + ret = -EINVAL; + goto umem_kfree; + } + + ret = sg_alloc_table(&umem->sg_head, (unsigned int)npages, GFP_KERNEL); + if (ret) + goto umem_kfree; + + if (param->is_kernel) { + pinned = cdma_k_pin_pages(cdev, umem, npages); + } else { + gup_flags = param->flag.bs.writable ? FOLL_WRITE : 0; + pinned = cdma_pin_pages(cdev, umem, npages, gup_flags, + page_list); + } + + if (pinned != npages) { + ret = -ENOMEM; + goto umem_release; + } + + goto out; + +umem_release: + cdma_unpin_pages(umem, pinned, param->is_kernel); + sg_free_table(&umem->sg_head); +umem_kfree: + kfree(umem); +out: + return ret != 0 ? ERR_PTR(ret) : umem; +} + +static int cdma_verify_input(struct cdma_dev *cdev, u64 va, u64 len) +{ + if (((va + len) <= va) || PAGE_ALIGN(va + len) < (va + len)) { + dev_err(cdev->dev, "invalid address parameter, len = %llu.\n", + len); + return -EINVAL; + } + return 0; +} + +struct cdma_umem *cdma_umem_get(struct cdma_dev *cdev, u64 va, u64 len, + bool is_kernel) +{ + struct cdma_umem_param param; + struct page **page_list; + struct cdma_umem *umem; + int ret; + + ret = cdma_verify_input(cdev, va, len); + if (ret) + return ERR_PTR(ret); + + page_list = (struct page **)__get_free_page(GFP_KERNEL); + if (!page_list) + return ERR_PTR(-ENOMEM); + + param.dev = cdev; + param.va = va; + param.len = len; + param.flag.bs.writable = true; + param.flag.bs.non_pin = 0; + param.is_kernel = is_kernel; + umem = cdma_get_target_umem(¶m, page_list); + if (IS_ERR(umem)) + dev_err(cdev->dev, "get target umem failed.\n"); + + free_page((unsigned long)(uintptr_t)page_list); + return umem; +} + void cdma_umem_release(struct cdma_umem *umem, bool is_kernel) { if (IS_ERR_OR_NULL(umem)) @@ -35,6 +219,38 @@ void cdma_umem_release(struct cdma_umem *umem, bool is_kernel) kfree(umem); } +int cdma_k_alloc_buf(struct cdma_dev *cdev, size_t memory_size, + struct cdma_buf *buf) +{ + size_t aligned_memory_size; + int ret; + + aligned_memory_size = memory_size + CDMA_HW_PAGE_SIZE - 1; + buf->aligned_va = vmalloc(aligned_memory_size); + if (!buf->aligned_va) { + dev_err(cdev->dev, + "vmalloc kernel buf failed, size = %lu.\n", + aligned_memory_size); + return -ENOMEM; + } + + memset(buf->aligned_va, 0, aligned_memory_size); + buf->umem = cdma_umem_get(cdev, (u64)buf->aligned_va, + aligned_memory_size, true); + if (IS_ERR(buf->umem)) { + ret = PTR_ERR(buf->umem); + vfree(buf->aligned_va); + dev_err(cdev->dev, "pin kernel buf failed, ret = %d.\n", ret); + return ret; + } + + buf->addr = ((u64)buf->aligned_va + CDMA_HW_PAGE_SIZE - 1) & + ~(CDMA_HW_PAGE_SIZE - 1); + buf->kva = (void *)buf->addr; + + return 0; +} + void cdma_k_free_buf(struct cdma_dev *cdev, size_t memory_size, struct cdma_buf *buf) { @@ -45,6 +261,26 @@ void 
cdma_k_free_buf(struct cdma_dev *cdev, size_t memory_size, buf->addr = 0; } +int cdma_pin_queue_addr(struct cdma_dev *cdev, u64 addr, u32 len, + struct cdma_buf *buf) +{ + int ret = 0; + + if (IS_ERR_OR_NULL(buf)) + return -EINVAL; + + buf->umem = cdma_umem_get(cdev, addr, len, false); + if (IS_ERR(buf->umem)) { + dev_err(cdev->dev, "get umem failed.\n"); + ret = PTR_ERR(buf->umem); + return ret; + } + + buf->addr = addr; + + return ret; +} + void cdma_unpin_queue_addr(struct cdma_umem *umem) { cdma_umem_release(umem, false); diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 644868418bf5..57e241c3b946 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -5,16 +5,34 @@ #define __CDMA_COMMON_H__ #include +#include "cdma.h" -struct cdma_umem; -struct cdma_dev; -struct cdma_buf; +#define CDMA_DB_SIZE 64 +struct cdma_umem_param { + struct cdma_dev *dev; + u64 va; + u64 len; + union cdma_umem_flag flag; + bool is_kernel; +}; + +static inline u64 cdma_cal_npages(u64 va, u64 len) +{ + return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / + PAGE_SIZE; +} + +struct cdma_umem *cdma_umem_get(struct cdma_dev *cdev, u64 va, u64 len, + bool is_kernel); void cdma_umem_release(struct cdma_umem *umem, bool is_kernel); +int cdma_k_alloc_buf(struct cdma_dev *cdev, size_t memory_size, + struct cdma_buf *buf); void cdma_k_free_buf(struct cdma_dev *cdev, size_t memory_size, struct cdma_buf *buf); - +int cdma_pin_queue_addr(struct cdma_dev *cdev, u64 addr, u32 len, + struct cdma_buf *buf); void cdma_unpin_queue_addr(struct cdma_umem *umem); #endif diff --git a/drivers/ub/cdma/cdma_db.c b/drivers/ub/cdma/cdma_db.c index e1c39e612bf4..d241144a6845 100644 --- a/drivers/ub/cdma/cdma_db.c +++ b/drivers/ub/cdma/cdma_db.c @@ -8,6 +8,57 @@ #include "cdma_context.h" #include "cdma_db.h" +static int cdma_alloc_db_from_page(struct cdma_k_sw_db_page *page, + struct cdma_sw_db *db) +{ + u32 index; + + index = find_first_bit(page->bitmap, page->num_db); + if (index == page->num_db) + return -ENOMEM; + + clear_bit(index, page->bitmap); + + db->index = index; + db->kpage = page; + db->db_addr = page->db_buf.addr + db->index * CDMA_DB_SIZE; + db->db_record = (u32 *)(page->db_buf.kva + db->index * CDMA_DB_SIZE); + + return 0; +} + +static struct cdma_k_sw_db_page *cdma_alloc_db_page(struct cdma_dev *dev) +{ + struct cdma_k_sw_db_page *page; + int ret; + + page = kzalloc(sizeof(*page), GFP_KERNEL); + if (!page) + return NULL; + + page->num_db = PAGE_SIZE / CDMA_DB_SIZE; + + page->bitmap = bitmap_alloc(page->num_db, GFP_KERNEL); + if (!page->bitmap) { + dev_err(dev->dev, "alloc db bitmap failed.\n"); + goto err_bitmap; + } + + bitmap_fill(page->bitmap, page->num_db); + + ret = cdma_k_alloc_buf(dev, PAGE_SIZE, &page->db_buf); + if (ret) + goto err_kva; + + return page; +err_kva: + bitmap_free(page->bitmap); +err_bitmap: + kfree(page); + + return NULL; +} + static void cdma_free_db_page(struct cdma_dev *cdev, struct cdma_sw_db *db) { cdma_k_free_buf(cdev, PAGE_SIZE, &db->kpage->db_buf); @@ -16,6 +67,49 @@ static void cdma_free_db_page(struct cdma_dev *cdev, struct cdma_sw_db *db) db->kpage = NULL; } +int cdma_pin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db) +{ + u64 page_addr = db->db_addr & PAGE_MASK; + struct cdma_sw_db_page *page; + int ret = 0; + + mutex_lock(&ctx->pgdir_mutex); + + list_for_each_entry(page, &ctx->pgdir_list, list) { + if (page->user_virt == page_addr) + goto found; + } + + page = kzalloc(sizeof(*page), GFP_KERNEL); + if (!page) { + 
ret = -ENOMEM; + goto out; + } + + refcount_set(&page->refcount, 1); + page->user_virt = page_addr; + page->umem = cdma_umem_get(ctx->cdev, page_addr, PAGE_SIZE, false); + if (IS_ERR(page->umem)) { + ret = PTR_ERR(page->umem); + dev_err(ctx->cdev->dev, "get umem failed, ret = %d.\n", ret); + kfree(page); + goto out; + } + + list_add(&page->list, &ctx->pgdir_list); + db->page = page; + mutex_unlock(&ctx->pgdir_mutex); + return 0; + +found: + db->page = page; + refcount_inc(&page->refcount); +out: + mutex_unlock(&ctx->pgdir_mutex); + + return ret; +} + void cdma_unpin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db) { mutex_lock(&ctx->pgdir_mutex); @@ -30,6 +124,35 @@ void cdma_unpin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db) mutex_unlock(&ctx->pgdir_mutex); } +int cdma_alloc_sw_db(struct cdma_dev *cdev, struct cdma_sw_db *db) +{ + struct cdma_k_sw_db_page *page; + int ret = 0; + + mutex_lock(&cdev->db_mutex); + + list_for_each_entry(page, &cdev->db_page, list) + if (!cdma_alloc_db_from_page(page, db)) + goto out; + + page = cdma_alloc_db_page(cdev); + if (!page) { + ret = -ENOMEM; + dev_err(cdev->dev, "alloc sw db page failed.\n"); + goto out; + } + + list_add(&page->list, &cdev->db_page); + + ret = cdma_alloc_db_from_page(page, db); + if (ret) + dev_err(cdev->dev, "alloc sw db from page failed, ret = %d.\n", ret); +out: + mutex_unlock(&cdev->db_mutex); + + return ret; +} + void cdma_free_sw_db(struct cdma_dev *cdev, struct cdma_sw_db *db) { mutex_lock(&cdev->db_mutex); diff --git a/drivers/ub/cdma/cdma_db.h b/drivers/ub/cdma/cdma_db.h index 5337b41d4a78..fa3ef8c0f570 100644 --- a/drivers/ub/cdma/cdma_db.h +++ b/drivers/ub/cdma/cdma_db.h @@ -4,9 +4,8 @@ #ifndef __CDMA_DB_H__ #define __CDMA_DB_H__ -#include "cdma.h" - struct cdma_context; +struct cdma_dev; struct cdma_sw_db_page { struct list_head list; @@ -32,8 +31,12 @@ struct cdma_sw_db { u32 *db_record; }; +int cdma_pin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db); + void cdma_unpin_sw_db(struct cdma_context *ctx, struct cdma_sw_db *db); +int cdma_alloc_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); + void cdma_free_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); #endif /* CDMA_DB_H */ diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 69eb70d47ae6..bbf44a75fffc 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -102,9 +102,12 @@ static void cdma_tbl_destroy(struct cdma_dev *cdev, struct cdma_table *table, static void cdma_init_tables(struct cdma_dev *cdev) { struct cdma_res *queue = &cdev->caps.queue; + struct cdma_res *jfc = &cdev->caps.jfc; cdma_tbl_init(&cdev->queue_table, queue->start_idx + queue->max_cnt - 1, queue->start_idx); + cdma_tbl_init(&cdev->jfc_table, jfc->start_idx + jfc->max_cnt - 1, + jfc->start_idx); } static void cdma_destroy_tables(struct cdma_dev *cdev) diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 75aa96b092c7..85d41cbe0773 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -11,8 +11,6 @@ #define CDMA_UE_MAX_NUM 64 struct cdma_dev; -struct eu_info; -struct dev_eid; struct cdma_ctrlq_eu_info { struct eu_info eu; diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 310d0fb6add4..c95230dcc443 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -228,6 +228,78 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c return ret; } +static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, + struct 
cdma_file *cfile) +{ + struct cdma_cmd_create_jfc_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfc_cfg cfg = { 0 }; + struct cdma_udata udata = { 0 }; + struct cdma_base_jfc *jfc; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret = 0; + + if (!hdr->args_addr || hdr->args_len != (u32)sizeof(arg) || !cfile->uctx) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, "get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, + "create jfc, get queue uobj failed, queue id = %u.\n", + arg.in.queue_id); + return -EINVAL; + } + queue = (struct cdma_queue *)uobj->object; + + uobj = cdma_uobj_create(cfile, UOBJ_TYPE_JFC); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "create jfc uobj failed.\n"); + return -ENOMEM; + } + + cfg.depth = arg.in.depth; + cfg.ceqn = arg.in.ceqn; + cfg.queue_id = queue->id; + udata.uctx = cfile->uctx; + udata.udrv_data = (struct cdma_udrv_priv *)&arg.udata; + jfc = cdma_create_jfc(cdev, &cfg, &udata); + if (!jfc) { + dev_err(cdev->dev, "create jfc failed.\n"); + ret = -EFAULT; + goto err_create_jfc; + } + + uobj->object = jfc; + + arg.out.id = jfc->id; + arg.out.depth = jfc->jfc_cfg.depth; + arg.out.handle = uobj->id; + + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret != 0) { + dev_err(cdev->dev, "copy jfc to user failed, ret = %d.\n", ret); + ret = -EFAULT; + goto err_copy_to_user; + } else { + cdma_set_queue_res(cdev, queue, QUEUE_RES_JFC, jfc); + } + + return 0; +err_copy_to_user: + cdma_delete_jfc(cdev, jfc->id, NULL); +err_create_jfc: + cdma_uobj_delete(uobj); + return ret; +} + static int cdma_cmd_delete_jfc(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { @@ -290,6 +362,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, + [CDMA_CMD_CREATE_JFC] = cdma_cmd_create_jfc, [CDMA_CMD_DELETE_JFC] = cdma_cmd_delete_jfc, }; diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index 83f55462f297..4609fd22382a 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -5,11 +5,113 @@ #include #include "cdma_cmd.h" +#include "cdma_context.h" #include "cdma_mbox.h" #include "cdma_common.h" #include "cdma_db.h" #include "cdma_jfc.h" +static int cdma_get_cmd_from_user(struct cdma_create_jfc_ucmd *ucmd, + struct cdma_dev *cdev, + struct cdma_udata *udata, + struct cdma_jfc *jfc, + struct cdma_jfc_cfg *cfg) +{ + struct cdma_context *ctx; + u32 depth = cfg->depth; + int ret; + + if (!udata) { + jfc->arm_sn = 1; + jfc->buf.entry_cnt = depth ? 
roundup_pow_of_two(depth) : depth; + return 0; + } + + if (!udata->udrv_data || !udata->udrv_data->in_addr || + udata->udrv_data->in_len != (u32)sizeof(*ucmd)) { + dev_err(cdev->dev, "invalid parameter.\n"); + return -EINVAL; + } + + ret = (int)copy_from_user(ucmd, (void *)udata->udrv_data->in_addr, + (u32)sizeof(*ucmd)); + if (ret) { + dev_err(cdev->dev, + "copy udata from user failed, ret = %d.\n", ret); + return -EFAULT; + } + + jfc->mode = ucmd->mode; + jfc->db.db_addr = ucmd->db_addr; + + ctx = udata->uctx; + jfc->base.ctx = ctx; + jfc->tid = ctx->tid; + + if (cdev->caps.cqe_size == CDMA_DEFAULT_CQE_SIZE) + jfc->buf.entry_cnt = ucmd->buf_len >> CDMA_JFC_DEFAULT_CQE_SHIFT; + else + jfc->buf.entry_cnt = ucmd->buf_len >> CDMA_JFC_OTHER_CQE_SHIFT; + + return ret; +} + +static int cdma_check_jfc_cfg(struct cdma_dev *cdev, struct cdma_jfc *jfc, + struct cdma_jfc_cfg *cfg) +{ + if (!jfc->buf.entry_cnt || jfc->buf.entry_cnt > cdev->caps.jfc.depth) { + dev_err(cdev->dev, "invalid jfc depth = %u, cap depth = %u.\n", + jfc->buf.entry_cnt, cdev->caps.jfc.depth); + return -EINVAL; + } + + if (jfc->buf.entry_cnt < CDMA_JFC_DEPTH_MIN) + jfc->buf.entry_cnt = CDMA_JFC_DEPTH_MIN; + + if (cfg->ceqn >= cdev->caps.comp_vector_cnt) { + dev_err(cdev->dev, "invalid ceqn = %u, cap ceq cnt = %u.\n", + cfg->ceqn, cdev->caps.comp_vector_cnt); + return -EINVAL; + } + + return 0; +} + +static void cdma_init_jfc_param(struct cdma_jfc_cfg *cfg, struct cdma_jfc *jfc) +{ + jfc->base.id = jfc->jfcn; + jfc->base.jfc_cfg = *cfg; + jfc->ceqn = cfg->ceqn; +} + +static int cdma_jfc_id_alloc(struct cdma_dev *cdev, struct cdma_jfc *jfc) +{ + struct cdma_table *jfc_tbl = &cdev->jfc_table; + u32 min = jfc_tbl->idr_tbl.min; + u32 max = jfc_tbl->idr_tbl.max; + unsigned long flags; + int id; + + idr_preload(GFP_KERNEL); + spin_lock_irqsave(&jfc_tbl->lock, flags); + id = idr_alloc(&jfc_tbl->idr_tbl.idr, jfc, jfc_tbl->idr_tbl.next, max, + GFP_NOWAIT); + if (id < 0) { + id = idr_alloc(&jfc_tbl->idr_tbl.idr, jfc, min, max, + GFP_NOWAIT); + if (id < 0) + dev_err(cdev->dev, "alloc jfc id failed.\n"); + } + + jfc_tbl->idr_tbl.next = (id >= 0 && id + 1 <= max) ? 
id + 1 : min; + spin_unlock_irqrestore(&jfc_tbl->lock, flags); + idr_preload_end(); + + jfc->jfcn = id; + + return id; +} + static void cdma_jfc_id_free(struct cdma_dev *cdev, u32 jfcn) { struct cdma_table *jfc_tbl = &cdev->jfc_table; @@ -35,6 +137,50 @@ static struct cdma_jfc *cdma_id_find_jfc(struct cdma_dev *cdev, u32 jfcn) return jfc; } +static int cdma_get_jfc_buf(struct cdma_dev *cdev, + struct cdma_create_jfc_ucmd *ucmd, + struct cdma_udata *udata, struct cdma_jfc *jfc) +{ + u32 size; + int ret; + + if (udata) { + jfc->buf.umem = cdma_umem_get(cdev, ucmd->buf_addr, + ucmd->buf_len, false); + if (IS_ERR(jfc->buf.umem)) { + ret = PTR_ERR(jfc->buf.umem); + dev_err(cdev->dev, "get umem failed, ret = %d.\n", + ret); + return ret; + } + jfc->buf.addr = ucmd->buf_addr; + ret = cdma_pin_sw_db(jfc->base.ctx, &jfc->db); + if (ret) + cdma_umem_release(jfc->buf.umem, false); + + return ret; + } + + spin_lock_init(&jfc->lock); + jfc->buf.entry_size = cdev->caps.cqe_size; + jfc->tid = cdev->tid; + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + ret = cdma_k_alloc_buf(cdev, size, &jfc->buf); + if (ret) { + dev_err(cdev->dev, "alloc buffer for jfc failed.\n"); + return ret; + } + + ret = cdma_alloc_sw_db(cdev, &jfc->db); + if (ret) { + dev_err(cdev->dev, "alloc sw db for jfc failed: %u.\n", + jfc->jfcn); + cdma_k_free_buf(cdev, size, &jfc->buf); + } + + return ret; +} + static void cdma_free_jfc_buf(struct cdma_dev *cdev, struct cdma_jfc *jfc) { u32 size; @@ -49,6 +195,37 @@ static void cdma_free_jfc_buf(struct cdma_dev *cdev, struct cdma_jfc *jfc) } } +static void cdma_construct_jfc_ctx(struct cdma_dev *cdev, + struct cdma_jfc *jfc, + struct cdma_jfc_ctx *ctx) +{ + memset(ctx, 0, sizeof(*ctx)); + + ctx->state = CDMA_JFC_STATE_VALID; + ctx->arm_st = jfc_arm_mode ? 
CDMA_CTX_NO_ARMED : CDMA_CTX_ALWAYS_ARMED; + ctx->shift = ilog2(jfc->buf.entry_cnt) - CDMA_JFC_DEPTH_SHIFT_BASE; + + if (cdev->caps.cqe_size == CDMA_DEFAULT_CQE_SIZE) + ctx->cqe_size = CDMA_128_CQE_SIZE; + else + ctx->cqe_size = CDMA_64_CQE_SIZE; + + ctx->record_db_en = CDMA_RECORD_EN; + ctx->jfc_type = CDMA_NORMAL_JFC_TYPE; + ctx->cqe_va_l = jfc->buf.addr >> CQE_VA_L_OFFSET; + ctx->cqe_va_h = jfc->buf.addr >> CQE_VA_H_OFFSET; + ctx->cqe_token_id = jfc->tid; + + if (cqe_mode) + ctx->cq_cnt_mode = CDMA_CQE_CNT_MODE_BY_CI_PI_GAP; + else + ctx->cq_cnt_mode = CDMA_CQE_CNT_MODE_BY_COUNT; + + ctx->ceqn = jfc->ceqn; + ctx->record_db_addr_l = jfc->db.db_addr >> CDMA_DB_L_OFFSET; + ctx->record_db_addr_h = jfc->db.db_addr >> CDMA_DB_H_OFFSET; +} + static int cdma_query_jfc_destroy_done(struct cdma_dev *cdev, uint32_t jfcn) { struct ubase_mbx_attr attr = { 0 }; @@ -94,6 +271,17 @@ static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) return -ETIMEDOUT; } +static int cdma_post_create_jfc_mbox(struct cdma_dev *cdev, struct cdma_jfc *jfc) +{ + struct ubase_mbx_attr attr = { 0 }; + struct cdma_jfc_ctx ctx = { 0 }; + + cdma_construct_jfc_ctx(cdev, jfc, &ctx); + cdma_fill_mbx_attr(&attr, jfc->jfcn, CDMA_CMD_CREATE_JFC_CONTEXT, 0); + + return cdma_post_mailbox_ctx(cdev, (void *)&ctx, sizeof(ctx), &attr); +} + int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, enum cdma_jfc_state state) { @@ -106,6 +294,57 @@ int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, return cdma_post_mailbox_ctx(cdev, (void *)&ctx, sizeof(ctx), &attr); } +struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, + struct cdma_jfc_cfg *cfg, + struct cdma_udata *udata) +{ + struct cdma_create_jfc_ucmd ucmd = { 0 }; + struct cdma_jfc *jfc; + int ret; + + jfc = kzalloc(sizeof(*jfc), GFP_KERNEL); + if (!jfc) + return NULL; + + ret = cdma_get_cmd_from_user(&ucmd, cdev, udata, jfc, cfg); + if (ret) + goto err_get_cmd; + + ret = cdma_check_jfc_cfg(cdev, jfc, cfg); + if (ret) + goto err_check_cfg; + + ret = cdma_jfc_id_alloc(cdev, jfc); + if (ret < 0) + goto err_alloc_jfc_id; + + cdma_init_jfc_param(cfg, jfc); + ret = cdma_get_jfc_buf(cdev, &ucmd, udata, jfc); + if (ret) + goto err_get_jfc_buf; + + ret = cdma_post_create_jfc_mbox(cdev, jfc); + if (ret) + goto err_alloc_cqc; + + jfc->base.dev = cdev; + + dev_dbg(cdev->dev, "create jfc id = %u, queue id = %u.\n", + jfc->jfcn, cfg->queue_id); + + return &jfc->base; + +err_alloc_cqc: + cdma_free_jfc_buf(cdev, jfc); +err_get_jfc_buf: + cdma_jfc_id_free(cdev, jfc->jfcn); +err_alloc_jfc_id: +err_check_cfg: +err_get_cmd: + kfree(jfc); + return NULL; +} + int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, struct cdma_cmd_delete_jfc_args *arg) { diff --git a/drivers/ub/cdma/cdma_jfc.h b/drivers/ub/cdma/cdma_jfc.h index 28144b317774..612887837e39 100644 --- a/drivers/ub/cdma/cdma_jfc.h +++ b/drivers/ub/cdma/cdma_jfc.h @@ -7,12 +7,45 @@ #include "cdma_types.h" #include "cdma_db.h" +#define CDMA_JFC_DEPTH_MIN 64 +#define CDMA_JFC_DEPTH_SHIFT_BASE 6 +#define CDMA_JFC_DEFAULT_CQE_SHIFT 7 +#define CDMA_JFC_OTHER_CQE_SHIFT 6 + +#define CDMA_DB_L_OFFSET 6 +#define CDMA_DB_H_OFFSET 38 + +#define CQE_VA_L_OFFSET 12 +#define CQE_VA_H_OFFSET 32 + +enum cdma_record_db { + CDMA_NO_RECORD_EN, + CDMA_RECORD_EN +}; + enum cdma_jfc_state { CDMA_JFC_STATE_INVALID, CDMA_JFC_STATE_VALID, CDMA_JFC_STATE_ERROR }; +enum cdma_armed_jfc { + CDMA_CTX_NO_ARMED, + CDMA_CTX_ALWAYS_ARMED, + CDMA_CTX_REG_NEXT_CEQE, + CDMA_CTX_REG_NEXT_SOLICITED_CEQE +}; + +enum cdma_jfc_type { + 
CDMA_NORMAL_JFC_TYPE, + CDMA_RAW_JFC_TYPE +}; + +enum cdma_cq_cnt_mode { + CDMA_CQE_CNT_MODE_BY_COUNT, + CDMA_CQE_CNT_MODE_BY_CI_PI_GAP +}; + struct cdma_jfc { struct cdma_base_jfc base; u32 jfcn; @@ -23,6 +56,8 @@ struct cdma_jfc { u32 ci; u32 arm_sn; spinlock_t lock; + refcount_t event_refcount; + struct completion event_comp; u32 mode; }; @@ -98,6 +133,10 @@ struct cdma_jfc_ctx { int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, enum cdma_jfc_state state); +struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, + struct cdma_jfc_cfg *cfg, + struct cdma_udata *udata); + int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, struct cdma_cmd_delete_jfc_args *arg); diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index d04b4b43c989..8519d972c48f 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -18,6 +18,10 @@ module_param(jfc_arm_mode, uint, 0444); MODULE_PARM_DESC(jfc_arm_mode, "Set the ARM mode of the JFC, default: 0(0:Always ARM, others: NO ARM)"); +bool cqe_mode = true; +module_param(cqe_mode, bool, 0444); +MODULE_PARM_DESC(cqe_mode, "Set cqe reporting mode, default: 1 (0:BY_COUNT, 1:BY_CI_PI_GAP)"); + struct class *cdma_cdev_class; static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) diff --git a/drivers/ub/cdma/cdma_mbox.h b/drivers/ub/cdma/cdma_mbox.h index 0841ea606bbd..584508b592aa 100644 --- a/drivers/ub/cdma/cdma_mbox.h +++ b/drivers/ub/cdma/cdma_mbox.h @@ -9,11 +9,7 @@ enum { /* JFC CMDS */ - CDMA_CMD_WRITE_JFC_CONTEXT_VA = 0x20, - CDMA_CMD_READ_JFC_CONTEXT_VA = 0x21, - CDMA_CMD_DESTROY_JFC_CONTEXT_VA = 0x22, CDMA_CMD_CREATE_JFC_CONTEXT = 0x24, - CDMA_CMD_MODIFY_JFC_CONTEXT = 0x25, CDMA_CMD_QUERY_JFC_CONTEXT = 0x26, CDMA_CMD_DESTROY_JFC_CONTEXT = 0x27, }; diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index a25de76cdb4e..a638efe33af1 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -21,6 +21,35 @@ struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) return queue; } +static void cdma_k_assemble_jfc_cfg(struct cdma_jfc_cfg *jfc_cfg, + struct queue_cfg *cfg, + struct cdma_queue *queue) +{ + jfc_cfg->depth = cfg->queue_depth; + jfc_cfg->queue_id = queue->id; +} + +static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, + struct cdma_queue *queue, u32 eid_index) +{ + struct cdma_jfc_cfg jfc_cfg = { 0 }; + + cdma_k_assemble_jfc_cfg(&jfc_cfg, cfg, queue); + + queue->jfc = cdma_create_jfc(cdev, &jfc_cfg, NULL); + if (!queue->jfc) { + dev_err(cdev->dev, "create jfc failed.\n"); + return -EFAULT; + } + + queue->jfc_id = queue->jfc->id; + + dev_dbg(cdev->dev, "set queue %u jfc id: %u.\n", + queue->id, queue->jfc_id); + + return 0; +} + static void cdma_delete_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue) { @@ -60,6 +89,7 @@ struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, bool is_kernel) { struct cdma_queue *queue; + int ret; int id; queue = kzalloc(sizeof(*queue), GFP_KERNEL); @@ -76,8 +106,16 @@ struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, queue->id = id; queue->cfg = *cfg; - if (is_kernel) + if (is_kernel) { + ret = cdma_create_queue_res(cdev, cfg, queue, eid_index); + if (ret) { + dev_err(cdev->dev, "create queue res failed.\n"); + cdma_delete_queue_id(cdev, id); + kfree(queue); + return NULL; + } queue->is_kernel = true; + } return queue; } diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index f8f97a9f9f6d..f55214b84352 
100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -10,6 +10,18 @@ struct cdma_dev; +struct cdma_udrv_priv { + u64 in_addr; + u32 in_len; + u64 out_addr; + u32 out_len; +}; + +struct cdma_udata { + struct cdma_context *uctx; + struct cdma_udrv_priv *udrv_data; +}; + struct cdma_jfc_cfg { u32 depth; u32 ceqn; -- Gitee From 0dca26f9279b41b2cf9cef4b0cce08c01bf2d491 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 10:16:38 +0800 Subject: [PATCH 014/243] ub: cdma: support the deletion of ctp commit 0fb06bebea508db2a00b06bd46bb1b08c2654f4b openEuler This patch implements the deletion functionality of ctp in the CDMA driver. The implementation involves deleting the ctp corresponding to the queue during the queue release process. Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 1 + drivers/ub/cdma/cdma_dev.c | 7 ++ drivers/ub/cdma/cdma_ioctl.c | 51 ++++++++++++- drivers/ub/cdma/cdma_queue.c | 6 ++ drivers/ub/cdma/cdma_queue.h | 2 + drivers/ub/cdma/cdma_tp.c | 127 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_tp.h | 60 +++++++++++++++ drivers/ub/cdma/cdma_types.h | 21 ++++++ include/uapi/ub/cdma/cdma_abi.h | 11 +++ 10 files changed, 285 insertions(+), 3 deletions(-) create mode 100644 drivers/ub/cdma/cdma_tp.c create mode 100644 drivers/ub/cdma/cdma_tp.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 5bb71587ed6d..10bc5ed6b71c 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o + cdma_db.o cdma_mbox.o cdma_tp.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b379a3f43884..b121ef4bc704 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -184,6 +184,7 @@ struct cdma_dev { struct list_head db_page; struct cdma_table queue_table; + struct cdma_table ctp_table; struct cdma_table jfc_table; struct mutex file_mutex; struct list_head file_list; diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index bbf44a75fffc..57e5939b3205 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -15,6 +15,8 @@ #include "cdma_context.h" #include #include +#include "cdma_common.h" +#include "cdma_tp.h" #include "cdma_jfc.h" #include "cdma_queue.h" #include "cdma_dev.h" @@ -112,6 +114,7 @@ static void cdma_init_tables(struct cdma_dev *cdev) static void cdma_destroy_tables(struct cdma_dev *cdev) { + cdma_tbl_destroy(cdev, &cdev->ctp_table, "CTP"); cdma_tbl_destroy(cdev, &cdev->jfc_table, "JFC"); cdma_tbl_destroy(cdev, &cdev->queue_table, "QUEUE"); } @@ -181,8 +184,12 @@ static void cdma_release_table_res(struct cdma_dev *cdev) { struct cdma_queue *queue; struct cdma_jfc *jfc; + struct cdma_tp *tmp; int id; + idr_for_each_entry(&cdev->ctp_table.idr_tbl.idr, tmp, id) + cdma_destroy_ctp_imm(cdev, tmp->base.tp_id); + idr_for_each_entry(&cdev->jfc_table.idr_tbl.idr, jfc, id) cdma_delete_jfc(cdev, jfc->jfcn, NULL); diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index c95230dcc443..5ddc21ae0e76 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -9,6 +9,7 @@ #include "cdma.h" #include "cdma_context.h" #include "cdma_types.h" +#include "cdma_tp.h" #include "cdma_queue.h" 
#include "cdma_jfc.h" #include "cdma_uobj.h" @@ -128,6 +129,51 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, return 0; } +static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_delete_ctp_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_base_tp *ctp; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len < sizeof(arg)) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(&cdev->adev->dev, + "delete tp get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, + "delete ctp, get queue uobj failed, queue id = %u.\n", + arg.in.queue_id); + return -EINVAL; + } + queue = uobj->object; + + uobj = cdma_uobj_get(cfile, arg.in.handle, UOBJ_TYPE_CTP); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "get ctp uobj failed, handle = %llu.\n", + arg.in.handle); + return -EINVAL; + } + ctp = uobj->object; + + cdma_delete_ctp(cdev, ctp->tp_id); + cdma_uobj_delete(uobj); + cdma_set_queue_res(cdev, queue, QUEUE_RES_TP, NULL); + + return ret; +} + static int cdma_cmd_create_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { struct cdma_cmd_create_queue_args arg = { 0 }; @@ -214,8 +260,8 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c } queue = (struct cdma_queue *)uobj->object; - if (queue->jfc) { - dev_err(cdev->dev, "jfc is still in use."); + if (queue->jfc || queue->tp) { + dev_err(cdev->dev, "jfc/tp is still in use."); return -EBUSY; } @@ -360,6 +406,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, + [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, [CDMA_CMD_CREATE_JFC] = cdma_cmd_create_jfc, diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index a638efe33af1..6b28cb679548 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -6,6 +6,7 @@ #include "cdma_common.h" #include "cdma_context.h" #include "cdma_jfc.h" +#include "cdma_tp.h" #include "cdma_queue.h" #include "cdma.h" @@ -53,6 +54,8 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, static void cdma_delete_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue) { + cdma_delete_ctp(cdev, queue->tp->tp_id); + queue->tp = NULL; cdma_delete_jfc(cdev, queue->jfc->id, NULL); queue->jfc = NULL; } @@ -160,6 +163,9 @@ void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, spin_lock(&cdev->queue_table.lock); switch (type) { + case QUEUE_RES_TP: + queue->tp = res; + break; case QUEUE_RES_JFC: queue->jfc = res; if (queue->jfc) diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 3808c23a1934..af1c54b771ba 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -9,11 +9,13 @@ struct cdma_context; struct queue_cfg; enum cdma_queue_res_type { + QUEUE_RES_TP, QUEUE_RES_JFC }; struct cdma_queue { struct cdma_base_jfc *jfc; + struct cdma_base_tp *tp; struct cdma_context *ctx; u32 id; struct queue_cfg cfg; diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c new file mode 100644 index 
000000000000..ea8ed73d0b76
--- /dev/null
+++ b/drivers/ub/cdma/cdma_tp.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */
+
+#define dev_fmt(fmt) "CDMA: " fmt
+
+#include "cdma_common.h"
+#include "cdma_mbox.h"
+#include "cdma_tp.h"
+#include
+
+static inline int cdma_ctrlq_msg_send(struct cdma_dev *cdev,
+				      struct ubase_ctrlq_msg *msg)
+{
+	int ret;
+
+	ret = ubase_ctrlq_send_msg(cdev->adev, msg);
+	if (ret)
+		dev_err(cdev->dev, "ctrlq send msg failed, ret = %d.\n", ret);
+
+	return ret;
+}
+
+static void cdma_ctrlq_delete_ctp(struct cdma_dev *cdev, u32 tpn,
+				  struct cdma_tp_cfg *cfg)
+{
+	struct cdma_ctrlq_tp_delete_cfg ctrlq_tp = { 0 };
+	struct cdma_ctrlq_tp_ret tp_out = { 0 };
+	struct ubase_ctrlq_msg msg = { 0 };
+	int ret;
+
+	ctrlq_tp.seid_flag = CDMA_CTRLQ_FLAG_ON;
+	ctrlq_tp.deid_flag = CDMA_CTRLQ_FLAG_ON;
+	ctrlq_tp.scna = cfg->scna;
+	ctrlq_tp.dcna = cfg->dcna;
+	ctrlq_tp.seid[0] = cfg->seid;
+	ctrlq_tp.deid[0] = cfg->deid;
+	ctrlq_tp.tpn = tpn;
+	ctrlq_tp.route_type = CDMA_ROUTE_TYPE_CNA;
+	ctrlq_tp.trans_type = CDMA_TRANS_TYPE_CDMA_CTP;
+
+	msg.service_ver = UBASE_CTRLQ_SER_VER_01;
+	msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL;
+	msg.opcode = CDMA_CTRLQ_DELETE_CTP;
+	msg.need_resp = CDMA_CTRLQ_FLAG_ON;
+	msg.is_resp = CDMA_CTRLQ_FLAG_OFF;
+	msg.in_size = sizeof(ctrlq_tp);
+	msg.in = &ctrlq_tp;
+	msg.out_size = sizeof(tp_out);
+	msg.out = &tp_out;
+
+	ret = cdma_ctrlq_msg_send(cdev, &msg);
+	if (ret)
+		dev_err(cdev->dev,
+			"delete ctp failed, tpn = %u, dcna = %u, ret = %d.\n",
+			tpn, cfg->dcna, ret);
+}
+
+static struct cdma_tp *cdma_id_find_ctp(struct cdma_dev *cdev, u32 id)
+{
+	struct cdma_tp *tp;
+
+	spin_lock(&cdev->ctp_table.lock);
+	tp = idr_find(&cdev->ctp_table.idr_tbl.idr, id);
+	if (!tp)
+		dev_err(cdev->dev,
+			"get tp from table failed, id = %u.\n", id);
+	spin_unlock(&cdev->ctp_table.lock);
+
+	return tp;
+}
+
+void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id)
+{
+	struct cdma_tp_cfg cfg = { 0 };
+	struct cdma_tp *tp;
+	bool flag = false;
+	u32 tpn;
+
+	if (!cdev)
+		return;
+
+	tp = cdma_id_find_ctp(cdev, tp_id);
+	if (!tp)
+		return;
+
+	spin_lock(&cdev->ctp_table.lock);
+	refcount_dec(&tp->refcount);
+	if (refcount_dec_if_one(&tp->refcount)) {
+		if (cdev->status != CDMA_SUSPEND) {
+			flag = true;
+			tpn = tp->base.tpn;
+			cfg = tp->base.cfg;
+		}
+
+		dev_dbg(cdev->dev,
+			"refcount of tp %u is equal to one and erased.\n", tp_id);
+		idr_remove(&cdev->ctp_table.idr_tbl.idr, tp_id);
+		kfree(tp);
+	}
+	spin_unlock(&cdev->ctp_table.lock);
+
+	if (flag)
+		cdma_ctrlq_delete_ctp(cdev, tpn, &cfg);
+}
+
+void cdma_destroy_ctp_imm(struct cdma_dev *cdev, u32 tp_id)
+{
+	struct cdma_tp_cfg cfg = { 0 };
+	struct cdma_tp *tp;
+	u32 tpn;
+
+	if (!cdev)
+		return;
+
+	tp = cdma_id_find_ctp(cdev, tp_id);
+	if (!tp)
+		return;
+
+	spin_lock(&cdev->ctp_table.lock);
+	tpn = tp->base.tpn;
+	cfg = tp->base.cfg;
+	idr_remove(&cdev->ctp_table.idr_tbl.idr, tp_id);
+	kfree(tp);
+	spin_unlock(&cdev->ctp_table.lock);
+
+	cdma_ctrlq_delete_ctp(cdev, tpn, &cfg);
+}
diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h
new file mode 100644
index 000000000000..51ae0bbe5035
--- /dev/null
+++ b/drivers/ub/cdma/cdma_tp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved.
*/ + +#ifndef __CDMA_TP_H__ +#define __CDMA_TP_H__ + +#include "cdma_types.h" + +#define CDMA_CTRLQ_FLAG_ON 1 +#define CDMA_CTRLQ_FLAG_OFF 0 +#define CDMA_EID_DW_SIZE 4 + +struct cdma_tp { + struct cdma_dev *dev; + struct cdma_base_tp base; + refcount_t refcount; + struct completion ae_comp; +}; + +enum cdma_tp_ctrlq_cmd { + CDMA_CTRLQ_CREATE_CTP = 0x01, + CDMA_CTRLQ_DELETE_CTP = 0x02 +}; + +enum cdma_tp_route_type { + CDMA_ROUTE_TYPE_IPV4, + CDMA_ROUTE_TYPE_IPV6, + CDMA_ROUTE_TYPE_CNA, + CDMA_ROUTE_TYPE_MAX +}; + +enum cdma_tp_trans_type { + CDMA_TRANS_TYPE_URMA_TP, + CDMA_TRANS_TYPE_URMA_CTP, + CDMA_TRANS_TYPE_UMS_TP, + CDMA_TRANS_TYPE_CDMA_CTP, + CDMA_TRANS_TYPE_MAX +}; + +struct cdma_ctrlq_tp_ret { + int ret; +}; + +struct cdma_ctrlq_tp_delete_cfg { + u32 seid_flag; + u32 seid[CDMA_EID_DW_SIZE]; + u32 scna; + u32 deid_flag; + u32 deid[CDMA_EID_DW_SIZE]; + u32 dcna; + u32 route_type : 4; /* 0-IPv4, 1-IPv6, 2-CNA */ + u32 trans_type : 4; + u32 rsv : 24; + u32 tpn; +}; + +void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); + +void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); +#endif /* CDMA_TP_H */ diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index f55214b84352..8458926b1605 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -10,6 +10,12 @@ struct cdma_dev; +struct cdma_ucontext { + struct cdma_dev *dev; + u32 eid; + u32 eid_index; +}; + struct cdma_udrv_priv { u64 in_addr; u32 in_len; @@ -17,6 +23,21 @@ struct cdma_udrv_priv { u32 out_len; }; +struct cdma_tp_cfg { + u32 scna; + u32 dcna; + u32 seid; + u32 deid; +}; + +struct cdma_base_tp { + struct cdma_ucontext *uctx; + struct cdma_tp_cfg cfg; + u64 usr_tp; + u32 tpn; + u32 tp_id; +}; + struct cdma_udata { struct cdma_context *uctx; struct cdma_udrv_priv *udrv_data; diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index da4aefa119d8..38de05508049 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -21,6 +21,7 @@ enum cdma_cmd { CDMA_CMD_QUERY_DEV_INFO, CDMA_CMD_CREATE_CTX, CDMA_CMD_DELETE_CTX, + CDMA_CMD_DELETE_CTP, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, CDMA_CMD_CREATE_JFC, @@ -41,6 +42,16 @@ struct cdma_cmd_udrv_priv { __u32 out_len; }; +struct cdma_cmd_delete_ctp_args { + struct { + __u32 tpn; + __u64 handle; + __u32 queue_id; + } in; + struct { + } out; +}; + struct cdma_cmd_create_jfc_args { struct { __u32 depth; /* in terms of CQEBB */ -- Gitee From 550cb621d2961fae9e3f2ec2824b2a943424c19d Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 11:10:28 +0800 Subject: [PATCH 015/243] ub: cdma: support the creation of ctp commit 574cc24bc868dac8e37a1a5cc369cbde1867b46d openEuler This patch implements the creation functionality of ctp in the CDMA driver. The implementation involves creating the ctp corresponding to the queue during the queue creation process. 
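For reference, a minimal user-space sketch of driving the new CDMA_CMD_CREATE_CTP command. This is illustrative only: the ioctl request code (CDMA_IOCTL below) and the command field of the ioctl header are assumptions, since the char-device dispatch is defined outside this patch; the argument layout follows the cdma_cmd_create_ctp_args struct added here.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Assumed uapi header carrying enum cdma_cmd and the args structs. */
	#include <ub/cdma/cdma_abi.h>

	static int create_ctp_example(int cdma_fd, uint32_t queue_id,
				      uint32_t scna, uint32_t dcna,
				      uint32_t seid, uint32_t deid)
	{
		struct cdma_cmd_create_ctp_args args;
		struct cdma_ioctl_hdr hdr;	/* assumed header layout */

		memset(&args, 0, sizeof(args));
		args.in.queue_id = queue_id;	/* queue the ctp is bound to */
		args.in.scna = scna;		/* source CNA */
		args.in.dcna = dcna;		/* destination CNA */
		args.in.seid = seid;
		args.in.deid = deid;

		memset(&hdr, 0, sizeof(hdr));
		hdr.command = CDMA_CMD_CREATE_CTP;	/* assumed field name */
		hdr.args_addr = (uint64_t)(uintptr_t)&args;
		hdr.args_len = sizeof(args);

		if (ioctl(cdma_fd, CDMA_IOCTL, &hdr))	/* assumed request code */
			return -1;

		printf("ctp tpn = %u, handle = %llu\n", args.out.tpn,
		       (unsigned long long)args.out.handle);
		return 0;
	}

Note that cdma_create_ctp() deduplicates transports: if the ctrlq returns a tpn for which a cdma_tp entry already exists, that entry's refcount is bumped instead of allocating a new table slot, so repeated creations against the same remote endpoint share one hardware ctp.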
Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_common.h | 2 + drivers/ub/cdma/cdma_dev.c | 1 + drivers/ub/cdma/cdma_ioctl.c | 72 ++++++++++++++++++ drivers/ub/cdma/cdma_queue.c | 24 ++++++ drivers/ub/cdma/cdma_tp.c | 129 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_tp.h | 16 ++++ include/uapi/ub/cdma/cdma_abi.h | 18 +++++ 7 files changed, 262 insertions(+) diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 57e241c3b946..4580038b5b1b 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -7,6 +7,8 @@ #include #include "cdma.h" +#define CDMA_RANGE_INDEX_ENTRY_CNT 0x100000 + #define CDMA_DB_SIZE 64 struct cdma_umem_param { diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 57e5939b3205..88367e69b8c4 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -110,6 +110,7 @@ static void cdma_init_tables(struct cdma_dev *cdev) queue->start_idx); cdma_tbl_init(&cdev->jfc_table, jfc->start_idx + jfc->max_cnt - 1, jfc->start_idx); + cdma_tbl_init(&cdev->ctp_table, CDMA_RANGE_INDEX_ENTRY_CNT, 0); } static void cdma_destroy_tables(struct cdma_dev *cdev) diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 5ddc21ae0e76..c8ba6d9bfd8a 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -129,6 +129,77 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, return 0; } +static int cdma_cmd_create_ctp(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_create_ctp_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_tp_cfg cfg = { 0 }; + struct cdma_base_tp *ctp; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len < sizeof(arg) || !cfile->uctx) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(&cdev->adev->dev, + "create tp get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, + "create ctp, get queue uobj failed, queue id = %u.\n", + arg.in.queue_id); + return -EINVAL; + } + queue = (struct cdma_queue *)uobj->object; + + uobj = cdma_uobj_create(cfile, UOBJ_TYPE_CTP); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "create ctp uobj failed.\n"); + return -ENOMEM; + } + + cfg.scna = arg.in.scna; + cfg.dcna = arg.in.dcna; + cfg.seid = arg.in.seid; + cfg.deid = arg.in.deid; + ctp = cdma_create_ctp(cdev, &cfg); + if (!ctp) { + dev_err(&cdev->adev->dev, "create tp failed.\n"); + ret = -EINVAL; + goto delete_obj; + } + uobj->object = ctp; + + arg.out.handle = uobj->id; + arg.out.tpn = ctp->tpn; + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + dev_err(&cdev->adev->dev, + "create tp copy to user data failed, ret = %d.\n", ret); + ret = -EFAULT; + goto delete_ctp; + } + + cdma_set_queue_res(cdev, queue, QUEUE_RES_TP, ctp); + + return 0; + +delete_ctp: + cdma_delete_ctp(cdev, ctp->tp_id); +delete_obj: + cdma_uobj_delete(uobj); + + return ret; +} + static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { @@ -406,6 +477,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, + 
[CDMA_CMD_CREATE_CTP] = cdma_cmd_create_ctp, [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 6b28cb679548..28fdedc258ad 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -30,12 +30,24 @@ static void cdma_k_assemble_jfc_cfg(struct cdma_jfc_cfg *jfc_cfg, jfc_cfg->queue_id = queue->id; } +static void cdma_k_assemble_tp_cfg(struct cdma_tp_cfg *tp_cfg, + struct cdma_dev *cdev, + struct queue_cfg *cfg) +{ + tp_cfg->seid = cdev->base.attr.eu.eid.dw0; + tp_cfg->dcna = cfg->dcna; + tp_cfg->deid = cfg->rmt_eid.dw0; +} + static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, struct cdma_queue *queue, u32 eid_index) { struct cdma_jfc_cfg jfc_cfg = { 0 }; + struct cdma_tp_cfg tp_cfg = { 0 }; + int ret; cdma_k_assemble_jfc_cfg(&jfc_cfg, cfg, queue); + cdma_k_assemble_tp_cfg(&tp_cfg, cdev, cfg); queue->jfc = cdma_create_jfc(cdev, &jfc_cfg, NULL); if (!queue->jfc) { @@ -43,12 +55,24 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, return -EFAULT; } + queue->tp = cdma_create_ctp(cdev, &tp_cfg); + if (!queue->tp) { + dev_err(cdev->dev, "create tp failed.\n"); + ret = -EFAULT; + goto delete_jfc; + } + queue->jfc_id = queue->jfc->id; dev_dbg(cdev->dev, "set queue %u jfc id: %u.\n", queue->id, queue->jfc_id); return 0; + +delete_jfc: + cdma_delete_jfc(cdev, queue->jfc->id, NULL); + + return ret; } static void cdma_delete_queue_res(struct cdma_dev *cdev, diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c index ea8ed73d0b76..a77f1164b416 100644 --- a/drivers/ub/cdma/cdma_tp.c +++ b/drivers/ub/cdma/cdma_tp.c @@ -20,6 +20,53 @@ static inline int cdma_ctrlq_msg_send(struct cdma_dev *cdev, return ret; } +static int cdma_ctrlq_create_ctp(struct cdma_dev *cdev, + struct cdma_tp_cfg *cfg, u32 *tpn) +{ + struct cdma_ctrlq_tp_create_cfg ctrlq_tp; + struct cdma_ctrlq_tp_ret tp_out = { 0 }; + struct ubase_ctrlq_msg msg = { 0 }; + int ret; + + ctrlq_tp = (struct cdma_ctrlq_tp_create_cfg) { + .seid_flag = CDMA_CTRLQ_FLAG_ON, + .deid_flag = CDMA_CTRLQ_FLAG_ON, + .scna = cfg->scna, + .dcna = cfg->dcna, + .seid[0] = cfg->seid, + .deid[0] = cfg->deid, + .route_type = CDMA_ROUTE_TYPE_CNA, + .trans_type = CDMA_TRANS_TYPE_CDMA_CTP + }; + + msg = (struct ubase_ctrlq_msg) { + .service_ver = UBASE_CTRLQ_SER_VER_01, + .service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL, + .opcode = CDMA_CTRLQ_CREATE_CTP, + .need_resp = CDMA_CTRLQ_FLAG_ON, + .is_resp = CDMA_CTRLQ_FLAG_OFF, + .in_size = sizeof(ctrlq_tp), + .in = &ctrlq_tp, + .out_size = sizeof(tp_out), + .out = &tp_out + }; + + ret = cdma_ctrlq_msg_send(cdev, &msg); + if (ret) + return ret; + + ret = tp_out.ret; + if (ret <= 0) { + dev_err(cdev->dev, + "create ctp failed, scna = %u, dcna = %u, ret = %d.\n", + ctrlq_tp.scna, ctrlq_tp.dcna, ret); + return -EFAULT; + } + *tpn = ret & CDMA_TPN_MASK; + + return 0; +} + static void cdma_ctrlq_delete_ctp(struct cdma_dev *cdev, u32 tpn, struct cdma_tp_cfg *cfg) { @@ -69,6 +116,88 @@ static struct cdma_tp *cdma_id_find_ctp(struct cdma_dev *cdev, u32 id) return tp; } +static struct cdma_tp *cdma_tpn_find_ctp(struct cdma_dev *cdev, u32 tpn) +{ + struct cdma_tp *tmp; + int id; + + spin_lock(&cdev->ctp_table.lock); + idr_for_each_entry(&cdev->ctp_table.idr_tbl.idr, tmp, id) { + if (tmp && tmp->base.tpn == tpn) { + spin_unlock(&cdev->ctp_table.lock); + return tmp; + } + } + + 
spin_unlock(&cdev->ctp_table.lock); + return NULL; +} + +static int cdma_alloc_tp_id(struct cdma_dev *cdev, struct cdma_tp *tp) +{ + struct cdma_table *tp_tbl = &cdev->ctp_table; + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&tp_tbl->lock); + id = idr_alloc(&tp_tbl->idr_tbl.idr, tp, tp_tbl->idr_tbl.min, + tp_tbl->idr_tbl.max, GFP_NOWAIT); + if (id < 0) + dev_err(cdev->dev, "cdma tp id alloc failed.\n"); + spin_unlock(&tp_tbl->lock); + idr_preload_end(); + + return id; +} + +struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, + struct cdma_tp_cfg *cfg) +{ + struct cdma_tp *tp; + u32 tpn; + int ret; + + ret = cdma_ctrlq_create_ctp(cdev, cfg, &tpn); + if (ret) { + dev_err(cdev->dev, "get tp failed, ret = %d.\n", ret); + return NULL; + } + + tp = (struct cdma_tp *)cdma_tpn_find_ctp(cdev, tpn); + if (tp) { + refcount_inc(&tp->refcount); + return &tp->base; + } + + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) + goto err_alloc_tp; + + refcount_set(&tp->refcount, 1); + tp->base.cfg = *cfg; + tp->base.tpn = tpn; + tp->dev = cdev; + + ret = cdma_alloc_tp_id(cdev, tp); + if (ret < 0) + goto err_alloc_tpid; + + tp->base.tp_id = ret; + refcount_inc(&tp->refcount); + + dev_dbg(cdev->dev, "create ctp id = %u, tpn = %u, seid = %u, dcna = %u\n", + tp->base.tp_id, tpn, cfg->seid, cfg->dcna); + + return &tp->base; + +err_alloc_tpid: + kfree(tp); +err_alloc_tp: + cdma_ctrlq_delete_ctp(cdev, tpn, cfg); + + return NULL; +} + void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id) { struct cdma_tp_cfg cfg = { 0 }; diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h index 51ae0bbe5035..72019df35d74 100644 --- a/drivers/ub/cdma/cdma_tp.h +++ b/drivers/ub/cdma/cdma_tp.h @@ -8,6 +8,7 @@ #define CDMA_CTRLQ_FLAG_ON 1 #define CDMA_CTRLQ_FLAG_OFF 0 +#define CDMA_TPN_MASK 0xffffff #define CDMA_EID_DW_SIZE 4 struct cdma_tp { @@ -37,6 +38,18 @@ enum cdma_tp_trans_type { CDMA_TRANS_TYPE_MAX }; +struct cdma_ctrlq_tp_create_cfg { + u32 seid_flag; /* 0: 128bits eid, 1: 20bits eid */ + u32 seid[CDMA_EID_DW_SIZE]; + u32 scna; + u32 deid_flag; + u32 deid[CDMA_EID_DW_SIZE]; + u32 dcna; + u32 route_type : 4; /* 0-IPv4, 1-IPv6, 2-CNA */ + u32 trans_type : 4; + u32 rsv : 24; +}; + struct cdma_ctrlq_tp_ret { int ret; }; @@ -54,6 +67,9 @@ struct cdma_ctrlq_tp_delete_cfg { u32 tpn; }; +struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, + struct cdma_tp_cfg *cfg); + void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 38de05508049..1d34391de601 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -21,6 +21,7 @@ enum cdma_cmd { CDMA_CMD_QUERY_DEV_INFO, CDMA_CMD_CREATE_CTX, CDMA_CMD_DELETE_CTX, + CDMA_CMD_CREATE_CTP, CDMA_CMD_DELETE_CTP, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, @@ -42,6 +43,23 @@ struct cdma_cmd_udrv_priv { __u32 out_len; }; +struct cdma_cmd_create_ctp_args { + struct { + __u32 scna; + __u32 dcna; + __u32 eid_idx; + __u32 upi; + __u64 dma_tp; + __u32 seid; + __u32 deid; + __u32 queue_id; + } in; + struct { + __u32 tpn; + __u64 handle; + } out; +}; + struct cdma_cmd_delete_ctp_args { struct { __u32 tpn; -- Gitee From 11d65d0b12c68312b2b2de2fd2f8264dc6c29697 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 11:21:09 +0800 Subject: [PATCH 016/243] ub: cdma: support the deletion of jfs commit e3525452704396bd4c8213f09f728c06f4df5302 openEuler This patch implements the deletion 
functionality of jfs in the CDMA driver. The implementation involves deleting the jfs corresponding to the queue during the queue release process. Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 1 + drivers/ub/cdma/cdma_common.h | 27 ++++ drivers/ub/cdma/cdma_dev.c | 6 + drivers/ub/cdma/cdma_ioctl.c | 56 ++++++- drivers/ub/cdma/cdma_jfs.c | 250 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 145 ++++++++++++++++++ drivers/ub/cdma/cdma_mbox.h | 6 + drivers/ub/cdma/cdma_queue.c | 8 + drivers/ub/cdma/cdma_queue.h | 3 + drivers/ub/cdma/cdma_types.h | 37 +++++ include/uapi/ub/cdma/cdma_abi.h | 11 ++ 12 files changed, 549 insertions(+), 3 deletions(-) create mode 100644 drivers/ub/cdma/cdma_jfs.c create mode 100644 drivers/ub/cdma/cdma_jfs.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 10bc5ed6b71c..92cd9c3b9f58 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o cdma_tp.o + cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b121ef4bc704..20835ae6429d 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -185,6 +185,7 @@ struct cdma_dev { struct cdma_table queue_table; struct cdma_table ctp_table; + struct cdma_table jfs_table; struct cdma_table jfc_table; struct mutex file_mutex; struct list_head file_list; diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 4580038b5b1b..5e2c84114e35 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -11,6 +11,33 @@ #define CDMA_DB_SIZE 64 +enum cdma_jetty_state { + CDMA_JETTY_RESET, + CDMA_JETTY_READY, + CDMA_JETTY_SUSPENDED, + CDMA_JETTY_ERROR, +}; + +struct cdma_jetty_queue { + struct cdma_buf buf; + void *kva_curr; + u32 id; + void __iomem *db_addr; + void __iomem *dwqe_addr; + u32 pi; + u32 ci; + spinlock_t lock; + u32 max_inline_size; + u32 max_sge_num; + u32 tid; + bool flush_flag; + bool is_jetty; + u32 sqe_bb_cnt; + enum cdma_jetty_state state; + u32 non_pin; + u32 ta_tmo; +}; + struct cdma_umem_param { struct cdma_dev *dev; u64 va; diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 88367e69b8c4..3806b19f8d52 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -17,6 +17,7 @@ #include #include "cdma_common.h" #include "cdma_tp.h" +#include "cdma_jfs.h" #include "cdma_jfc.h" #include "cdma_queue.h" #include "cdma_dev.h" @@ -116,6 +117,7 @@ static void cdma_init_tables(struct cdma_dev *cdev) static void cdma_destroy_tables(struct cdma_dev *cdev) { cdma_tbl_destroy(cdev, &cdev->ctp_table, "CTP"); + cdma_tbl_destroy(cdev, &cdev->jfs_table, "JFS"); cdma_tbl_destroy(cdev, &cdev->jfc_table, "JFC"); cdma_tbl_destroy(cdev, &cdev->queue_table, "QUEUE"); } @@ -185,12 +187,16 @@ static void cdma_release_table_res(struct cdma_dev *cdev) { struct cdma_queue *queue; struct cdma_jfc *jfc; + struct cdma_jfs *jfs; struct cdma_tp *tmp; int id; idr_for_each_entry(&cdev->ctp_table.idr_tbl.idr, tmp, id) cdma_destroy_ctp_imm(cdev, tmp->base.tp_id); + idr_for_each_entry(&cdev->jfs_table.idr_tbl.idr, jfs, id) + cdma_delete_jfs(cdev, jfs->id); + idr_for_each_entry(&cdev->jfc_table.idr_tbl.idr, jfc, id) 
cdma_delete_jfc(cdev, jfc->jfcn, NULL); diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index c8ba6d9bfd8a..28ca199eee3a 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -10,6 +10,7 @@ #include "cdma_context.h" #include "cdma_types.h" #include "cdma_tp.h" +#include "cdma_jfs.h" #include "cdma_queue.h" #include "cdma_jfc.h" #include "cdma_uobj.h" @@ -245,6 +246,56 @@ static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, return ret; } +static int cdma_cmd_delete_jfs(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_delete_jfs_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_base_jfs *base_jfs; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != (u32)sizeof(arg)) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(&cdev->adev->dev, + "delete jfs get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, + "delete jfs, get queue uobj failed, queue id = %u.\n", + arg.in.queue_id); + return -EINVAL; + } + queue = uobj->object; + + uobj = cdma_uobj_get(cfile, arg.in.handle, UOBJ_TYPE_JFS); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "get jfs uobj failed, handle = %llu.\n", + arg.in.handle); + return -EINVAL; + } + + base_jfs = uobj->object; + ret = cdma_delete_jfs(cdev, base_jfs->id); + if (ret) { + dev_err(&cdev->adev->dev, "delete jfs failed.\n"); + return ret; + } + + cdma_set_queue_res(cdev, queue, QUEUE_RES_JFS, NULL); + cdma_uobj_delete(uobj); + + return 0; +} + static int cdma_cmd_create_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { struct cdma_cmd_create_queue_args arg = { 0 }; @@ -331,8 +382,8 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c } queue = (struct cdma_queue *)uobj->object; - if (queue->jfc || queue->tp) { - dev_err(cdev->dev, "jfc/tp is still in use."); + if (queue->jfc || queue->jfs || queue->tp) { + dev_err(cdev->dev, "jfc/jfs/tp is still in use."); return -EBUSY; } @@ -479,6 +530,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, [CDMA_CMD_CREATE_CTP] = cdma_cmd_create_ctp, [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, + [CDMA_CMD_DELETE_JFS] = cdma_cmd_delete_jfs, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, [CDMA_CMD_CREATE_JFC] = cdma_cmd_create_jfc, diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c new file mode 100644 index 000000000000..62846e395ba8 --- /dev/null +++ b/drivers/ub/cdma/cdma_jfs.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define pr_fmt(fmt) "CDMA: " fmt +#define dev_fmt pr_fmt + +#include +#include +#include +#include +#include +#include "cdma_cmd.h" +#include "cdma_common.h" +#include "cdma_mbox.h" +#include "cdma_context.h" +#include "cdma_jfs.h" + +static void cdma_free_sq_buf(struct cdma_dev *cdev, struct cdma_jetty_queue *sq) +{ + u32 size; + + if (sq->buf.kva) { + size = sq->buf.entry_cnt * sq->buf.entry_size; + cdma_k_free_buf(cdev, size, &sq->buf); + } else { + cdma_unpin_queue_addr(sq->buf.umem); + sq->buf.umem = NULL; + } +} + +static inline void cdma_free_jfs_id(struct cdma_dev *cdev, u32 id) +{ + spin_lock(&cdev->jfs_table.lock); + idr_remove(&cdev->jfs_table.idr_tbl.idr, id); + spin_unlock(&cdev->jfs_table.lock); +} + +static int cdma_set_jfs_state(struct cdma_dev *cdev, u32 jfs_id, + enum cdma_jetty_state state) +{ + struct cdma_jfs_ctx ctx[SZ_2] = { 0 }; + struct ubase_mbx_attr attr = { 0 }; + struct cdma_jfs_ctx *ctx_mask; + + ctx_mask = (struct cdma_jfs_ctx *)((char *)ctx + SZ_128); + memset(ctx_mask, 0xff, sizeof(*ctx_mask)); + ctx->state = state; + ctx_mask->state = 0; + + cdma_fill_mbx_attr(&attr, jfs_id, CDMA_CMD_MODIFY_JFS_CONTEXT, 0); + + return cdma_post_mailbox_ctx(cdev, (void *)ctx, sizeof(ctx), &attr); +} + +static int cdma_query_jfs_ctx(struct cdma_dev *cdev, + struct cdma_jfs_ctx *jfs_ctx, + u32 jfs_id) +{ + struct ubase_mbx_attr attr = { 0 }; + struct ubase_cmd_mailbox *mailbox; + + cdma_fill_mbx_attr(&attr, jfs_id, CDMA_CMD_QUERY_JFS_CONTEXT, 0); + mailbox = cdma_mailbox_query_ctx(cdev, &attr); + if (!mailbox) + return -ENOMEM; + memcpy((void *)jfs_ctx, mailbox->buf, sizeof(*jfs_ctx)); + + cdma_free_cmd_mailbox(cdev, mailbox); + + return 0; +} + +static int cdma_destroy_hw_jfs_ctx(struct cdma_dev *cdev, u32 jfs_id) +{ + struct ubase_mbx_attr attr = { 0 }; + int ret; + + cdma_fill_mbx_attr(&attr, jfs_id, CDMA_CMD_DESTROY_JFS_CONTEXT, 0); + ret = cdma_post_mailbox_ctx(cdev, NULL, 0, &attr); + if (ret) + dev_err(cdev->dev, + "post mailbox destroy jfs ctx failed, ret = %d.\n", ret); + + return ret; +} + +static bool cdma_wait_timeout(u32 *sum_times, u32 times, u32 ta_timeout) +{ + u32 wait_time; + + if (*sum_times > ta_timeout) + return true; + + wait_time = 1 << times; + msleep(wait_time); + *sum_times += wait_time; + + return false; +} + +static bool cdma_query_jfs_fd(struct cdma_dev *cdev, + struct cdma_jetty_queue *sq) +{ + struct cdma_jfs_ctx ctx = { 0 }; + u16 rcv_send_diff = 0; + u32 sum_times = 0; + u32 times = 0; + + while (true) { + if (cdma_query_jfs_ctx(cdev, &ctx, sq->id)) + return false; + + if (ctx.flush_cqe_done) + return true; + + if (cdma_wait_timeout(&sum_times, times, sq->ta_tmo)) { + dev_warn(cdev->dev, + "ta timeout, id = %u. PI = %u, CI = %u, next_send_ssn = %u next_rcv_ssn = %u state = %u.\n", + sq->id, ctx.pi, ctx.ci, ctx.next_send_ssn, + ctx.next_rcv_ssn, ctx.state); + break; + } + + times++; + } + + /* In the flip scenario, ctx.next_rcv_ssn - ctx.next_send_ssn value is less than 512. 
*/ + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.flush_ssn_vld && rcv_send_diff < CDMA_RCV_SEND_MAX_DIFF) + return true; + + dev_err(cdev->dev, "query jfs flush ssn error, id = %u", sq->id); + + return false; +} + +int cdma_modify_jfs_precondition(struct cdma_dev *cdev, + struct cdma_jetty_queue *sq) +{ + struct cdma_jfs_ctx ctx = { 0 }; + u16 rcv_send_diff = 0; + u32 sum_times = 0; + u32 times = 0; + + while (true) { + if (cdma_query_jfs_ctx(cdev, &ctx, sq->id)) { + dev_err(cdev->dev, "query jfs ctx failed, id = %u.\n", + sq->id); + return -ENOMEM; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if ((ctx.pi == ctx.ci) && (rcv_send_diff < CDMA_RCV_SEND_MAX_DIFF) && + (ctx.state == CDMA_JETTY_READY)) + break; + + if ((rcv_send_diff < CDMA_RCV_SEND_MAX_DIFF) && + (ctx.state == CDMA_JETTY_ERROR)) + break; + + if (cdma_wait_timeout(&sum_times, times, sq->ta_tmo)) { + dev_warn(cdev->dev, + "ta timeout, id = %u. PI = %u, CI = %u, next_send_ssn = %u next_rcv_ssn = %u state = %u.\n", + sq->id, ctx.pi, ctx.ci, ctx.next_send_ssn, + ctx.next_rcv_ssn, ctx.state); + break; + } + times++; + } + + return 0; +} + +static bool cdma_destroy_jfs_precondition(struct cdma_dev *cdev, + struct cdma_jetty_queue *sq) +{ +#define CDMA_DESTROY_JETTY_DELAY_TIME 100U + + if ((sq->state == CDMA_JETTY_READY) || + (sq->state == CDMA_JETTY_SUSPENDED)) { + if (cdma_modify_jfs_precondition(cdev, sq)) + return false; + + if (cdma_set_jfs_state(cdev, sq->id, CDMA_JETTY_ERROR)) { + dev_err(cdev->dev, "modify jfs state to error failed, id = %u.\n", + sq->id); + return false; + } + + sq->state = CDMA_JETTY_ERROR; + dev_dbg(cdev->dev, "set jfs %u status finished.\n", sq->id); + } + + if (!cdma_query_jfs_fd(cdev, sq)) + return false; + + udelay(CDMA_DESTROY_JETTY_DELAY_TIME); + + return true; +} + +static int cdma_modify_and_destroy_jfs(struct cdma_dev *cdev, + struct cdma_jetty_queue *sq) +{ + int ret = 0; + + if (!cdma_destroy_jfs_precondition(cdev, sq)) + return -EINVAL; + + if (sq->state != CDMA_JETTY_RESET) + ret = cdma_destroy_hw_jfs_ctx(cdev, sq->id); + + return ret; +} + +int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) +{ + struct cdma_jfs *jfs; + int ret; + + if (jfs_id >= cdev->caps.jfs.start_idx + cdev->caps.jfs.max_cnt) { + dev_info(cdev->dev, + "jfs id invalid, jfs_id = %u, start_idx = %u, max_cnt = %u.\n", + jfs_id, cdev->caps.jfs.start_idx, + cdev->caps.jfs.max_cnt); + return -EINVAL; + } + + spin_lock(&cdev->jfs_table.lock); + jfs = idr_find(&cdev->jfs_table.idr_tbl.idr, jfs_id); + spin_unlock(&cdev->jfs_table.lock); + if (!jfs) { + dev_err(cdev->dev, "get jfs from table failed, id = %u.\n", jfs_id); + return -EINVAL; + } + + ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); + if (ret) + dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + + cdma_free_sq_buf(cdev, &jfs->sq); + + cdma_free_jfs_id(cdev, jfs_id); + + pr_debug("Leave %s, jfsn: %u.\n", __func__, jfs_id); + + kfree(jfs); + + return 0; +} diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h new file mode 100644 index 000000000000..414f18647d8f --- /dev/null +++ b/drivers/ub/cdma/cdma_jfs.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_JFS_H__ +#define __CDMA_JFS_H__ + +#include "cdma_common.h" +#include "cdma_types.h" + +#define CDMA_RCV_SEND_MAX_DIFF 512U + +struct cdma_jfs { + struct cdma_base_jfs base_jfs; + struct cdma_dev *dev; + struct cdma_jetty_queue sq; + struct cdma_jfs_cfg cfg; + u64 jfs_addr; + u32 id; + u32 queue_id; + bool is_kernel; + refcount_t ae_ref_cnt; + struct completion ae_comp; +}; + +struct cdma_jfs_ctx { + /* DW0 */ + u32 ta_timeout : 2; + u32 rnr_retry_num : 3; + u32 type : 3; + u32 sqe_bb_shift : 4; + u32 sl : 4; + u32 state : 3; + u32 jfs_mode : 1; + u32 sqe_token_id_l : 12; + /* DW1 */ + u32 sqe_token_id_h : 8; + u32 err_mode : 1; + u32 rsv0 : 1; + u32 cmp_odr : 1; + u32 rsv1 : 1; + u32 sqe_base_addr_l : 20; + /* DW2 */ + u32 sqe_base_addr_h; + /* DW3 */ + u32 rsv2; + /* DW4 */ + u32 tx_jfcn : 20; + u32 jfrn_l : 12; + /* DW5 */ + u32 jfrn_h : 8; + u32 rsv3 : 4; + u32 rx_jfcn : 20; + /* DW6 */ + u32 seid_idx : 10; + u32 rsv4 : 22; + /* DW7 */ + u32 user_data_l; + /* DW8 */ + u32 user_data_h; + /* DW9 */ + u32 sqe_pos : 1; + u32 sqe_pld_pos : 1; + u32 sqe_pld_tokenid : 20; + u32 rsv5 : 10; + /* DW10 */ + u32 tpn : 24; + u32 rsv6 : 8; + /* DW11 */ + u32 rmt_eid : 20; + u32 rsv7 : 12; + /* DW12 */ + u32 rmt_tokenid : 20; + u64 rsv9 : 12; + /* DW13-DW15 */ + u32 rsv12[3]; + /* DW16 */ + u32 next_send_ssn : 16; + u32 src_order_wqe : 16; + /* DW17 */ + u32 src_order_ssn : 16; + u32 src_order_sgme_cnt : 16; + /* DW18 */ + u32 src_order_sgme_send_cnt : 16; + u32 ci : 16; + /* DW19 */ + u32 rsv13; + /* DW20 */ + u32 pi : 16; + u32 sq_db_doing : 1; + u32 ost_rce_credit : 15; + /* DW21 */ + u32 sq_db_retrying : 1; + u32 wmtp_rsv0 : 31; + /* DW22 */ + u32 wait_ack_timeout : 1; + u32 wait_rnr_timeout : 1; + u32 cqe_ie : 1; + u32 cqe_sz : 1; + u32 wmtp_rsv1 : 28; + /* DW23 */ + u32 wml_rsv1; + /* DW24 */ + u32 next_rcv_ssn : 16; + u32 next_cpl_bb_idx : 16; + /* DW25 */ + u32 next_cpl_sgmt_num : 20; + u32 we_rsv0 : 12; + /* DW26 */ + u32 next_cpl_bb_num : 4; + u32 next_cpl_cqe_en : 1; + u32 next_cpl_info_vld : 1; + u32 rpting_cqe : 1; + u32 not_rpt_cqe : 1; + u32 flush_ssn : 16; + u32 flush_ssn_vld : 1; + u32 flush_vld : 1; + u32 flush_cqe_done : 1; + u32 we_rsv1 : 5; + /* DW27 */ + u32 rcved_cont_ssn_num : 20; + u32 we_rsv2 : 12; + /* DW28 */ + u32 sq_timer; + /* DW29 */ + u32 rnr_cnt : 3; + u32 abt_ssn : 16; + u32 abt_ssn_vld : 1; + u32 taack_timeout_flag : 1; + u32 we_rsv3 : 9; + u32 err_type_l : 2; + /* DW30 */ + u32 err_type_h : 7; + u32 sq_flush_ssn : 16; + u32 we_rsv4 : 9; + /* DW31 */ + u32 avail_sgmt_ost : 10; + u32 read_op_cnt : 10; + u32 we_rsv5 : 12; + /* DW32 - DW63 */ + u32 taack_nack_bm[32]; +}; + +int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); + +#endif diff --git a/drivers/ub/cdma/cdma_mbox.h b/drivers/ub/cdma/cdma_mbox.h index 584508b592aa..e8a00f5c9b97 100644 --- a/drivers/ub/cdma/cdma_mbox.h +++ b/drivers/ub/cdma/cdma_mbox.h @@ -8,6 +8,12 @@ #include enum { + /* JFS CMDS */ + CDMA_CMD_CREATE_JFS_CONTEXT = 0x04, + CDMA_CMD_MODIFY_JFS_CONTEXT = 0x05, + CDMA_CMD_QUERY_JFS_CONTEXT = 0x06, + CDMA_CMD_DESTROY_JFS_CONTEXT = 0x07, + /* JFC CMDS */ CDMA_CMD_CREATE_JFC_CONTEXT = 0x24, CDMA_CMD_QUERY_JFC_CONTEXT = 0x26, diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 28fdedc258ad..20a11bfa5194 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -6,6 +6,7 @@ #include "cdma_common.h" #include "cdma_context.h" #include "cdma_jfc.h" +#include "cdma_jfs.h" #include "cdma_tp.h" #include "cdma_queue.h" #include 
"cdma.h" @@ -78,6 +79,8 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, static void cdma_delete_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue) { + cdma_delete_jfs(cdev, queue->jfs->id); + queue->jfs = NULL; cdma_delete_ctp(cdev, queue->tp->tp_id); queue->tp = NULL; cdma_delete_jfc(cdev, queue->jfc->id, NULL); @@ -190,6 +193,11 @@ void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, case QUEUE_RES_TP: queue->tp = res; break; + case QUEUE_RES_JFS: + queue->jfs = res; + if (queue->jfs) + queue->jfs_id = queue->jfs->id; + break; case QUEUE_RES_JFC: queue->jfc = res; if (queue->jfc) diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index af1c54b771ba..5b434ae66bb9 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -10,17 +10,20 @@ struct queue_cfg; enum cdma_queue_res_type { QUEUE_RES_TP, + QUEUE_RES_JFS, QUEUE_RES_JFC }; struct cdma_queue { struct cdma_base_jfc *jfc; + struct cdma_base_jfs *jfs; struct cdma_base_tp *tp; struct cdma_context *ctx; u32 id; struct queue_cfg cfg; bool is_kernel; struct list_head list; + u32 jfs_id; u32 jfc_id; }; diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 8458926b1605..689db795d0c9 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -23,6 +23,34 @@ struct cdma_udrv_priv { u32 out_len; }; +union cdma_jfs_flag { + struct { + u32 error_suspend : 1; + u32 outorder_comp : 1; + u32 reserved : 30; + } bs; + u32 value; +}; + +struct cdma_jfs_cfg { + u32 depth; + union cdma_jfs_flag flag; + u32 eid_index; + u8 priority; + u8 max_sge; + u8 max_rsge; + u8 rnr_retry; + u8 err_timeout; + u32 jfc_id; + u32 sqe_pos; + u32 rmt_eid; + u32 tpn; + u32 pld_pos; + u32 pld_token_id; + u32 queue_id; + u32 trans_mode; +}; + struct cdma_tp_cfg { u32 scna; u32 dcna; @@ -43,6 +71,15 @@ struct cdma_udata { struct cdma_udrv_priv *udrv_data; }; +struct cdma_base_jfs { + struct cdma_dev *dev; + struct cdma_context *ctx; + struct cdma_jfs_cfg cfg; + u64 usr_jfs; + u32 id; + atomic_t use_cnt; +}; + struct cdma_jfc_cfg { u32 depth; u32 ceqn; diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 1d34391de601..77d916e6d737 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -23,6 +23,7 @@ enum cdma_cmd { CDMA_CMD_DELETE_CTX, CDMA_CMD_CREATE_CTP, CDMA_CMD_DELETE_CTP, + CDMA_CMD_DELETE_JFS, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, CDMA_CMD_CREATE_JFC, @@ -43,6 +44,16 @@ struct cdma_cmd_udrv_priv { __u32 out_len; }; +struct cdma_cmd_delete_jfs_args { + struct { + __u32 jfs_id; + __u64 handle; + __u32 queue_id; + } in; + struct { + } out; +}; + struct cdma_cmd_create_ctp_args { struct { __u32 scna; -- Gitee From 66389d10eb29ebb7d089a841325f7959980de76c Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 11:36:13 +0800 Subject: [PATCH 017/243] ub: cdma: support the creation of jfs commit 23359f7079fc7b585d1d5aa770680c3d9e347cfd openEuler This patch implements the creation functionality of jfs in the CDMA driver. The implementation involves creating the jfs corresponding to the queue during the queue creation process. 
Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_chardev.c | 50 ++++++ drivers/ub/cdma/cdma_common.h | 26 +++ drivers/ub/cdma/cdma_dev.c | 3 + drivers/ub/cdma/cdma_ioctl.c | 97 +++++++++++ drivers/ub/cdma/cdma_jfs.c | 286 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 13 ++ drivers/ub/cdma/cdma_queue.c | 36 +++- include/uapi/ub/cdma/cdma_abi.h | 53 ++++++ 8 files changed, 562 insertions(+), 2 deletions(-) diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index b3a8b75b019e..124b5701b253 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -9,6 +9,7 @@ #include "cdma_ioctl.h" #include "cdma_context.h" #include "cdma_chardev.h" +#include "cdma_jfs.h" #include "cdma_types.h" #include "cdma_uobj.h" #include "cdma.h" @@ -32,6 +33,11 @@ static void cdma_num_free(struct cdma_dev *cdev) spin_unlock(&cdma_num_mg.lock); } +static inline u64 cdma_get_mmap_idx(struct vm_area_struct *vma) +{ + return (vma->vm_pgoff >> MAP_INDEX_SHIFT) & MAP_INDEX_MASK; +} + static inline int cdma_get_mmap_cmd(struct vm_area_struct *vma) { return (vma->vm_pgoff & MAP_COMMAND_MASK); @@ -74,11 +80,39 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -ENOIOCTLCMD; } +static int cdma_remap_check_jfs_id(struct cdma_file *cfile, u32 jfs_id) +{ + struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfs *jfs; + int ret = -EINVAL; + + spin_lock(&cdev->jfs_table.lock); + jfs = idr_find(&cdev->jfs_table.idr_tbl.idr, jfs_id); + if (!jfs) { + spin_unlock(&cdev->jfs_table.lock); + dev_err(cdev->dev, + "check failed, jfs_id = %u not exist.\n", jfs_id); + return ret; + } + + if (cfile->uctx != jfs->base_jfs.ctx) { + dev_err(cdev->dev, + "check failed, jfs_id = %u, uctx invalid\n", jfs_id); + spin_unlock(&cdev->jfs_table.lock); + return -EINVAL; + } + spin_unlock(&cdev->jfs_table.lock); + + return 0; +} + static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct *vma) { #define JFC_DB_UNMAP_BOUND 1 struct cdma_dev *cdev = cfile->cdev; resource_size_t db_addr; + u64 address; + u32 jfs_id; u32 cmd; db_addr = cdev->db_base; @@ -96,6 +130,22 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * return -EAGAIN; } break; + case CDMA_MMAP_JETTY_DSQE: + jfs_id = cdma_get_mmap_idx(vma); + if (cdma_remap_check_jfs_id(cfile, jfs_id)) { + dev_err(cdev->dev, + "mmap failed, invalid jfs_id = %u\n", jfs_id); + return -EINVAL; + } + + address = (uint64_t)db_addr + CDMA_JETTY_DSQE_OFFSET + jfs_id * PAGE_SIZE; + + if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) { + dev_err(cdev->dev, "remap jetty page failed.\n"); + return -EAGAIN; + } + break; default: dev_err(cdev->dev, "mmap failed, cmd(%u) is not supported.\n", cmd); diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 5e2c84114e35..b5a149658847 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -7,10 +7,30 @@ #include #include "cdma.h" +#define JETTY_DSQE_OFFSET 0x1000 +#define CDMA_USER_DATA_H_OFFSET 32U + +#define SQE_TOKEN_ID_L_MASK GENMASK(11, 0) +#define SQE_TOKEN_ID_H_OFFSET 12U +#define SQE_TOKEN_ID_H_MASK GENMASK(7, 0) +#define SQE_VA_L_OFFSET 12U +#define SQE_VA_L_VALID_BIT GENMASK(19, 0) +#define SQE_VA_H_OFFSET 32U +#define SQE_VA_H_VALID_BIT GENMASK(31, 0) +#define WQE_BB_SIZE_SHIFT 6 +#define AVAIL_SGMT_OST_INIT 512 + #define 
CDMA_RANGE_INDEX_ENTRY_CNT 0x100000 #define CDMA_DB_SIZE 64 +#define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) + +enum cdma_jfsc_mode { + CDMA_JFS_MODE, + CDMA_JETTY_MODE, +}; + enum cdma_jetty_state { CDMA_JETTY_RESET, CDMA_JETTY_READY, @@ -18,6 +38,12 @@ enum cdma_jetty_state { CDMA_JETTY_ERROR, }; +enum cdma_jetty_type { + CDMA_JETTY_ROL = 2, + CDMA_JETTY_ROI, + CDMA_JETTY_TYPE_RESERVED, +}; + struct cdma_jetty_queue { struct cdma_buf buf; void *kva_curr; diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 3806b19f8d52..75c87176e868 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -105,12 +105,15 @@ static void cdma_tbl_destroy(struct cdma_dev *cdev, struct cdma_table *table, static void cdma_init_tables(struct cdma_dev *cdev) { struct cdma_res *queue = &cdev->caps.queue; + struct cdma_res *jfs = &cdev->caps.jfs; struct cdma_res *jfc = &cdev->caps.jfc; cdma_tbl_init(&cdev->queue_table, queue->start_idx + queue->max_cnt - 1, queue->start_idx); cdma_tbl_init(&cdev->jfc_table, jfc->start_idx + jfc->max_cnt - 1, jfc->start_idx); + cdma_tbl_init(&cdev->jfs_table, jfs->max_cnt + jfs->start_idx - 1, + jfs->start_idx); cdma_tbl_init(&cdev->ctp_table, CDMA_RANGE_INDEX_ENTRY_CNT, 0); } diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 28ca199eee3a..5f63cad58088 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -246,6 +246,102 @@ static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, return ret; } +static void cdma_config_jfs(struct cdma_jfs_cfg *cfg, + const struct cdma_cmd_create_jfs_args *arg) +{ + cfg->depth = arg->in.depth; + cfg->flag.value = arg->in.flag; + cfg->eid_index = arg->in.eid_idx; + cfg->max_sge = arg->in.max_sge; + cfg->max_rsge = arg->in.max_rsge; + cfg->rnr_retry = arg->in.rnr_retry; + cfg->err_timeout = arg->in.err_timeout; + cfg->priority = arg->in.priority; + cfg->jfc_id = arg->in.jfc_id; + cfg->rmt_eid = arg->in.rmt_eid; + cfg->pld_token_id = arg->in.pld_token_id; + cfg->tpn = arg->in.tpn; + cfg->queue_id = arg->in.queue_id; + cfg->trans_mode = arg->in.trans_mode; +} + +static int cdma_cmd_create_jfs(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_create_jfs_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfs_cfg cfg = { 0 }; + struct cdma_udata udata = { 0 }; + struct cdma_base_jfs *jfs; + struct cdma_queue *queue; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != (u32)sizeof(arg) || !cfile->uctx) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(&cdev->adev->dev, + "create jfs get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.queue_id, UOBJ_TYPE_QUEUE); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, + "create jfs, get queue uobj failed, queue id = %u.\n", + arg.in.queue_id); + return -EINVAL; + } + queue = (struct cdma_queue *)uobj->object; + + uobj = cdma_uobj_create(cfile, UOBJ_TYPE_JFS); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "create jfs uobj failed.\n"); + return -ENOMEM; + } + + udata.uctx = cfile->uctx; + udata.udrv_data = (struct cdma_udrv_priv *)&arg.udata; + arg.in.queue_id = queue->id; + cdma_config_jfs(&cfg, &arg); + + jfs = cdma_create_jfs(cdev, &cfg, &udata); + if (!jfs) { + dev_err(&cdev->adev->dev, "create jfs failed.\n"); + ret = -EFAULT; + goto err_create_jfs; + } + + uobj->object = jfs; + + arg.out.id = jfs->id; + arg.out.handle = uobj->id; + 
arg.out.depth = jfs->cfg.depth; + arg.out.max_sge = jfs->cfg.max_sge; + arg.out.max_rsge = jfs->cfg.max_rsge; + + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + ret = -EFAULT; + dev_err(&cdev->adev->dev, + "create jfs copy to user data failed, ret = %d.\n", + ret); + goto err_copy_to_usr; + } + + cdma_set_queue_res(cdev, queue, QUEUE_RES_JFS, jfs); + + return 0; +err_copy_to_usr: + cdma_delete_jfs(cdev, jfs->id); +err_create_jfs: + cdma_uobj_delete(uobj); + return ret; +} + static int cdma_cmd_delete_jfs(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { @@ -530,6 +626,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_CTX] = cdma_delete_ucontext, [CDMA_CMD_CREATE_CTP] = cdma_cmd_create_ctp, [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, + [CDMA_CMD_CREATE_JFS] = cdma_cmd_create_jfs, [CDMA_CMD_DELETE_JFS] = cdma_cmd_delete_jfs, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 62846e395ba8..599068837a4b 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -15,6 +15,204 @@ #include "cdma_context.h" #include "cdma_jfs.h" +static int cdma_get_user_jfs_cmd(struct cdma_dev *cdev, struct cdma_jfs *jfs, + struct cdma_udata *udata, + struct cdma_create_jfs_ucmd *ucmd) +{ + struct cdma_context *ctx; + int ret; + + if (!udata) { + jfs->jfs_addr = (uintptr_t)&jfs->sq; + jfs->is_kernel = true; + return 0; + } + + if (!udata->udrv_data || !udata->udrv_data->in_addr || + udata->udrv_data->in_len != (u32)sizeof(*ucmd)) { + dev_err(cdev->dev, "invalid parameter.\n"); + return -EINVAL; + } + + ret = (int)copy_from_user(ucmd, (void *)udata->udrv_data->in_addr, + (u32)sizeof(*ucmd)); + if (ret) { + dev_err(cdev->dev, + "copy jfs udata failed, ret = %d.\n", ret); + return -EFAULT; + } + + if (!ucmd->jetty_addr || !ucmd->buf_len || !ucmd->buf_addr) { + dev_err(cdev->dev, "user cmd param is invalid.\n"); + return -EINVAL; + } + + ctx = udata->uctx; + jfs->base_jfs.ctx = ctx; + jfs->sq.tid = ctx->tid; + jfs->jfs_addr = ucmd->jetty_addr; + jfs->sq.id = ucmd->jfs_id; + jfs->queue_id = ucmd->queue_id; + jfs->sq.non_pin = ucmd->non_pin; + + return 0; +} + +static int cdma_alloc_jfs_id(struct cdma_dev *cdev, struct cdma_jfs *jfs) +{ + struct cdma_idr *idr_tbl = &cdev->jfs_table.idr_tbl; + u32 max = idr_tbl->max; + u32 min = idr_tbl->min; + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&cdev->jfs_table.lock); + id = idr_alloc(&idr_tbl->idr, jfs, idr_tbl->next, max, GFP_NOWAIT); + if (id < 0) { + id = idr_alloc(&idr_tbl->idr, jfs, min, max, GFP_NOWAIT); + if (id < 0) + dev_err(cdev->dev, "alloc cdma jfs id failed.\n"); + } + + idr_tbl->next = (id >= 0 && id + 1 <= max) ? 
id + 1 : min; + spin_unlock(&cdev->jfs_table.lock); + idr_preload_end(); + + return id; +} + +static inline u32 cdma_sq_cal_wqebb_num(u32 sqe_ctl_len, u32 sge_num) +{ + return (sqe_ctl_len + (sge_num - 1) * CDMA_JFS_SGE_SIZE) / CDMA_JFS_WQEBB_SIZE + 1; +} + +static inline void cdma_set_kernel_db(struct cdma_dev *cdev, + struct cdma_jetty_queue *queue) +{ + queue->dwqe_addr = + cdev->k_db_base + JETTY_DSQE_OFFSET + PAGE_SIZE * queue->id; + queue->db_addr = queue->dwqe_addr + CDMA_DOORBELL_OFFSET; +} + +static int cdma_get_sq_buf(struct cdma_dev *cdev, struct cdma_jetty_queue *sq, + struct cdma_jfs_cfg *jfs_cfg, + struct cdma_create_jfs_ucmd *ucmd, bool is_kernel) +{ + u32 wqe_bb_depth; + u32 sqe_bb_cnt; + int ret = 0; + u32 size; + + if (!is_kernel) { + ret = cdma_pin_queue_addr(cdev, ucmd->buf_addr, + ucmd->buf_len, &sq->buf); + if (ret) { + dev_err(cdev->dev, + "pin jfs queue addr failed, ret = %d.\n", + ret); + return ret; + } + + sq->buf.entry_cnt = ucmd->buf_len >> WQE_BB_SIZE_SHIFT; + sq->sqe_bb_cnt = ucmd->sqe_bb_cnt; + if (sq->sqe_bb_cnt > MAX_WQEBB_NUM) + sq->sqe_bb_cnt = MAX_WQEBB_NUM; + } else { + spin_lock_init(&sq->lock); + sq->tid = cdev->tid; + sq->max_sge_num = jfs_cfg->max_sge; + sqe_bb_cnt = + cdma_sq_cal_wqebb_num(SQE_WRITE_NOTIFY_CTL_LEN, + jfs_cfg->max_sge); + if (sqe_bb_cnt > MAX_WQEBB_NUM) + sqe_bb_cnt = MAX_WQEBB_NUM; + sq->sqe_bb_cnt = sqe_bb_cnt; + + wqe_bb_depth = roundup_pow_of_two(sqe_bb_cnt * jfs_cfg->depth); + sq->buf.entry_size = CDMA_JFS_WQEBB_SIZE; + size = ALIGN(wqe_bb_depth * sq->buf.entry_size, CDMA_HW_PAGE_SIZE); + sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; + + ret = cdma_k_alloc_buf(cdev, size, &sq->buf); + if (ret) { + dev_err(cdev->dev, + "alloc jfs (%u) sq buf failed, size = %u.\n", + sq->id, size); + return ret; + } + + cdma_set_kernel_db(cdev, sq); + sq->kva_curr = sq->buf.kva; + } + + return ret; +} + +static void cdma_init_jfsc(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg, + struct cdma_jfs *jfs, void *mb_buf) +{ + struct cdma_jfs_ctx *ctx = mb_buf; + + ctx->state = CDMA_JETTY_READY; + ctx->sl = cdev->sl[cfg->priority % cdev->sl_num]; + ctx->jfs_mode = CDMA_JFS_MODE; + ctx->type = (cfg->trans_mode == CDMA_JETTY_ROL) ? 
CDMA_JETTY_ROL : CDMA_JETTY_ROI; + ctx->sqe_base_addr_l = (jfs->sq.buf.addr >> SQE_VA_L_OFFSET) & + (u32)SQE_VA_L_VALID_BIT; + ctx->sqe_base_addr_h = (jfs->sq.buf.addr >> SQE_VA_H_OFFSET) & + (u32)SQE_VA_H_VALID_BIT; + ctx->sqe_token_id_l = jfs->sq.tid & (u32)SQE_TOKEN_ID_L_MASK; + ctx->sqe_token_id_h = (jfs->sq.tid >> SQE_TOKEN_ID_H_OFFSET) & + (u32)SQE_TOKEN_ID_H_MASK; + ctx->sqe_bb_shift = ilog2(roundup_pow_of_two(jfs->sq.buf.entry_cnt)); + ctx->tx_jfcn = cfg->jfc_id; + ctx->ta_timeout = cfg->err_timeout; + ctx->rnr_retry_num = cfg->rnr_retry; + ctx->user_data_l = jfs->jfs_addr; + ctx->user_data_h = jfs->jfs_addr >> CDMA_USER_DATA_H_OFFSET; + ctx->seid_idx = cfg->eid_index; + ctx->err_mode = cfg->flag.bs.error_suspend; + ctx->cmp_odr = cfg->flag.bs.outorder_comp; + ctx->avail_sgmt_ost = AVAIL_SGMT_OST_INIT; + ctx->sqe_pld_tokenid = jfs->sq.tid & (u32)SQE_PLD_TOKEN_ID_MASK; + ctx->next_send_ssn = get_random_u16(); + ctx->next_rcv_ssn = ctx->next_send_ssn; + + ctx->sqe_pos = cfg->sqe_pos; + ctx->sqe_pld_pos = cfg->pld_pos; + ctx->rmt_eid = cfg->rmt_eid; + ctx->rmt_tokenid = cfg->pld_token_id; + ctx->tpn = cfg->tpn; +} + +static inline void cdma_reset_jfs_queue(struct cdma_jetty_queue *sq) +{ + sq->kva_curr = sq->buf.kva; + sq->pi = 0; + sq->ci = 0; + sq->flush_flag = false; +} + +static int cdma_create_hw_jfs_ctx(struct cdma_dev *cdev, struct cdma_jfs *jfs, + struct cdma_jfs_cfg *cfg) +{ + struct ubase_mbx_attr attr = { 0 }; + struct cdma_jfs_ctx ctx = { 0 }; + int ret; + + cdma_init_jfsc(cdev, cfg, jfs, &ctx); + cdma_fill_mbx_attr(&attr, jfs->sq.id, CDMA_CMD_CREATE_JFS_CONTEXT, 0); + ret = cdma_post_mailbox_ctx(cdev, &ctx, sizeof(ctx), &attr); + if (ret) { + dev_err(cdev->dev, "upgrade jfs ctx failed, ret = %d.\n", ret); + return ret; + } + + cdma_reset_jfs_queue(&jfs->sq); + + return 0; +} + static void cdma_free_sq_buf(struct cdma_dev *cdev, struct cdma_jetty_queue *sq) { u32 size; @@ -28,6 +226,23 @@ static void cdma_free_sq_buf(struct cdma_dev *cdev, struct cdma_jetty_queue *sq) } } +static void cdma_set_query_flush_time(struct cdma_jetty_queue *sq, + u8 err_timeout) +{ + static u32 time[] = { + CDMA_TA_TIMEOUT_128MS, + CDMA_TA_TIMEOUT_1000MS, + CDMA_TA_TIMEOUT_8000MS, + CDMA_TA_TIMEOUT_64000MS, + }; + static u8 time_index_max = ARRAY_SIZE(time) - 1; + + if (err_timeout > time_index_max) + err_timeout = time_index_max; + + sq->ta_tmo = time[err_timeout]; +} + static inline void cdma_free_jfs_id(struct cdma_dev *cdev, u32 id) { spin_lock(&cdev->jfs_table.lock); @@ -35,6 +250,77 @@ static inline void cdma_free_jfs_id(struct cdma_dev *cdev, u32 id) spin_unlock(&cdev->jfs_table.lock); } +static int cdma_verify_jfs_cfg(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg) +{ + if (!cfg->depth || cfg->depth > cdev->caps.jfs.depth) { + dev_err(cdev->dev, + "jfs param is invalid, depth = %u, max_depth = %u.\n", + cfg->depth, cdev->caps.jfs.depth); + return -EINVAL; + } + + return 0; +} + +struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, + struct cdma_jfs_cfg *cfg, + struct cdma_udata *udata) +{ + struct cdma_create_jfs_ucmd ucmd = { 0 }; + struct cdma_jfs *jfs; + int ret; + + if (cdma_verify_jfs_cfg(cdev, cfg)) + return NULL; + + jfs = kzalloc(sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return NULL; + + ret = cdma_get_user_jfs_cmd(cdev, jfs, udata, &ucmd); + if (ret) + goto err_alloc_jfsn; + + ret = cdma_alloc_jfs_id(cdev, jfs); + if (ret < 0) + goto err_alloc_jfsn; + + jfs->id = ret; + jfs->sq.id = ret; + jfs->base_jfs.id = jfs->sq.id; + jfs->base_jfs.cfg = *cfg; + jfs->dev = cdev; + 
jfs->queue_id = cfg->queue_id; + + ret = cdma_get_sq_buf(cdev, &jfs->sq, cfg, &ucmd, jfs->is_kernel); + if (ret) + goto err_get_jfs_buf; + + ret = cdma_create_hw_jfs_ctx(cdev, jfs, cfg); + if (ret) + goto err_create_hw_jfsc; + + cdma_set_query_flush_time(&jfs->sq, cfg->err_timeout); + + jfs->sq.state = CDMA_JETTY_READY; + jfs->base_jfs.dev = cdev; + + dev_dbg(cdev->dev, + "create jfs id = %u, queue id = %u, depth = %u, priority = %u, jfc id = %u.\n", + jfs->id, jfs->queue_id, cfg->depth, cfg->priority, cfg->jfc_id); + + return &jfs->base_jfs; + +err_create_hw_jfsc: + cdma_free_sq_buf(cdev, &jfs->sq); +err_get_jfs_buf: + cdma_free_jfs_id(cdev, jfs->sq.id); +err_alloc_jfsn: + kfree(jfs); + + return NULL; +} + static int cdma_set_jfs_state(struct cdma_dev *cdev, u32 jfs_id, enum cdma_jetty_state state) { diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 414f18647d8f..7625ace4b5c7 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -7,6 +7,16 @@ #include "cdma_common.h" #include "cdma_types.h" +#define MAX_WQEBB_NUM 4 +#define CDMA_JFS_WQEBB_SIZE 64 +#define CDMA_JFS_SGE_SIZE 16 +#define SQE_WRITE_NOTIFY_CTL_LEN 80 + +#define CDMA_TA_TIMEOUT_128MS 128 +#define CDMA_TA_TIMEOUT_1000MS 1000 +#define CDMA_TA_TIMEOUT_8000MS 8000 +#define CDMA_TA_TIMEOUT_64000MS 64000 + #define CDMA_RCV_SEND_MAX_DIFF 512U struct cdma_jfs { @@ -140,6 +150,9 @@ struct cdma_jfs_ctx { u32 taack_nack_bm[32]; }; +struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, + struct cdma_jfs_cfg *cfg, + struct cdma_udata *udata); int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); #endif diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 20a11bfa5194..9b03baef162c 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -23,6 +23,24 @@ struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) return queue; } +static void cdma_k_assemble_jfs_cfg(struct cdma_jfs_cfg *jfs_cfg, + struct cdma_dev *cdev, + u32 eid_index, + struct queue_cfg *cfg, + struct cdma_queue *queue) +{ + jfs_cfg->eid_index = eid_index; + jfs_cfg->max_rsge = cdev->base.attr.dev_cap.max_jfs_rsge; + jfs_cfg->max_sge = cdev->base.attr.dev_cap.max_jfs_sge; + jfs_cfg->depth = cfg->queue_depth; + jfs_cfg->err_timeout = CDMA_TYPICAL_ERR_TIMEOUT; + jfs_cfg->priority = cfg->priority; + jfs_cfg->rnr_retry = CDMA_TYPICAL_RNR_RETRY; + jfs_cfg->rmt_eid = cfg->rmt_eid.dw0; + jfs_cfg->queue_id = queue->id; + jfs_cfg->trans_mode = cfg->trans_mode; +} + static void cdma_k_assemble_jfc_cfg(struct cdma_jfc_cfg *jfc_cfg, struct queue_cfg *cfg, struct cdma_queue *queue) @@ -44,9 +62,11 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, struct cdma_queue *queue, u32 eid_index) { struct cdma_jfc_cfg jfc_cfg = { 0 }; + struct cdma_jfs_cfg jfs_cfg = { 0 }; struct cdma_tp_cfg tp_cfg = { 0 }; int ret; + cdma_k_assemble_jfs_cfg(&jfs_cfg, cdev, eid_index, cfg, queue); cdma_k_assemble_jfc_cfg(&jfc_cfg, cfg, queue); cdma_k_assemble_tp_cfg(&tp_cfg, cdev, cfg); @@ -63,13 +83,25 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, goto delete_jfc; } + jfs_cfg.tpn = queue->tp->tpn; + jfs_cfg.jfc_id = queue->jfc->id; + queue->jfs = cdma_create_jfs(cdev, &jfs_cfg, NULL); + if (!queue->jfs) { + dev_err(cdev->dev, "create jfs failed.\n"); + ret = -EFAULT; + goto delete_tp; + } + + queue->jfs_id = queue->jfs->id; queue->jfc_id = queue->jfc->id; - dev_dbg(cdev->dev, "set queue %u jfc id: %u.\n", - queue->id, 
queue->jfc_id); + dev_dbg(cdev->dev, "set queue %u jfs id: %u, jfc id: %u.\n", + queue->id, queue->jfs_id, queue->jfc_id); return 0; +delete_tp: + cdma_delete_ctp(cdev, queue->tp->tp_id); delete_jfc: cdma_delete_jfc(cdev, queue->jfc->id, NULL); diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 77d916e6d737..59367df04865 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -10,7 +10,15 @@ #define CDMA_IOC_MAGIC 'C' #define CDMA_SYNC _IOWR(CDMA_IOC_MAGIC, 0, struct cdma_ioctl_hdr) +#define CDMA_DOORBELL_OFFSET 0x80 + #define MAP_COMMAND_MASK 0xff +#define MAP_INDEX_MASK 0xffffff +#define MAP_INDEX_SHIFT 8 + +/* cdma queue cfg deault value */ +#define CDMA_TYPICAL_RNR_RETRY 7 +#define CDMA_TYPICAL_ERR_TIMEOUT 2 /* 0:128ms 1:1s 2:8s 3:64s */ enum db_mmap_type { CDMA_MMAP_JFC_PAGE, @@ -23,6 +31,7 @@ enum cdma_cmd { CDMA_CMD_DELETE_CTX, CDMA_CMD_CREATE_CTP, CDMA_CMD_DELETE_CTP, + CDMA_CMD_CREATE_JFS, CDMA_CMD_DELETE_JFS, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, @@ -37,6 +46,21 @@ struct cdma_ioctl_hdr { __u64 args_addr; }; +struct cdma_create_jfs_ucmd { + __u64 buf_addr; + __u32 buf_len; + __u64 db_addr; + __u64 idx_addr; + __u32 idx_len; + __u64 jetty_addr; + __u32 sqe_bb_cnt; + __u32 jetty_type; + __u32 non_pin; + __u32 jfs_id; + __u32 queue_id; + __u32 tid; +}; + struct cdma_cmd_udrv_priv { __u64 in_addr; __u32 in_len; @@ -44,6 +68,35 @@ struct cdma_cmd_udrv_priv { __u32 out_len; }; +struct cdma_cmd_create_jfs_args { + struct { + __u32 depth; + __u32 flag; + __u32 eid_idx; + __u8 priority; + __u8 max_sge; + __u8 max_rsge; + __u8 retry_cnt; + __u8 rnr_retry; + __u8 err_timeout; + __u32 jfc_id; + __u32 queue_id; + __u32 rmt_eid; + __u32 pld_token_id; + __u32 tpn; + __u64 dma_jfs; /* dma jfs pointer */ + __u32 trans_mode; + } in; + struct { + __u32 id; + __u32 depth; + __u8 max_sge; + __u8 max_rsge; + __u64 handle; + } out; + struct cdma_cmd_udrv_priv udata; +}; + struct cdma_cmd_delete_jfs_args { struct { __u32 jfs_id; -- Gitee From 778334d30f4a5ed2a0806995cf8af42986c19500 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 11:47:02 +0800 Subject: [PATCH 018/243] ub: cdma: support reporting asynchronous events commit 35203448b9d150f435ec8bfa8072b5dc748127a0 openEuler This patch implements the handling and reporting of asynchronous events in the CDMA driver. The implementation includes writing the corresponding asynchronous events to the device's asynchronous event queue when exceptions occur in jfs or jfc. 
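User space consumes these events by polling the jfae fd handed out at context creation (args.out.async_fd below) and then issuing the new ioctl; event_data carries the queue id of the failing jfs or jfc. A minimal sketch of that loop body:

#include <poll.h>
#include <sys/ioctl.h>
#include <ub/cdma/cdma_abi.h>

static int read_async_event(int async_fd, struct cdma_cmd_async_event *ev)
{
	struct pollfd pfd = { .fd = async_fd, .events = POLLIN };

	/* the kernel wakes poll_wait when it queues a new event */
	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	/* copies {event_data, event_type} back to *ev on success */
	return ioctl(async_fd, CDMA_CMD_GET_ASYNC_EVENT, ev);
}

Opening the fd with O_NONBLOCK turns the ioctl into a non-blocking drain that fails with -EAGAIN when the list is empty, matching the behavior of cdma_wait_event().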
Signed-off-by: Zhipeng Lu Signed-off-by: Jingjing Ku Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 19 +- drivers/ub/cdma/cdma_context.h | 1 + drivers/ub/cdma/cdma_eq.c | 194 ++++++++++++++ drivers/ub/cdma/cdma_eq.h | 16 ++ drivers/ub/cdma/cdma_event.c | 431 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_event.h | 58 +++++ drivers/ub/cdma/cdma_ioctl.c | 25 +- drivers/ub/cdma/cdma_jfc.c | 29 +++ drivers/ub/cdma/cdma_jfs.c | 26 +- drivers/ub/cdma/cdma_main.c | 22 ++ drivers/ub/cdma/cdma_types.h | 23 +- include/uapi/ub/cdma/cdma_abi.h | 15 ++ 13 files changed, 855 insertions(+), 6 deletions(-) create mode 100644 drivers/ub/cdma/cdma_eq.c create mode 100644 drivers/ub/cdma/cdma_eq.h create mode 100644 drivers/ub/cdma/cdma_event.c create mode 100644 drivers/ub/cdma/cdma_event.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 92cd9c3b9f58..58a355df4c33 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o + cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index 20835ae6429d..995f28def668 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -8,7 +8,7 @@ #include #include #include - +#include #include extern u32 jfc_arm_mode; @@ -187,9 +187,26 @@ struct cdma_dev { struct cdma_table ctp_table; struct cdma_table jfs_table; struct cdma_table jfc_table; + struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; struct mutex file_mutex; struct list_head file_list; struct page *arm_db_page; }; +struct cdma_jfs_event { + struct list_head async_event_list; + u32 async_events_reported; +}; + +struct cdma_jfc_event { + struct cdma_base_jfc *jfc; + struct list_head async_event_list; + u32 async_events_reported; +}; + +static inline struct cdma_dev *get_cdma_dev(struct auxiliary_device *adev) +{ + return (struct cdma_dev *)dev_get_drvdata(&adev->dev); +} + #endif /* _CDMA_H_ */ diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 8cbc980dc726..c48ac55631bf 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -19,6 +19,7 @@ struct cdma_context { spinlock_t lock; int handle; u32 tid; + void *jfae; bool is_kernel; atomic_t ref_cnt; struct list_head queue_list; diff --git a/drivers/ub/cdma/cdma_eq.c b/drivers/ub/cdma/cdma_eq.c new file mode 100644 index 000000000000..51c84ebaf7b6 --- /dev/null +++ b/drivers/ub/cdma/cdma_eq.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include +#include +#include "cdma_jfs.h" +#include "cdma_jfc.h" +#include "cdma.h" +#include "cdma_eq.h" + +static int cdma_ae_jfs_check_error(struct auxiliary_device *adev, + u32 jetty_id) +{ + struct cdma_dev *cdev = get_cdma_dev(adev); + struct cdma_base_jfs *base_jfs; + struct cdma_event ae; + struct cdma_jfs *jfs; + + spin_lock(&cdev->jfs_table.lock); + jfs = idr_find(&cdev->jfs_table.idr_tbl.idr, jetty_id); + if (!jfs) { + dev_err(cdev->dev, "ae get jfs from table failed, id = %u.\n", + jetty_id); + spin_unlock(&cdev->jfs_table.lock); + return -EINVAL; + } + + base_jfs = &jfs->base_jfs; + + if (base_jfs->jfae_handler && base_jfs->ctx) { + refcount_inc(&jfs->ae_ref_cnt); + spin_unlock(&cdev->jfs_table.lock); + ae.dev = base_jfs->dev; + ae.element.jfs = base_jfs; + ae.event_type = CDMA_EVENT_JFS_ERR; + base_jfs->jfae_handler(&ae, base_jfs->ctx); + if (refcount_dec_and_test(&jfs->ae_ref_cnt)) { + complete(&jfs->ae_comp); + dev_dbg(cdev->dev, "jfs ae handler done.\n"); + } + } else { + spin_unlock(&cdev->jfs_table.lock); + } + + return 0; +} + +static int cdma_ae_jfc_check_error(struct auxiliary_device *adev, + u32 jetty_id) +{ + struct cdma_dev *cdev = get_cdma_dev(adev); + struct cdma_base_jfc *base_jfc; + struct cdma_event ae; + struct cdma_jfc *jfc; + unsigned long flags; + + spin_lock_irqsave(&cdev->jfc_table.lock, flags); + jfc = idr_find(&cdev->jfc_table.idr_tbl.idr, jetty_id); + if (!jfc) { + dev_err(cdev->dev, "get jfc from table failed, id = %u.\n", + jetty_id); + spin_unlock_irqrestore(&cdev->jfc_table.lock, flags); + return -EINVAL; + } + base_jfc = &jfc->base; + + if (base_jfc->jfae_handler && base_jfc->ctx) { + refcount_inc(&jfc->event_refcount); + spin_unlock_irqrestore(&cdev->jfc_table.lock, flags); + ae.dev = base_jfc->dev; + ae.element.jfc = base_jfc; + ae.event_type = CDMA_EVENT_JFC_ERR; + base_jfc->jfae_handler(&ae, base_jfc->ctx); + if (refcount_dec_and_test(&jfc->event_refcount)) { + complete(&jfc->event_comp); + dev_dbg(cdev->dev, "jfc ae handler done.\n"); + } + } else { + spin_unlock_irqrestore(&cdev->jfc_table.lock, flags); + } + + return 0; +} + +static int cdma_ae_jetty_level_error(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct ubase_event_nb *ev_nb = container_of(nb, struct ubase_event_nb, nb); + struct auxiliary_device *adev = ev_nb->back; + struct ubase_aeq_notify_info *info = data; + u32 jetty_id; + + jetty_id = info->aeqe->event.queue_event.num; + + switch (info->sub_type) { + case UBASE_SUBEVENT_TYPE_JFS_CHECK_ERROR: + return cdma_ae_jfs_check_error(adev, jetty_id); + case UBASE_SUBEVENT_TYPE_JFC_CHECK_ERROR: + return cdma_ae_jfc_check_error(adev, jetty_id); + default: + dev_warn(&adev->dev, "cdma get unsupported async event type %u.\n", + info->sub_type); + return -EINVAL; + } +} + +static struct cdma_ae_operation cdma_ae_opts[] = { + {UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR, cdma_ae_jetty_level_error} +}; + +static int cdma_event_register(struct auxiliary_device *adev, + enum ubase_event_type event_type, notifier_fn_t call) +{ + struct cdma_dev *cdma_dev = get_cdma_dev(adev); + struct ubase_event_nb *event_cb; + int ret; + + event_cb = kzalloc(sizeof(*event_cb), GFP_KERNEL); + if (!event_cb) + return -ENOMEM; + + event_cb->drv_type = UBASE_DRV_CDMA; + event_cb->event_type = event_type; + event_cb->back = (void *)adev; + event_cb->nb.notifier_call = call; + + ret = ubase_event_register(adev, event_cb); + if (ret) { + dev_err(cdma_dev->dev, + "register async event 
failed, event type = %u, ret = %d.\n", + event_cb->event_type, ret); + kfree(event_cb); + return ret; + } + cdma_dev->ae_event_addr[event_type] = event_cb; + + return 0; +} + +/* thanks to drivers/infiniband/hw/erdma/erdma_eq.c */ +int cdma_reg_ae_event(struct auxiliary_device *adev) +{ + struct cdma_dev *cdma_dev; + u32 opt_num; + int ret = 0; + int i; + + if (!adev) + return -EINVAL; + + cdma_dev = get_cdma_dev(adev); + if (!cdma_dev) + return -EINVAL; + + opt_num = sizeof(cdma_ae_opts) / sizeof(struct cdma_ae_operation); + for (i = 0; i < opt_num; ++i) { + ret = cdma_event_register(adev, + (enum ubase_event_type)cdma_ae_opts[i].op_code, + cdma_ae_opts[i].call); + if (ret) { + cdma_unreg_ae_event(adev); + return -EINVAL; + } + } + + dev_dbg(cdma_dev->dev, "cdma register ae event, ret = %d.\n", ret); + + return ret; +} + +void cdma_unreg_ae_event(struct auxiliary_device *adev) +{ + struct cdma_dev *cdma_dev; + int i; + + if (!adev) + return; + + cdma_dev = get_cdma_dev(adev); + if (!cdma_dev) + return; + + for (i = 0; i < UBASE_EVENT_TYPE_MAX; i++) { + if (cdma_dev->ae_event_addr[i]) { + ubase_event_unregister(adev, cdma_dev->ae_event_addr[i]); + kfree(cdma_dev->ae_event_addr[i]); + cdma_dev->ae_event_addr[i] = NULL; + } + } +} diff --git a/drivers/ub/cdma/cdma_eq.h b/drivers/ub/cdma/cdma_eq.h new file mode 100644 index 000000000000..51417e3eea50 --- /dev/null +++ b/drivers/ub/cdma/cdma_eq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_EQ_H__ +#define __CDMA_EQ_H__ +#include + +struct cdma_ae_operation { + u32 op_code; + notifier_fn_t call; +}; + +int cdma_reg_ae_event(struct auxiliary_device *adev); +void cdma_unreg_ae_event(struct auxiliary_device *adev); + +#endif diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c new file mode 100644 index 000000000000..5d81363c88ff --- /dev/null +++ b/drivers/ub/cdma/cdma_event.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define pr_fmt(fmt) "CDMA: " fmt +#define dev_fmt pr_fmt + +#include +#include +#include +#include +#include +#include +#include +#include "cdma_uobj.h" +#include "cdma_event.h" + +static __poll_t cdma_jfe_poll(struct cdma_jfe *jfe, struct file *filp, + struct poll_table_struct *wait) +{ + __poll_t flag = 0; + + poll_wait(filp, &jfe->poll_wait, wait); + + spin_lock_irq(&jfe->lock); + if (!list_empty(&jfe->event_list)) + flag = EPOLLIN | EPOLLRDNORM; + + spin_unlock_irq(&jfe->lock); + + return flag; +} + +static u32 cdma_read_jfe_event(struct cdma_jfe *jfe, u32 max_event_cnt, + struct list_head *event_list) +{ + struct cdma_jfe_event *event; + struct list_head *next; + struct list_head *p; + u32 cnt = 0; + + if (!max_event_cnt) + return 0; + + spin_lock_irq(&jfe->lock); + + list_for_each_safe(p, next, &jfe->event_list) { + event = list_entry(p, struct cdma_jfe_event, node); + if (event->counter) { + ++(*event->counter); + list_del(&event->obj_node); + } + list_del(p); + if (jfe->event_list_count > 0) + jfe->event_list_count--; + list_add_tail(p, event_list); + cnt++; + if (cnt == max_event_cnt) + break; + } + spin_unlock_irq(&jfe->lock); + + return cnt; +} + +static int cdma_wait_event(struct cdma_jfe *jfe, bool nonblock, + u32 max_event_cnt, u32 *event_cnt, + struct list_head *event_list) +{ + int ret; + + *event_cnt = 0; + spin_lock_irq(&jfe->lock); + while (list_empty(&jfe->event_list)) { + spin_unlock_irq(&jfe->lock); + if (nonblock) + return -EAGAIN; + + ret = wait_event_interruptible(jfe->poll_wait, + !list_empty(&jfe->event_list)); + if (ret) + return ret; + + spin_lock_irq(&jfe->lock); + if (list_empty(&jfe->event_list)) { + spin_unlock_irq(&jfe->lock); + return -EIO; + } + } + spin_unlock_irq(&jfe->lock); + *event_cnt = cdma_read_jfe_event(jfe, max_event_cnt, event_list); + + return 0; +} + +static void cdma_write_event(struct cdma_jfe *jfe, u64 event_data, + u32 event_type, struct list_head *obj_event_list, + u32 *counter) +{ + struct cdma_jfe_event *event; + unsigned long flags; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (event == NULL) + return; + + spin_lock_irqsave(&jfe->lock, flags); + INIT_LIST_HEAD(&event->obj_node); + event->event_type = event_type; + event->event_data = event_data; + event->counter = counter; + list_add_tail(&event->node, &jfe->event_list); + if (obj_event_list) + list_add_tail(&event->obj_node, obj_event_list); + if (jfe->async_queue) + kill_fasync(&jfe->async_queue, SIGIO, POLL_IN); + jfe->event_list_count++; + spin_unlock_irqrestore(&jfe->lock, flags); + wake_up_interruptible(&jfe->poll_wait); +} + +static void cdma_init_jfe(struct cdma_jfe *jfe) +{ + spin_lock_init(&jfe->lock); + INIT_LIST_HEAD(&jfe->event_list); + init_waitqueue_head(&jfe->poll_wait); + jfe->async_queue = NULL; + jfe->event_list_count = 0; +} + +static void cdma_uninit_jfe(struct cdma_jfe *jfe) +{ + struct cdma_jfe_event *event; + struct list_head *p, *next; + + spin_lock_irq(&jfe->lock); + list_for_each_safe(p, next, &jfe->event_list) { + event = list_entry(p, struct cdma_jfe_event, node); + if (event->counter) + list_del(&event->obj_node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); +} + +static void cdma_write_async_event(struct cdma_context *ctx, u64 event_data, + u32 type, struct list_head *obj_event_list, + u32 *counter) +{ + struct cdma_jfae *jfae; + + rcu_read_lock(); + jfae = (struct cdma_jfae *)(rcu_dereference(ctx->jfae)); + if (!jfae) + goto err_free_rcu; + + if (jfae->jfe.event_list_count >= MAX_EVENT_LIST_SIZE) { + pr_debug("event list overflow, 
and this write will be discarded.\n"); + goto err_free_rcu; + } + + cdma_write_event(&jfae->jfe, event_data, type, obj_event_list, counter); + +err_free_rcu: + rcu_read_unlock(); +} + +void cdma_jfs_async_event_cb(struct cdma_event *event, struct cdma_context *ctx) +{ + struct cdma_jfs_event *jfs_event; + + jfs_event = &event->element.jfs->jfs_event; + cdma_write_async_event(ctx, event->element.jfs->cfg.queue_id, + event->event_type, &jfs_event->async_event_list, + &jfs_event->async_events_reported); +} + +void cdma_jfc_async_event_cb(struct cdma_event *event, struct cdma_context *ctx) +{ + struct cdma_jfc_event *jfc_event; + + jfc_event = &event->element.jfc->jfc_event; + cdma_write_async_event(ctx, event->element.jfc->jfc_cfg.queue_id, + event->event_type, &jfc_event->async_event_list, + &jfc_event->async_events_reported); +} + +static inline void cdma_set_async_event(struct cdma_cmd_async_event *async_event, + const struct cdma_jfe_event *event) +{ + async_event->event_data = event->event_data; + async_event->event_type = event->event_type; +} + +static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, + unsigned long arg) +{ + struct cdma_cmd_async_event async_event = { 0 }; + struct cdma_jfe_event *event; + struct list_head event_list; + u32 event_cnt; + int ret; + + if (!arg) { + pr_err("invalid jfae arg.\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&event_list); + ret = cdma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, + &event_cnt, &event_list); + if (ret < 0) { + pr_err("wait event failed, ret = %d.\n", ret); + return ret; + } + event = list_first_entry(&event_list, struct cdma_jfe_event, node); + if (event == NULL) + return -EIO; + + cdma_set_async_event(&async_event, event); + list_del(&event->node); + kfree(event); + + if (event_cnt > 0) { + ret = (int)copy_to_user((void *)arg, &async_event, + sizeof(async_event)); + if (ret) { + pr_err("dev copy to user failed, ret = %d\n", ret); + return -EFAULT; + } + } + + return 0; +} + +static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait) +{ + struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; + + if (!jfae || !jfae->cfile || !jfae->cfile->cdev) + return POLLERR; + + return cdma_jfe_poll(&jfae->jfe, filp, wait); +} + +static long cdma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; + unsigned int nr; + int ret; + + if (!jfae) + return -EINVAL; + + nr = (unsigned int)_IOC_NR(cmd); + + switch (nr) { + case JFAE_CMD_GET_ASYNC_EVENT: + ret = cdma_get_async_event(jfae, filp, arg); + break; + default: + dev_err(jfae->cfile->cdev->dev, "nr = %u.\n", nr); + ret = -ENOIOCTLCMD; + break; + } + + return (long)ret; +} + +static int cdma_delete_jfae(struct inode *inode, struct file *filp) +{ + struct cdma_file *cfile; + struct cdma_jfae *jfae; + + if (!filp || !filp->private_data) + return 0; + + jfae = (struct cdma_jfae *)filp->private_data; + cfile = jfae->cfile; + if (!cfile) + return 0; + + if (!mutex_trylock(&cfile->ctx_mutex)) + return -ENOLCK; + jfae->ctx->jfae = NULL; + cdma_uninit_jfe(&jfae->jfe); + kfree(jfae); + filp->private_data = NULL; + mutex_unlock(&cfile->ctx_mutex); + cdma_close_uobj_fd(cfile); + + pr_debug("jfae is release.\n"); + return 0; +} + +static int cdma_jfae_fasync(int fd, struct file *filp, int on) +{ + struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; + int ret; + + if (!jfae) + return -EINVAL; + + spin_lock_irq(&jfae->jfe.lock); + ret = 
fasync_helper(fd, filp, on, &jfae->jfe.async_queue); + spin_unlock_irq(&jfae->jfe.lock); + + return ret; +} + +const struct file_operations cdma_jfae_fops = { + .owner = THIS_MODULE, + .poll = cdma_jfae_poll, + .unlocked_ioctl = cdma_jfae_ioctl, + .release = cdma_delete_jfae, + .fasync = cdma_jfae_fasync, +}; + +struct cdma_jfae *cdma_alloc_jfae(struct cdma_file *cfile) +{ + struct cdma_jfae *jfae; + struct file *file; + int fd; + + if (!cfile) + return NULL; + + fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (fd < 0) + return NULL; + + jfae = kzalloc(sizeof(*jfae), GFP_KERNEL); + if (!jfae) + goto err_put_unused_fd; + + file = anon_inode_getfile("[jfae]", &cdma_jfae_fops, jfae, + O_RDWR | O_CLOEXEC); + if (IS_ERR(file)) + goto err_free_jfae; + + cdma_init_jfe(&jfae->jfe); + jfae->fd = fd; + jfae->file = file; + jfae->cfile = cfile; + fd_install(fd, file); + + return jfae; + +err_free_jfae: + kfree(jfae); +err_put_unused_fd: + put_unused_fd(fd); + + return NULL; +} + +void cdma_free_jfae(struct cdma_jfae *jfae) +{ + if (!jfae) + return; + + fput(jfae->file); + put_unused_fd(jfae->fd); +} + +int cdma_get_jfae(struct cdma_context *ctx) +{ + struct cdma_jfae *jfae; + struct file *file; + + if (!ctx) + return -EINVAL; + + jfae = (struct cdma_jfae *)ctx->jfae; + if (!jfae) + return -EINVAL; + + file = fget(jfae->fd); + if (!file) + return -ENOENT; + + if (file->private_data != jfae) { + fput(file); + return -EBADF; + } + + return 0; +} + +void cdma_init_jfc_event(struct cdma_jfc_event *event, struct cdma_base_jfc *jfc) +{ + event->async_events_reported = 0; + INIT_LIST_HEAD(&event->async_event_list); + event->jfc = jfc; +} + +void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list) +{ + struct cdma_jfe_event *event, *tmp; + struct cdma_jfae *jfae; + struct cdma_jfe *jfe; + + if (!ctx || !ctx->jfae) + return; + + jfae = (struct cdma_jfae *)ctx->jfae; + jfe = &jfae->jfe; + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); + fput(jfae->file); +} + +void cdma_put_jfae(struct cdma_context *ctx) +{ + struct cdma_jfae *jfae; + + if (!ctx) + return; + + jfae = (struct cdma_jfae *)ctx->jfae; + if (!jfae) + return; + + if (!jfae->file) + return; + + fput(jfae->file); +} diff --git a/drivers/ub/cdma/cdma_event.h b/drivers/ub/cdma/cdma_event.h new file mode 100644 index 000000000000..9154ed10658a --- /dev/null +++ b/drivers/ub/cdma/cdma_event.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_EVENT_H__ +#define __CDMA_EVENT_H__ + +#include +#include +#include +#include +#include +#include +#include "cdma.h" +#include "cdma_context.h" +#include "cdma_types.h" + +#define MAX_EVENT_LIST_SIZE 65535 + +struct cdma_jfe { + spinlock_t lock; + struct list_head event_list; + wait_queue_head_t poll_wait; + struct fasync_struct *async_queue; + uint32_t event_list_count; +}; + +struct cdma_jfae { + int fd; + struct cdma_context *ctx; + struct cdma_file *cfile; + struct file *file; + struct cdma_jfe jfe; +}; + +struct cdma_jfe_event { + struct list_head node; + u32 event_type; + u64 event_data; + struct list_head obj_node; + u32 *counter; +}; + +void cdma_jfs_async_event_cb(struct cdma_event *event, struct cdma_context *ctx); + +void cdma_jfc_async_event_cb(struct cdma_event *event, struct cdma_context *ctx); + +struct cdma_jfae *cdma_alloc_jfae(struct cdma_file *cfile); + +void cdma_free_jfae(struct cdma_jfae *jfae); + +int cdma_get_jfae(struct cdma_context *ctx); + +void cdma_init_jfc_event(struct cdma_jfc_event *event, struct cdma_base_jfc *jfc); + +void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list); + +void cdma_put_jfae(struct cdma_context *ctx); +#endif /* CDMA_EVENT_H */ diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 5f63cad58088..98286749ceb2 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -12,6 +12,7 @@ #include "cdma_tp.h" #include "cdma_jfs.h" #include "cdma_queue.h" +#include "cdma_event.h" #include "cdma_jfc.h" #include "cdma_uobj.h" #include "cdma_ioctl.h" @@ -65,6 +66,7 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, struct cdma_create_context_args args = { 0 }; struct cdma_dev *cdev = cfile->cdev; struct cdma_context *ctx; + struct cdma_jfae *jfae; int ret; if (cfile->uctx) { @@ -87,22 +89,34 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, if (IS_ERR(ctx)) return PTR_ERR(ctx); + ctx->jfae = cdma_alloc_jfae(cfile); + if (!ctx->jfae) { + dev_err(cdev->dev, "create jfae failed.\n"); + ret = -EFAULT; + goto free_context; + } + + jfae = (struct cdma_jfae *)ctx->jfae; + jfae->ctx = ctx; args.out.cqe_size = cdev->caps.cqe_size; args.out.dwqe_enable = !!(cdev->caps.feature & CDMA_CAP_FEATURE_DIRECT_WQE); + args.out.async_fd = jfae->fd; cfile->uctx = ctx; ret = (int)copy_to_user((void *)hdr->args_addr, &args, (u32)sizeof(args)); if (ret) { dev_err(cdev->dev, "copy ctx to user failed, ret = %d.\n", ret); - goto free_context; + goto free_jfae; } return ret; -free_context: +free_jfae: cfile->uctx = NULL; + cdma_free_jfae((struct cdma_jfae *)ctx->jfae); +free_context: cdma_free_context(cdev, ctx); return ret; @@ -270,6 +284,7 @@ static int cdma_cmd_create_jfs(struct cdma_ioctl_hdr *hdr, { struct cdma_cmd_create_jfs_args arg = { 0 }; struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfs_event *jfs_event; struct cdma_jfs_cfg cfg = { 0 }; struct cdma_udata udata = { 0 }; struct cdma_base_jfs *jfs; @@ -316,6 +331,9 @@ static int cdma_cmd_create_jfs(struct cdma_ioctl_hdr *hdr, } uobj->object = jfs; + jfs_event = &jfs->jfs_event; + jfs_event->async_events_reported = 0; + INIT_LIST_HEAD(&jfs_event->async_event_list); arg.out.id = jfs->id; arg.out.handle = uobj->id; @@ -497,6 +515,7 @@ static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, { struct cdma_cmd_create_jfc_args arg = { 0 }; struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfc_event *jfc_event; struct cdma_jfc_cfg cfg = { 0 }; struct cdma_udata udata = { 0 }; struct cdma_base_jfc 
*jfc; @@ -541,7 +560,9 @@ static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, goto err_create_jfc; } + jfc_event = &jfc->jfc_event; uobj->object = jfc; + cdma_init_jfc_event(jfc_event, jfc); arg.out.id = jfc->id; arg.out.depth = jfc->jfc_cfg.depth; diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index 4609fd22382a..80becf8753d9 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -8,6 +8,7 @@ #include "cdma_context.h" #include "cdma_mbox.h" #include "cdma_common.h" +#include "cdma_event.h" #include "cdma_db.h" #include "cdma_jfc.h" @@ -271,6 +272,12 @@ static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) return -ETIMEDOUT; } +static void cdma_release_jfc_event(struct cdma_jfc *jfc) +{ + cdma_release_async_event(jfc->base.ctx, + &jfc->base.jfc_event.async_event_list); +} + static int cdma_post_create_jfc_mbox(struct cdma_dev *cdev, struct cdma_jfc *jfc) { struct ubase_mbx_attr attr = { 0 }; @@ -323,10 +330,19 @@ struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, if (ret) goto err_get_jfc_buf; + if (udata) { + ret = cdma_get_jfae(jfc->base.ctx); + if (ret) + goto err_get_jfae; + } + ret = cdma_post_create_jfc_mbox(cdev, jfc); if (ret) goto err_alloc_cqc; + refcount_set(&jfc->event_refcount, 1); + init_completion(&jfc->event_comp); + jfc->base.jfae_handler = cdma_jfc_async_event_cb; jfc->base.dev = cdev; dev_dbg(cdev->dev, "create jfc id = %u, queue id = %u.\n", @@ -335,6 +351,9 @@ struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, return &jfc->base; err_alloc_cqc: + if (udata) + cdma_put_jfae(jfc->base.ctx); +err_get_jfae: cdma_free_jfc_buf(cdev, jfc); err_get_jfc_buf: cdma_jfc_id_free(cdev, jfc->jfcn); @@ -348,6 +367,7 @@ struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, struct cdma_cmd_delete_jfc_args *arg) { + struct cdma_jfc_event *jfc_event; struct cdma_jfc *jfc; int ret; @@ -373,11 +393,20 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, if (ret) dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + if (refcount_dec_and_test(&jfc->event_refcount)) + complete(&jfc->event_comp); + wait_for_completion(&jfc->event_comp); + cdma_free_jfc_buf(cdev, jfc); cdma_jfc_id_free(cdev, jfc->jfcn); + if (arg) { + jfc_event = &jfc->base.jfc_event; + arg->out.async_events_reported = jfc_event->async_events_reported; + } pr_debug("Leave %s, jfcn: %u.\n", __func__, jfc->jfcn); + cdma_release_jfc_event(jfc); kfree(jfc); return 0; diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 599068837a4b..abc05c44432b 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -12,6 +12,7 @@ #include "cdma_cmd.h" #include "cdma_common.h" #include "cdma_mbox.h" +#include "cdma_event.h" #include "cdma_context.h" #include "cdma_jfs.h" @@ -296,13 +297,21 @@ struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, if (ret) goto err_get_jfs_buf; + if (udata) { + ret = cdma_get_jfae(jfs->base_jfs.ctx); + if (ret) + goto err_get_jfae; + } + ret = cdma_create_hw_jfs_ctx(cdev, jfs, cfg); if (ret) goto err_create_hw_jfsc; cdma_set_query_flush_time(&jfs->sq, cfg->err_timeout); - + refcount_set(&jfs->ae_ref_cnt, 1); + init_completion(&jfs->ae_comp); jfs->sq.state = CDMA_JETTY_READY; + jfs->base_jfs.jfae_handler = cdma_jfs_async_event_cb; jfs->base_jfs.dev = cdev; dev_dbg(cdev->dev, @@ -312,6 +321,9 @@ struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, return &jfs->base_jfs; err_create_hw_jfsc: + if (udata) + 
cdma_put_jfae(jfs->base_jfs.ctx); +err_get_jfae: cdma_free_sq_buf(cdev, &jfs->sq); err_get_jfs_buf: cdma_free_jfs_id(cdev, jfs->sq.id); @@ -499,6 +511,12 @@ static int cdma_modify_and_destroy_jfs(struct cdma_dev *cdev, return ret; } +static inline void cdma_release_jfs_event(struct cdma_jfs *jfs) +{ + cdma_release_async_event(jfs->base_jfs.ctx, + &jfs->base_jfs.jfs_event.async_event_list); +} + int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) { struct cdma_jfs *jfs; @@ -524,12 +542,18 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) if (ret) dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + if (refcount_dec_and_test(&jfs->ae_ref_cnt)) + complete(&jfs->ae_comp); + wait_for_completion(&jfs->ae_comp); + cdma_free_sq_buf(cdev, &jfs->sq); cdma_free_jfs_id(cdev, jfs_id); pr_debug("Leave %s, jfsn: %u.\n", __func__, jfs_id); + cdma_release_jfs_event(jfs); + kfree(jfs); return 0; diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 8519d972c48f..62c475e31a1c 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -10,6 +10,7 @@ #include "cdma_dev.h" #include "cdma_chardev.h" #include +#include "cdma_eq.h" #include "cdma_cmd.h" /* Enabling jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. */ @@ -24,10 +25,30 @@ MODULE_PARM_DESC(cqe_mode, "Set cqe reporting mode, default: 1 (0:BY_COUNT, 1:BY struct class *cdma_cdev_class; +static int cdma_register_event(struct auxiliary_device *adev) +{ + int ret; + + ret = cdma_reg_ae_event(adev); + if (ret) + return ret; + + return 0; +} + +static inline void cdma_unregister_event(struct auxiliary_device *adev) +{ + cdma_unreg_ae_event(adev); +} + static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) { int ret; + ret = cdma_register_event(auxdev); + if (ret) + return ret; + /* query eu failure does not affect driver loading, as eu can be updated. 
*/ ret = cdma_ctrlq_query_eu(cdev); if (ret) @@ -77,6 +98,7 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) return; } + cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); cdma_destroy_dev(cdev); } diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 689db795d0c9..c7af05e282f2 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -7,8 +7,12 @@ #include #include #include +#include "cdma.h" -struct cdma_dev; +enum cdma_event_type { + CDMA_EVENT_JFC_ERR, + CDMA_EVENT_JFS_ERR, +}; struct cdma_ucontext { struct cdma_dev *dev; @@ -71,13 +75,28 @@ struct cdma_udata { struct cdma_udrv_priv *udrv_data; }; +struct cdma_event { + struct cdma_dev *dev; + union { + struct cdma_base_jfc *jfc; + struct cdma_base_jfs *jfs; + u32 eid_idx; + } element; + enum cdma_event_type event_type; +}; + +typedef void (*cdma_event_callback_t)(struct cdma_event *event, + struct cdma_context *ctx); + struct cdma_base_jfs { struct cdma_dev *dev; struct cdma_context *ctx; struct cdma_jfs_cfg cfg; + cdma_event_callback_t jfae_handler; u64 usr_jfs; u32 id; atomic_t use_cnt; + struct cdma_jfs_event jfs_event; }; struct cdma_jfc_cfg { @@ -91,8 +110,10 @@ struct cdma_base_jfc { struct cdma_context *ctx; struct cdma_jfc_cfg jfc_cfg; u32 id; + cdma_event_callback_t jfae_handler; struct hlist_node hnode; atomic_t use_cnt; + struct cdma_jfc_event jfc_event; }; struct cdma_file { diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 59367df04865..0af30a39534e 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -10,6 +10,13 @@ #define CDMA_IOC_MAGIC 'C' #define CDMA_SYNC _IOWR(CDMA_IOC_MAGIC, 0, struct cdma_ioctl_hdr) +/* cdma event ioctl cmd */ +#define CDMA_EVENT_CMD_MAGIC 'F' +#define JFAE_CMD_GET_ASYNC_EVENT 0 + +#define CDMA_CMD_GET_ASYNC_EVENT \ + _IOWR(CDMA_EVENT_CMD_MAGIC, JFAE_CMD_GET_ASYNC_EVENT, struct cdma_cmd_async_event) + #define CDMA_DOORBELL_OFFSET 0x80 #define MAP_COMMAND_MASK 0xff @@ -97,6 +104,11 @@ struct cdma_cmd_create_jfs_args { struct cdma_cmd_udrv_priv udata; }; +struct cdma_cmd_async_event { + __u64 event_data; + __u32 event_type; +}; + struct cdma_cmd_delete_jfs_args { struct { __u32 jfs_id; @@ -156,6 +168,9 @@ struct cdma_cmd_delete_jfc_args { __u64 handle; /* handle of jfc */ __u32 queue_id; } in; + struct { + __u32 async_events_reported; + } out; }; struct dev_eid { -- Gitee From 6a0237efa8bb5812b70fb63cddf7771ad6779e50 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 14:32:04 +0800 Subject: [PATCH 019/243] ub: cdma: support reporting completed events commit 3aa7aa335383c20b18a2b0f9fbe3841d83efa77a openEuler This patch implements the handling and reporting of completion events in the CDMA driver. The implementation includes writing the corresponding completion event to the completion event queue of the respective jfc when read/write semantics are completed. 
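Kernel-space consumers use the new dma_poll_queue() export instead of the jfce fd. A minimal sketch, assuming the queue came from the existing queue-allocation API and that the call returns the number of filled records (negative errno on error), which this patch delegates to cdma_poll_jfc():

#include <linux/kernel.h>
#include <ub/cdma/cdma_api.h>

static int drain_completions(struct dma_device *dma_dev, int queue_id)
{
	struct dma_cr cr[16];	/* one record per completed request */
	int done;

	done = dma_poll_queue(dma_dev, queue_id, ARRAY_SIZE(cr), cr);
	if (done < 0)
		return done;

	/* inspect cr[0..done-1] here */
	return done;
}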
Signed-off-by: Zhipeng Lu Signed-off-by: Jingjing Ku Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma.h | 4 + drivers/ub/cdma/cdma_api.c | 36 ++++ drivers/ub/cdma/cdma_dev.c | 4 + drivers/ub/cdma/cdma_eq.c | 29 +++ drivers/ub/cdma/cdma_eq.h | 2 + drivers/ub/cdma/cdma_event.c | 334 ++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_event.h | 21 ++ drivers/ub/cdma/cdma_ioctl.c | 43 +++- drivers/ub/cdma/cdma_jfc.c | 232 ++++++++++++++++++++++ drivers/ub/cdma/cdma_jfc.h | 48 +++++ drivers/ub/cdma/cdma_main.c | 10 + drivers/ub/cdma/cdma_types.h | 5 + include/uapi/ub/cdma/cdma_abi.h | 72 +++++++ include/ub/cdma/cdma_api.h | 32 +++ 14 files changed, 871 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index 995f28def668..78ac66be6526 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -187,6 +187,7 @@ struct cdma_dev { struct cdma_table ctp_table; struct cdma_table jfs_table; struct cdma_table jfc_table; + struct cdma_table jfce_table; struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; struct mutex file_mutex; struct list_head file_list; @@ -200,7 +201,10 @@ struct cdma_jfs_event { struct cdma_jfc_event { struct cdma_base_jfc *jfc; + struct cdma_jfce *jfce; + struct list_head comp_event_list; struct list_head async_event_list; + u32 comp_events_reported; u32 async_events_reported; }; diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 34a2d96f7c3c..8043b0238cb4 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -8,6 +8,7 @@ #include "cdma_cmd.h" #include "cdma_context.h" #include "cdma_queue.h" +#include "cdma_jfc.h" #include "cdma.h" #include @@ -308,3 +309,38 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) atomic_dec(&ctx->ref_cnt); } EXPORT_SYMBOL_GPL(dma_free_queue); + +int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, + struct dma_cr *cr) +{ + struct cdma_queue *cdma_queue; + struct cdma_dev *cdev; + u32 eid; + + if (!dma_dev || !cr_cnt || !cr) { + pr_err("the poll queue input parameter is invalid.\n"); + return -EINVAL; + } + + eid = dma_dev->attr.eid.dw0; + cdev = get_cdma_dev_by_eid(eid); + if (!cdev) { + pr_err("get cdma dev failed, eid = 0x%x.\n", eid); + return -EINVAL; + } + + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); + return -EINVAL; + } + + cdma_queue = cdma_find_queue(cdev, queue_id); + if (!cdma_queue || !cdma_queue->jfc) { + dev_err(cdev->dev, "get cdma queue failed, queue_id = %d.\n", + queue_id); + return -EINVAL; + } + + return cdma_poll_jfc(cdma_queue->jfc, cr_cnt, cr); +} +EXPORT_SYMBOL_GPL(dma_poll_queue); diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 75c87176e868..96f33a55ebdf 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -105,11 +105,14 @@ static void cdma_tbl_destroy(struct cdma_dev *cdev, struct cdma_table *table, static void cdma_init_tables(struct cdma_dev *cdev) { struct cdma_res *queue = &cdev->caps.queue; + struct cdma_res *jfce = &cdev->caps.jfce; struct cdma_res *jfs = &cdev->caps.jfs; struct cdma_res *jfc = &cdev->caps.jfc; cdma_tbl_init(&cdev->queue_table, queue->start_idx + queue->max_cnt - 1, queue->start_idx); + cdma_tbl_init(&cdev->jfce_table, jfce->start_idx + jfce->max_cnt - 1, + jfce->start_idx); cdma_tbl_init(&cdev->jfc_table, jfc->start_idx + jfc->max_cnt - 1, jfc->start_idx); cdma_tbl_init(&cdev->jfs_table, jfs->max_cnt + jfs->start_idx - 1, @@ -122,6 +125,7 @@ 
static void cdma_destroy_tables(struct cdma_dev *cdev) cdma_tbl_destroy(cdev, &cdev->ctp_table, "CTP"); cdma_tbl_destroy(cdev, &cdev->jfs_table, "JFS"); cdma_tbl_destroy(cdev, &cdev->jfc_table, "JFC"); + cdma_tbl_destroy(cdev, &cdev->jfce_table, "JFCE"); cdma_tbl_destroy(cdev, &cdev->queue_table, "QUEUE"); } diff --git a/drivers/ub/cdma/cdma_eq.c b/drivers/ub/cdma/cdma_eq.c index 51c84ebaf7b6..6bc6048e3127 100644 --- a/drivers/ub/cdma/cdma_eq.c +++ b/drivers/ub/cdma/cdma_eq.c @@ -192,3 +192,32 @@ void cdma_unreg_ae_event(struct auxiliary_device *adev) } } } + +/* thanks to drivers/infiniband/hw/erdma/erdma_eq.c */ +int cdma_reg_ce_event(struct auxiliary_device *adev) +{ + struct cdma_dev *cdma_dev; + int ret; + + if (!adev) + return -EINVAL; + + cdma_dev = get_cdma_dev(adev); + if (!cdma_dev) + return -EINVAL; + + ret = ubase_comp_register(adev, cdma_jfc_completion); + if (ret) + dev_err(cdma_dev->dev, + "register ce event failed, ret = %d.\n", ret); + + return ret; +} + +void cdma_unreg_ce_event(struct auxiliary_device *adev) +{ + if (!adev) + return; + + ubase_comp_unregister(adev); +} diff --git a/drivers/ub/cdma/cdma_eq.h b/drivers/ub/cdma/cdma_eq.h index 51417e3eea50..70e9edcccad4 100644 --- a/drivers/ub/cdma/cdma_eq.h +++ b/drivers/ub/cdma/cdma_eq.h @@ -12,5 +12,7 @@ struct cdma_ae_operation { int cdma_reg_ae_event(struct auxiliary_device *adev); void cdma_unreg_ae_event(struct auxiliary_device *adev); +int cdma_reg_ce_event(struct auxiliary_device *adev); +void cdma_unreg_ce_event(struct auxiliary_device *adev); #endif diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index 5d81363c88ff..f887c52a0479 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "cdma_uobj.h" #include "cdma_event.h" @@ -92,6 +93,189 @@ static int cdma_wait_event(struct cdma_jfe *jfe, bool nonblock, return 0; } +static int cdma_wait_event_timeout(struct cdma_jfe *jfe, + unsigned long max_timeout, + u32 max_event_cnt, + u32 *event_cnt, + struct list_head *event_list) +{ + long timeout = (long)max_timeout; + + *event_cnt = 0; + while (1) { + asm volatile("" : : : "memory"); + *event_cnt = cdma_read_jfe_event(jfe, max_event_cnt, event_list); + if (*event_cnt > 0) + break; + timeout = wait_event_interruptible_timeout(jfe->poll_wait, + !list_empty(&jfe->event_list), timeout); + if (timeout <= 0) + return timeout; + } + + return 0; +} + +static int cdma_jfce_wait(struct cdma_jfce *jfce, struct file *filp, + unsigned long arg) +{ + struct cdma_cmd_jfce_wait_args we = { 0 }; + struct cdma_jfe_event *event; + struct list_head event_list; + struct list_head *next; + struct list_head *p; + u32 max_event_cnt; + u32 i = 0; + int ret; + + if (copy_from_user(&we, (const void __user *)arg, + (u32)sizeof(we)) != 0) + return -EFAULT; + + max_event_cnt = min_t(u32, we.in.max_event_cnt, (u32)CDMA_MAX_JFCE_EVENT_CNT); + INIT_LIST_HEAD(&event_list); + if (we.in.time_out <= 0) { + ret = cdma_wait_event(&jfce->jfe, + (filp->f_flags & O_NONBLOCK) | + (!we.in.time_out), + max_event_cnt, + &we.out.event_cnt, &event_list); + } else { + ret = cdma_wait_event_timeout(&jfce->jfe, + msecs_to_jiffies(we.in.time_out), + max_event_cnt, &we.out.event_cnt, + &event_list); + } + + if (ret < 0) { + pr_err("wait jfce event failed, ret = %d\n", ret); + return ret; + } + + list_for_each_safe(p, next, &event_list) { + event = list_entry(p, struct cdma_jfe_event, node); + we.out.event_data[i++] = event->event_data; + list_del(p); + 
kfree(event); + } + + if (we.out.event_cnt > 0 && copy_to_user((void *)arg, &we, sizeof(we))) { + pr_err("copy to user failed.\n"); + return -EFAULT; + } + + return 0; +} + +static __poll_t cdma_jfce_poll(struct file *filp, struct poll_table_struct *wait) +{ + struct cdma_jfce *jfce = (struct cdma_jfce *)filp->private_data; + + if (!jfce) + return POLLERR; + + return cdma_jfe_poll(&jfce->jfe, filp, wait); +} + +static long cdma_jfce_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct cdma_jfce *jfce = (struct cdma_jfce *)filp->private_data; + unsigned int nr; + int ret; + + if (!arg || !jfce || _IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { + pr_err("invalid parameter, cmd = %u.\n", cmd); + return -EINVAL; + } + + nr = (unsigned int)_IOC_NR(cmd); + switch (nr) { + case JFCE_CMD_WAIT_EVENT: + ret = cdma_jfce_wait(jfce, filp, arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static int cdma_delete_jfce(struct inode *inode, struct file *filp) +{ + struct cdma_file *cfile; + struct cdma_jfce *jfce; + + if (!filp || !filp->private_data) + return 0; + + jfce = (struct cdma_jfce *)filp->private_data; + + cfile = jfce->cfile; + if (!cfile) + return 0; + + if (!mutex_trylock(&cfile->ctx_mutex)) + return -ENOLCK; + cdma_destroy_jfce(jfce); + filp->private_data = NULL; + mutex_unlock(&cfile->ctx_mutex); + cdma_close_uobj_fd(cfile); + + pr_info("jfce is release.\n"); + return 0; +} + +static int cdma_jfce_fasync(int fd, struct file *filp, int on) +{ + struct cdma_jfce *jfce = (struct cdma_jfce *)filp->private_data; + int ret; + + if (!jfce) + return -EINVAL; + + spin_lock_irq(&jfce->jfe.lock); + ret = fasync_helper(fd, filp, on, &jfce->jfe.async_queue); + spin_unlock_irq(&jfce->jfe.lock); + + return ret; +} + +const struct file_operations cdma_jfce_fops = { + .owner = THIS_MODULE, + .poll = cdma_jfce_poll, + .unlocked_ioctl = cdma_jfce_ioctl, + .release = cdma_delete_jfce, + .fasync = cdma_jfce_fasync, +}; + +static int cdma_jfce_id_alloc(struct cdma_dev *cdev, struct cdma_jfce *jfce) +{ + struct cdma_table *jfce_tbl = &cdev->jfce_table; + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&jfce_tbl->lock); + id = idr_alloc(&jfce_tbl->idr_tbl.idr, jfce, jfce_tbl->idr_tbl.min, + jfce_tbl->idr_tbl.max, GFP_NOWAIT); + if (id < 0) + dev_err(cdev->dev, "alloc jfce id failed.\n"); + spin_unlock(&jfce_tbl->lock); + idr_preload_end(); + + return id; +} + +static void cdma_jfce_id_free(struct cdma_dev *cdev, u32 jfce_id) +{ + struct cdma_table *jfce_tbl = &cdev->jfce_table; + + spin_lock(&jfce_tbl->lock); + idr_remove(&jfce_tbl->idr_tbl.idr, jfce_id); + spin_unlock(&jfce_tbl->lock); +} + static void cdma_write_event(struct cdma_jfe *jfe, u64 event_data, u32 event_type, struct list_head *obj_event_list, u32 *counter) @@ -142,6 +326,136 @@ static void cdma_uninit_jfe(struct cdma_jfe *jfe) spin_unlock_irq(&jfe->lock); } +struct cdma_jfce *cdma_get_jfce_from_id(struct cdma_dev *cdev, int jfce_id) +{ + struct cdma_table *jfce_table = &cdev->jfce_table; + struct cdma_jfce *jfce; + struct file *file; + + spin_lock(&jfce_table->lock); + jfce = idr_find(&jfce_table->idr_tbl.idr, jfce_id); + if (!jfce) { + dev_err(cdev->dev, "find jfce failed, id = %d.\n", jfce_id); + } else { + file = fget(jfce->fd); + if (!file) { + jfce = NULL; + } else { + if (file->private_data != jfce) { + fput(file); + jfce = NULL; + } + } + } + spin_unlock(&jfce_table->lock); + + return jfce; +} + +void cdma_jfc_comp_event_cb(struct cdma_base_jfc *jfc) +{ + struct cdma_jfc_event *jfc_event; + 
struct cdma_jfce *jfce; + + if (!jfc) + return; + + jfc_event = &jfc->jfc_event; + if (!IS_ERR_OR_NULL(jfc_event->jfce)) { + jfce = jfc_event->jfce; + if (jfce->jfe.event_list_count >= MAX_EVENT_LIST_SIZE) + return; + + cdma_write_event(&jfce->jfe, jfc->jfc_cfg.queue_id, 0, + &jfc_event->comp_event_list, + &jfc_event->comp_events_reported); + } +} + +struct cdma_jfce *cdma_alloc_jfce(struct cdma_file *cfile) +{ + struct cdma_jfce *jfce; + struct file *file; + int new_fd; + int ret; + + if (!cfile) + return ERR_PTR(-EINVAL); + + new_fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (new_fd < 0) + return ERR_PTR(new_fd); + + jfce = kzalloc(sizeof(*jfce), GFP_KERNEL); + if (!jfce) { + ret = -ENOMEM; + goto err_put_unused_fd; + } + + ret = cdma_jfce_id_alloc(cfile->cdev, jfce); + if (ret < 0) + goto err_free_jfce; + jfce->id = ret; + + file = anon_inode_getfile("[jfce]", &cdma_jfce_fops, jfce, + O_RDWR | O_CLOEXEC); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_free_id; + } + + cdma_init_jfe(&jfce->jfe); + jfce->cdev = cfile->cdev; + jfce->fd = new_fd; + jfce->file = file; + jfce->cfile = cfile; + fd_install(new_fd, file); + + return jfce; + +err_free_id: + cdma_jfce_id_free(cfile->cdev, jfce->id); +err_free_jfce: + kfree(jfce); +err_put_unused_fd: + put_unused_fd(new_fd); + + return ERR_PTR(ret); +} + +void cdma_free_jfce(struct cdma_jfce *jfce) +{ + struct cdma_dev *cdev; + + if (!jfce || !jfce->cdev) + return; + + cdev = jfce->cdev; + + if (jfce->id >= cdev->caps.jfce.max_cnt + cdev->caps.jfce.start_idx || + jfce->id < cdev->caps.jfce.start_idx) { + dev_err(cdev->dev, + "jfce id invalid, id = %u, start_idx = %u, max_cnt = %u.\n", + jfce->id, cdev->caps.jfce.start_idx, + cdev->caps.jfce.max_cnt); + return; + } + + fput(jfce->file); + put_unused_fd(jfce->fd); +} + +void cdma_destroy_jfce(struct cdma_jfce *jfce) +{ + if (!jfce) + return; + + cdma_uninit_jfe(&jfce->jfe); + if (jfce->cfile && jfce->cfile->cdev) + cdma_jfce_id_free(jfce->cdev, jfce->id); + kfree(jfce); +} + static void cdma_write_async_event(struct cdma_context *ctx, u64 event_data, u32 type, struct list_head *obj_event_list, u32 *counter) @@ -388,11 +702,31 @@ int cdma_get_jfae(struct cdma_context *ctx) void cdma_init_jfc_event(struct cdma_jfc_event *event, struct cdma_base_jfc *jfc) { + event->comp_events_reported = 0; event->async_events_reported = 0; + INIT_LIST_HEAD(&event->comp_event_list); INIT_LIST_HEAD(&event->async_event_list); event->jfc = jfc; } +void cdma_release_comp_event(struct cdma_jfce *jfce, struct list_head *event_list) +{ + struct cdma_jfe_event *event, *tmp; + struct cdma_jfe *jfe; + + if (!jfce) + return; + + jfe = &jfce->jfe; + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); + fput(jfce->file); +} + void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list) { struct cdma_jfe_event *event, *tmp; diff --git a/drivers/ub/cdma/cdma_event.h b/drivers/ub/cdma/cdma_event.h index 9154ed10658a..4ca14c3c5fcb 100644 --- a/drivers/ub/cdma/cdma_event.h +++ b/drivers/ub/cdma/cdma_event.h @@ -40,6 +40,19 @@ struct cdma_jfe_event { u32 *counter; }; +struct cdma_jfce { + int id; + int fd; + struct cdma_dev *cdev; + struct cdma_file *cfile; + struct file *file; + struct cdma_jfe jfe; +}; + +struct cdma_jfce *cdma_alloc_jfce(struct cdma_file *cfile); + +void cdma_free_jfce(struct cdma_jfce *jfce); + void cdma_jfs_async_event_cb(struct cdma_event *event, struct 
cdma_context *ctx); void cdma_jfc_async_event_cb(struct cdma_event *event, struct cdma_context *ctx); @@ -50,8 +63,16 @@ void cdma_free_jfae(struct cdma_jfae *jfae); int cdma_get_jfae(struct cdma_context *ctx); +struct cdma_jfce *cdma_get_jfce_from_id(struct cdma_dev *cdev, int jfce_id); + +void cdma_jfc_comp_event_cb(struct cdma_base_jfc *jfc); + +void cdma_destroy_jfce(struct cdma_jfce *jfce); + void cdma_init_jfc_event(struct cdma_jfc_event *event, struct cdma_base_jfc *jfc); +void cdma_release_comp_event(struct cdma_jfce *jfce, struct list_head *event_list); + void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list); void cdma_put_jfae(struct cdma_context *ctx); diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 98286749ceb2..f1513c162db2 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -567,7 +567,11 @@ static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, arg.out.id = jfc->id; arg.out.depth = jfc->jfc_cfg.depth; arg.out.handle = uobj->id; - + jfc_event->jfce = cdma_get_jfce_from_id(cdev, arg.in.jfce_id); + if (!jfc_event->jfce) { + ret = -EFAULT; + goto err_get_jfce; + } ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); if (ret != 0) { dev_err(cdev->dev, "copy jfc to user failed, ret = %d.\n", ret); @@ -579,6 +583,7 @@ static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, return 0; err_copy_to_user: +err_get_jfce: cdma_delete_jfc(cdev, jfc->id, NULL); err_create_jfc: cdma_uobj_delete(uobj); @@ -641,6 +646,41 @@ static int cdma_cmd_delete_jfc(struct cdma_ioctl_hdr *hdr, return 0; } +static int cdma_cmd_create_jfce(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_create_jfce_args arg = { 0 }; + struct cdma_jfce *jfce; + int ret; + + if (!hdr->args_addr || hdr->args_len != (u32)sizeof(arg)) + return -EINVAL; + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) + return -EFAULT; + + jfce = cdma_alloc_jfce(cfile); + if (IS_ERR(jfce)) + return PTR_ERR(jfce); + + arg.out.fd = jfce->fd; + arg.out.id = jfce->id; + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + ret = -EFAULT; + goto err_out; + } + + return 0; + +err_out: + cdma_free_jfce(jfce); + + return ret; +} + static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_QUERY_DEV_INFO] = cdma_query_dev, [CDMA_CMD_CREATE_CTX] = cdma_create_ucontext, @@ -653,6 +693,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, [CDMA_CMD_CREATE_JFC] = cdma_cmd_create_jfc, [CDMA_CMD_DELETE_JFC] = cdma_cmd_delete_jfc, + [CDMA_CMD_CREATE_JFCE] = cdma_cmd_create_jfce, }; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index 80becf8753d9..cd92f90461ff 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -10,6 +10,7 @@ #include "cdma_common.h" #include "cdma_event.h" #include "cdma_db.h" +#include "cdma_jfs.h" #include "cdma_jfc.h" static int cdma_get_cmd_from_user(struct cdma_create_jfc_ucmd *ucmd, @@ -82,6 +83,7 @@ static void cdma_init_jfc_param(struct cdma_jfc_cfg *cfg, struct cdma_jfc *jfc) { jfc->base.id = jfc->jfcn; jfc->base.jfc_cfg = *cfg; + jfc->base.jfc_event.jfce = NULL; jfc->ceqn = cfg->ceqn; } @@ -272,8 +274,171 @@ static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) return -ETIMEDOUT; } +static inline void 
*cdma_get_buf_entry(struct cdma_buf *buf, u32 n) +{ + uint32_t entry_index = n & buf->entry_cnt_mask; + + return (char *)buf->kva + (entry_index * buf->entry_size); +} + +static struct cdma_jfc_cqe *cdma_get_next_cqe(struct cdma_jfc *jfc, u32 n) +{ + struct cdma_jfc_cqe *cqe; + u32 valid_owner; + + cqe = (struct cdma_jfc_cqe *)cdma_get_buf_entry(&jfc->buf, n); + valid_owner = (jfc->ci >> jfc->buf.entry_cnt_mask_ilog2) & + CDMA_JFC_DB_VALID_OWNER_M; + if (!(cqe->owner ^ valid_owner)) + return NULL; + + return cqe; +} + +static struct cdma_jetty_queue *cdma_update_jetty_idx(struct cdma_jfc_cqe *cqe) +{ + struct cdma_jetty_queue *queue; + u32 entry_idx; + + entry_idx = cqe->entry_idx; + queue = (struct cdma_jetty_queue *)((u64)cqe->user_data_h << + CDMA_ADDR_SHIFT | cqe->user_data_l); + if (!queue) + return NULL; + + if (!!cqe->fd) + return queue; + + queue->ci += (entry_idx - queue->ci) & (queue->buf.entry_cnt - 1); + + return queue; +} + +static enum jfc_poll_state cdma_get_cr_status(u8 src_status, + u8 substatus, + enum dma_cr_status *dst_status) +{ +struct cdma_cqe_status { + bool is_valid; + enum dma_cr_status cr_status; +}; + + static struct cdma_cqe_status map[CDMA_CQE_STATUS_NUM][CDMA_CQE_SUB_STATUS_NUM] = { + {{true, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}}, + {{true, DMA_CR_UNSUPPORTED_OPCODE_ERR}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}}, + {{false, DMA_CR_SUCCESS}, {true, DMA_CR_LOC_LEN_ERR}, + {true, DMA_CR_LOC_ACCESS_ERR}, {true, DMA_CR_REM_RESP_LEN_ERR}, + {true, DMA_CR_LOC_DATA_POISON}}, + {{false, DMA_CR_SUCCESS}, {true, DMA_CR_REM_UNSUPPORTED_REQ_ERR}, + {true, DMA_CR_REM_ACCESS_ABORT_ERR}, {false, DMA_CR_SUCCESS}, + {true, DMA_CR_REM_DATA_POISON}}, + {{true, DMA_CR_RNR_RETRY_CNT_EXC_ERR}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}}, + {{true, DMA_CR_ACK_TIMEOUT_ERR}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}}, + {{true, DMA_CR_WR_FLUSH_ERR}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}, {false, DMA_CR_SUCCESS}, + {false, DMA_CR_SUCCESS}} + }; + + if ((src_status < CDMA_CQE_STATUS_NUM) && (substatus < CDMA_CQE_SUB_STATUS_NUM) && + map[src_status][substatus].is_valid) { + *dst_status = map[src_status][substatus].cr_status; + return JFC_OK; + } + + return JFC_POLL_ERR; +} + +static enum jfc_poll_state cdma_update_flush_cr(struct cdma_jetty_queue *queue, + struct cdma_jfc_cqe *cqe, + struct dma_cr *cr) +{ + if (cdma_get_cr_status(cqe->status, cqe->substatus, &cr->status)) + return JFC_POLL_ERR; + + if (cqe->fd) { + cr->status = DMA_CR_WR_FLUSH_ERR_DONE; + queue->flush_flag = true; + } else { + queue->ci++; + } + + return JFC_OK; +} + +static enum jfc_poll_state cdma_parse_cqe_for_jfc(struct cdma_dev *cdev, + struct cdma_jfc_cqe *cqe, + struct dma_cr *cr) +{ + struct cdma_jetty_queue *queue; + struct cdma_jfs *jfs; + + queue = cdma_update_jetty_idx(cqe); + if (!queue) { + dev_err(cdev->dev, "update jetty idx failed.\n"); + return JFC_POLL_ERR; + } + + jfs = container_of(queue, struct cdma_jfs, sq); + cr->flag.bs.s_r = cqe->s_r; + cr->flag.bs.jetty = cqe->is_jetty; + cr->completion_len = cqe->byte_cnt; + cr->tpn = cqe->tpn; + cr->local_id = cqe->local_num_h << CDMA_SRC_IDX_SHIFT | cqe->local_num_l; + cr->remote_id = cqe->rmt_idx; + + if (cqe->status) + dev_warn(cdev->dev, "get sq %u cqe status abnormal, ci = %u, pi = 
%u.\n", + queue->id, queue->ci, queue->pi); + + if (cdma_update_flush_cr(queue, cqe, cr)) { + dev_err(cdev->dev, + "update cr failed, status = %u, substatus = %u.\n", + cqe->status, cqe->substatus); + return JFC_POLL_ERR; + } + + return JFC_OK; +} + +static enum jfc_poll_state cdma_poll_one(struct cdma_dev *cdev, + struct cdma_jfc *jfc, + struct dma_cr *cr) +{ + enum dma_cr_status status; + struct cdma_jfc_cqe *cqe; + + cqe = cdma_get_next_cqe(jfc, jfc->ci); + if (!cqe) + return JFC_EMPTY; + + ++jfc->ci; + /* Ensure that the reading of the event is completed before parsing. */ + rmb(); + + if (cdma_parse_cqe_for_jfc(cdev, cqe, cr)) + return JFC_POLL_ERR; + + status = cr->status; + if (status == DMA_CR_WR_FLUSH_ERR_DONE || status == DMA_CR_WR_SUSPEND_DONE) { + dev_info(cdev->dev, "poll cr flush/suspend done, jfc id = %u, status = %u.\n", + jfc->jfcn, status); + return JFC_EMPTY; + } + + return JFC_OK; +} + static void cdma_release_jfc_event(struct cdma_jfc *jfc) { + cdma_release_comp_event(jfc->base.jfc_event.jfce, + &jfc->base.jfc_event.comp_event_list); cdma_release_async_event(jfc->base.ctx, &jfc->base.jfc_event.async_event_list); } @@ -343,6 +508,7 @@ struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, refcount_set(&jfc->event_refcount, 1); init_completion(&jfc->event_comp); jfc->base.jfae_handler = cdma_jfc_async_event_cb; + jfc->base.jfce_handler = cdma_jfc_comp_event_cb; jfc->base.dev = cdev; dev_dbg(cdev->dev, "create jfc id = %u, queue id = %u.\n", @@ -401,6 +567,7 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, cdma_jfc_id_free(cdev, jfc->jfcn); if (arg) { jfc_event = &jfc->base.jfc_event; + arg->out.comp_events_reported = jfc_event->comp_events_reported; arg->out.async_events_reported = jfc_event->async_events_reported; } @@ -411,3 +578,68 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, return 0; } + +int cdma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, + void *data) +{ + struct auxiliary_device *adev = (struct auxiliary_device *)data; + struct cdma_base_jfc *base_jfc; + struct cdma_table *jfc_tbl; + struct cdma_dev *cdev; + struct cdma_jfc *jfc; + unsigned long flags; + + if (!adev) + return -EINVAL; + + cdev = get_cdma_dev(adev); + jfc_tbl = &cdev->jfc_table; + spin_lock_irqsave(&jfc_tbl->lock, flags); + jfc = idr_find(&jfc_tbl->idr_tbl.idr, jfcn); + if (!jfc) { + dev_warn(cdev->dev, "can not find jfc, jfcn = %lu.\n", jfcn); + spin_unlock_irqrestore(&jfc_tbl->lock, flags); + return -EINVAL; + } + + ++jfc->arm_sn; + base_jfc = &jfc->base; + if (base_jfc->jfce_handler) { + refcount_inc(&jfc->event_refcount); + spin_unlock_irqrestore(&jfc_tbl->lock, flags); + base_jfc->jfce_handler(base_jfc); + if (refcount_dec_and_test(&jfc->event_refcount)) + complete(&jfc->event_comp); + } else { + spin_unlock_irqrestore(&jfc_tbl->lock, flags); + } + + return 0; +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int cdma_poll_jfc(struct cdma_base_jfc *base_jfc, int cr_cnt, + struct dma_cr *cr) +{ + struct cdma_jfc *jfc = to_cdma_jfc(base_jfc); + enum jfc_poll_state err = JFC_OK; + int npolled = 0; + + jfc->buf.entry_cnt_mask = jfc->buf.entry_cnt - 1; + jfc->buf.entry_cnt_mask_ilog2 = ilog2(jfc->buf.entry_cnt); + + spin_lock(&jfc->lock); + + for (npolled = 0; npolled < cr_cnt; ++npolled) { + err = cdma_poll_one(base_jfc->dev, jfc, cr + npolled); + if (err != JFC_OK) + break; + } + + if (npolled) + *jfc->db.db_record = jfc->ci & (u32)CDMA_JFC_DB_CI_IDX_M; + + spin_unlock(&jfc->lock); + + return err == JFC_POLL_ERR ? 
-CDMA_INTER_ERR : npolled; +} diff --git a/drivers/ub/cdma/cdma_jfc.h b/drivers/ub/cdma/cdma_jfc.h index 612887837e39..7f512150e50c 100644 --- a/drivers/ub/cdma/cdma_jfc.h +++ b/drivers/ub/cdma/cdma_jfc.h @@ -18,6 +18,8 @@ #define CQE_VA_L_OFFSET 12 #define CQE_VA_H_OFFSET 32 +#define CDMA_IMM_DATA_SHIFT 32 + enum cdma_record_db { CDMA_NO_RECORD_EN, CDMA_RECORD_EN @@ -130,6 +132,46 @@ struct cdma_jfc_ctx { u32 rsv11[12]; }; +struct cdma_jfc_cqe { + /* DW0 */ + u32 s_r : 1; + u32 is_jetty : 1; + u32 owner : 1; + u32 inline_en : 1; + u32 opcode : 3; + u32 fd : 1; + u32 rsv : 8; + u32 substatus : 8; + u32 status : 8; + /* DW1 */ + u32 entry_idx : 16; + u32 local_num_l : 16; + /* DW2 */ + u32 local_num_h : 4; + u32 rmt_idx : 20; + u32 rsv1 : 8; + /* DW3 */ + u32 tpn : 24; + u32 rsv2 : 8; + /* DW4 */ + u32 byte_cnt; + /* DW5 ~ DW6 */ + u32 user_data_l; + u32 user_data_h; + /* DW7 ~ DW10 */ + u32 rmt_eid[4]; + /* DW11 ~ DW12 */ + u32 data_l; + u32 data_h; + /* DW13 ~ DW15 */ + u32 inline_data[3]; +}; + +static inline struct cdma_jfc *to_cdma_jfc(struct cdma_base_jfc *base_jfc) +{ + return container_of(base_jfc, struct cdma_jfc, base); +} + int cdma_post_destroy_jfc_mbox(struct cdma_dev *cdev, u32 jfcn, enum cdma_jfc_state state); @@ -140,4 +182,10 @@ struct cdma_base_jfc *cdma_create_jfc(struct cdma_dev *cdev, int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, struct cdma_cmd_delete_jfc_args *arg); +int cdma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, + void *data); + +int cdma_poll_jfc(struct cdma_base_jfc *base_jfc, int cr_cnt, + struct dma_cr *cr); + #endif /* CDMA_JFC_H */ diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 62c475e31a1c..82dc5ab40cf8 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -33,11 +33,21 @@ static int cdma_register_event(struct auxiliary_device *adev) if (ret) return ret; + ret = cdma_reg_ce_event(adev); + if (ret) + goto err_ce_register; + return 0; + +err_ce_register: + cdma_unreg_ae_event(adev); + + return ret; } static inline void cdma_unregister_event(struct auxiliary_device *adev) { + cdma_unreg_ce_event(adev); cdma_unreg_ae_event(adev); } diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index c7af05e282f2..e4c2f3fd7b52 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -105,11 +105,16 @@ struct cdma_jfc_cfg { u32 queue_id; }; +struct cdma_base_jfc; + +typedef void (*cdma_comp_callback_t)(struct cdma_base_jfc *jfc); + struct cdma_base_jfc { struct cdma_dev *dev; struct cdma_context *ctx; struct cdma_jfc_cfg jfc_cfg; u32 id; + cdma_comp_callback_t jfce_handler; cdma_event_callback_t jfae_handler; struct hlist_node hnode; atomic_t use_cnt; diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 0af30a39534e..fcee5800193d 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -13,12 +13,22 @@ /* cdma event ioctl cmd */ #define CDMA_EVENT_CMD_MAGIC 'F' #define JFAE_CMD_GET_ASYNC_EVENT 0 +#define JFCE_CMD_WAIT_EVENT 0 #define CDMA_CMD_GET_ASYNC_EVENT \ _IOWR(CDMA_EVENT_CMD_MAGIC, JFAE_CMD_GET_ASYNC_EVENT, struct cdma_cmd_async_event) +#define CDMA_CMD_WAIT_JFC \ + _IOWR(CDMA_EVENT_CMD_MAGIC, JFCE_CMD_WAIT_EVENT, struct cdma_cmd_jfce_wait_args) +#define CDMA_ADDR_SHIFT 32 #define CDMA_DOORBELL_OFFSET 0x80 +#define CDMA_JFC_DB_CI_IDX_M GENMASK(21, 0) +#define CDMA_JFC_DB_VALID_OWNER_M 1 +#define CDMA_INTER_ERR 1 +#define CDMA_SRC_IDX_SHIFT 16 +#define CDMA_MAX_JFCE_EVENT_CNT 72 + 
#define MAP_COMMAND_MASK 0xff #define MAP_INDEX_MASK 0xffffff #define MAP_INDEX_SHIFT 8 @@ -27,6 +37,29 @@ #define CDMA_TYPICAL_RNR_RETRY 7 #define CDMA_TYPICAL_ERR_TIMEOUT 2 /* 0:128ms 1:1s 2:8s 3:64s */ +#define CDMA_CQE_STATUS_NUM 7 +#define CDMA_CQE_SUB_STATUS_NUM 5 + +enum dma_cr_status { + DMA_CR_SUCCESS = 0, + DMA_CR_UNSUPPORTED_OPCODE_ERR, + DMA_CR_LOC_LEN_ERR, + DMA_CR_LOC_OPERATION_ERR, + DMA_CR_LOC_ACCESS_ERR, + DMA_CR_REM_RESP_LEN_ERR, + DMA_CR_REM_UNSUPPORTED_REQ_ERR, + DMA_CR_REM_OPERATION_ERR, + DMA_CR_REM_ACCESS_ABORT_ERR, + DMA_CR_ACK_TIMEOUT_ERR, + DMA_CR_RNR_RETRY_CNT_EXC_ERR, + DMA_CR_WR_FLUSH_ERR, + DMA_CR_WR_SUSPEND_DONE, + DMA_CR_WR_FLUSH_ERR_DONE, + DMA_CR_WR_UNHANDLED, + DMA_CR_LOC_DATA_POISON, + DMA_CR_REM_DATA_POISON, +}; + enum db_mmap_type { CDMA_MMAP_JFC_PAGE, CDMA_MMAP_JETTY_DSQE @@ -44,9 +77,23 @@ enum cdma_cmd { CDMA_CMD_DELETE_QUEUE, CDMA_CMD_CREATE_JFC, CDMA_CMD_DELETE_JFC, + CDMA_CMD_CREATE_JFCE, CDMA_CMD_MAX }; +enum { + CQE_FOR_SEND, + CQE_FOR_RECEIVE +}; + +enum hw_cqe_opcode { + HW_CQE_OPC_SEND = 0x00, + HW_CQE_OPC_SEND_WITH_IMM = 0x01, + HW_CQE_OPC_SEND_WITH_INV = 0x02, + HW_CQE_OPC_WRITE_WITH_IMM = 0x03, + HW_CQE_OPC_ERR = 0xff +}; + struct cdma_ioctl_hdr { __u32 command; __u32 args_len; @@ -146,6 +193,13 @@ struct cdma_cmd_delete_ctp_args { } out; }; +struct cdma_cmd_create_jfce_args { + struct { + int fd; + int id; + } out; +}; + struct cdma_cmd_create_jfc_args { struct { __u32 depth; /* in terms of CQEBB */ @@ -169,6 +223,7 @@ struct cdma_cmd_delete_jfc_args { __u32 queue_id; } in; struct { + __u32 comp_events_reported; __u32 async_events_reported; } out; }; @@ -265,4 +320,21 @@ struct cdma_cmd_delete_queue_args { } in; }; +struct cdma_cmd_jfce_wait_args { + struct { + __u32 max_event_cnt; + int time_out; + } in; + struct { + __u32 event_cnt; + __u64 event_data[CDMA_MAX_JFCE_EVENT_CNT]; + } out; +}; + +enum jfc_poll_state { + JFC_OK, + JFC_EMPTY, + JFC_POLL_ERR, +}; + #endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 5ebe4feebd1c..3ef1eedee111 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -13,6 +13,35 @@ struct dma_device { void *private_data; }; +enum dma_cr_opcode { + DMA_CR_OPC_SEND = 0x00, + DMA_CR_OPC_SEND_WITH_IMM, + DMA_CR_OPC_SEND_WITH_INV, + DMA_CR_OPC_WRITE_WITH_IMM, +}; + +union dma_cr_flag { + struct { + u8 s_r : 1; + u8 jetty : 1; + u8 suspend_done : 1; + u8 flush_err_done : 1; + u8 reserved : 4; + } bs; + u8 value; +}; + +struct dma_cr { + enum dma_cr_status status; + u64 user_ctx; + enum dma_cr_opcode opcode; + union dma_cr_flag flag; + u32 completion_len; + u32 local_id; + u32 remote_id; + u32 tpn; +}; + struct queue_cfg { u32 queue_depth; u8 priority; @@ -42,4 +71,7 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, void dma_free_queue(struct dma_device *dma_dev, int queue_id); +int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, + struct dma_cr *cr); + #endif -- Gitee From 79593865683d16f894bcb882da1b5f1ef5d07bd1 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:36:17 +0800 Subject: [PATCH 020/243] ub: cdma: support unregister segment commit 62072a52120125e0cf45f531ebacdad6e8b3d6f0 openEuler This patch implements local segment unregister and remote segment import/unimport functionalities within the CDMA driver. The implementation includes support for the interfaces dma_unregister_seg, dma_import_seg, and dma_unimport_seg. 
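A minimal kernel-side usage sketch of the three new interfaces (illustrative only; the dma_device, the previously registered local segment, and every numeric value below are assumptions for the example, not part of this patch):

static int cdma_seg_teardown_example(struct dma_device *dma_dev,
				     struct dma_seg *local_seg)
{
	/* Hypothetical remote segment description: SVA, length and token
	 * would normally be exchanged with the remote side out of band.
	 */
	struct dma_seg_cfg cfg = {
		.sva = 0x100000,
		.len = 4096,
		.token_value = 0x5a,
		.token_value_valid = true,
	};
	struct dma_seg *remote;

	/* dma_import_seg() only builds a local descriptor for the remote
	 * segment; it pins no memory and returns NULL on failure.
	 */
	remote = dma_import_seg(&cfg);
	if (!remote)
		return -ENOMEM;

	/* ... post work requests that reference remote and local_seg ... */

	dma_unimport_seg(remote);		/* frees the descriptor */
	dma_unregister_seg(dma_dev, local_seg);	/* ungrants and frees */
	return 0;
}
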
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 1 + drivers/ub/cdma/cdma_api.c | 55 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_context.h | 2 ++ drivers/ub/cdma/cdma_dev.c | 5 +++ drivers/ub/cdma/cdma_ioctl.c | 41 +++++++++++++++++++++++- drivers/ub/cdma/cdma_jfs.h | 1 + drivers/ub/cdma/cdma_segment.c | 51 ++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_segment.h | 26 ++++++++++++++++ include/uapi/ub/cdma/cdma_abi.h | 7 +++++ include/ub/cdma/cdma_api.h | 22 +++++++++++++ 11 files changed, 211 insertions(+), 2 deletions(-) create mode 100644 drivers/ub/cdma/cdma_segment.c create mode 100644 drivers/ub/cdma/cdma_segment.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 58a355df4c33..714e0542f387 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o + cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index 78ac66be6526..8ed8fdb4d6fa 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -188,6 +188,7 @@ struct cdma_dev { struct cdma_table jfs_table; struct cdma_table jfc_table; struct cdma_table jfce_table; + struct cdma_table seg_table; struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; struct mutex file_mutex; struct list_head file_list; diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 8043b0238cb4..4ba886635ca0 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -4,6 +4,7 @@ #define pr_fmt(fmt) "CDMA: " fmt #define dev_fmt pr_fmt +#include "cdma_segment.h" #include "cdma_dev.h" #include "cdma_cmd.h" #include "cdma_context.h" @@ -310,6 +311,60 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) } EXPORT_SYMBOL_GPL(dma_free_queue); +void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg) +{ + struct cdma_ctx_res *ctx_res; + struct cdma_context *ctx; + struct cdma_segment *seg; + struct cdma_dev *cdev; + + if (!dma_dev || !dma_dev->private_data || !dma_seg) + return; + + cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0); + if (!cdev) { + pr_err("can not find cdev by eid, eid = 0x%x\n", + dma_dev->attr.eid.dw0); + return; + } + + ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; + seg = xa_load(&ctx_res->seg_xa, dma_seg->handle); + if (!seg) { + dev_err(cdev->dev, + "no segment found in this device, handle = %llu\n", + dma_seg->handle); + return; + } + xa_erase(&ctx_res->seg_xa, dma_seg->handle); + ctx = seg->ctx; + + cdma_seg_ungrant(seg); + cdma_unregister_seg(cdev, seg); + kfree(dma_seg); + + atomic_dec(&ctx->ref_cnt); +} +EXPORT_SYMBOL_GPL(dma_unregister_seg); + +struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg) +{ + if (!cfg || !cfg->sva || !cfg->len) + return NULL; + + return cdma_import_seg(cfg); +} +EXPORT_SYMBOL_GPL(dma_import_seg); + +void dma_unimport_seg(struct dma_seg *dma_seg) +{ + if (!dma_seg) + return; + + cdma_unimport_seg(dma_seg); +} +EXPORT_SYMBOL_GPL(dma_unimport_seg); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_context.h 
b/drivers/ub/cdma/cdma_context.h index c48ac55631bf..590bffb14cce 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -23,11 +23,13 @@ struct cdma_context { bool is_kernel; atomic_t ref_cnt; struct list_head queue_list; + struct list_head seg_list; }; struct cdma_ctx_res { struct cdma_context *ctx; struct xarray queue_xa; + struct xarray seg_xa; }; struct cdma_context *cdma_find_ctx_by_handle(struct cdma_dev *cdev, int handle); diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 96f33a55ebdf..79ad036557bf 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -122,6 +122,7 @@ static void cdma_init_tables(struct cdma_dev *cdev) static void cdma_destroy_tables(struct cdma_dev *cdev) { + cdma_tbl_destroy(cdev, &cdev->seg_table, "SEG"); cdma_tbl_destroy(cdev, &cdev->ctp_table, "CTP"); cdma_tbl_destroy(cdev, &cdev->jfs_table, "JFS"); cdma_tbl_destroy(cdev, &cdev->jfc_table, "JFC"); @@ -193,6 +194,7 @@ static void cdma_uninit_dev_param(struct cdma_dev *cdev) static void cdma_release_table_res(struct cdma_dev *cdev) { struct cdma_queue *queue; + struct cdma_segment *seg; struct cdma_jfc *jfc; struct cdma_jfs *jfs; struct cdma_tp *tmp; @@ -209,6 +211,9 @@ static void cdma_release_table_res(struct cdma_dev *cdev) idr_for_each_entry(&cdev->queue_table.idr_tbl.idr, queue, id) cdma_delete_queue(cdev, queue->id); + + idr_for_each_entry(&cdev->seg_table.idr_tbl.idr, seg, id) + cdma_unregister_seg(cdev, seg); } static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index f1513c162db2..d9e8ca330bc8 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -14,6 +14,7 @@ #include "cdma_queue.h" #include "cdma_event.h" #include "cdma_jfc.h" +#include "cdma_segment.h" #include "cdma_uobj.h" #include "cdma_ioctl.h" @@ -131,7 +132,8 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, dev_err(cdev->dev, "cdma context has not been created.\n"); return -ENOENT; } - if (!list_empty(&cfile->uctx->queue_list)) { + if (!list_empty(&cfile->uctx->queue_list) || + !list_empty(&cfile->uctx->seg_list)) { dev_err(cdev->dev, "queue/segment is still in use, ctx handle = %d.\n", cfile->uctx->handle); @@ -510,6 +512,42 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c return ret; } +static int cdma_cmd_unregister_seg(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_unregister_seg_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct cdma_segment *seg; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != sizeof(arg)) { + dev_err(cdev->dev, "unregister seg arg invalid.\n"); + return -EINVAL; + } + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, + "unregister seg get user data failed, ret = %d.\n", + ret); + return -EFAULT; + } + + uobj = cdma_uobj_get(cfile, arg.in.handle, UOBJ_TYPE_SEGMENT); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "get seg uobj failed.\n"); + return -EINVAL; + } + seg = uobj->object; + list_del(&seg->list); + cdma_unregister_seg(cdev, seg); + cdma_uobj_delete(uobj); + + return ret; +} + static int cdma_cmd_create_jfc(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { @@ -689,6 +727,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, [CDMA_CMD_CREATE_JFS] = cdma_cmd_create_jfs, 
[CDMA_CMD_DELETE_JFS] = cdma_cmd_delete_jfs, + [CDMA_CMD_UNREGISTER_SEG] = cdma_cmd_unregister_seg, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, [CDMA_CMD_CREATE_JFC] = cdma_cmd_create_jfc, diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 7625ace4b5c7..e4dcaa765a89 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -6,6 +6,7 @@ #include "cdma_common.h" #include "cdma_types.h" +#include "cdma_segment.h" #define MAX_WQEBB_NUM 4 #define CDMA_JFS_WQEBB_SIZE 64 diff --git a/drivers/ub/cdma/cdma_segment.c b/drivers/ub/cdma/cdma_segment.c new file mode 100644 index 000000000000..c2746340c7cf --- /dev/null +++ b/drivers/ub/cdma/cdma_segment.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include "cdma_segment.h" + +static inline void cdma_free_seg_handle(struct cdma_dev *cdev, u64 handle) +{ + spin_lock(&cdev->seg_table.lock); + idr_remove(&cdev->seg_table.idr_tbl.idr, handle); + spin_unlock(&cdev->seg_table.lock); +} + +void cdma_unregister_seg(struct cdma_dev *cdev, struct cdma_segment *seg) +{ + cdma_free_seg_handle(cdev, seg->base.handle); + cdma_umem_release(seg->umem, seg->is_kernel); + kfree(seg); +} + +void cdma_seg_ungrant(struct cdma_segment *seg) +{ + struct ummu_token_info token_info = { 0 }; + + token_info.tokenVal = seg->base.token_value; + + ummu_sva_ungrant_range(seg->ksva, (void *)seg->base.sva, + seg->base.len, &token_info); +} + +struct dma_seg *cdma_import_seg(struct dma_seg_cfg *cfg) +{ + struct dma_seg *seg; + + seg = kzalloc(sizeof(*seg), GFP_KERNEL); + if (!seg) + return NULL; + + seg->sva = cfg->sva; + seg->len = cfg->len; + seg->token_value = cfg->token_value; + + return seg; +} + +void cdma_unimport_seg(struct dma_seg *seg) +{ + kfree(seg); +} diff --git a/drivers/ub/cdma/cdma_segment.h b/drivers/ub/cdma/cdma_segment.h new file mode 100644 index 000000000000..67a9e714adec --- /dev/null +++ b/drivers/ub/cdma/cdma_segment.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#ifndef __CDMA_SEGMENT_H__ +#define __CDMA_SEGMENT_H__ + +#include "cdma_common.h" +#include + +struct cdma_dev; + +struct cdma_segment { + struct dma_seg base; + struct iommu_sva *ksva; + struct cdma_umem *umem; + struct cdma_context *ctx; + bool is_kernel; + struct list_head list; +}; + +void cdma_unregister_seg(struct cdma_dev *cdev, struct cdma_segment *seg); +void cdma_seg_ungrant(struct cdma_segment *seg); +struct dma_seg *cdma_import_seg(struct dma_seg_cfg *cfg); +void cdma_unimport_seg(struct dma_seg *seg); + +#endif /* CDMA_SEGMENT_H */ diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index fcee5800193d..cdfcfc14a11d 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -73,6 +73,7 @@ enum cdma_cmd { CDMA_CMD_DELETE_CTP, CDMA_CMD_CREATE_JFS, CDMA_CMD_DELETE_JFS, + CDMA_CMD_UNREGISTER_SEG, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, CDMA_CMD_CREATE_JFC, @@ -228,6 +229,12 @@ struct cdma_cmd_delete_jfc_args { } out; }; +struct cdma_cmd_unregister_seg_args { + struct { + __u64 handle; + } in; +}; + struct dev_eid { __u32 dw0; __u32 dw1; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 3ef1eedee111..d256aee9217b 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -51,6 +51,22 @@ struct queue_cfg { u32 trans_mode; }; +struct dma_seg { + u64 handle; + u64 sva; + u64 len; + u32 tid; /* data valid only in bit 0-19 */ + u32 token_value; + bool token_value_valid; +}; + +struct dma_seg_cfg { + u64 sva; + u64 len; + u32 token_value; + bool token_value_valid; +}; + struct dma_context { struct dma_device *dma_dev; u32 tid; /* data valid only in bit 0-19 */ @@ -71,6 +87,12 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, void dma_free_queue(struct dma_device *dma_dev, int queue_id); +void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg); + +struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg); + +void dma_unimport_seg(struct dma_seg *dma_seg); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From a7edc06d229c9f46e4c22e65c17ec6f3c2a22d2f Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:40:59 +0800 Subject: [PATCH 021/243] ub: cdma: support register segment commit 3aa2afdcbee97f1546e225a5f4f8c96cdc35e104 openEuler This patch implements local segment register-related functionality in the CDMA driver. The implementation includes support for the dma_register_seg interface. 
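A minimal kernel-side usage sketch of dma_register_seg() (illustrative only; the context handle and the 4 KiB buffer are assumptions for the example, not part of this patch):

static int cdma_seg_register_example(struct dma_device *dma_dev, int ctx_id)
{
	struct dma_seg_cfg cfg = { 0 };
	struct dma_seg *seg;
	void *buf;

	/* Hypothetical data buffer to expose as a local segment. */
	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cfg.sva = (u64)(uintptr_t)buf;
	cfg.len = 4096;

	/* Pins the buffer, grants access via the UMMU and publishes the
	 * segment under ctx_id; returns NULL on any failure.
	 */
	seg = dma_register_seg(dma_dev, ctx_id, &cfg);
	if (!seg) {
		kfree(buf);
		return -EINVAL;
	}

	/* ... seg->handle and seg->tid can now be used in work requests ... */

	dma_unregister_seg(dma_dev, seg);	/* ungrants, unpins, frees seg */
	kfree(buf);
	return 0;
}
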
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 72 +++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_context.c | 1 + drivers/ub/cdma/cdma_dev.c | 1 + drivers/ub/cdma/cdma_ioctl.c | 62 +++++++++++++++++++++++++ drivers/ub/cdma/cdma_segment.c | 81 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_segment.h | 9 ++++ include/uapi/ub/cdma/cdma_abi.h | 11 +++++ include/ub/cdma/cdma_api.h | 3 ++ 9 files changed, 241 insertions(+) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 4ba886635ca0..89d01159f797 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -311,6 +311,78 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) } EXPORT_SYMBOL_GPL(dma_free_queue); +struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id, + struct dma_seg_cfg *cfg) +{ + struct cdma_ctx_res *ctx_res; + struct cdma_segment *seg; + struct cdma_context *ctx; + struct dma_seg *ret_seg; + struct cdma_dev *cdev; + int ret; + + if (!dma_dev || !dma_dev->private_data || !cfg || !cfg->sva || !cfg->len) + return NULL; + + cdev = get_cdma_dev_by_eid(dma_dev->attr.eid.dw0); + if (!cdev) { + pr_err("can not find normal cdev by eid, eid = 0x%x\n", + dma_dev->attr.eid.dw0); + return NULL; + } + + if (cdev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", + dma_dev->attr.eid.dw0); + return NULL; + } + + ctx = cdma_find_ctx_by_handle(cdev, ctx_id); + if (!ctx) { + dev_err(cdev->dev, "find ctx by handle failed, handle = %d.\n", + ctx_id); + return NULL; + } + atomic_inc(&ctx->ref_cnt); + + seg = cdma_register_seg(cdev, cfg, true); + if (!seg) + goto decrease_cnt; + + seg->ctx = ctx; + ret = cdma_seg_grant(cdev, seg, cfg); + if (ret) + goto unregister_seg; + + ret_seg = kzalloc(sizeof(struct dma_seg), GFP_KERNEL); + if (!ret_seg) + goto ungrant_seg; + + memcpy(ret_seg, &seg->base, sizeof(struct dma_seg)); + + ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; + ret = xa_err(xa_store(&ctx_res->seg_xa, ret_seg->handle, seg, + GFP_KERNEL)); + if (ret) { + dev_err(cdev->dev, "store seg to ctx_res failed, ret = %d\n", + ret); + goto free_seg; + } + + return ret_seg; + +free_seg: + kfree(ret_seg); +ungrant_seg: + cdma_seg_ungrant(seg); +unregister_seg: + cdma_unregister_seg(cdev, seg); +decrease_cnt: + atomic_dec(&ctx->ref_cnt); + return NULL; +} +EXPORT_SYMBOL_GPL(dma_register_seg); + void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg) { struct cdma_ctx_res *ctx_res; diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index b5a149658847..f0370bea2861 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -21,6 +21,7 @@ #define AVAIL_SGMT_OST_INIT 512 #define CDMA_RANGE_INDEX_ENTRY_CNT 0x100000 +#define CDMA_SEGMENT_ENTRY_CNT 0x10000 #define CDMA_DB_SIZE 64 diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c index f13dcf8ccdbd..e3b3e13d8a4e 100644 --- a/drivers/ub/cdma/cdma_context.c +++ b/drivers/ub/cdma/cdma_context.c @@ -112,6 +112,7 @@ struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel) INIT_LIST_HEAD(&ctx->pgdir_list); mutex_init(&ctx->pgdir_mutex); INIT_LIST_HEAD(&ctx->queue_list); + INIT_LIST_HEAD(&ctx->seg_list); return ctx; diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 79ad036557bf..f08e60716edc 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ 
b/drivers/ub/cdma/cdma_dev.c @@ -118,6 +118,7 @@ static void cdma_init_tables(struct cdma_dev *cdev) cdma_tbl_init(&cdev->jfs_table, jfs->max_cnt + jfs->start_idx - 1, jfs->start_idx); cdma_tbl_init(&cdev->ctp_table, CDMA_RANGE_INDEX_ENTRY_CNT, 0); + cdma_tbl_init(&cdev->seg_table, CDMA_SEGMENT_ENTRY_CNT, 0); } static void cdma_destroy_tables(struct cdma_dev *cdev) diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index d9e8ca330bc8..0a62e306d6f7 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -512,6 +512,67 @@ static int cdma_cmd_delete_queue(struct cdma_ioctl_hdr *hdr, struct cdma_file *c return ret; } +static int cdma_cmd_register_seg(struct cdma_ioctl_hdr *hdr, + struct cdma_file *cfile) +{ + struct cdma_cmd_register_seg_args arg = { 0 }; + struct cdma_dev *cdev = cfile->cdev; + struct dma_seg_cfg cfg = { 0 }; + struct cdma_segment *seg; + struct cdma_uobj *uobj; + int ret; + + if (!hdr->args_addr || hdr->args_len != sizeof(arg) || !cfile->uctx) { + dev_err(cdev->dev, "register seg arg invalid.\n"); + return -EINVAL; + } + + ret = (int)copy_from_user(&arg, (void *)hdr->args_addr, + (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, + "register seg get user data failed, ret = %d.\n", ret); + return -EFAULT; + } + + uobj = cdma_uobj_create(cfile, UOBJ_TYPE_SEGMENT); + if (IS_ERR(uobj)) { + dev_err(cdev->dev, "create seg uobj failed.\n"); + return -ENOMEM; + } + + cfg.sva = arg.in.addr; + cfg.len = arg.in.len; + seg = cdma_register_seg(cdev, &cfg, false); + if (!seg) { + dev_err(cdev->dev, "register seg failed.\n"); + ret = -EINVAL; + goto delete_uobj; + } + seg->ctx = cfile->uctx; + + list_add_tail(&seg->list, &cfile->uctx->seg_list); + arg.out.handle = uobj->id; + uobj->object = seg; + + ret = (int)copy_to_user((void *)hdr->args_addr, &arg, (u32)sizeof(arg)); + if (ret) { + dev_err(cdev->dev, + "register seg copy to user failed, ret = %d.\n", ret); + ret = -EFAULT; + goto free_seg; + } + return 0; + +free_seg: + list_del(&seg->list); + cdma_unregister_seg(cdev, seg); +delete_uobj: + cdma_uobj_delete(uobj); + + return ret; +} + static int cdma_cmd_unregister_seg(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { @@ -727,6 +788,7 @@ static cdma_cmd_handler g_cdma_cmd_handler[CDMA_CMD_MAX] = { [CDMA_CMD_DELETE_CTP] = cdma_cmd_delete_ctp, [CDMA_CMD_CREATE_JFS] = cdma_cmd_create_jfs, [CDMA_CMD_DELETE_JFS] = cdma_cmd_delete_jfs, + [CDMA_CMD_REGISTER_SEG] = cdma_cmd_register_seg, [CDMA_CMD_UNREGISTER_SEG] = cdma_cmd_unregister_seg, [CDMA_CMD_CREATE_QUEUE] = cdma_cmd_create_queue, [CDMA_CMD_DELETE_QUEUE] = cdma_cmd_delete_queue, diff --git a/drivers/ub/cdma/cdma_segment.c b/drivers/ub/cdma/cdma_segment.c index c2746340c7cf..6882d27cd70a 100644 --- a/drivers/ub/cdma/cdma_segment.c +++ b/drivers/ub/cdma/cdma_segment.c @@ -5,6 +5,27 @@ #include #include "cdma_segment.h" +#include "cdma_context.h" + +static int cdma_alloc_seg_handle(struct cdma_dev *cdev, + struct cdma_segment *seg) +{ + struct cdma_table *seg_table = &cdev->seg_table; + int handle; + + idr_preload(GFP_KERNEL); + spin_lock(&seg_table->lock); + + handle = idr_alloc(&seg_table->idr_tbl.idr, seg, seg_table->idr_tbl.min, + seg_table->idr_tbl.max, GFP_NOWAIT); + if (handle < 0) + dev_err(cdev->dev, "alloc seg handle failed.\n"); + + spin_unlock(&seg_table->lock); + idr_preload_end(); + + return handle; +} static inline void cdma_free_seg_handle(struct cdma_dev *cdev, u64 handle) { @@ -13,6 +34,43 @@ static inline void cdma_free_seg_handle(struct cdma_dev *cdev, u64 handle) 
spin_unlock(&cdev->seg_table.lock); } +struct cdma_segment *cdma_register_seg(struct cdma_dev *cdev, + struct dma_seg_cfg *cfg, bool is_kernel) +{ + struct cdma_segment *seg; + int handle; + + seg = kzalloc(sizeof(*seg), GFP_KERNEL); + if (!seg) + return NULL; + + seg->umem = cdma_umem_get(cdev, cfg->sva, cfg->len, is_kernel); + if (IS_ERR_OR_NULL(seg->umem)) { + dev_err(cdev->dev, "pin seg failed\n"); + goto free_seg; + } + + handle = cdma_alloc_seg_handle(cdev, seg); + if (handle < 0) + goto unpin_umem; + + seg->base.handle = (u64)handle; + seg->base.token_value = cfg->token_value; + seg->base.sva = cfg->sva; + seg->base.len = cfg->len; + seg->base.token_value_valid = cfg->token_value_valid; + seg->is_kernel = is_kernel; + + return seg; + +unpin_umem: + cdma_umem_release(seg->umem, is_kernel); +free_seg: + kfree(seg); + + return NULL; +} + void cdma_unregister_seg(struct cdma_dev *cdev, struct cdma_segment *seg) { cdma_free_seg_handle(cdev, seg->base.handle); @@ -20,6 +78,29 @@ void cdma_unregister_seg(struct cdma_dev *cdev, struct cdma_segment *seg) kfree(seg); } +int cdma_seg_grant(struct cdma_dev *cdev, struct cdma_segment *seg, + struct dma_seg_cfg *cfg) +{ + struct ummu_token_info token_info; + struct ummu_seg_attr seg_attr; + int ret; + + seg->base.tid = seg->ctx->tid; + seg->ksva = seg->ctx->sva; + + token_info.input = 0; + token_info.tokenVal = cfg->token_value; + seg_attr.token = &token_info; + seg_attr.e_bit = UMMU_EBIT_OFF; + + ret = ummu_sva_grant_range(seg->ksva, (void *)cfg->sva, cfg->len, + MAPT_PERM_RW, (void *)&seg_attr); + if (ret) + dev_err(cdev->dev, "grant seg failed, ret = %d.\n", ret); + + return ret; +} + void cdma_seg_ungrant(struct cdma_segment *seg) { struct ummu_token_info token_info = { 0 }; diff --git a/drivers/ub/cdma/cdma_segment.h b/drivers/ub/cdma/cdma_segment.h index 67a9e714adec..113e357fcedd 100644 --- a/drivers/ub/cdma/cdma_segment.h +++ b/drivers/ub/cdma/cdma_segment.h @@ -18,7 +18,16 @@ struct cdma_segment { struct list_head list; }; +static inline struct cdma_segment *to_cdma_seg(struct dma_seg *seg) +{ + return container_of(seg, struct cdma_segment, base); +} + +struct cdma_segment *cdma_register_seg(struct cdma_dev *cdev, + struct dma_seg_cfg *cfg, bool is_kernel); void cdma_unregister_seg(struct cdma_dev *cdev, struct cdma_segment *seg); +int cdma_seg_grant(struct cdma_dev *cdev, struct cdma_segment *seg, + struct dma_seg_cfg *cfg); void cdma_seg_ungrant(struct cdma_segment *seg); struct dma_seg *cdma_import_seg(struct dma_seg_cfg *cfg); void cdma_unimport_seg(struct dma_seg *seg); diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index cdfcfc14a11d..b32954f28636 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -73,6 +73,7 @@ enum cdma_cmd { CDMA_CMD_DELETE_CTP, CDMA_CMD_CREATE_JFS, CDMA_CMD_DELETE_JFS, + CDMA_CMD_REGISTER_SEG, CDMA_CMD_UNREGISTER_SEG, CDMA_CMD_CREATE_QUEUE, CDMA_CMD_DELETE_QUEUE, @@ -229,6 +230,16 @@ struct cdma_cmd_delete_jfc_args { } out; }; +struct cdma_cmd_register_seg_args { + struct { + __u64 addr; + __u64 len; + } in; + struct { + __u64 handle; + } out; +}; + struct cdma_cmd_unregister_seg_args { struct { __u64 handle; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index d256aee9217b..ff69c268b569 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -87,6 +87,9 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, void dma_free_queue(struct dma_device *dma_dev, int queue_id); +struct dma_seg 
*dma_register_seg(struct dma_device *dma_dev, int ctx_id, + struct dma_seg_cfg *cfg); + void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg); struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg); -- Gitee From ba659c56a69f9a03859d9092f1a82807dc9e3ee0 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2025 20:26:29 -0400 Subject: [PATCH 022/243] fwctl: Add basic structure for a class subsystem with a cdev [Upstream commit 2e4986cf2d525eed3a240b7821f89ca45cf36d78] Create the class, character device and functions for a fwctl driver to un/register to the subsystem. A typical fwctl driver has a sysfs presence like: $ ls -l /dev/fwctl/fwctl0 crw------- 1 root root 250, 0 Apr 25 19:16 /dev/fwctl/fwctl0 $ ls /sys/class/fwctl/fwctl0 dev device power subsystem uevent $ ls /sys/class/fwctl/fwctl0/device/infiniband/ ibp0s10f0 $ ls /sys/class/infiniband/ibp0s10f0/device/fwctl/ fwctl0/ $ ls /sys/devices/pci0000:00/0000:00:0a.0/fwctl/fwctl0 dev device power subsystem uevent Which allows userspace to link all the multi-subsystem driver components together and learn the subsystem specific names for the device's components. Link: https://patch.msgid.link/r/1-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com Reviewed-by: Jonathan Cameron Reviewed-by: Dan Williams Reviewed-by: Dave Jiang Reviewed-by: Shannon Nelson Tested-by: Dave Jiang Tested-by: Shannon Nelson Signed-off-by: Jason Gunthorpe Signed-off-by: huwentao --- MAINTAINERS | 9 ++ arch/arm64/configs/tencent.config | 3 + arch/x86/configs/tencent.config | 3 + drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/fwctl/Kconfig | 9 ++ drivers/fwctl/Makefile | 4 + drivers/fwctl/main.c | 173 ++++++++++++++++++++++++++++++ include/linux/fwctl.h | 69 ++++++++++++ 9 files changed, 273 insertions(+) create mode 100644 drivers/fwctl/Kconfig create mode 100644 drivers/fwctl/Makefile create mode 100644 drivers/fwctl/main.c create mode 100644 include/linux/fwctl.h diff --git a/MAINTAINERS b/MAINTAINERS index 29ee45a167d1..9cd5de7361ed 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8640,6 +8640,15 @@ F: kernel/futex/* F: tools/perf/bench/futex* F: tools/testing/selftests/futex/ +FWCTL SUBSYSTEM +M: Dave Jiang +M: Jason Gunthorpe +M: Saeed Mahameed +R: Jonathan Cameron +S: Maintained +F: drivers/fwctl/ +F: include/linux/fwctl.h + GATEWORKS SYSTEM CONTROLLER (GSC) DRIVER M: Tim Harvey S: Maintained diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index b806c4dc225c..63cfddd36d14 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1863,3 +1863,6 @@ CONFIG_UB_URMA=m CONFIG_IOMMUFD=m CONFIG_VFIO_DEVICE_CDEV=y # end of IOMMUFD + +# fwctl +CONFIG_FWCTL=y diff --git a/arch/x86/configs/tencent.config b/arch/x86/configs/tencent.config index 8958dc6d4527..126020c405a7 100644 --- a/arch/x86/configs/tencent.config +++ b/arch/x86/configs/tencent.config @@ -2026,3 +2026,6 @@ CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_KSTRTOX=y CONFIG_TEST_BPF=m CONFIG_EXT4_FS=y + +# fwctl +CONFIG_FWCTL=y diff --git a/drivers/Kconfig b/drivers/Kconfig index bfb2bdb00477..1a9785701376 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -22,6 +22,8 @@ source "drivers/connector/Kconfig" source "drivers/firmware/Kconfig" +source "drivers/fwctl/Kconfig" + source "drivers/gnss/Kconfig" source "drivers/mtd/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 800793aafbbb..269267ac3b4f 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -141,6 +141,7 @@ obj-$(CONFIG_MEMSTICK) += memstick/ 
obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-y += firmware/ +obj-$(CONFIG_FWCTL) += fwctl/ obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ obj-y += clocksource/ diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig new file mode 100644 index 000000000000..37147a695add --- /dev/null +++ b/drivers/fwctl/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig FWCTL + tristate "fwctl device firmware access framework" + help + fwctl provides a userspace API for restricted access to communicate + with on-device firmware. The communication channel is intended to + support a wide range of lockdown compatible device behaviors including + manipulating device FLASH, debugging, and other activities that don't + fit neatly into an existing subsystem. diff --git a/drivers/fwctl/Makefile b/drivers/fwctl/Makefile new file mode 100644 index 000000000000..1cad210f6ba5 --- /dev/null +++ b/drivers/fwctl/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_FWCTL) += fwctl.o + +fwctl-y += main.o diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c new file mode 100644 index 000000000000..9096b9ba9cf7 --- /dev/null +++ b/drivers/fwctl/main.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES + */ +#define pr_fmt(fmt) "fwctl: " fmt +#include <linux/fwctl.h> + +#include <linux/container_of.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/slab.h> + +enum { + FWCTL_MAX_DEVICES = 4096, +}; +static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS)); + +static dev_t fwctl_dev; +static DEFINE_IDA(fwctl_ida); + +static int fwctl_fops_open(struct inode *inode, struct file *filp) +{ + struct fwctl_device *fwctl = + container_of(inode->i_cdev, struct fwctl_device, cdev); + + get_device(&fwctl->dev); + filp->private_data = fwctl; + return 0; +} + +static int fwctl_fops_release(struct inode *inode, struct file *filp) +{ + struct fwctl_device *fwctl = filp->private_data; + + fwctl_put(fwctl); + return 0; +} + +static const struct file_operations fwctl_fops = { + .owner = THIS_MODULE, + .open = fwctl_fops_open, + .release = fwctl_fops_release, +}; + +static void fwctl_device_release(struct device *device) +{ + struct fwctl_device *fwctl = + container_of(device, struct fwctl_device, dev); + + ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev); + kfree(fwctl); +} + +static char *fwctl_devnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "fwctl/%s", dev_name(dev)); +} + +static struct class fwctl_class = { + .name = "fwctl", + .dev_release = fwctl_device_release, + .devnode = fwctl_devnode, +}; + +static struct fwctl_device * +_alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size) +{ + struct fwctl_device *fwctl __free(kfree) = kzalloc(size, GFP_KERNEL); + int devnum; + + if (!fwctl) + return NULL; + + fwctl->dev.class = &fwctl_class; + fwctl->dev.parent = parent; + + devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL); + if (devnum < 0) + return NULL; + + fwctl->dev.devt = fwctl_dev + devnum; + fwctl->dev.class = &fwctl_class; + fwctl->dev.parent = parent; + + device_initialize(&fwctl->dev); + return_ptr(fwctl); +} + +/* Drivers use the fwctl_alloc_device() wrapper */ +struct fwctl_device *_fwctl_alloc_device(struct device *parent, + const struct fwctl_ops *ops, + size_t size) +{ + struct fwctl_device *fwctl __free(fwctl) = + _alloc_device(parent, ops, size); + + if (!fwctl) + return NULL; + + cdev_init(&fwctl->cdev, &fwctl_fops); + /* + * The driver module is 
protected by fwctl_register/unregister(), + * unregister won't complete until we are done with the driver's module. + */ + fwctl->cdev.owner = THIS_MODULE; + + if (dev_set_name(&fwctl->dev, "fwctl%d", fwctl->dev.devt - fwctl_dev)) + return NULL; + + fwctl->ops = ops; + return_ptr(fwctl); +} +EXPORT_SYMBOL_NS_GPL(_fwctl_alloc_device, FWCTL); + +/** + * fwctl_register - Register a new device to the subsystem + * @fwctl: Previously allocated fwctl_device + * + * On return the device is visible through sysfs and /dev, driver ops may be + * called. + */ +int fwctl_register(struct fwctl_device *fwctl) +{ + return cdev_device_add(&fwctl->cdev, &fwctl->dev); +} +EXPORT_SYMBOL_NS_GPL(fwctl_register, FWCTL); + +/** + * fwctl_unregister - Unregister a device from the subsystem + * @fwctl: Previously allocated and registered fwctl_device + * + * Undoes fwctl_register(). On return no driver ops will be called. The + * caller must still call fwctl_put() to free the fwctl. + * + * The design of fwctl allows this sort of disassociation of the driver from the + * subsystem primarily by keeping memory allocations owned by the core subsystem. + * The fwctl_device and fwctl_uctx can both be freed without requiring a driver + * callback. This allows the module to remain unlocked while FDs are open. + */ +void fwctl_unregister(struct fwctl_device *fwctl) +{ + cdev_device_del(&fwctl->cdev, &fwctl->dev); +} +EXPORT_SYMBOL_NS_GPL(fwctl_unregister, FWCTL); + +static int __init fwctl_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&fwctl_dev, 0, FWCTL_MAX_DEVICES, "fwctl"); + if (ret) + return ret; + + ret = class_register(&fwctl_class); + if (ret) + goto err_chrdev; + return 0; + +err_chrdev: + unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES); + return ret; +} + +static void __exit fwctl_exit(void) +{ + class_unregister(&fwctl_class); + unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES); +} + +module_init(fwctl_init); +module_exit(fwctl_exit); +MODULE_DESCRIPTION("fwctl device firmware access framework"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h new file mode 100644 index 000000000000..39d5059c9e59 --- /dev/null +++ b/include/linux/fwctl.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES + */ +#ifndef __LINUX_FWCTL_H +#define __LINUX_FWCTL_H +#include <linux/device.h> +#include <linux/cdev.h> +#include <linux/cleanup.h> + +struct fwctl_device; +struct fwctl_uctx; + +struct fwctl_ops { +}; + +/** + * struct fwctl_device - Per-driver registration struct + * @dev: The sysfs (class/fwctl/fwctlXX) device + * + * Each driver instance will have one of these structs with the driver private + * data following immediately after. This struct is refcounted, it is freed by + * calling fwctl_put(). + */ +struct fwctl_device { + struct device dev; + /* private: */ + struct cdev cdev; + const struct fwctl_ops *ops; +}; + +struct fwctl_device *_fwctl_alloc_device(struct device *parent, + const struct fwctl_ops *ops, + size_t size); +/** + * fwctl_alloc_device - Allocate a fwctl + * @parent: Physical device that provides the FW interface + * @ops: Driver ops to register + * @drv_struct: 'struct driver_fwctl' that holds the struct fwctl_device + * @member: Name of the struct fwctl_device in @drv_struct + * + * This allocates and initializes the fwctl_device embedded in the drv_struct. + * Upon success the pointer must be freed via fwctl_put(). Returns a 'drv_struct + * \*' on success, NULL on error. 
+ */ +#define fwctl_alloc_device(parent, ops, drv_struct, member) \ + ({ \ + static_assert(__same_type(struct fwctl_device, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member) == 0); \ + (drv_struct *)_fwctl_alloc_device(parent, ops, \ + sizeof(drv_struct)); \ + }) + +static inline struct fwctl_device *fwctl_get(struct fwctl_device *fwctl) +{ + get_device(&fwctl->dev); + return fwctl; +} +static inline void fwctl_put(struct fwctl_device *fwctl) +{ + put_device(&fwctl->dev); +} +DEFINE_FREE(fwctl, struct fwctl_device *, if (_T) fwctl_put(_T)); + +int fwctl_register(struct fwctl_device *fwctl); +void fwctl_unregister(struct fwctl_device *fwctl); + +#endif -- Gitee From 3aa3befb84b5226179f3066b8cfd5137013715ce Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2025 20:26:30 -0400 Subject: [PATCH 023/243] fwctl: Basic ioctl dispatch for the character device [Upstream commit 0e79a47fb197b6937709a2af2a138c526a9bc374] Each file descriptor gets a chunk of per-FD driver specific context that allows the driver to attach a device specific struct to. The core code takes care of the memory lifetime for this structure. The ioctl dispatch and design is based on what was built for iommufd. The ioctls have a struct which has a combined in/out behavior with a typical 'zero pad' scheme for future extension and backwards compatibility. Like iommufd some shared logic does most of the ioctl marshaling and compatibility work and table dispatches to some function pointers for each unique ioctl. This approach has proven to work quite well in the iommufd and rdma subsystems. Allocate an ioctl number space for the subsystem. Link: https://patch.msgid.link/r/2-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com Reviewed-by: Jonathan Cameron Reviewed-by: Dave Jiang Reviewed-by: Shannon Nelson Tested-by: Dave Jiang Tested-by: Shannon Nelson Signed-off-by: Jason Gunthorpe Signed-off-by: huwentao --- .../userspace-api/ioctl/ioctl-number.rst | 1 + MAINTAINERS | 1 + drivers/fwctl/main.c | 143 +++++++++++++++++- include/linux/fwctl.h | 46 ++++++ include/uapi/fwctl/fwctl.h | 38 +++++ 5 files changed, 224 insertions(+), 5 deletions(-) create mode 100644 include/uapi/fwctl/fwctl.h diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 4ea5b837399a..66ad52639a28 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -321,6 +321,7 @@ Code Seq# Include File Comments 0x97 00-7F fs/ceph/ioctl.h Ceph file system 0x99 00-0F 537-Addinboard driver +0x9A 00-0F include/uapi/fwctl/fwctl.h 0xA0 all linux/sdp/sdp.h Industrial Device Project 0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver diff --git a/MAINTAINERS b/MAINTAINERS index 9cd5de7361ed..cd3481849a04 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8648,6 +8648,7 @@ R: Jonathan Cameron S: Maintained F: drivers/fwctl/ F: include/linux/fwctl.h +F: include/uapi/fwctl/ GATEWORKS SYSTEM CONTROLLER (GSC) DRIVER M: Tim Harvey S: Maintained diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c index 9096b9ba9cf7..1aa0e4209a8c 100644 --- a/drivers/fwctl/main.c +++ b/drivers/fwctl/main.c @@ -10,6 +10,8 @@ #include <linux/module.h> #include <linux/slab.h> +#include <uapi/fwctl/fwctl.h> + enum { FWCTL_MAX_DEVICES = 4096, }; @@ -18,20 +20,128 @@ static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS)); static dev_t fwctl_dev; static DEFINE_IDA(fwctl_ida); +struct fwctl_ucmd { + struct fwctl_uctx *uctx; + void __user *ubuffer; + void *cmd; + u32 user_size; +}; + +/* On stack memory for the 
ioctl structs */ +union fwctl_ucmd_buffer { +}; + +struct fwctl_ioctl_op { + unsigned int size; + unsigned int min_size; + unsigned int ioctl_num; + int (*execute)(struct fwctl_ucmd *ucmd); +}; + +#define IOCTL_OP(_ioctl, _fn, _struct, _last) \ + [_IOC_NR(_ioctl) - FWCTL_CMD_BASE] = { \ + .size = sizeof(_struct) + \ + BUILD_BUG_ON_ZERO(sizeof(union fwctl_ucmd_buffer) < \ + sizeof(_struct)), \ + .min_size = offsetofend(_struct, _last), \ + .ioctl_num = _ioctl, \ + .execute = _fn, \ + } +static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = { +}; + +static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct fwctl_uctx *uctx = filp->private_data; + const struct fwctl_ioctl_op *op; + struct fwctl_ucmd ucmd = {}; + union fwctl_ucmd_buffer buf; + unsigned int nr; + int ret; + + nr = _IOC_NR(cmd); + if ((nr - FWCTL_CMD_BASE) >= ARRAY_SIZE(fwctl_ioctl_ops)) + return -ENOIOCTLCMD; + + op = &fwctl_ioctl_ops[nr - FWCTL_CMD_BASE]; + if (op->ioctl_num != cmd) + return -ENOIOCTLCMD; + + ucmd.uctx = uctx; + ucmd.cmd = &buf; + ucmd.ubuffer = (void __user *)arg; + ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer); + if (ret) + return ret; + + if (ucmd.user_size < op->min_size) + return -EINVAL; + + ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer, + ucmd.user_size); + if (ret) + return ret; + + guard(rwsem_read)(&uctx->fwctl->registration_lock); + if (!uctx->fwctl->ops) + return -ENODEV; + return op->execute(&ucmd); +} + static int fwctl_fops_open(struct inode *inode, struct file *filp) { struct fwctl_device *fwctl = container_of(inode->i_cdev, struct fwctl_device, cdev); + int ret; + + guard(rwsem_read)(&fwctl->registration_lock); + if (!fwctl->ops) + return -ENODEV; + + struct fwctl_uctx *uctx __free(kfree) = + kzalloc(fwctl->ops->uctx_size, GFP_KERNEL_ACCOUNT); + if (!uctx) + return -ENOMEM; + + uctx->fwctl = fwctl; + ret = fwctl->ops->open_uctx(uctx); + if (ret) + return ret; + + scoped_guard(mutex, &fwctl->uctx_list_lock) { + list_add_tail(&uctx->uctx_list_entry, &fwctl->uctx_list); + } get_device(&fwctl->dev); - filp->private_data = fwctl; + filp->private_data = no_free_ptr(uctx); return 0; } +static void fwctl_destroy_uctx(struct fwctl_uctx *uctx) +{ + lockdep_assert_held(&uctx->fwctl->uctx_list_lock); + list_del(&uctx->uctx_list_entry); + uctx->fwctl->ops->close_uctx(uctx); +} + static int fwctl_fops_release(struct inode *inode, struct file *filp) { - struct fwctl_device *fwctl = filp->private_data; + struct fwctl_uctx *uctx = filp->private_data; + struct fwctl_device *fwctl = uctx->fwctl; + scoped_guard(rwsem_read, &fwctl->registration_lock) { + /* + * NULL ops means fwctl_unregister() has already removed the + * driver and destroyed the uctx. 
+	 */
+		if (fwctl->ops) {
+			guard(mutex)(&fwctl->uctx_list_lock);
+			fwctl_destroy_uctx(uctx);
+		}
+	}
+
+	kfree(uctx);
 	fwctl_put(fwctl);
 	return 0;
 }
@@ -40,6 +150,7 @@ static const struct file_operations fwctl_fops = {
 	.owner = THIS_MODULE,
 	.open = fwctl_fops_open,
 	.release = fwctl_fops_release,
+	.unlocked_ioctl = fwctl_fops_ioctl,
 };
 
 static void fwctl_device_release(struct device *device)
@@ -48,6 +159,7 @@ static void fwctl_device_release(struct device *device)
 		container_of(device, struct fwctl_device, dev);
 
 	ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev);
+	mutex_destroy(&fwctl->uctx_list_lock);
 	kfree(fwctl);
 }
 
@@ -71,9 +183,6 @@ _alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size)
 	if (!fwctl)
 		return NULL;
 
-	fwctl->dev.class = &fwctl_class;
-	fwctl->dev.parent = parent;
-
 	devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL);
 	if (devnum < 0)
 		return NULL;
@@ -82,6 +191,10 @@ _alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size)
 	fwctl->dev.class = &fwctl_class;
 	fwctl->dev.parent = parent;
 
+	init_rwsem(&fwctl->registration_lock);
+	mutex_init(&fwctl->uctx_list_lock);
+	INIT_LIST_HEAD(&fwctl->uctx_list);
+
 	device_initialize(&fwctl->dev);
 	return_ptr(fwctl);
 }
@@ -132,6 +245,10 @@ EXPORT_SYMBOL_NS_GPL(fwctl_register, FWCTL);
  * Undoes fwctl_register(). On return no driver ops will be called. The
  * caller must still call fwctl_put() to free the fwctl.
  *
+ * Unregister will return even if userspace still has file descriptors open.
+ * This will call ops->close_uctx() on any open FDs and after return no driver
+ * op will be called. The FDs remain open but all fops will return -ENODEV.
+ *
  * The design of fwctl allows this sort of disassociation of the driver from the
  * subsystem primarily by keeping memory allocations owned by the core subsystem.
  * The fwctl_device and fwctl_uctx can both be freed without requiring a driver
@@ -139,7 +256,23 @@ EXPORT_SYMBOL_NS_GPL(fwctl_register, FWCTL);
  */
 void fwctl_unregister(struct fwctl_device *fwctl)
 {
+	struct fwctl_uctx *uctx;
+
 	cdev_device_del(&fwctl->cdev, &fwctl->dev);
+
+	/* Disable and free the driver's resources for any still open FDs. */
+	guard(rwsem_write)(&fwctl->registration_lock);
+	guard(mutex)(&fwctl->uctx_list_lock);
+	while ((uctx = list_first_entry_or_null(&fwctl->uctx_list,
+						struct fwctl_uctx,
+						uctx_list_entry)))
+		fwctl_destroy_uctx(uctx);
+
+	/*
+	 * The driver module may unload after this returns, the op pointer will
+	 * not be valid.
+	 */
+	fwctl->ops = NULL;
 }
 EXPORT_SYMBOL_NS_GPL(fwctl_unregister, FWCTL);
 
diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h
index 39d5059c9e59..faa4b2c780e0 100644
--- a/include/linux/fwctl.h
+++ b/include/linux/fwctl.h
@@ -11,7 +11,30 @@
 struct fwctl_device;
 struct fwctl_uctx;
 
+/**
+ * struct fwctl_ops - Driver provided operations
+ *
+ * fwctl_unregister() will wait until all executing ops are completed before it
+ * returns. Drivers should be mindful to not let their ops run for too long as
+ * it will block device hot unplug and module unloading.
+ */
 struct fwctl_ops {
+	/**
+	 * @uctx_size: The size of the fwctl_uctx struct to allocate. The first
+	 * bytes of this memory will be a fwctl_uctx. The driver can use the
+	 * remaining bytes as its private memory.
+	 */
+	size_t uctx_size;
+	/**
+	 * @open_uctx: Called when a file descriptor is opened before the uctx
+	 * is ever used.
+	 */
+	int (*open_uctx)(struct fwctl_uctx *uctx);
+	/**
+	 * @close_uctx: Called when the uctx is destroyed, usually when the FD
+	 * is closed.
+ */ + void (*close_uctx)(struct fwctl_uctx *uctx); }; /** @@ -26,6 +49,15 @@ struct fwctl_device { struct device dev; /* private: */ struct cdev cdev; + + /* Protect uctx_list */ + struct mutex uctx_list_lock; + struct list_head uctx_list; + /* + * Protect ops, held for write when ops becomes NULL during unregister, + * held for read whenever ops is loaded or an ops function is running. + */ + struct rw_semaphore registration_lock; const struct fwctl_ops *ops; }; @@ -66,4 +98,18 @@ DEFINE_FREE(fwctl, struct fwctl_device *, if (_T) fwctl_put(_T)); int fwctl_register(struct fwctl_device *fwctl); void fwctl_unregister(struct fwctl_device *fwctl); +/** + * struct fwctl_uctx - Per user FD context + * @fwctl: fwctl instance that owns the context + * + * Every FD opened by userspace will get a unique context allocation. Any driver + * private data will follow immediately after. + */ +struct fwctl_uctx { + struct fwctl_device *fwctl; + /* private: */ + /* Head at fwctl_device::uctx_list */ + struct list_head uctx_list_entry; +}; + #endif diff --git a/include/uapi/fwctl/fwctl.h b/include/uapi/fwctl/fwctl.h new file mode 100644 index 000000000000..8f5fe821cf28 --- /dev/null +++ b/include/uapi/fwctl/fwctl.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. + */ +#ifndef _UAPI_FWCTL_H +#define _UAPI_FWCTL_H + +#define FWCTL_TYPE 0x9A + +/** + * DOC: General ioctl format + * + * The ioctl interface follows a general format to allow for extensibility. Each + * ioctl is passed a structure pointer as the argument providing the size of + * the structure in the first u32. The kernel checks that any structure space + * beyond what it understands is 0. This allows userspace to use the backward + * compatible portion while consistently using the newer, larger, structures. + * + * ioctls use a standard meaning for common errnos: + * + * - ENOTTY: The IOCTL number itself is not supported at all + * - E2BIG: The IOCTL number is supported, but the provided structure has + * non-zero in a part the kernel does not understand. + * - EOPNOTSUPP: The IOCTL number is supported, and the structure is + * understood, however a known field has a value the kernel does not + * understand or support. + * - EINVAL: Everything about the IOCTL was understood, but a field is not + * correct. + * - ENOMEM: Out of memory. + * - ENODEV: The underlying device has been hot-unplugged and the FD is + * orphaned. + * + * As well as additional errnos, within specific ioctls. + */ +enum { + FWCTL_CMD_BASE = 0, +}; + +#endif -- Gitee From 2ce7983c7d3378053b64962d26243652883f9e50 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2025 20:26:31 -0400 Subject: [PATCH 024/243] fwctl: FWCTL_INFO to return basic information about the device [Upstream commit fb39e9092be5a18eaab05b5a2492741fe6e395fe] Userspace will need to know some details about the fwctl interface being used to locate the correct userspace code to communicate with the kernel. Provide a simple device_type enum indicating what the kernel driver is. Allow the device to provide a device specific info struct that contains any additional information that the driver may need to provide to userspace. 
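
For illustration only, a minimal userspace sketch of using the FWCTL_INFO
call added here (the device path, error handling, and installed header
location are assumptions, not part of this patch):

	/* Hypothetical example; not part of the kernel changes below. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <fwctl/fwctl.h>	/* assumed install path of the uapi header */

	int main(void)
	{
		struct fwctl_info info;
		int fd = open("/dev/fwctl/fwctl0", O_RDWR);

		if (fd < 0)
			return 1;
		memset(&info, 0, sizeof(info));
		info.size = sizeof(info);	/* first u32 carries the struct size */
		if (ioctl(fd, FWCTL_INFO, &info))
			return 1;
		printf("device_type: %u\n", info.out_device_type);
		return 0;
	}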
Link: https://patch.msgid.link/r/3-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com
Reviewed-by: Jonathan Cameron
Reviewed-by: Dave Jiang
Reviewed-by: Shannon Nelson
Tested-by: Dave Jiang
Tested-by: Shannon Nelson
Signed-off-by: Jason Gunthorpe
Signed-off-by: huwentao
---
 drivers/fwctl/main.c       | 55 ++++++++++++++++++++++++++++++++++++++
 include/linux/fwctl.h      | 12 +++++++++
 include/uapi/fwctl/fwctl.h | 31 +++++++++++++++++++++
 3 files changed, 98 insertions(+)

diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
index 1aa0e4209a8c..44305af8a1c9 100644
--- a/drivers/fwctl/main.c
+++ b/drivers/fwctl/main.c
@@ -27,8 +27,62 @@ struct fwctl_ucmd {
 	u32 user_size;
 };
 
+static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len)
+{
+	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
+			 min_t(size_t, ucmd->user_size, cmd_len)))
+		return -EFAULT;
+	return 0;
+}
+
+static int copy_to_user_zero_pad(void __user *to, const void *from,
+				 size_t from_len, size_t user_len)
+{
+	size_t copy_len;
+
+	copy_len = min(from_len, user_len);
+	if (copy_to_user(to, from, copy_len))
+		return -EFAULT;
+	if (copy_len < user_len) {
+		if (clear_user(to + copy_len, user_len - copy_len))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int fwctl_cmd_info(struct fwctl_ucmd *ucmd)
+{
+	struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+	struct fwctl_info *cmd = ucmd->cmd;
+	size_t driver_info_len = 0;
+
+	if (cmd->flags)
+		return -EOPNOTSUPP;
+
+	if (!fwctl->ops->info && cmd->device_data_len) {
+		if (clear_user(u64_to_user_ptr(cmd->out_device_data),
+			       cmd->device_data_len))
+			return -EFAULT;
+	} else if (cmd->device_data_len) {
+		void *driver_info __free(kfree) =
+			fwctl->ops->info(ucmd->uctx, &driver_info_len);
+		if (IS_ERR(driver_info))
+			return PTR_ERR(driver_info);
+
+		if (copy_to_user_zero_pad(u64_to_user_ptr(cmd->out_device_data),
+					  driver_info, driver_info_len,
+					  cmd->device_data_len))
+			return -EFAULT;
+	}
+
+	cmd->out_device_type = fwctl->ops->device_type;
+	cmd->device_data_len = driver_info_len;
+	return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
 /* On stack memory for the ioctl structs */
 union fwctl_ucmd_buffer {
+	struct fwctl_info info;
 };
 
 struct fwctl_ioctl_op {
@@ -48,6 +102,7 @@ struct fwctl_ioctl_op {
 		.execute = _fn,                                               \
 	}
 static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = {
+	IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data),
 };
 
 static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd,
diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h
index faa4b2c780e0..700a5be940e3 100644
--- a/include/linux/fwctl.h
+++ b/include/linux/fwctl.h
@@ -7,6 +7,7 @@
 #include <linux/device.h>
 #include <linux/cdev.h>
 #include <linux/cleanup.h>
+#include <uapi/fwctl/fwctl.h>
 
 struct fwctl_device;
 struct fwctl_uctx;
@@ -19,6 +20,10 @@ struct fwctl_uctx;
  * it will block device hot unplug and module unloading.
  */
 struct fwctl_ops {
+	/**
+	 * @device_type: The driver's assigned device_type number. This is uABI.
+	 */
+	enum fwctl_device_type device_type;
 	/**
 	 * @uctx_size: The size of the fwctl_uctx struct to allocate. The first
 	 * bytes of this memory will be a fwctl_uctx. The driver can use the
@@ -35,6 +40,13 @@ struct fwctl_ops {
 	 * is closed.
 	 */
 	void (*close_uctx)(struct fwctl_uctx *uctx);
+	/**
+	 * @info: Implement FWCTL_INFO. Return kmalloc() memory that is copied
+	 * to out_device_data. On input, length indicates the size of the user
+	 * buffer; on output it indicates the size of the memory. The driver can
+	 * ignore length on input, the core code will handle everything.
+	 */
+	void *(*info)(struct fwctl_uctx *uctx, size_t *length);
 };
 
 /**
diff --git a/include/uapi/fwctl/fwctl.h b/include/uapi/fwctl/fwctl.h
index 8f5fe821cf28..4052df63f66d 100644
--- a/include/uapi/fwctl/fwctl.h
+++ b/include/uapi/fwctl/fwctl.h
@@ -4,6 +4,9 @@
 #ifndef _UAPI_FWCTL_H
 #define _UAPI_FWCTL_H
 
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
 #define FWCTL_TYPE 0x9A
 
 /**
@@ -33,6 +36,34 @@
  */
 enum {
 	FWCTL_CMD_BASE = 0,
+	FWCTL_CMD_INFO = 0,
+};
+
+enum fwctl_device_type {
+	FWCTL_DEVICE_TYPE_ERROR = 0,
+};
+
+/**
+ * struct fwctl_info - ioctl(FWCTL_INFO)
+ * @size: sizeof(struct fwctl_info)
+ * @flags: Must be 0
+ * @out_device_type: Returns the type of the device from enum fwctl_device_type
+ * @device_data_len: On input the length of the out_device_data memory. On
+ *	output the size of the kernel's device_data which may be larger or
+ *	smaller than the input. May be 0 on input.
+ * @out_device_data: Pointer to a memory of device_data_len bytes. Kernel will
+ *	fill the entire memory, zeroing as required.
+ *
+ * Returns basic information about this fwctl instance, particularly what driver
+ * is being used to define the device_data format.
+ */
+struct fwctl_info {
+	__u32 size;
+	__u32 flags;
+	__u32 out_device_type;
+	__u32 device_data_len;
+	__aligned_u64 out_device_data;
 };
+#define FWCTL_INFO _IO(FWCTL_TYPE, FWCTL_CMD_INFO)
 
 #endif
-- 
Gitee

From 539f0dc269d4fd9013fc3ba022083ce1836b58f8 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Thu, 27 Feb 2025 20:26:32 -0400
Subject: [PATCH 025/243] taint: Add TAINT_FWCTL

[Upstream commit 8eea4e74475804285507c077bec87d40be87ff06]

Requesting a fwctl scope of access that includes mutating device debug
data will cause the kernel to be tainted. Changing the device operation
through things in the debug scope may cause the device to malfunction
in undefined ways. This should be reflected in the TAINT flags to help
any debuggers understand that something has been done.

Link: https://patch.msgid.link/r/4-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com
Reviewed-by: Jonathan Cameron
Reviewed-by: Dave Jiang
Reviewed-by: Shannon Nelson
Tested-by: Dave Jiang
Tested-by: Shannon Nelson
Signed-off-by: Jason Gunthorpe
Signed-off-by: huwentao
---
 Documentation/admin-guide/tainted-kernels.rst | 6 ++++++
 include/linux/panic.h                         | 3 ++-
 kernel/panic.c                                | 1 +
 tools/debugging/kernel-chktaint               | 8 ++++++++
 4 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst
index 92a8a07f5c43..6a63aede0d86 100644
--- a/Documentation/admin-guide/tainted-kernels.rst
+++ b/Documentation/admin-guide/tainted-kernels.rst
@@ -101,6 +101,7 @@ Bit Log Number Reason that got the kernel tainted
  16  _/X  65536  auxiliary taint, defined for and used by distros
  17  _/T  131072 kernel was built with the struct randomization plugin
  18  _/N  262144 an in-kernel test has been run
+ 19  _/J  524288 userspace used a mutating debug operation in fwctl
 ===  ===  ====== ========================================================
 
 Note: The character ``_`` is representing a blank in this table to make reading
@@ -182,3 +183,8 @@ More detailed explanation for tainting
      produce extremely unusual kernel structure layouts (even performance
      pathological ones), which is important to know when debugging. Set at
      build time.
+
+ 19) ``J`` if userspace opened /dev/fwctl/* and performed a FWCTL_RPC_DEBUG_WRITE
+     to use the device's debugging features. Device debugging features could
+     cause the device to malfunction in undefined ways.
+ diff --git a/include/linux/panic.h b/include/linux/panic.h index d0d592e55173..adfe053cf6b2 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -75,7 +75,8 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 #define TAINT_TEST 18 -#define TAINT_FLAGS_COUNT 19 +#define TAINT_FWCTL 19 +#define TAINT_FLAGS_COUNT 20 #define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { diff --git a/kernel/panic.c b/kernel/panic.c index 8c54a4b96f03..0defd5bcfb89 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -482,6 +482,7 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = { [ TAINT_AUX ] = { 'X', ' ', true }, [ TAINT_RANDSTRUCT ] = { 'T', ' ', true }, [ TAINT_TEST ] = { 'N', ' ', true }, + [ TAINT_FWCTL ] = { 'J', ' ', true }, }; /** diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint index 279be06332be..e7da0909d097 100755 --- a/tools/debugging/kernel-chktaint +++ b/tools/debugging/kernel-chktaint @@ -204,6 +204,14 @@ else echo " * an in-kernel test (such as a KUnit test) has been run (#18)" fi +T=`expr $T / 2` +if [ `expr $T % 2` -eq 0 ]; then + addout " " +else + addout "J" + echo " * fwctl's mutating debug interface was used (#19)" +fi + echo "For a more detailed explanation of the various taint flags see" echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources" echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html" -- Gitee From dfd3a195aa57652878b5ea2a4735940d03b22871 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2025 20:26:33 -0400 Subject: [PATCH 026/243] fwctl: FWCTL_RPC to execute a Remote Procedure Call to device firmware [Upstream commit 840cfb7cf570b681f5d20e19f7c2675a9d991732] Add the FWCTL_RPC ioctl which allows a request/response RPC call to device firmware. Drivers implementing this call must follow the security guidelines under Documentation/userspace-api/fwctl.rst The core code provides some memory management helpers to get the messages copied from and back to userspace. The driver is responsible for allocating the output message memory and delivering the message to the device. 
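
For illustration only, a hypothetical userspace helper showing the shape of
the call added here (assumes <sys/ioctl.h>, <stdint.h>, and the installed
fwctl uapi header; fd and the two buffers come from the caller):

	/* Hypothetical example; not part of the kernel changes below. */
	static int do_fw_rpc(int fd, void *in, __u32 in_len,
			     void *out, __u32 out_len)
	{
		struct fwctl_rpc rpc = {
			.size = sizeof(rpc),
			.scope = FWCTL_RPC_DEBUG_READ_ONLY,
			.in_len = in_len,
			.out_len = out_len,
			.in = (uintptr_t)in,	/* device-specific request */
			.out = (uintptr_t)out,	/* device-specific response */
		};

		if (ioctl(fd, FWCTL_RPC, &rpc))
			return -1;
		/* rpc.out_len now holds the length the driver produced */
		return (int)rpc.out_len;
	}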
Link: https://patch.msgid.link/r/5-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com Reviewed-by: Jonathan Cameron Reviewed-by: Dave Jiang Reviewed-by: Shannon Nelson Tested-by: Dave Jiang Tested-by: Shannon Nelson Signed-off-by: Jason Gunthorpe Signed-off-by: huwentao --- drivers/fwctl/main.c | 60 +++++++++++++++++++++++++++++++++ include/linux/fwctl.h | 8 +++++ include/uapi/fwctl/fwctl.h | 69 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 137 insertions(+) diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c index 44305af8a1c9..c783e94c9c65 100644 --- a/drivers/fwctl/main.c +++ b/drivers/fwctl/main.c @@ -8,17 +8,20 @@ #include #include #include +#include #include #include enum { FWCTL_MAX_DEVICES = 4096, + MAX_RPC_LEN = SZ_2M, }; static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS)); static dev_t fwctl_dev; static DEFINE_IDA(fwctl_ida); +static unsigned long fwctl_tainted; struct fwctl_ucmd { struct fwctl_uctx *uctx; @@ -80,9 +83,65 @@ static int fwctl_cmd_info(struct fwctl_ucmd *ucmd) return ucmd_respond(ucmd, sizeof(*cmd)); } +static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd) +{ + struct fwctl_device *fwctl = ucmd->uctx->fwctl; + struct fwctl_rpc *cmd = ucmd->cmd; + size_t out_len; + + if (cmd->in_len > MAX_RPC_LEN || cmd->out_len > MAX_RPC_LEN) + return -EMSGSIZE; + + switch (cmd->scope) { + case FWCTL_RPC_CONFIGURATION: + case FWCTL_RPC_DEBUG_READ_ONLY: + break; + + case FWCTL_RPC_DEBUG_WRITE_FULL: + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + fallthrough; + case FWCTL_RPC_DEBUG_WRITE: + if (!test_and_set_bit(0, &fwctl_tainted)) { + dev_warn( + &fwctl->dev, + "%s(%d): has requested full access to the physical device device", + current->comm, task_pid_nr(current)); + add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK); + } + break; + default: + return -EOPNOTSUPP; + } + + void *inbuf __free(kvfree) = kvzalloc(cmd->in_len, GFP_KERNEL_ACCOUNT); + if (!inbuf) + return -ENOMEM; + if (copy_from_user(inbuf, u64_to_user_ptr(cmd->in), cmd->in_len)) + return -EFAULT; + + out_len = cmd->out_len; + void *outbuf __free(kvfree) = fwctl->ops->fw_rpc( + ucmd->uctx, cmd->scope, inbuf, cmd->in_len, &out_len); + if (IS_ERR(outbuf)) + return PTR_ERR(outbuf); + if (outbuf == inbuf) { + /* The driver can re-use inbuf as outbuf */ + inbuf = NULL; + } + + if (copy_to_user(u64_to_user_ptr(cmd->out), outbuf, + min(cmd->out_len, out_len))) + return -EFAULT; + + cmd->out_len = out_len; + return ucmd_respond(ucmd, sizeof(*cmd)); +} + /* On stack memory for the ioctl structs */ union fwctl_ucmd_buffer { struct fwctl_info info; + struct fwctl_rpc rpc; }; struct fwctl_ioctl_op { @@ -103,6 +162,7 @@ struct fwctl_ioctl_op { } static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = { IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data), + IOCTL_OP(FWCTL_RPC, fwctl_cmd_rpc, struct fwctl_rpc, out), }; static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd, diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h index 700a5be940e3..5d61fc8a6871 100644 --- a/include/linux/fwctl.h +++ b/include/linux/fwctl.h @@ -47,6 +47,14 @@ struct fwctl_ops { * ignore length on input, the core code will handle everything. */ void *(*info)(struct fwctl_uctx *uctx, size_t *length); + /** + * @fw_rpc: Implement FWCTL_RPC. Deliver rpc_in/in_len to the FW and + * return the response and set out_len. rpc_in can be returned as the + * response pointer. Otherwise the returned pointer is freed with + * kvfree(). 
+	 */
+	void *(*fw_rpc)(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+			void *rpc_in, size_t in_len, size_t *out_len);
 };
 
 /**
diff --git a/include/uapi/fwctl/fwctl.h b/include/uapi/fwctl/fwctl.h
index 4052df63f66d..0bec798790a6 100644
--- a/include/uapi/fwctl/fwctl.h
+++ b/include/uapi/fwctl/fwctl.h
@@ -37,6 +37,7 @@
 enum {
 	FWCTL_CMD_BASE = 0,
 	FWCTL_CMD_INFO = 0,
+	FWCTL_CMD_RPC = 1,
 };
 
 enum fwctl_device_type {
@@ -66,4 +67,72 @@ struct fwctl_info {
 };
 #define FWCTL_INFO _IO(FWCTL_TYPE, FWCTL_CMD_INFO)
 
+/**
+ * enum fwctl_rpc_scope - Scope of access for the RPC
+ *
+ * Refer to fwctl.rst for a more detailed discussion of these scopes.
+ */
+enum fwctl_rpc_scope {
+	/**
+	 * @FWCTL_RPC_CONFIGURATION: Device configuration access scope
+	 *
+	 * Read/write access to device configuration. When configuration
+	 * is written to the device it remains in a fully supported state.
+	 */
+	FWCTL_RPC_CONFIGURATION = 0,
+	/**
+	 * @FWCTL_RPC_DEBUG_READ_ONLY: Read only access to debug information
+	 *
+	 * Readable debug information. Debug information is compatible with
+	 * kernel lockdown, and does not disclose any sensitive information. For
+	 * instance exposing any encryption secrets from this information is
+	 * forbidden.
+	 */
+	FWCTL_RPC_DEBUG_READ_ONLY = 1,
+	/**
+	 * @FWCTL_RPC_DEBUG_WRITE: Writable access to lockdown compatible debug information
+	 *
+	 * Allows write access to data in the device which may leave a fully
+	 * supported state. This is intended to permit intensive and possibly
+	 * invasive debugging. This scope will taint the kernel.
+	 */
+	FWCTL_RPC_DEBUG_WRITE = 2,
+	/**
+	 * @FWCTL_RPC_DEBUG_WRITE_FULL: Write access to all debug information
+	 *
+	 * Allows read/write access to everything. Requires CAP_SYS_RAWIO, so
+	 * it is not required to follow lockdown principles. If in doubt
+	 * debugging should be placed in this scope. This scope will taint the
+	 * kernel.
+	 */
+	FWCTL_RPC_DEBUG_WRITE_FULL = 3,
+};
+
+/**
+ * struct fwctl_rpc - ioctl(FWCTL_RPC)
+ * @size: sizeof(struct fwctl_rpc)
+ * @scope: One of enum fwctl_rpc_scope, required scope for the RPC
+ * @in_len: Length of the in memory
+ * @out_len: Length of the out memory
+ * @in: Request message in device specific format
+ * @out: Response message in device specific format
+ *
+ * Deliver a Remote Procedure Call to the device FW and return the response. The
+ * call's parameters and return are marshaled into linear buffers of memory. Any
+ * errno indicates that delivery of the RPC to the device failed. Return status
+ * originating in the device during a successful delivery must be encoded into
+ * out.
+ *
+ * The format of the buffers matches the out_device_type from FWCTL_INFO.
+ */
+struct fwctl_rpc {
+	__u32 size;
+	__u32 scope;
+	__u32 in_len;
+	__u32 out_len;
+	__aligned_u64 in;
+	__aligned_u64 out;
+};
+#define FWCTL_RPC _IO(FWCTL_TYPE, FWCTL_CMD_RPC)
+
 #endif
-- 
Gitee

From 4084f086b43dba0ca4318f5154fe8249d0dc5a44 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Thu, 27 Feb 2025 20:26:34 -0400
Subject: [PATCH 027/243] fwctl: Add documentation

[Upstream commit 18285acc2c047cda2449f426c09fc8969b04b8b1]

Document the purpose and rules for the fwctl subsystem.

Link in kdocs to the doc tree.
Link: https://patch.msgid.link/r/6-v5-642aa0c94070+4447f-fwctl_jgg@nvidia.com Nacked-by: Jakub Kicinski Link: https://lore.kernel.org/r/20240603114250.5325279c@kernel.org Acked-by: Daniel Vetter Link: https://lore.kernel.org/r/ZrHY2Bds7oF7KRGz@phenom.ffwll.local Reviewed-by: Jonathan Cameron Reviewed-by: Dave Jiang Reviewed-by: Shannon Nelson Reviewed-by: Bagas Sanjaya Signed-off-by: Jason Gunthorpe Signed-off-by: huwentao --- Documentation/userspace-api/fwctl/fwctl.rst | 284 ++++++++++++++++++++ Documentation/userspace-api/fwctl/index.rst | 12 + Documentation/userspace-api/index.rst | 1 + MAINTAINERS | 1 + 4 files changed, 298 insertions(+) create mode 100644 Documentation/userspace-api/fwctl/fwctl.rst create mode 100644 Documentation/userspace-api/fwctl/index.rst diff --git a/Documentation/userspace-api/fwctl/fwctl.rst b/Documentation/userspace-api/fwctl/fwctl.rst new file mode 100644 index 000000000000..8c586a8f677d --- /dev/null +++ b/Documentation/userspace-api/fwctl/fwctl.rst @@ -0,0 +1,284 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============== +fwctl subsystem +=============== + +:Author: Jason Gunthorpe + +Overview +======== + +Modern devices contain extensive amounts of FW, and in many cases, are largely +software-defined pieces of hardware. The evolution of this approach is largely a +reaction to Moore's Law where a chip tape out is now highly expensive, and the +chip design is extremely large. Replacing fixed HW logic with a flexible and +tightly coupled FW/HW combination is an effective risk mitigation against chip +respin. Problems in the HW design can be counteracted in device FW. This is +especially true for devices which present a stable and backwards compatible +interface to the operating system driver (such as NVMe). + +The FW layer in devices has grown to incredible size and devices frequently +integrate clusters of fast processors to run it. For example, mlx5 devices have +over 30MB of FW code, and big configurations operate with over 1GB of FW managed +runtime state. + +The availability of such a flexible layer has created quite a variety in the +industry where single pieces of silicon are now configurable software-defined +devices and can operate in substantially different ways depending on the need. +Further, we often see cases where specific sites wish to operate devices in ways +that are highly specialized and require applications that have been tailored to +their unique configuration. + +Further, devices have become multi-functional and integrated to the point they +no longer fit neatly into the kernel's division of subsystems. Modern +multi-functional devices have drivers, such as bnxt/ice/mlx5/pds, that span many +subsystems while sharing the underlying hardware using the auxiliary device +system. + +All together this creates a challenge for the operating system, where devices +have an expansive FW environment that needs robust device-specific debugging +support, and FW-driven functionality that is not well suited to “generic” +interfaces. fwctl seeks to allow access to the full device functionality from +user space in the areas of debuggability, management, and first-boot/nth-boot +provisioning. + +fwctl is aimed at the common device design pattern where the OS and FW +communicate via an RPC message layer constructed with a queue or mailbox scheme. +In this case the driver will typically have some layer to deliver RPC messages +and collect RPC responses from device FW. 
The in-kernel subsystem drivers that +operate the device for its primary purposes will use these RPCs to build their +drivers, but devices also usually have a set of ancillary RPCs that don't really +fit into any specific subsystem. For example, a HW RAID controller is primarily +operated by the block layer but also comes with a set of RPCs to administer the +construction of drives within the HW RAID. + +In the past when devices were more single function, individual subsystems would +grow different approaches to solving some of these common problems. For instance +monitoring device health, manipulating its FLASH, debugging the FW, +provisioning, all have various unique interfaces across the kernel. + +fwctl's purpose is to define a common set of limited rules, described below, +that allow user space to securely construct and execute RPCs inside device FW. +The rules serve as an agreement between the operating system and FW on how to +correctly design the RPC interface. As a uAPI the subsystem provides a thin +layer of discovery and a generic uAPI to deliver the RPCs and collect the +response. It supports a system of user space libraries and tools which will +use this interface to control the device using the device native protocols. + +Scope of Action +--------------- + +fwctl drivers are strictly restricted to being a way to operate the device FW. +It is not an avenue to access random kernel internals, or other operating system +SW states. + +fwctl instances must operate on a well-defined device function, and the device +should have a well-defined security model for what scope within the physical +device the function is permitted to access. For instance, the most complex PCIe +device today may broadly have several function-level scopes: + + 1. A privileged function with full access to the on-device global state and + configuration + + 2. Multiple hypervisor functions with control over itself and child functions + used with VMs + + 3. Multiple VM functions tightly scoped within the VM + +The device may create a logical parent/child relationship between these scopes. +For instance a child VM's FW may be within the scope of the hypervisor FW. It is +quite common in the VFIO world that the hypervisor environment has a complex +provisioning/profiling/configuration responsibility for the function VFIO +assigns to the VM. + +Further, within the function, devices often have RPC commands that fall within +some general scopes of action (see enum fwctl_rpc_scope): + + 1. Access to function & child configuration, FLASH, etc. that becomes live at a + function reset. Access to function & child runtime configuration that is + transparent or non-disruptive to any driver or VM. + + 2. Read-only access to function debug information that may report on FW objects + in the function & child, including FW objects owned by other kernel + subsystems. + + 3. Write access to function & child debug information strictly compatible with + the principles of kernel lockdown and kernel integrity protection. Triggers + a kernel Taint. + + 4. Full debug device access. Triggers a kernel Taint, requires CAP_SYS_RAWIO. + +User space will provide a scope label on each RPC and the kernel must enforce the +above CAPs and taints based on that scope. A combination of kernel and FW can +enforce that RPCs are placed in the correct scope by user space. 
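+
+For example, a driver that validates in the kernel might hardwire the
+enforcement along these lines (an illustrative sketch only; the ``mydrv``
+names are not part of any real driver)::
+
+	static void *mydrv_fw_rpc(struct fwctl_uctx *uctx,
+				  enum fwctl_rpc_scope scope,
+				  void *in, size_t in_len, size_t *out_len)
+	{
+		/* Driver-owned table of the minimum scope each opcode needs */
+		if (mydrv_required_scope(in) > scope)
+			return ERR_PTR(-EPERM);
+		return mydrv_deliver_rpc(uctx, in, in_len, out_len);
+	}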
+
+Denied behavior
+---------------
+
+There are many things this interface must not allow user space to do (without a
+Taint or CAP), broadly derived from the principles of kernel lockdown. Some
+examples:
+
+ 1. DMA to/from arbitrary memory, hang the system, compromise FW integrity with
+    untrusted code, or otherwise compromise device or system security and
+    integrity.
+
+ 2. Provide an abnormal “back door” to kernel drivers. No manipulation of kernel
+    objects owned by kernel drivers.
+
+ 3. Directly configure or otherwise control kernel drivers. A subsystem kernel
+    driver can react to the device configuration at function reset/driver load
+    time, but otherwise must not be coupled to fwctl.
+
+ 4. Operate the HW in a way that overlaps with the core purpose of another
+    primary kernel subsystem, such as read/write to LBAs, send/receive of
+    network packets, or operate an accelerator's data plane.
+
+fwctl is not a replacement for device direct access subsystems like uacce or
+VFIO.
+
+Operations exposed through fwctl's non-tainting interfaces should be fully
+sharable with other users of the device. For instance exposing an RPC through
+fwctl should never prevent a kernel subsystem from also concurrently using that
+same RPC or hardware unit down the road. In such cases fwctl will be less
+important than proper kernel subsystems that eventually emerge. Mistakes in this
+area resulting in clashes will be resolved in favour of a kernel implementation.
+
+fwctl User API
+==============
+
+.. kernel-doc:: include/uapi/fwctl/fwctl.h
+
+sysfs Class
+-----------
+
+fwctl has a sysfs class (/sys/class/fwctl/fwctlNN/) and character devices
+(/dev/fwctl/fwctlNN) with a simple numbered scheme. The character device
+operates the ioctl uAPI described above.
+
+fwctl devices can be related to driver components in other subsystems through
+sysfs::
+
+  $ ls /sys/class/fwctl/fwctl0/device/infiniband/
+  ibp0s10f0
+
+  $ ls /sys/class/infiniband/ibp0s10f0/device/fwctl/
+  fwctl0/
+
+  $ ls /sys/devices/pci0000:00/0000:00:0a.0/fwctl/fwctl0
+  dev  device  power  subsystem  uevent
+
+User space Community
+--------------------
+
+Drawing inspiration from nvme-cli, participating in the kernel side must come
+with a user space in a common TBD git tree, at a minimum to usefully operate the
+kernel driver. Providing such an implementation is a pre-condition to merging a
+kernel driver.
+
+The goal is to build user space community around some of the shared problems
+we all have, and ideally develop some common user space programs with some
+starting themes of:
+
+ - Device in-field debugging
+
+ - HW provisioning
+
+ - VFIO child device profiling before VM boot
+
+ - Confidential Compute topics (attestation, secure provisioning)
+
+that stretch across all subsystems in the kernel. fwupd is a great example of
+how an excellent user space experience can emerge out of kernel-side diversity.
+
+fwctl Kernel API
+================
+
+.. kernel-doc:: drivers/fwctl/main.c
+   :export:
+.. kernel-doc:: include/linux/fwctl.h
+
+fwctl Driver design
+-------------------
+
+In many cases a fwctl driver is going to be part of a larger cross-subsystem
+device possibly using the auxiliary_device mechanism. In that case several
+subsystems are going to be sharing the same device and FW interface layer so the
+device design must already provide for isolation and cooperation between kernel
+subsystems. fwctl should fit into that same model.
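+
+As a sketch, a minimal driver skeleton (again with illustrative ``mydrv``
+names) embeds the core structure first and registers through the fwctl API::
+
+	struct mydrv_fwctl {
+		struct fwctl_device fwctl;	/* must be the first member */
+		struct mydrv_core *core;	/* driver private data */
+	};
+
+	static const struct fwctl_ops mydrv_fwctl_ops = {
+		.device_type = FWCTL_DEVICE_TYPE_ERROR, /* a real driver uses its own type */
+		.uctx_size = sizeof(struct mydrv_uctx),
+		.open_uctx = mydrv_open_uctx,
+		.close_uctx = mydrv_close_uctx,
+		.info = mydrv_info,
+		.fw_rpc = mydrv_fw_rpc,
+	};
+
+	struct mydrv_fwctl *mf = fwctl_alloc_device(parent, &mydrv_fwctl_ops,
+						    struct mydrv_fwctl, fwctl);
+	if (!mf)
+		return -ENOMEM;
+	ret = fwctl_register(&mf->fwctl);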
+
+Part of the driver should include a description of how its scope restrictions
+and security model work. The driver and FW together must ensure that RPCs
+provided by user space are mapped to the appropriate scope. If the validation is
+done in the driver then the validation can read a 'command effects' report from
+the device, or hardwire the enforcement. If the validation is done in the FW,
+then the driver should pass the fwctl_rpc_scope to the FW along with the command.
+
+The driver and FW must cooperate to ensure that either fwctl cannot allocate
+any FW resources, or any resources it does allocate are freed on FD closure. A
+driver primarily constructed around FW RPCs may find that its core PCI function
+and RPC layer belongs under fwctl with auxiliary devices connecting to other
+subsystems.
+
+Each device type must be mindful of Linux's philosophy for stable ABI. The FW
+RPC interface does not have to meet a strictly stable ABI, but it does need to
+meet an expectation that userspace tools that are deployed and in significant
+use don't needlessly break. FW upgrade and kernel upgrade should keep widely
+deployed tooling working.
+
+Development and debugging focused RPCs under more permissive scopes can have
+less stability if the tools using them are only run under exceptional
+circumstances and not for everyday use of the device. Debugging tools may even
+require exact version matching as they may require something similar to DWARF
+debug information from the FW binary.
+
+Security Response
+=================
+
+The kernel remains the gatekeeper for this interface. If violations of the
+scopes, security or isolation principles are found, we have options to let
+devices fix them with a FW update, push a kernel patch to parse and block RPC
+commands, or push a kernel patch to block entire firmware versions/devices.
+
+While the kernel can always directly parse and restrict RPCs, it is expected
+that the existing kernel pattern of allowing drivers to delegate validation to
+FW will remain a useful design.
+
+Existing Similar Examples
+=========================
+
+The approach described in this document is not a new idea. Direct, or near
+direct device access has been offered by the kernel in different areas for
+decades. With more devices wanting to follow this design pattern it is becoming
+clear that it is not entirely well understood and, more importantly, the
+security considerations are not well defined or agreed upon.
+
+Some examples:
+
+ - HW RAID controllers. This includes RPCs to do things like compose drives into
+   a RAID volume, configure RAID parameters, monitor the HW and more.
+
+ - Baseboard managers. RPCs for configuring settings in the device and more
+
+ - NVMe vendor command capsules. nvme-cli provides access to some monitoring
+   functions that different products have defined, but more exist.
+
+ - CXL also has a NVMe-like vendor command system.
+
+ - DRM allows user space drivers to send commands to the device via kernel
+   mediation
+
+ - RDMA allows user space drivers to directly push commands to the device
+   without kernel involvement
+
+ - Various “raw” APIs, raw HID (SDL2), raw USB, NVMe Generic Interface, etc.
+
+The first 4 are examples of areas that fwctl intends to cover. The latter three
+are examples of denied behavior as they fully overlap with the primary purpose
+of a kernel subsystem.
+
+Some key lessons learned from these past efforts are the importance of having a
+common user space project to use as a pre-condition for obtaining a kernel
+driver.
Developing good community around useful software in user space is key to
+getting companies to fund participation to enable their products.
diff --git a/Documentation/userspace-api/fwctl/index.rst b/Documentation/userspace-api/fwctl/index.rst
new file mode 100644
index 000000000000..06959fbf1547
--- /dev/null
+++ b/Documentation/userspace-api/fwctl/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Firmware Control (FWCTL) Userspace API
+======================================
+
+A framework that defines a common set of limited rules allowing user space
+to securely construct and execute RPCs inside device firmware.
+
+.. toctree::
+   :maxdepth: 1
+
+   fwctl
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 72a65db0c498..2125bb520e52 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -26,6 +26,7 @@ place where this information is gathered.
    ELF
    ioctl/index
    iommu
+   fwctl/index
    iommufd
    media/index
    netlink/index
diff --git a/MAINTAINERS b/MAINTAINERS
index cd3481849a04..cac72e421f1a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8646,6 +8646,7 @@ M: Jason Gunthorpe
 M: Saeed Mahameed
 R: Jonathan Cameron
 S: Maintained
+F: Documentation/userspace-api/fwctl/
 F: drivers/fwctl/
 F: include/linux/fwctl.h
 F: include/uapi/fwctl/
-- 
Gitee

From 731e27c0a60c0db9e8997ea1642d19ff34a6e35a Mon Sep 17 00:00:00 2001
From: Shannon Nelson
Date: Tue, 8 Apr 2025 15:33:00 -0700
Subject: [PATCH 028/243] fwctl: Fix repeated device word in log message

[Upstream commit c92ae5d4f53ebf9c32ace69c1f89a47e8714d18b]

Remove the repeated word "device" from a dev_warn() message.

Link: https://patch.msgid.link/r/20250408223300.24561-1-shannon.nelson@amd.com
Signed-off-by: Shannon Nelson
Reviewed-by: Dave Jiang
Signed-off-by: Jason Gunthorpe
Signed-off-by: huwentao
---
 drivers/fwctl/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
index c783e94c9c65..7854ab0369f2 100644
--- a/drivers/fwctl/main.c
+++ b/drivers/fwctl/main.c
@@ -105,7 +105,7 @@ static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
 		if (!test_and_set_bit(0, &fwctl_tainted)) {
 			dev_warn(
 				&fwctl->dev,
-				"%s(%d): has requested full access to the physical device device",
+				"%s(%d): has requested full access to the physical device",
 				current->comm, task_pid_nr(current));
 			add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
 		}
-- 
Gitee

From 0d9fa1760e985d72c2b7cece2366494c1400c440 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng
Date: Thu, 14 Aug 2025 16:53:23 +0800
Subject: [PATCH 029/243] ub: ub_fwctl: Add the ub_fwctl driver and its basic
 features.

commit aabc3d6533494ed9a3ace44576d03572746f91cd openEuler

Add support for loading and unloading the ub_fwctl driver. The driver
binds to an auxiliary device and adapts to the fwctl framework. It also
imposes traffic restrictions on the ioctl command to prevent CMDQ
storms; a condensed sketch of that limiter follows.
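
The sketch below is simplified from ubctl_legitimacy_rpc() in the patch
body and is illustrative only; one timestamp per accepted request is kept
in a kfifo sized to the request budget:

	/* Sketch only; the real function also checks out_len and scope. */
	while (kfifo_peek(&fifo, &ts) &&
	       time_before(ts, jiffies - msecs_to_jiffies(TIME_WINDOW_MS)))
		kfifo_skip(&fifo);	/* expire entries older than the window */
	if (kfifo_is_full(&fifo))
		return -EBADMSG;	/* budget exhausted within the window */
	kfifo_put(&fifo, jiffies);	/* record this request */
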
Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- arch/arm64/configs/tencent.config | 4 + drivers/fwctl/Kconfig | 10 ++ drivers/fwctl/Makefile | 1 + drivers/fwctl/ub/Makefile | 4 + drivers/fwctl/ub/main.c | 215 ++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmd_reg.c | 26 ++++ drivers/fwctl/ub/ub_cmd_reg.h | 13 ++ drivers/fwctl/ub/ub_common.h | 58 ++++++++ include/uapi/fwctl/fwctl.h | 1 + include/uapi/fwctl/ub_fwctl.h | 43 ++++++ 10 files changed, 375 insertions(+) create mode 100644 drivers/fwctl/ub/Makefile create mode 100644 drivers/fwctl/ub/main.c create mode 100644 drivers/fwctl/ub/ub_cmd_reg.c create mode 100644 drivers/fwctl/ub/ub_cmd_reg.h create mode 100644 drivers/fwctl/ub/ub_common.h create mode 100644 include/uapi/fwctl/ub_fwctl.h diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 63cfddd36d14..d122f864bc43 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1866,3 +1866,7 @@ CONFIG_VFIO_DEVICE_CDEV=y # fwctl CONFIG_FWCTL=y + +# UB_FWCTL +CONFIG_FWCTL_UB=m +# end of UB_FWCTL diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig index 37147a695add..24c3c83437a0 100644 --- a/drivers/fwctl/Kconfig +++ b/drivers/fwctl/Kconfig @@ -7,3 +7,13 @@ menuconfig FWCTL support a wide range of lockdown compatible device behaviors including manipulating device FLASH, debugging, and other activities that don't fit neatly into an existing subsystem. + +if FWCTL +config FWCTL_UB + tristate "ub_fwctl depend on fwctl driver" + help + ub_fwctl provides users with various information related to + querying UB (UnifiedBus) registers or devices. + + If you don't know what to do here, say N. +endif diff --git a/drivers/fwctl/Makefile b/drivers/fwctl/Makefile index 1cad210f6ba5..9005cdf31dfd 100644 --- a/drivers/fwctl/Makefile +++ b/drivers/fwctl/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_FWCTL) += fwctl.o +obj-$(CONFIG_FWCTL_UB) += ub/ fwctl-y += main.o diff --git a/drivers/fwctl/ub/Makefile b/drivers/fwctl/ub/Makefile new file mode 100644 index 000000000000..a6669814dbbf --- /dev/null +++ b/drivers/fwctl/ub/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0+ +obj-$(CONFIG_FWCTL_UB) += ub_fwctl.o + +ub_fwctl-y += main.o ub_cmd_reg.o diff --git a/drivers/fwctl/ub/main.c b/drivers/fwctl/ub/main.c new file mode 100644 index 000000000000..63de07d5d028 --- /dev/null +++ b/drivers/fwctl/ub/main.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. + */ + +#include +#include +#include +#include + +#include "ub_common.h" +#include "ub_cmd_reg.h" + +#define MAX_IOCTL_COUNT 1024 +#define TIME_WINDOW_MS 3000 +#define TIME_WINDOW_JIFFIES msecs_to_jiffies(TIME_WINDOW_MS) + +struct ubctl_uctx { + struct fwctl_uctx uctx; +}; + +static int ubctl_open_uctx(struct fwctl_uctx *uctx) +{ + return 0; +} + +static void ubctl_close_uctx(struct fwctl_uctx *uctx) +{ + +} + +static void *ubctl_fw_info(struct fwctl_uctx *uctx, size_t *length) +{ + return NULL; +} + +static int ubctl_legitimacy_rpc(struct ubctl_dev *ucdev, size_t out_len, + enum fwctl_rpc_scope scope) +{ + /* + * Verify if RPC (Remote Procedure Call) requests are valid. + * It determines whether the request is within the allowed time window + * and whether the output length meets the requirements by checking + * the timestamp and output length of the request. 
+	 */
+	unsigned long current_jiffies = jiffies;
+	unsigned long earliest_jiffies = current_jiffies - TIME_WINDOW_JIFFIES;
+	unsigned long record_jiffies = 0;
+	int kfifo_ret = 0;
+
+	while (kfifo_peek(&ucdev->ioctl_fifo, &record_jiffies) && record_jiffies) {
+		if (time_after(record_jiffies, earliest_jiffies))
+			break;
+
+		kfifo_ret = kfifo_get(&ucdev->ioctl_fifo, &record_jiffies);
+		if (!kfifo_ret) {
+			ubctl_err(ucdev, "unexpected events occurred while obtaining data.\n");
+			return -EBADMSG;
+		}
+	}
+
+	if (kfifo_is_full(&ucdev->ioctl_fifo)) {
+		ubctl_err(ucdev, "the current number of valid requests exceeds the limit.\n");
+		return -EBADMSG;
+	}
+
+	kfifo_ret = kfifo_put(&ucdev->ioctl_fifo, current_jiffies);
+	if (!kfifo_ret) {
+		ubctl_err(ucdev, "unexpected events occurred while writing data.\n");
+		return -EBADMSG;
+	}
+
+	if (out_len < sizeof(struct fwctl_rpc_ub_out)) {
+		ubctl_dbg(ucdev, "outlen %zu is less than min value %zu.\n",
+			  out_len, sizeof(struct fwctl_rpc_ub_out));
+		return -EBADMSG;
+	}
+
+	if (scope != FWCTL_RPC_CONFIGURATION &&
+	    scope != FWCTL_RPC_DEBUG_READ_ONLY)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int ubctl_cmd_err(struct ubctl_dev *ucdev, int ret, struct fwctl_rpc_ub_out *out)
+{
+	/* Keep rpc_out as it contains useful debug info for userspace */
+	if (!ret || out->retval)
+		return 0;
+
+	return ret;
+}
+
+static int ub_cmd_do(struct ubctl_dev *ucdev,
+		     struct ubctl_query_cmd_param *query_cmd_param)
+{
+	u32 rpc_cmd = query_cmd_param->in->rpc_cmd;
+	struct ubctl_func_dispatch *ubctl_query_reg = ubctl_get_query_reg_func(
+		ucdev, rpc_cmd);
+	int ret;
+
+	if (ubctl_query_reg && ubctl_query_reg->execute) {
+		ret = ubctl_query_reg->execute(ucdev, query_cmd_param,
+					       ubctl_query_reg);
+	} else {
+		ubctl_err(ucdev, "No corresponding query was found.\n");
+		return -EINVAL;
+	}
+
+	return ubctl_cmd_err(ucdev, ret, query_cmd_param->out);
+}
+
+static void *ubctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+			  void *rpc_in, size_t in_len, size_t *out_len)
+{
+	struct ubctl_dev *ucdev = container_of(uctx->fwctl, struct ubctl_dev,
+					       fwctl);
+	struct ubctl_query_cmd_param query_cmd_param;
+	void *rpc_out;
+	u32 opcode;
+	int ret;
+
+	/* Validate in_len before dereferencing the user supplied header */
+	if (in_len < sizeof(struct fwctl_rpc_ub_in))
+		return ERR_PTR(-EBADMSG);
+
+	opcode = ((struct fwctl_rpc_ub_in *)rpc_in)->rpc_cmd;
+
+	ubctl_dbg(ucdev, "cmdif: opcode 0x%x inlen %zu outlen %zu\n",
+		  opcode, in_len, *out_len);
+
+	ret = ubctl_legitimacy_rpc(ucdev, *out_len, scope);
+	if (ret)
+		return ERR_PTR(ret);
+
+	rpc_out = kvzalloc(*out_len, GFP_KERNEL);
+	if (!rpc_out)
+		return ERR_PTR(-ENOMEM);
+
+	query_cmd_param.out = rpc_out;
+	query_cmd_param.in = rpc_in;
+	query_cmd_param.out_len = *out_len - offsetof(struct fwctl_rpc_ub_out, data);
+	query_cmd_param.in_len = in_len;
+
+	ret = ub_cmd_do(ucdev, &query_cmd_param);
+
+	ubctl_dbg(ucdev, "cmdif: opcode 0x%x retval %d\n", opcode, ret);
+
+	if (ret) {
+		kvfree(rpc_out);
+		return ERR_PTR(ret);
+	}
+
+	return rpc_out;
+}
+
+static const struct fwctl_ops ubctl_ops = {
+	.device_type = FWCTL_DEVICE_TYPE_UB,
+	.uctx_size = sizeof(struct ubctl_uctx),
+	.open_uctx = ubctl_open_uctx,
+	.close_uctx = ubctl_close_uctx,
+	.info = ubctl_fw_info,
+	.fw_rpc = ubctl_fw_rpc,
+};
+
+DEFINE_FREE(ubctl, struct ubctl_dev *, if (_T) fwctl_put(&_T->fwctl))
+
+static int ubctl_probe(struct auxiliary_device *adev,
+		       const struct auxiliary_device_id *id)
+{
+	struct ubctl_dev *ucdev __free(ubctl) = fwctl_alloc_device(
+		adev->dev.parent, &ubctl_ops, struct ubctl_dev, fwctl);
+	int ret;
+
+	if (!ucdev)
+		return -ENOMEM;
+
+	ret = kfifo_alloc(&ucdev->ioctl_fifo, MAX_IOCTL_COUNT, GFP_KERNEL);
+	if (ret) {
+		ubctl_err(ucdev, "kfifo alloc device
failed, retval = %d.\n", ret); + return -ENOMEM; + } + + ret = fwctl_register(&ucdev->fwctl); + if (ret) { + ubctl_err(ucdev, "fwctl register failed, retval = %d.\n", ret); + kfifo_free(&ucdev->ioctl_fifo); + return ret; + } + + ucdev->adev = adev; + auxiliary_set_drvdata(adev, no_free_ptr(ucdev)); + return 0; +} + +static void ubctl_remove(struct auxiliary_device *adev) +{ + struct ubctl_dev *ucdev = auxiliary_get_drvdata(adev); + + fwctl_unregister(&ucdev->fwctl); + kfifo_free(&ucdev->ioctl_fifo); + fwctl_put(&ucdev->fwctl); +} + +static const struct auxiliary_device_id ubctl_id_table[] = { + { + .name = "ubase.fwctl", + }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, ubctl_id_table); + +static struct auxiliary_driver ubctl_driver = { + .name = "ub_fwctl", + .probe = ubctl_probe, + .remove = ubctl_remove, + .id_table = ubctl_id_table, +}; + +module_auxiliary_driver(ubctl_driver); + +MODULE_IMPORT_NS(FWCTL); +MODULE_DESCRIPTION("UB fwctl driver"); +MODULE_AUTHOR("HiSilicon Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c new file mode 100644 index 000000000000..61caefa7c06e --- /dev/null +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + */ + +#include "ub_cmd_reg.h" + +static struct ubctl_func_dispatch g_ubctl_query_reg[] = { + { UTOOL_CMD_QUERY_MAX, NULL, NULL } +}; + +struct ubctl_func_dispatch *ubctl_get_query_reg_func(struct ubctl_dev *ucdev, + u32 rpc_cmd) +{ + u32 i; + + if (!ucdev) + return NULL; + + for (i = 0; i < ARRAY_SIZE(g_ubctl_query_reg); i++) { + if (g_ubctl_query_reg[i].rpc_cmd == rpc_cmd) + return &g_ubctl_query_reg[i]; + } + + return NULL; +} diff --git a/drivers/fwctl/ub/ub_cmd_reg.h b/drivers/fwctl/ub/ub_cmd_reg.h new file mode 100644 index 000000000000..87b3e0183cd9 --- /dev/null +++ b/drivers/fwctl/ub/ub_cmd_reg.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +#ifndef __UB_CMD_REG_H__ +#define __UB_CMD_REG_H__ + +#include "ub_common.h" + +struct ubctl_func_dispatch *ubctl_get_query_reg_func(struct ubctl_dev *ucdev, + u32 rpc_cmd); +#endif diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h new file mode 100644 index 000000000000..7eef8a6ca937 --- /dev/null +++ b/drivers/fwctl/ub/ub_common.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. + */ + +#ifndef __UB_COMMAND_H__ +#define __UB_COMMAND_H__ + +#include +#include +#include + +#include + +#define ubctl_err(ucdev, format, ...) \ + dev_err(&ucdev->fwctl.dev, format, ##__VA_ARGS__) + +#define ubctl_dbg(ucdev, format, ...) \ + dev_dbg(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ + ##__VA_ARGS__) + +#define ubctl_info(ucdev, format, ...) 
\
+	dev_info(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \
+		 ##__VA_ARGS__)
+
+struct ubctl_dev {
+	struct fwctl_device fwctl;
+	DECLARE_KFIFO_PTR(ioctl_fifo, unsigned long);
+	struct auxiliary_device *adev;
+};
+
+struct ubctl_query_cmd_param {
+	size_t in_len;
+	struct fwctl_rpc_ub_in *in;
+	size_t out_len;
+	struct fwctl_rpc_ub_out *out;
+};
+
+struct ubctl_cmd {
+	u32 op_code;
+	u32 is_read;
+	u32 in_len;
+	u32 out_len;
+	void *in_data;
+	void *out_data;
+};
+
+struct ubctl_func_dispatch {
+	u32 rpc_cmd;
+	int (*execute)(struct ubctl_dev *ucdev,
+		       struct ubctl_query_cmd_param *query_cmd_param,
+		       struct ubctl_func_dispatch *query_func);
+	int (*data_deal)(struct ubctl_dev *ucdev,
+			 struct ubctl_query_cmd_param *query_cmd_param,
+			 struct ubctl_cmd *cmd, u32 out_len, u32 offset_index);
+};
+
+#endif
diff --git a/include/uapi/fwctl/fwctl.h b/include/uapi/fwctl/fwctl.h
index 0bec798790a6..bddd8d19695c 100644
--- a/include/uapi/fwctl/fwctl.h
+++ b/include/uapi/fwctl/fwctl.h
@@ -42,6 +42,7 @@ enum {
 
 enum fwctl_device_type {
 	FWCTL_DEVICE_TYPE_ERROR = 0,
+	FWCTL_DEVICE_TYPE_UB = 5,
 };
 
 /**
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
new file mode 100644
index 000000000000..e534f22cf146
--- /dev/null
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright(c) 2025 HiSilicon Technologies CO., Limited. All rights reserved.
+ */
+
+#ifndef _UAPI_UB_FWCTL_H_
+#define _UAPI_UB_FWCTL_H_
+
+#include <linux/types.h>
+
+/**
+ * struct fwctl_rpc_ub_in - ioctl(FWCTL_RPC) input
+ * @rpc_cmd: user-specified opcode
+ * @data_size: Length of @data
+ * @version: Version passed in by the user
+ * @rsvd: reserved
+ * @data: user-specified input data
+ */
+struct fwctl_rpc_ub_in {
+	__u32 rpc_cmd;
+	__u32 data_size;
+	__u32 version;
+	__u32 rsvd;
+	__u32 data[] __counted_by(data_size);
+};
+
+/**
+ * struct fwctl_rpc_ub_out - ioctl(FWCTL_RPC) output
+ * @retval: The value returned by the query; reports errors to userspace
+ * @data_size: Length of @data
+ * @data: data transmitted to users
+ */
+struct fwctl_rpc_ub_out {
+	int retval;
+	__u32 data_size;
+	__u32 data[];
+};
+
+enum ub_fwctl_cmdrpc_type {
+	UTOOL_CMD_QUERY_MAX,
+};
+
+#endif
-- 
Gitee

From 27807fe5083d2709994f1102f99b7d427b803ccc Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng
Date: Thu, 21 Aug 2025 14:38:11 +0800
Subject: [PATCH 030/243] ub: ub_fwctl: Add some simple common framework
 functions

commit fe9e7907e911ee1469da9ee52535d1b0b61bdb3a openEuler

Add some simple common framework functions to ub_fwctl. They mainly
assemble the data structures exchanged with the firmware, invoke the
firmware query interfaces, and return the queried data to user mode; a
hypothetical usage sketch follows.
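
For illustration only, a hypothetical userspace sketch of one such query,
built from the FWCTL_RPC and fwctl_rpc_ub_in/out definitions in this series
(the 632-byte payload size mirrors UBCTL_NL_PKT_STATS_LEN from this patch;
fd handling and parse_stats() are assumptions):

	struct fwctl_rpc_ub_in in = {
		.rpc_cmd = UTOOL_CMD_QUERY_NL_PKT_STATS,
		.data_size = 0,			/* no extra input payload */
	};
	__u8 raw[sizeof(struct fwctl_rpc_ub_out) + 632];
	struct fwctl_rpc_ub_out *out = (void *)raw;
	struct fwctl_rpc rpc = {
		.size = sizeof(rpc),
		.scope = FWCTL_RPC_DEBUG_READ_ONLY,
		.in_len = sizeof(in),
		.out_len = sizeof(raw),
		.in = (uintptr_t)&in,
		.out = (uintptr_t)raw,
	};
	int err = ioctl(fd, FWCTL_RPC, &rpc);

	if (!err && !out->retval)
		parse_stats(out->data, out->data_size); /* hypothetical consumer */
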
Added a basic feature: support querying NL(Network Layer) statistical register information Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/Kconfig | 2 + drivers/fwctl/ub/Makefile | 2 +- drivers/fwctl/ub/ub_cmd_reg.c | 16 +++ drivers/fwctl/ub/ub_cmdq.h | 13 +++ drivers/fwctl/ub/ub_common.c | 197 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_common.h | 28 +++++ include/uapi/fwctl/ub_fwctl.h | 2 + 7 files changed, 259 insertions(+), 1 deletion(-) create mode 100644 drivers/fwctl/ub/ub_cmdq.h create mode 100644 drivers/fwctl/ub/ub_common.c diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig index 24c3c83437a0..88b346631ebe 100644 --- a/drivers/fwctl/Kconfig +++ b/drivers/fwctl/Kconfig @@ -11,6 +11,8 @@ menuconfig FWCTL if FWCTL config FWCTL_UB tristate "ub_fwctl depend on fwctl driver" + depends on UB_UBASE + default n help ub_fwctl provides users with various information related to querying UB (UnifiedBus) registers or devices. diff --git a/drivers/fwctl/ub/Makefile b/drivers/fwctl/ub/Makefile index a6669814dbbf..c2c2008b2653 100644 --- a/drivers/fwctl/ub/Makefile +++ b/drivers/fwctl/ub/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0+ obj-$(CONFIG_FWCTL_UB) += ub_fwctl.o -ub_fwctl-y += main.o ub_cmd_reg.o +ub_fwctl-y += main.o ub_cmd_reg.o ub_common.o diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index 61caefa7c06e..3154d7f10a35 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -3,9 +3,25 @@ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ +#include "ub_cmdq.h" #include "ub_cmd_reg.h" +static int ubctl_query_nl_pkt_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_PKT_STATS_DFX, UBCTL_NL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static struct ubctl_func_dispatch g_ubctl_query_reg[] = { + { UTOOL_CMD_QUERY_NL_PKT_STATS, ubctl_query_nl_pkt_stats_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h new file mode 100644 index 000000000000..05db6aa07d5b --- /dev/null +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + */ + +#ifndef __UB_CMDQ_H__ +#define __UB_CMDQ_H__ + +#define UBCTL_QUERY_NL_PKT_STATS_DFX 0xA001 + +#define UBCTL_NL_PKT_STATS_LEN 632 + +#endif diff --git a/drivers/fwctl/ub/ub_common.c b/drivers/fwctl/ub/ub_common.c new file mode 100644 index 000000000000..87654eb0aefc --- /dev/null +++ b/drivers/fwctl/ub/ub_common.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +#include + +#include "ub_common.h" + +static inline void ubctl_struct_cpu_to_le32(u32 *data, u32 cnt) +{ + for (u32 i = 0; i < cnt; i++) + data[i] = cpu_to_le32(data[i]); +} + +static inline void ubctl_struct_le32_to_cpu(u32 *data, u32 cnt) +{ + for (u32 i = 0; i < cnt; i++) + data[i] = le32_to_cpu(data[i]); +} + +static inline int ubctl_ubase_cmd_send_param_check(struct auxiliary_device *adev, + struct ubctl_cmd *cmd) +{ + if (!adev || !cmd) + return -EINVAL; + + if (!cmd->in_data || !cmd->out_data) + return -EINVAL; + + return 0; +} + +int ubctl_ubase_cmd_send(struct auxiliary_device *adev, struct ubctl_cmd *cmd) +{ + struct ubase_cmd_buf in, out; + int ret; + + if (ubctl_ubase_cmd_send_param_check(adev, cmd)) + return -EINVAL; + + ubctl_struct_cpu_to_le32(cmd->in_data, cmd->in_len / sizeof(u32)); + ubase_fill_inout_buf(&in, cmd->op_code, cmd->is_read, cmd->in_len, + cmd->in_data); + ubase_fill_inout_buf(&out, cmd->op_code, cmd->is_read, cmd->out_len, + cmd->out_data); + + ret = ubase_cmd_send_inout(adev, &in, &out); + if (ret) + return ret; + + ubctl_struct_le32_to_cpu(cmd->out_data, cmd->out_len / sizeof(u32)); + + return 0; +} + +int ubctl_fill_cmd(struct ubctl_cmd *cmd, void *cmd_in, void *cmd_out, + u32 out_len, u32 is_read) +{ + if (!cmd || !cmd_in || !cmd_out) + return -EINVAL; + + cmd->is_read = is_read; + cmd->in_data = cmd_in; + cmd->out_data = cmd_out; + cmd->in_len = out_len; + cmd->out_len = out_len; + + return 0; +} + +static int ubctl_query_param_check(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func, + struct ubctl_query_dp *query_dp) +{ + if (!ucdev || !query_cmd_param || !query_func || !query_dp) + return -EINVAL; + + if (!query_cmd_param->in || !query_cmd_param->out) { + ubctl_err(ucdev, "ubctl in or out is null.\n"); + return -EINVAL; + } + + if (!query_func->data_deal) { + ubctl_err(ucdev, "ubctl data deal func is null.\n"); + return -EINVAL; + } + + return 0; +} + +static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_query_dp *query_dp, + struct ubctl_query_cmd_dp *cmd_data, u32 offset) +{ + int *retval = &query_cmd_param->out->retval; + struct ubctl_cmd cmd = {}; + int ret = 0; + + cmd.op_code = query_dp->op_code; + ret = ubctl_fill_cmd(&cmd, cmd_data->cmd_in, cmd_data->cmd_out, + query_dp->out_len, query_dp->is_read); + if (ret) { + ubctl_err(ucdev, "ubctl fill cmd failed.\n"); + return ret; + } + + *retval = ubctl_ubase_cmd_send(ucdev->adev, &cmd); + if (*retval) { + ubctl_err(ucdev, "ubctl ubase cmd send failed, retval = %d.\n", + *retval); + return -EINVAL; + } + + ret = cmd_data->query_func->data_deal(ucdev, query_cmd_param, &cmd, + query_dp->out_len, offset); + if (ret) + ubctl_err(ucdev, "ubctl data deal failed, ret = %d.\n", ret); + + return ret; +} + +int ubctl_query_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func, + struct ubctl_query_dp *query_dp, u32 query_dp_num) +{ + u32 offset = 0; + int ret = 0; + u32 i; + + ret = ubctl_query_param_check(ucdev, query_cmd_param, query_func, query_dp); + if (ret) { + ubctl_err(ucdev, "ubctl query param check failed, ret = %d.\n", ret); + return ret; + } + + for (i = 0; i < query_dp_num; i++) { + if (query_cmd_param->in->data_size > query_dp[i].out_len) { + ubctl_err(ucdev, "ubctl in data size is bigger than out len.\n"); + return -EINVAL; + } + + void *cmd_in __free(kvfree) = kvzalloc(query_dp[i].out_len, 
GFP_KERNEL); + if (!cmd_in) + return -ENOMEM; + + void *cmd_out __free(kvfree) = kvzalloc(query_dp[i].out_len, GFP_KERNEL); + if (!cmd_out) + return -ENOMEM; + + struct ubctl_query_cmd_dp cmd_dp = (struct ubctl_query_cmd_dp) { + .cmd_in = cmd_in, + .cmd_out = cmd_out, + .query_func = query_func, + }; + + memcpy(cmd_dp.cmd_in, query_cmd_param->in->data, query_cmd_param->in->data_size); + ret = ubctl_cmd_send_deal(ucdev, query_cmd_param, &query_dp[i], + &cmd_dp, offset); + if (ret) + return ret; + + offset += query_dp[i].out_len / sizeof(u32); + } + return 0; +} + +int ubctl_query_data_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ + if (!ucdev || !query_cmd_param || !cmd) + return -EINVAL; + + if (!query_cmd_param->in || !query_cmd_param->out) { + ubctl_err(ucdev, "ubctl in or out is null.\n"); + return -EINVAL; + } + + if (cmd->out_len != out_len) { + ubctl_err(ucdev, "out data size is not equal to out len.\n"); + return -EINVAL; + } + + if ((offset * (u32)sizeof(u32) + out_len) > query_cmd_param->out_len) { + ubctl_err(ucdev, "offset size is bigger than user out len.\n"); + return -EINVAL; + } + + memcpy(&query_cmd_param->out->data[offset], cmd->out_data, cmd->out_len); + query_cmd_param->out->data_size += cmd->out_len; + + return 0; +} diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index 7eef8a6ca937..0eda05fc5932 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -12,6 +12,8 @@ #include +#define UBCTL_READ true + #define ubctl_err(ucdev, format, ...) \ dev_err(&ucdev->fwctl.dev, format, ##__VA_ARGS__) @@ -55,4 +57,30 @@ struct ubctl_func_dispatch { struct ubctl_cmd *cmd, u32 out_len, u32 offset_index); }; +struct ubctl_query_dp { + u32 op_code; + u32 out_len; + bool is_read; + void *data; + u32 data_len; +}; + +struct ubctl_query_cmd_dp { + struct ubctl_func_dispatch *query_func; + void *cmd_in; + void *cmd_out; +}; + +int ubctl_ubase_cmd_send(struct auxiliary_device *adev, + struct ubctl_cmd *cmd); +int ubctl_fill_cmd(struct ubctl_cmd *cmd, void *cmd_in, void *cmd_out, + u32 out_len, u32 is_read); +int ubctl_query_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func, + struct ubctl_query_dp *query_dp, u32 query_dp_num); +int ubctl_query_data_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset); + #endif diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index e534f22cf146..011c4d48bada 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -37,6 +37,8 @@ struct fwctl_rpc_ub_out { }; enum ub_fwctl_cmdrpc_type { + UTOOL_CMD_QUERY_NL_PKT_STATS = 0x0002, + UTOOL_CMD_QUERY_MAX, }; -- Gitee From 06106b1a7ab37057ac87ebc0e2330ca0455b3108 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Thu, 21 Aug 2025 15:02:31 +0800 Subject: [PATCH 031/243] ub: ub_fwctl: supports querying NL, TA, DL related register information commit 7607eafee2394d691fe29632a217e8371feec2ce openEuler 1. Support querying NL SSU related register information, and exception message statistical register information. 2. Support querying TA(Transaction Layer) layer statistical register information and abnormal message statistical register information. 3. Support querying DL(Data Link&Physical Layer) statistical register information, port link status, lane information, port error rate, BIST testing. 
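
For illustration, a minimal user-space sketch of driving one of these
queries through the fwctl RPC path. The fwctl_rpc_call() helper and the
4 KiB output payload size are assumptions made for this sketch (the
helper stands in for the ioctl(FWCTL_RPC) plumbing); they are not part
of this patch:

#include <stdio.h>
#include <stdlib.h>
#include <fwctl/ub_fwctl.h>

/* assumed wrapper that issues ioctl(FWCTL_RPC) on an open fwctl fd */
int fwctl_rpc_call(int fd, void *in, size_t in_len,
		   void *out, size_t out_len);

static int query_nl_regs(int fd)
{
	size_t out_len = sizeof(struct fwctl_rpc_ub_out) + 4096;
	struct fwctl_rpc_ub_in in = {
		.rpc_cmd = UTOOL_CMD_QUERY_NL,
		.data_size = 0,	/* this query needs no input payload */
	};
	struct fwctl_rpc_ub_out *out = calloc(1, out_len);
	int ret;

	if (!out)
		return -1;

	ret = fwctl_rpc_call(fd, &in, sizeof(in), out, out_len);
	if (!ret && !out->retval)
		printf("NL dump: %u bytes of register data\n",
		       out->data_size);

	free(out);
	return ret;
}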
Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd_reg.c | 202 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 26 +++++ drivers/fwctl/ub/ub_common.h | 1 + include/uapi/fwctl/ub_fwctl.h | 16 +++ 4 files changed, 245 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index 3154d7f10a35..17a695a81c0b 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -6,6 +6,20 @@ #include "ub_cmdq.h" #include "ub_cmd_reg.h" +static int ubctl_query_nl_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_PKT_STATS_DFX, UBCTL_NL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_NL_SSU_STATS_DFX, UBCTL_NL_SSU_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_NL_ABN_DFX, UBCTL_NL_ABN_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static int ubctl_query_nl_pkt_stats_data(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_func_dispatch *query_func) @@ -18,9 +32,197 @@ static int ubctl_query_nl_pkt_stats_data(struct ubctl_dev *ucdev, query_dp, ARRAY_SIZE(query_dp)); } +static int ubctl_query_nl_ssu_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_STATS_DFX, UBCTL_NL_SSU_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_nl_abn_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_ABN_DFX, UBCTL_NL_ABN_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_PKT_STATS_DFX, UBCTL_DL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_REPL_DFX, UBCTL_DL_REPL_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_LINK_STATUS_DFX, UBCTL_DL_LINK_STATUS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_LANE_DFX, UBCTL_DL_LANE_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_BIT_ERR_DFX, UBCTL_DL_BIT_ERR_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_pkt_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_PKT_STATS_DFX, UBCTL_DL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_REPL_DFX, UBCTL_DL_REPL_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_link_status_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_LINK_STATUS_DFX, UBCTL_DL_LINK_STATUS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return 
ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_lane_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_LANE_DFX, UBCTL_DL_LANE_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_bit_err_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_BIT_ERR_DFX, UBCTL_DL_BIT_ERR_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_bist_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_CONF_DL_BIST_DFX, UBCTL_DL_BIST_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_conf_dl_bist_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_CONF_DL_BIST_DFX, UBCTL_DL_BIST_LEN, UBCTL_WRITE, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_dl_bist_err_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_BIST_ERR_DFX, UBCTL_DL_BIST_ERR_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ta_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ta_pkt_stats(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ta_abn_stats(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static struct ubctl_func_dispatch g_ubctl_query_reg[] = { + { UTOOL_CMD_QUERY_NL, ubctl_query_nl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_PKT_STATS, ubctl_query_nl_pkt_stats_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_STATS, ubctl_query_nl_ssu_stats_data, + ubctl_query_data_deal }, + { 
UTOOL_CMD_QUERY_NL_ABN, ubctl_query_nl_abn_data, ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_DL, ubctl_query_dl_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_PKT_STATS, ubctl_query_dl_pkt_stats_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_LINK_STATUS, ubctl_query_dl_link_status_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_LANE, ubctl_query_dl_lane_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_BIT_ERR, ubctl_query_dl_bit_err_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_BIST, ubctl_query_dl_bist_data, + ubctl_query_data_deal }, + { UTOOL_CMD_CONF_DL_BIST, ubctl_conf_dl_bist_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DL_BIST_ERR, ubctl_query_dl_bist_err_data, + ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_TA, ubctl_query_ta_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TA_PKT_STATS, ubctl_query_ta_pkt_stats, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TA_ABN_STATS, ubctl_query_ta_abn_stats, + ubctl_query_data_deal }, { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index 05db6aa07d5b..c4420884928c 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -7,7 +7,33 @@ #define __UB_CMDQ_H__ #define UBCTL_QUERY_NL_PKT_STATS_DFX 0xA001 +#define UBCTL_QUERY_NL_SSU_STATS_DFX 0xA002 +#define UBCTL_QUERY_NL_ABN_DFX 0xA003 + +#define UBCTL_QUERY_TA_PKT_STATS_DFX 0xA006 +#define UBCTL_QUERY_TA_ABN_STATS_DFX 0xA023 + +#define UBCTL_QUERY_DL_PKT_STATS_DFX 0xA007 +#define UBCTL_QUERY_DL_LINK_STATUS_DFX 0xA008 +#define UBCTL_QUERY_DL_LANE_DFX 0xA009 +#define UBCTL_QUERY_DL_BIT_ERR_DFX 0xA00A +#define UBCTL_QUERY_CONF_DL_BIST_DFX 0xA020 +#define UBCTL_QUERY_DL_BIST_ERR_DFX 0xA021 +#define UBCTL_QUERY_DL_REPL_DFX 0xA022 #define UBCTL_NL_PKT_STATS_LEN 632 +#define UBCTL_NL_SSU_STATS_LEN 408 +#define UBCTL_NL_ABN_LEN 56 + +#define UBCTL_TA_PKT_STATS_LEN 920 +#define UBCTL_TA_ABN_STATS_LEN 168 + +#define UBCTL_DL_PKT_STATS_LEN 984 +#define UBCTL_DL_REPL_LEN 120 +#define UBCTL_DL_LINK_STATUS_LEN 24 +#define UBCTL_DL_LANE_LEN 24 +#define UBCTL_DL_BIT_ERR_LEN 56 +#define UBCTL_DL_BIST_LEN 24 +#define UBCTL_DL_BIST_ERR_LEN 24 #endif diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index 0eda05fc5932..cde0a09b85e2 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -13,6 +13,7 @@ #include #define UBCTL_READ true +#define UBCTL_WRITE false #define ubctl_err(ucdev, format, ...) 
\
 	dev_err(&ucdev->fwctl.dev, format, ##__VA_ARGS__)
 
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index 011c4d48bada..3f540cb826b3 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -37,7 +37,23 @@ struct fwctl_rpc_ub_out {
 };
 
 enum ub_fwctl_cmdrpc_type {
+	UTOOL_CMD_QUERY_NL = 0x0001,
 	UTOOL_CMD_QUERY_NL_PKT_STATS = 0x0002,
+	UTOOL_CMD_QUERY_NL_SSU_STATS = 0x0003,
+	UTOOL_CMD_QUERY_NL_ABN = 0x0004,
+
+	UTOOL_CMD_QUERY_DL = 0x0011,
+	UTOOL_CMD_QUERY_DL_PKT_STATS = 0x0012,
+	UTOOL_CMD_QUERY_DL_LINK_STATUS = 0x0013,
+	UTOOL_CMD_QUERY_DL_LANE = 0x0014,
+	UTOOL_CMD_QUERY_DL_BIT_ERR = 0x0015,
+	UTOOL_CMD_QUERY_DL_BIST = 0x0017,
+	UTOOL_CMD_CONF_DL_BIST = 0x0018,
+	UTOOL_CMD_QUERY_DL_BIST_ERR = 0x0019,
+
+	UTOOL_CMD_QUERY_TA = 0x0031,
+	UTOOL_CMD_QUERY_TA_PKT_STATS = 0x0032,
+	UTOOL_CMD_QUERY_TA_ABN_STATS = 0x0033,
 	UTOOL_CMD_QUERY_MAX,
 };
 
-- 
Gitee
From 62fec57e914384beb5a99e9dae5ec358924755a5 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng 
Date: Fri, 22 Aug 2025 15:24:10 +0800
Subject: [PATCH 032/243] ub: ub_fwctl: supports querying TP, BA related
 register information

commit 12da5b6ce2af648d4cacb5429731e6542c30f535 openEuler

1. Support querying statistical register information for TP(Transport)
TX and RX directions, as well as abnormal message statistical register
information.
2. Support querying BA(Bus Adapter) statistical register information,
querying MAR DFX including entry status, CXT status, traffic
statistics, error information, etc.
3. Support the UB Memory PMU function, which calculates traffic
information in UB Memory, including write traffic, read traffic, total
traffic, average read payload length, average write payload length,
average payload length, write delay, and read delay.

Signed-off-by: Jiaqi Cheng 
Signed-off-by: huwentao 
---
 drivers/fwctl/ub/ub_cmd_reg.c | 248 ++++++++++++++++++++++++++++++++++
 drivers/fwctl/ub/ub_cmdq.h    |  32 +++++
 drivers/fwctl/ub/ub_common.c  |  17 ++-
 include/uapi/fwctl/ub_fwctl.h |  22 +++
 4 files changed, 318 insertions(+), 1 deletion(-)

diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c
index 17a695a81c0b..6bb8bf7625be 100644
--- a/drivers/fwctl/ub/ub_cmd_reg.c
+++ b/drivers/fwctl/ub/ub_cmd_reg.c
@@ -157,6 +157,120 @@ static int ubctl_query_dl_bist_err_data(struct ubctl_dev *ucdev,
 				query_dp, ARRAY_SIZE(query_dp));
 }
 
+static int ubctl_query_dp_deal(struct ubctl_dev *ucdev,
+			struct ubctl_query_cmd_param *query_cmd_param,
+			struct ubctl_func_dispatch *query_func,
+			struct ubctl_query_dp *query_dp, u32 query_dp_num)
+{
+#define UBCTL_TP_RX_BANK_NUM 3U
+
+	u32 *rx_bank_id __free(kvfree) = kvzalloc(sizeof(u32) * UBCTL_TP_RX_BANK_NUM, GFP_KERNEL);
+	u32 bank_idx = 0;
+	u32 bank_id = 0;
+	int ret = 0;
+	u32 i;
+
+	if (!rx_bank_id)
+		return -ENOMEM;
+
+	for (i = 0; i < query_dp_num; i++) {
+		if (query_dp[i].op_code != UBCTL_QUERY_TP_RX_BANK_DFX)
+			continue;
+		if (bank_idx >= UBCTL_TP_RX_BANK_NUM) {
+			ubctl_err(ucdev, "bank_idx is out of bounds: %u.\n", bank_idx);
+			return -EINVAL;
+		}
+
+		rx_bank_id[bank_idx] = bank_id++;
+		query_dp[i].data = (void *)&rx_bank_id[bank_idx++];
+		query_dp[i].data_len = (u32)sizeof(u32);
+	}
+
+	ret = ubctl_query_data(ucdev, query_cmd_param, query_func,
+			       query_dp, query_dp_num);
+	if (ret)
+		ubctl_err(ucdev, "ubctl query data failed, ret = %d.\n", ret);
+
+	return ret;
+}
+
+static int ubctl_query_tp_data(struct ubctl_dev *ucdev,
+			struct ubctl_query_cmd_param *query_cmd_param,
+			struct ubctl_func_dispatch *query_func)
+{
+	struct ubctl_query_dp query_dp[] = {
{ UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_ABN_STATS_DFX, UBCTL_TP_REG_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_dp_deal(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_tp_tx_route_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, query_dp, + ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_tp_abn_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_ABN_STATS_DFX, UBCTL_TP_REG_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, query_dp, + ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_tp_pkt_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_tp_rx_bank_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_dp_deal(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static int ubctl_query_ta_data(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_func_dispatch *query_func) @@ -194,6 +308,115 @@ static int ubctl_query_ta_abn_stats(struct ubctl_dev *ucdev, query_dp, ARRAY_SIZE(query_dp)); } +static int ubctl_query_ba_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_BA_PKT_STATS_DFX, UBCTL_BA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_BA_MAR_DFX, 
UBCTL_BA_MAR_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ba_pkt_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_BA_PKT_STATS_DFX, UBCTL_BA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_conf_ba_mar_perf(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_CONF_BA_PERF_DFX, UBCTL_CONF_BA_MAR_PERF_LEN, UBCTL_WRITE, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ba_mar_perf(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_BA_MAR_PERF_DFX, UBCTL_QUERY_BA_MAR_PERF_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ba_mar_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_BA_MAR_DFX, UBCTL_BA_MAR_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_mar_cyc_en_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_MAR_CYC_EN_DFX, UBCTL_MAR_CYC_EN_LEN, UBCTL_READ, NULL, 0 }, + }; + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_conf_mar_cyc_en_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_MAR_CYC_EN_DFX, UBCTL_MAR_CYC_EN_LEN, UBCTL_WRITE, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_mar_table_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ +#define UBCTL_UB_MEM_TABLE_ENTRY_LEN 16U +#define UBCTL_UB_MEM_TABLE_ENTRY_NUM 7U + + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_MAR_TABLE_DFX, UBCTL_MAR_TABLE_LEN, UBCTL_READ, NULL, 0 }, + }; + struct fwctl_pkt_in_table *mar_table = + (struct fwctl_pkt_in_table *)(query_cmd_param->in->data); + + if (query_cmd_param->in->data_size != sizeof(*mar_table)) { + ubctl_err(ucdev, "user data of mar table is invalid.\n"); + return -EINVAL; + } + + if (mar_table->table_num == UBCTL_UB_MEM_TABLE_ENTRY_NUM) + mar_table->index *= UBCTL_UB_MEM_TABLE_ENTRY_LEN; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_NL, ubctl_query_nl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_PKT_STATS, ubctl_query_nl_pkt_stats_data, @@ -218,12 +441,37 @@ static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { 
UTOOL_CMD_QUERY_DL_BIST_ERR, ubctl_query_dl_bist_err_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TP, ubctl_query_tp_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TP_PKT_STATS, ubctl_query_tp_pkt_stats_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TP_ABN_STATS, ubctl_query_tp_abn_stats_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TP_TX_ROUTE, ubctl_query_tp_tx_route_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TP_RX_BANK, ubctl_query_tp_rx_bank_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_TA, ubctl_query_ta_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_TA_PKT_STATS, ubctl_query_ta_pkt_stats, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_TA_ABN_STATS, ubctl_query_ta_abn_stats, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA, ubctl_query_ba_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA_PKT_STATS, ubctl_query_ba_pkt_stats_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA_MAR, ubctl_query_ba_mar_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA_MAR_TABLE, ubctl_query_mar_table_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA_MAR_CYC_EN, ubctl_query_mar_cyc_en_data, + ubctl_query_data_deal }, + { UTOOL_CMD_CONF_BA_MAR_CYC_EN, ubctl_conf_mar_cyc_en_data, + ubctl_query_data_deal }, + { UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS, ubctl_conf_ba_mar_perf, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS, ubctl_query_ba_mar_perf, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index c4420884928c..026f751d365b 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -10,6 +10,14 @@ #define UBCTL_QUERY_NL_SSU_STATS_DFX 0xA002 #define UBCTL_QUERY_NL_ABN_DFX 0xA003 +#define UBCTL_QUERY_TP_TX_DFX 0xA004 +#define UBCTL_QUERY_TP_RX_DFX 0xA005 +#define UBCTL_QUERY_TP_TX_ROUTE_DFX 0xA01A +#define UBCTL_QUERY_TP_RX_BANK_DFX 0xA01C +#define UBCTL_QUERY_TP_ABN_STATS_DFX 0xA01D +#define UBCTL_QUERY_TP_RQM_DFX 0xA01E +#define UBCTL_QUERY_TP_STATE_DFX 0xA024 + #define UBCTL_QUERY_TA_PKT_STATS_DFX 0xA006 #define UBCTL_QUERY_TA_ABN_STATS_DFX 0xA023 @@ -21,10 +29,27 @@ #define UBCTL_QUERY_DL_BIST_ERR_DFX 0xA021 #define UBCTL_QUERY_DL_REPL_DFX 0xA022 +#define UBCTL_QUERY_BA_PKT_STATS_DFX 0xA00B +#define UBCTL_QUERY_BA_MAR_DFX 0xA00C +#define UBCTL_QUERY_MAR_TABLE_DFX 0xA012 +#define UBCTL_QUERY_MAR_CYC_EN_DFX 0xA013 +#define UBCTL_CONF_BA_PERF_DFX 0xA014 +#define UBCTL_QUERY_BA_MAR_PERF_DFX 0xA015 + #define UBCTL_NL_PKT_STATS_LEN 632 #define UBCTL_NL_SSU_STATS_LEN 408 #define UBCTL_NL_ABN_LEN 56 +#define UBCTL_TP_TX_STATS_LEN 904 +#define UBCTL_TP_RX_STATS_LEN 704 +#define UBCTL_TP_TX_ABN_LEN 948 +#define UBCTL_TP_RX_ABN_LEN 760 +#define UBCTL_TP_REG_LEN 24 +#define UBCTL_TP_TX_ROUTE_LEN 216 +#define UBCTL_TP_RX_BANK_LEN 408 +#define UBCTL_TP_RQM_LEN 88 +#define UBCTL_TP_STATE_DFX_LEN 376 + #define UBCTL_TA_PKT_STATS_LEN 920 #define UBCTL_TA_ABN_STATS_LEN 168 @@ -36,4 +61,11 @@ #define UBCTL_DL_BIST_LEN 24 #define UBCTL_DL_BIST_ERR_LEN 24 +#define UBCTL_BA_PKT_STATS_LEN 792 +#define UBCTL_BA_MAR_LEN 440 +#define UBCTL_MAR_TABLE_LEN 88 +#define UBCTL_MAR_CYC_EN_LEN 24 +#define UBCTL_CONF_BA_MAR_PERF_LEN 24 +#define UBCTL_QUERY_BA_MAR_PERF_LEN 56 + #endif diff --git a/drivers/fwctl/ub/ub_common.c b/drivers/fwctl/ub/ub_common.c index 87654eb0aefc..23d67829c8de 100644 --- a/drivers/fwctl/ub/ub_common.c +++ b/drivers/fwctl/ub/ub_common.c @@ -6,6 +6,7 @@ #include #include "ub_common.h" +#include "ub_cmdq.h" static inline void 
ubctl_struct_cpu_to_le32(u32 *data, u32 cnt)
 {
@@ -122,6 +123,20 @@ static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev,
 	return ret;
 }
 
+static void ubctl_cmd_data_deal(struct ubctl_query_cmd_param *query_cmd_param,
+				struct ubctl_query_dp *query_dp,
+				struct ubctl_query_cmd_dp *cmd_dp)
+{
+	if (!query_dp->data) {
+		memcpy(cmd_dp->cmd_in, query_cmd_param->in->data, query_cmd_param->in->data_size);
+		return;
+	}
+
+	if (query_dp->op_code == UBCTL_QUERY_TP_RX_BANK_DFX &&
+	    query_dp->data_len == (u32)sizeof(u32))
+		memcpy(cmd_dp->cmd_in, query_dp->data, sizeof(u32));
+}
+
 int ubctl_query_data(struct ubctl_dev *ucdev,
 		     struct ubctl_query_cmd_param *query_cmd_param,
 		     struct ubctl_func_dispatch *query_func,
@@ -157,7 +172,7 @@ int ubctl_query_data(struct ubctl_dev *ucdev,
 			.query_func = query_func,
 		};
 
-		memcpy(cmd_dp.cmd_in, query_cmd_param->in->data, query_cmd_param->in->data_size);
+		ubctl_cmd_data_deal(query_cmd_param, &query_dp[i], &cmd_dp);
 		ret = ubctl_cmd_send_deal(ucdev, query_cmd_param, &query_dp[i],
 					  &cmd_dp, offset);
 		if (ret)
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index 3f540cb826b3..e7aa1df3a660 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -42,6 +42,12 @@ enum ub_fwctl_cmdrpc_type {
 	UTOOL_CMD_QUERY_NL_SSU_STATS = 0x0003,
 	UTOOL_CMD_QUERY_NL_ABN = 0x0004,
 
+	UTOOL_CMD_QUERY_TP = 0x0021,
+	UTOOL_CMD_QUERY_TP_PKT_STATS = 0x0022,
+	UTOOL_CMD_QUERY_TP_TX_ROUTE = 0x0023,
+	UTOOL_CMD_QUERY_TP_ABN_STATS = 0x0024,
+	UTOOL_CMD_QUERY_TP_RX_BANK = 0x0025,
+
 	UTOOL_CMD_QUERY_DL = 0x0011,
 	UTOOL_CMD_QUERY_DL_PKT_STATS = 0x0012,
 	UTOOL_CMD_QUERY_DL_LINK_STATUS = 0x0013,
@@ -55,7 +61,23 @@ enum ub_fwctl_cmdrpc_type {
 	UTOOL_CMD_QUERY_TA_PKT_STATS = 0x0032,
 	UTOOL_CMD_QUERY_TA_ABN_STATS = 0x0033,
 
+	UTOOL_CMD_QUERY_BA = 0x0041,
+	UTOOL_CMD_QUERY_BA_PKT_STATS = 0x0042,
+	UTOOL_CMD_QUERY_BA_MAR = 0x0043,
+	UTOOL_CMD_QUERY_BA_MAR_TABLE = 0x0044,
+	UTOOL_CMD_QUERY_BA_MAR_CYC_EN = 0x0045,
+	UTOOL_CMD_CONF_BA_MAR_CYC_EN = 0x0046,
+	UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS = 0x0047,
+	UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS = 0x0048,
+
 	UTOOL_CMD_QUERY_MAX,
 };
 
+struct fwctl_pkt_in_table {
+	__u32 port_id;
+	__u32 table_num;
+	__u32 index;
+};
+
+
 #endif
-- 
Gitee
From 583ce1ad460b8dfe88df7dc512fd3b477f9b8593 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng 
Date: Wed, 20 Aug 2025 17:13:53 +0800
Subject: [PATCH 033/243] ub: ub_fwctl: supports querying and configuring some
 scattered registers.

commit bb849315817d8c5dd1f7759ec06016d434ed449e openEuler

1. Support querying QOS(Quality of Service) related registers.
2. Support querying and configuring the SCC debug switch.
3. Support querying UBOMMU(UB Memory Management Unit) related register
information.
4. Support querying software and hardware information about the UB port
link setup process.
5. Support querying multi-bit ECC statistics.
6. Support querying queue status registers.
7. Support the UBoE port PRBS packet self-test.
8. Support configuring UBoE port loopback.
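
All of the commands above follow the same dispatch pattern: each RPC
opcode gets an execute callback that lists the firmware opcode/length
pairs to fetch in a ubctl_query_dp table and funnels them through
ubctl_query_data(). As a sketch, a hypothetical new read-only command
would look like this (UTOOL_CMD_QUERY_FOO, UBCTL_QUERY_FOO_DFX and
UBCTL_FOO_LEN are made-up names for illustration):

static int ubctl_query_foo_data(struct ubctl_dev *ucdev,
			struct ubctl_query_cmd_param *query_cmd_param,
			struct ubctl_func_dispatch *query_func)
{
	struct ubctl_query_dp query_dp[] = {
		{ UBCTL_QUERY_FOO_DFX, UBCTL_FOO_LEN, UBCTL_READ, NULL, 0 },
	};

	return ubctl_query_data(ucdev, query_cmd_param, query_func,
				query_dp, ARRAY_SIZE(query_dp));
}

plus one table entry wiring the RPC opcode to the callback:

	{ UTOOL_CMD_QUERY_FOO, ubctl_query_foo_data, ubctl_query_data_deal },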
Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd_reg.c | 177 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 29 ++++++ include/uapi/fwctl/ub_fwctl.h | 23 +++++ 3 files changed, 229 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index 6bb8bf7625be..bfc480c73c1d 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -417,6 +417,161 @@ static int ubctl_query_mar_table_data(struct ubctl_dev *ucdev, query_dp, ARRAY_SIZE(query_dp)); } +static int ubctl_query_qos_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_QOS_DFX, UBCTL_QOS_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_scc_debug(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_SCC_DEBUG_DFX, UBCTL_SCC_DEBUG_EN_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_config_scc_debug(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_SCC_DEBUG_DFX, UBCTL_SCC_DEBUG_EN_LEN, UBCTL_WRITE, NULL, 0 }, + }; + + if (query_cmd_param->in->data_size != sizeof(struct fwctl_pkt_in_enable)) { + ubctl_err(ucdev, "user data of scc debug is invalid.\n"); + return -EINVAL; + } + u8 *scc_debug_en = (u8 *)(query_cmd_param->in->data); + + if (*scc_debug_en > 1) + return -EINVAL; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ubommu_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_UBOMMU_DFX, UBCTL_UBOMMU_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_port_info_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_PORT_INFO_DFX, UBCTL_PORT_INFO_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, query_dp, + ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_ecc_2b_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_ECC_2B_DFX, UBCTL_ECC_2B_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, query_dp, + ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_queue_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_QUEUE_DFX, UBCTL_QUEUE_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, query_dp, + ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_loopback(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct 
ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_LOOPBACK, UBCTL_QUERY_DEBUG_EN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_config_loopback(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_LOOPBACK, UBCTL_QUERY_DEBUG_EN, UBCTL_WRITE, NULL, 0 }, + }; + int ret; + + ret = ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); + + if (query_cmd_param->out->retval == -EBUSY) + ubctl_err(ucdev, "Current port has been enabled for another loopback mode.\n"); + if (query_cmd_param->out->retval == -EMLINK) + ubctl_err(ucdev, "Another port has already been enabled.\n"); + + return ret; +} + +static int ubctl_query_prbs(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_PRBS_RESULT, UBCTL_QUERY_DEBUG_EN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_config_prbs(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_PRBS_RESULT, UBCTL_QUERY_DEBUG_EN, UBCTL_WRITE, NULL, 0 }, + }; + int ret; + + ret = ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); + + if (query_cmd_param->out->retval == -EMLINK) + ubctl_err(ucdev, "Another port has already been enabled.\n"); + + return ret; +} + static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_NL, ubctl_query_nl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_PKT_STATS, ubctl_query_nl_pkt_stats_data, @@ -472,6 +627,28 @@ static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS, ubctl_query_ba_mar_perf, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_QOS, ubctl_query_qos_data, ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_SCC_DEBUG_EN, ubctl_query_scc_debug, + ubctl_query_data_deal }, + { UTOOL_CMD_CONF_SCC_DEBUG_EN, ubctl_config_scc_debug, + ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_UBOMMU, ubctl_query_ubommu_data, ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_PORT_INFO, ubctl_query_port_info_data, + ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_ECC_2B, ubctl_query_ecc_2b_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_QUEUE, ubctl_query_queue_data, ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_LOOPBACK, ubctl_query_loopback, ubctl_query_data_deal }, + { UTOOL_CMD_CONF_LOOPBACK, ubctl_config_loopback, ubctl_query_data_deal }, + + { UTOOL_CMD_QUERY_PRBS_EN, ubctl_query_prbs, ubctl_query_data_deal }, + { UTOOL_CMD_CONF_PRBS_EN, ubctl_config_prbs, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_PRBS_RESULT, ubctl_query_prbs, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index 026f751d365b..67e0bd576a6f 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -36,6 +36,21 @@ #define UBCTL_CONF_BA_PERF_DFX 0xA014 #define UBCTL_QUERY_BA_MAR_PERF_DFX 0xA015 +#define UBCTL_QUERY_QOS_DFX 0xA00D + +#define UBCTL_QUERY_SCC_DEBUG_DFX 0xA011 + +#define UBCTL_QUERY_QUEUE_DFX 0xA01B + +#define UBCTL_QUERY_UBOMMU_DFX 0xA016 + 
+#define UBCTL_QUERY_PORT_INFO_DFX	0xA018
+
+#define UBCTL_QUERY_ECC_2B_DFX		0xA019
+
+#define UBCTL_QUERY_LOOPBACK		0xA025
+#define UBCTL_QUERY_PRBS_RESULT		0xA026
+
 #define UBCTL_NL_PKT_STATS_LEN		632
 #define UBCTL_NL_SSU_STATS_LEN		408
 #define UBCTL_NL_ABN_LEN		56
@@ -68,4 +83,18 @@
 #define UBCTL_CONF_BA_MAR_PERF_LEN	24
 #define UBCTL_QUERY_BA_MAR_PERF_LEN	56
 
+#define UBCTL_QOS_LEN			284
+
+#define UBCTL_SCC_DEBUG_EN_LEN		24
+
+#define UBCTL_QUEUE_LEN			120
+
+#define UBCTL_PORT_INFO_LEN		56
+
+#define UBCTL_UBOMMU_LEN		56
+
+#define UBCTL_ECC_2B_LEN		344
+
+#define UBCTL_QUERY_DEBUG_EN		24
+
 #endif
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index e7aa1df3a660..432ea55ff59a 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -70,9 +70,32 @@ enum ub_fwctl_cmdrpc_type {
 	UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS = 0x0047,
 	UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS = 0x0048,
 
+	UTOOL_CMD_QUERY_QOS = 0x0051,
+
+	UTOOL_CMD_QUERY_SCC_DEBUG_EN = 0x0063,
+	UTOOL_CMD_CONF_SCC_DEBUG_EN = 0x0064,
+
+	UTOOL_CMD_QUERY_QUEUE = 0x0073,
+
+	UTOOL_CMD_QUERY_PORT_INFO = 0x0081,
+
+	UTOOL_CMD_QUERY_UBOMMU = 0x0091,
+
+	UTOOL_CMD_QUERY_ECC_2B = 0x00B1,
+
+	UTOOL_CMD_QUERY_LOOPBACK = 0x00D1,
+	UTOOL_CMD_CONF_LOOPBACK = 0x00D2,
+	UTOOL_CMD_QUERY_PRBS_EN = 0x00D3,
+	UTOOL_CMD_CONF_PRBS_EN = 0x00D4,
+	UTOOL_CMD_QUERY_PRBS_RESULT = 0x00D5,
+
 	UTOOL_CMD_QUERY_MAX,
 };
 
+struct fwctl_pkt_in_enable {
+	__u8 enable;
+};
+
 struct fwctl_pkt_in_table {
 	__u32 port_id;
 	__u32 table_num;
-- 
Gitee
From b9347c9dcb1f8d19f4bb2e4b8cead96029518869 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng 
Date: Wed, 20 Aug 2025 19:43:59 +0800
Subject: [PATCH 034/243] ub: ub_fwctl: Support Dump register.

commit d9fe3f4bd8c5bd7eeb7e2c5e787f7056b3b9d7dd openEuler

Support dumping registers at various levels, mainly statistics
registers and status registers; not every register is covered.
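
The dump reply is simply the listed register blocks copied back to back
into fwctl_rpc_ub_out::data, so a caller can size its buffer by summing
the per-block lengths from ub_cmdq.h. A sketch of that calculation (the
block order mirrors ubctl_query_dump_data() below; treat it as an
illustration, not a stable ABI):

static const u32 ubctl_dump_block_lens[] = {
	UBCTL_NL_PKT_STATS_LEN, UBCTL_NL_SSU_STATS_LEN, UBCTL_NL_ABN_LEN,
	UBCTL_TP_TX_STATS_LEN, UBCTL_TP_RX_STATS_LEN, UBCTL_TP_RQM_LEN,
	UBCTL_TP_STATE_DFX_LEN, UBCTL_TP_TX_ROUTE_LEN,
	UBCTL_TP_RX_BANK_LEN, UBCTL_TP_RX_BANK_LEN, UBCTL_TP_RX_BANK_LEN,
	UBCTL_TP_TX_ABN_LEN, UBCTL_TP_RX_ABN_LEN, UBCTL_TP_REG_LEN,
	UBCTL_TA_PKT_STATS_LEN, UBCTL_TA_ABN_STATS_LEN,
	UBCTL_DL_PKT_STATS_LEN, UBCTL_DL_REPL_LEN,
	UBCTL_DL_LINK_STATUS_LEN, UBCTL_DL_LANE_LEN, UBCTL_DL_BIT_ERR_LEN,
	UBCTL_BA_PKT_STATS_LEN, UBCTL_BA_MAR_LEN,
	UBCTL_QOS_LEN, UBCTL_UBOMMU_LEN, UBCTL_ECC_2B_LEN,
};

static u32 ubctl_dump_total_len(void)
{
	u32 total = 0;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(ubctl_dump_block_lens); i++)
		total += ubctl_dump_block_lens[i];

	return total;	/* minimum size of fwctl_rpc_ub_out::data */
}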
Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd_reg.c | 46 +++++++++++++++++++++++++++++++++++ include/uapi/fwctl/ub_fwctl.h | 2 ++ 2 files changed, 48 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index bfc480c73c1d..026ac3f2fe90 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -572,6 +572,50 @@ static int ubctl_config_prbs(struct ubctl_dev *ucdev, return ret; } +static int ubctl_query_dump_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_PKT_STATS_DFX, UBCTL_NL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_NL_SSU_STATS_DFX, UBCTL_NL_SSU_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_NL_ABN_DFX, UBCTL_NL_ABN_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_TX_DFX, UBCTL_TP_TX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_ABN_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_ABN_STATS_DFX, UBCTL_TP_REG_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_DL_PKT_STATS_DFX, UBCTL_DL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_REPL_DFX, UBCTL_DL_REPL_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_LINK_STATUS_DFX, UBCTL_DL_LINK_STATUS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_LANE_DFX, UBCTL_DL_LANE_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_DL_BIT_ERR_DFX, UBCTL_DL_BIT_ERR_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_BA_PKT_STATS_DFX, UBCTL_BA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_BA_MAR_DFX, UBCTL_BA_MAR_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_QOS_DFX, UBCTL_QOS_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_UBOMMU_DFX, UBCTL_UBOMMU_LEN, UBCTL_READ, NULL, 0 }, + + { UBCTL_QUERY_ECC_2B_DFX, UBCTL_ECC_2B_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_dp_deal(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_NL, ubctl_query_nl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_PKT_STATS, ubctl_query_nl_pkt_stats_data, @@ -649,6 +693,8 @@ static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_CONF_PRBS_EN, ubctl_config_prbs, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_PRBS_RESULT, ubctl_query_prbs, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_DUMP, ubctl_query_dump_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index 432ea55ff59a..1e40bd8f4a1c 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -89,6 +89,8 @@ enum ub_fwctl_cmdrpc_type { UTOOL_CMD_CONF_PRBS_EN = 0x00D4, 
UTOOL_CMD_QUERY_PRBS_RESULT = 0x00D5, + UTOOL_CMD_QUERY_DUMP = 0xFFFE, + UTOOL_CMD_QUERY_MAX, }; -- Gitee From e88b342cf967834b9b199d4f2c927094fb4ecc92 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Thu, 21 Aug 2025 17:09:08 +0800 Subject: [PATCH 035/243] ub: ub_fwctl: support querying UB link trace information. commit d275d5c4931372d973499ea430bb7cb787d0b617 openEuler ub_fwctl adds some complex query processing. Support ub_fwctl to query UB link trace information. Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/Makefile | 2 +- drivers/fwctl/ub/main.c | 8 +- drivers/fwctl/ub/ub_cmd.c | 157 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmd.h | 13 +++ drivers/fwctl/ub/ub_cmdq.h | 2 + include/uapi/fwctl/ub_fwctl.h | 4 + 6 files changed, 184 insertions(+), 2 deletions(-) create mode 100644 drivers/fwctl/ub/ub_cmd.c create mode 100644 drivers/fwctl/ub/ub_cmd.h diff --git a/drivers/fwctl/ub/Makefile b/drivers/fwctl/ub/Makefile index c2c2008b2653..391aa5f909c5 100644 --- a/drivers/fwctl/ub/Makefile +++ b/drivers/fwctl/ub/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0+ obj-$(CONFIG_FWCTL_UB) += ub_fwctl.o -ub_fwctl-y += main.o ub_cmd_reg.o ub_common.o +ub_fwctl-y += main.o ub_cmd_reg.o ub_common.o ub_cmd.o diff --git a/drivers/fwctl/ub/main.c b/drivers/fwctl/ub/main.c index 63de07d5d028..e96ccf5afa55 100644 --- a/drivers/fwctl/ub/main.c +++ b/drivers/fwctl/ub/main.c @@ -10,6 +10,7 @@ #include "ub_common.h" #include "ub_cmd_reg.h" +#include "ub_cmd.h" #define MAX_IOCTL_COUNT 1024 #define TIME_WINDOW_MS 3000 @@ -98,9 +99,14 @@ static int ub_cmd_do(struct ubctl_dev *ucdev, u32 rpc_cmd = query_cmd_param->in->rpc_cmd; struct ubctl_func_dispatch *ubctl_query_reg = ubctl_get_query_reg_func( ucdev, rpc_cmd); + struct ubctl_func_dispatch *ubctl_query_func = ubctl_get_query_func( + ucdev, rpc_cmd); int ret; - if (ubctl_query_reg && ubctl_query_reg->execute) { + if (ubctl_query_func && ubctl_query_func->execute) { + ret = ubctl_query_func->execute(ucdev, query_cmd_param, + ubctl_query_func); + } else if (ubctl_query_reg && ubctl_query_reg->execute) { ret = ubctl_query_reg->execute(ucdev, query_cmd_param, ubctl_query_reg); } else { diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c new file mode 100644 index 000000000000..22577289998c --- /dev/null +++ b/drivers/fwctl/ub/ub_cmd.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ */ + +#include "ub_cmdq.h" +#include "ub_cmd.h" + +struct ubctl_query_trace { + u32 port_id; + u32 index; + u32 cur_count; + u32 total_count; + u32 data[]; +}; + +static int ubctl_trace_data_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ +#define UBCTL_TRACE_SIZE 4U +#define UBCTL_TOTAL_CNT_MAX 64U + + struct fwctl_rpc_ub_out *trace_out = query_cmd_param->out; + struct ubctl_query_trace *trace_info = cmd->out_data; + u32 trace_max_len = query_cmd_param->out_len; + u32 pos_index = offset * UBCTL_TRACE_SIZE; + + if ((trace_info->total_count > UBCTL_TOTAL_CNT_MAX) || + (trace_info->total_count * UBCTL_TRACE_SIZE >= trace_max_len) || + (pos_index >= trace_max_len || cmd->out_len < sizeof(struct ubctl_query_trace))) { + ubctl_err(ucdev, "cmd out data length is error.\n"); + return -EINVAL; + } + + if (pos_index == 0) + memcpy(trace_out->data, cmd->out_data, cmd->out_len); + else + memcpy((u32 *)(trace_out->data) + pos_index, trace_info->data, + cmd->out_len - sizeof(struct ubctl_query_trace)); + + trace_out->data_size = query_cmd_param->out_len; + return 0; +} + +static int ubctl_send_deal_trace(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_query_cmd_dp *cmd_data, u32 offset) +{ + u32 out_len = UBCTL_DL_TRACE_LEN; + struct ubctl_cmd cmd = {}; + int ret = 0; + + if (!cmd_data->query_func->data_deal) { + ubctl_err(ucdev, "ubctl data deal func is null.\n"); + return -EINVAL; + } + + cmd.op_code = UBCTL_QUERY_DL_TRACE_DFX; + + ret = ubctl_fill_cmd(&cmd, cmd_data->cmd_in, cmd_data->cmd_out, + out_len, UBCTL_READ); + if (ret) { + ubctl_err(ucdev, "ubctl fill cmd failed.\n"); + return ret; + } + + ret = ubctl_ubase_cmd_send(ucdev->adev, &cmd); + if (ret) { + ubctl_err(ucdev, "ubctl ubase cmd send failed, ret = %d.\n", ret); + return -EINVAL; + } + + ret = cmd_data->query_func->data_deal(ucdev, query_cmd_param, &cmd, + out_len, offset); + if (ret) + ubctl_err(ucdev, "ubctl data deal failed, ret = %d.\n", ret); + + return ret; +} + +static int ubctl_query_dl_trace_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct fwctl_pkt_in_port *pkt_in = (struct fwctl_pkt_in_port *)query_cmd_param->in->data; + u32 trace_index = 0, offset = 0, expect_total = 0, out_len = UBCTL_DL_TRACE_LEN, tmp_sum; + struct ubctl_query_cmd_dp cmd_dp = {}; + int ret = 0; + + if (query_cmd_param->in->data_size != sizeof(struct fwctl_pkt_in_port)) { + ubctl_err(ucdev, "user data of trace is invalid.\n"); + return -EINVAL; + } + + while (1) { + struct ubctl_query_trace *cmd_in __free(kvfree) = kvzalloc(out_len, GFP_KERNEL); + if (!cmd_in) + return -ENOMEM; + + struct ubctl_query_trace *cmd_out __free(kvfree) = kvzalloc(out_len, GFP_KERNEL); + if (!cmd_out) + return -ENOMEM; + + cmd_in->index = trace_index; + cmd_in->port_id = pkt_in->port_id; + + cmd_dp = (struct ubctl_query_cmd_dp) { + .cmd_in = cmd_in, + .cmd_out = cmd_out, + .query_func = query_func, + }; + + ret = ubctl_send_deal_trace(ucdev, query_cmd_param, + &cmd_dp, offset); + if (ret) + return ret; + + offset = cmd_out->cur_count + 1; + trace_index = cmd_out->cur_count; + tmp_sum = cmd_out->cur_count + cmd_in->index; + + if ((tmp_sum <= expect_total) || (tmp_sum > cmd_out->total_count)) { + ubctl_err(ucdev, "software data of trace is invalid.\n"); + return -EINVAL; + } + + if (tmp_sum == cmd_out->total_count) + break; + + expect_total = tmp_sum; + } + + return ret; +} + 
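+/*
+ * Note on the pagination above: each UBCTL_QUERY_DL_TRACE_DFX response
+ * reports cur_count and total_count, and the next request resumes from
+ * the returned cur_count.  The running position (the index sent plus
+ * the cur_count received) must strictly increase and may never exceed
+ * total_count; the loop ends once it reaches total_count exactly,
+ * which guards against inconsistent counters from firmware.
+ */
+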
+static struct ubctl_func_dispatch g_ubctl_query_func[] = {
+	{ UTOOL_CMD_QUERY_DL_LINK_TRACE, ubctl_query_dl_trace_data,
+	  ubctl_trace_data_deal },
+
+	{ UTOOL_CMD_QUERY_MAX, NULL, NULL }
+};
+
+struct ubctl_func_dispatch *ubctl_get_query_func(struct ubctl_dev *ucdev, u32 rpc_cmd)
+{
+	u32 i;
+
+	if (!ucdev)
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(g_ubctl_query_func); i++) {
+		if (g_ubctl_query_func[i].rpc_cmd == rpc_cmd)
+			return &g_ubctl_query_func[i];
+	}
+
+	return NULL;
+}
diff --git a/drivers/fwctl/ub/ub_cmd.h b/drivers/fwctl/ub/ub_cmd.h
new file mode 100644
index 000000000000..69bb2ea43c52
--- /dev/null
+++ b/drivers/fwctl/ub/ub_cmd.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+#ifndef __UB_CMD_H__
+#define __UB_CMD_H__
+
+#include "ub_common.h"
+
+struct ubctl_func_dispatch *ubctl_get_query_func(struct ubctl_dev *ucdev,
+						 u32 rpc_cmd);
+#endif
diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h
index 67e0bd576a6f..32323a4c0bbb 100644
--- a/drivers/fwctl/ub/ub_cmdq.h
+++ b/drivers/fwctl/ub/ub_cmdq.h
@@ -24,6 +24,7 @@
 #define UBCTL_QUERY_DL_PKT_STATS_DFX	0xA007
 #define UBCTL_QUERY_DL_LINK_STATUS_DFX	0xA008
 #define UBCTL_QUERY_DL_LANE_DFX		0xA009
+#define UBCTL_QUERY_DL_TRACE_DFX	0xA010
 #define UBCTL_QUERY_DL_BIT_ERR_DFX	0xA00A
 #define UBCTL_QUERY_CONF_DL_BIST_DFX	0xA020
 #define UBCTL_QUERY_DL_BIST_ERR_DFX	0xA021
@@ -75,6 +76,7 @@
 #define UBCTL_DL_BIT_ERR_LEN		56
 #define UBCTL_DL_BIST_LEN		24
 #define UBCTL_DL_BIST_ERR_LEN		24
+#define UBCTL_DL_TRACE_LEN		1016
 
 #define UBCTL_BA_PKT_STATS_LEN		792
 #define UBCTL_BA_MAR_LEN		440
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index 1e40bd8f4a1c..411d8849d43b 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -53,6 +53,7 @@ enum ub_fwctl_cmdrpc_type {
 	UTOOL_CMD_QUERY_DL_LINK_STATUS = 0x0013,
 	UTOOL_CMD_QUERY_DL_LANE = 0x0014,
 	UTOOL_CMD_QUERY_DL_BIT_ERR = 0x0015,
+	UTOOL_CMD_QUERY_DL_LINK_TRACE = 0x0016,
 	UTOOL_CMD_QUERY_DL_BIST = 0x0017,
 	UTOOL_CMD_CONF_DL_BIST = 0x0018,
 	UTOOL_CMD_QUERY_DL_BIST_ERR = 0x0019,
@@ -104,5 +105,8 @@ struct fwctl_pkt_in_table {
 	__u32 index;
 };
 
+struct fwctl_pkt_in_port {
+	__u32 port_id;
+};
 
 #endif
-- 
Gitee
From 51106e493290d56e12e4e4cddb57414a80801cb6 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng 
Date: Thu, 21 Aug 2025 17:25:32 +0800
Subject: [PATCH 036/243] ub: ub_fwctl: support querying SCC and io_die
 related information.

commit 59043edff2c8b6a5cdd746dda78e62db9a718d2e openEuler

Support querying the SCC version number and diagnostic log information
through ub_fwctl.
Support querying information about every IO die in the environment,
including the chip ID of each IO die, the die ID, and all enabled port
information under that die (port number, port type, and port link
status).
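
For illustration, a sketch of how a caller could walk the
UTOOL_CMD_QUERY_IO_DIE_PORT_INFO reply. Per the layout built by
ubctl_query_port_infos() below, the first 24 bytes
(UBCTL_IO_DIE_INFO_LEN) hold the die summary, with data[0] rewritten to
the enabled-port count, followed by one 24-byte
(UBCTL_DL_LINK_STATUS_LEN) link-status block per port. The parsing
helper itself is an assumption made for this sketch:

#include <stdio.h>
#include <fwctl/ub_fwctl.h>

static void parse_iodie_reply(const struct fwctl_rpc_ub_out *out)
{
	const __u32 *words = out->data;
	__u32 port_num = words[0];
	__u32 off = 24 / sizeof(__u32);		/* UBCTL_IO_DIE_INFO_LEN */
	__u32 step = 24 / sizeof(__u32);	/* UBCTL_DL_LINK_STATUS_LEN */
	__u32 i;

	for (i = 0; i < port_num; i++, off += step)
		printf("port block %u at word %u, first word 0x%x\n",
		       i, off, words[off]);
}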
Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd.c | 219 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 5 + drivers/fwctl/ub/ub_common.h | 2 + include/uapi/fwctl/ub_fwctl.h | 7 ++ 4 files changed, 233 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c index 22577289998c..da37575bb36f 100644 --- a/drivers/fwctl/ub/ub_cmd.c +++ b/drivers/fwctl/ub/ub_cmd.c @@ -6,6 +6,8 @@ #include "ub_cmdq.h" #include "ub_cmd.h" +#define UBCTL_SCC_SZ_1M 0x100000 + struct ubctl_query_trace { u32 port_id; u32 index; @@ -14,6 +16,13 @@ struct ubctl_query_trace { u32 data[]; }; +struct ubctl_scc_data { + u32 phy_addr_low; + u32 phy_addr_high; + u32 data_size; + u32 rsv[3]; +}; + static int ubctl_trace_data_deal(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_cmd *cmd, u32 out_len, u32 offset) @@ -134,10 +143,220 @@ static int ubctl_query_dl_trace_data(struct ubctl_dev *ucdev, return ret; } +static int ubctl_scc_data_deal(struct ubctl_dev *ucdev, u32 index, + struct fwctl_rpc_ub_out *out, + struct ubctl_scc_data *scc) +{ +#define UBCTL_SCC_OUT_LEN ((UBCTL_SCC_SZ_1M) / (sizeof(u32))) +#define UBCTL_SCC_INDEX_MAX_NUM 1 + + u32 scc_data_len = scc->data_size / sizeof(u32); + u32 data_len = out->data_size / sizeof(u32); + u32 offset = index * UBCTL_SCC_OUT_LEN; + u32 *scc_data = out->data; + void __iomem *vir_addr; + u64 phy_addr; + u32 i, j; + + if (index > UBCTL_SCC_INDEX_MAX_NUM) { + ubctl_err(ucdev, "scc index is invalid, index = %u.\n", index); + return -EINVAL; + } + + phy_addr = UBCTL_GET_PHY_ADDR(scc->phy_addr_high, scc->phy_addr_low); + + vir_addr = ioremap(phy_addr, scc->data_size); + if (!vir_addr) { + ubctl_err(ucdev, "addr ioremap failed.\n"); + return -EFAULT; + } + + for (i = offset, j = 0; i < scc_data_len && j < data_len; i++, j++) + scc_data[j] = readl(vir_addr + i * sizeof(u32)); + + iounmap(vir_addr); + return 0; +} + +static int ubctl_scc_data_len_check(struct ubctl_dev *ucdev, u32 out_len, + u32 data_size, u32 scc_len) +{ +#define UBCTL_SCC_CACHE 0x200000 + + if (data_size != UBCTL_SCC_CACHE) { + ubctl_err(ucdev, "scc data size is not equal to 2M, data size = %u.\n", + data_size); + return -EINVAL; + } + + if (out_len != scc_len) { + ubctl_err(ucdev, "scc out len is invalid, out len = %u.\n", + out_len); + return -EINVAL; + } + + return 0; +} + +static int ubctl_scc_version_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ +#define UBCTL_SCC_VERSION_SZ 24 + + struct fwctl_pkt_in_index *pkt_in = NULL; + struct ubctl_scc_data *scc = NULL; + int ret = 0; + + if (query_cmd_param->in->data_size != sizeof(struct fwctl_pkt_in_index)) { + ubctl_err(ucdev, "user data of scc version is invalid.\n"); + return -EINVAL; + } + pkt_in = (struct fwctl_pkt_in_index *)query_cmd_param->in->data; + scc = (struct ubctl_scc_data *)cmd->out_data; + + ret = ubctl_scc_data_len_check(ucdev, query_cmd_param->out_len, + scc->data_size, UBCTL_SCC_VERSION_SZ); + if (ret) { + ubctl_err(ucdev, "scc version data len check failed, ret = %d.\n", ret); + return -EINVAL; + } + + query_cmd_param->out->data_size = query_cmd_param->out_len; + scc->data_size = sizeof(u32); + + return ubctl_scc_data_deal(ucdev, pkt_in->index, query_cmd_param->out, scc); +} + +static int ubctl_scc_log_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ + struct fwctl_pkt_in_index 
*pkt_in = (struct fwctl_pkt_in_index *)query_cmd_param->in->data; + struct ubctl_scc_data *scc = (struct ubctl_scc_data *)cmd->out_data; + int ret = 0; + + if (query_cmd_param->in->data_size != sizeof(*pkt_in)) { + ubctl_err(ucdev, "user data of scc log is invalid.\n"); + return -EINVAL; + } + + ret = ubctl_scc_data_len_check(ucdev, query_cmd_param->out_len, + scc->data_size, UBCTL_SCC_SZ_1M); + if (ret) { + ubctl_err(ucdev, "scc log data len check failed, ret = %d.\n", ret); + return -EINVAL; + } + + query_cmd_param->out->data_size = query_cmd_param->out_len; + + return ubctl_scc_data_deal(ucdev, pkt_in->index, query_cmd_param->out, scc); +} + +static int ubctl_query_scc_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_SCC_DFX, UBCTL_SCC_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_port_infos(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func, + u32 port_bitmap) +{ +#define UBCTL_U32_BIT_NUM 32U + + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_DL_LINK_STATUS_DFX, UBCTL_DL_LINK_STATUS_LEN, UBCTL_READ, NULL, 0 }, + }; + u32 iodie_len = sizeof(struct fwctl_rpc_ub_out) + UBCTL_IO_DIE_INFO_LEN; + u32 out_data_offset = UBCTL_IO_DIE_INFO_LEN / sizeof(u32); + struct fwctl_rpc_ub_out *out = query_cmd_param->out; + u32 out_mem_size = query_cmd_param->out_len; + u32 *pkt_in = query_cmd_param->in->data; + u32 data_size = out->data_size; + int port_num = 0; + int ret = 0; + u32 i; + + if (port_bitmap == 0) + return ret; + + struct fwctl_rpc_ub_out *out_temp __free(kvfree) = kvzalloc(iodie_len, GFP_KERNEL); + if (!out_temp) + return -ENOMEM; + + query_cmd_param->out = out_temp; + + for (i = 0; i < UBCTL_U32_BIT_NUM; i++) { + if (!(port_bitmap & (1UL << i))) + continue; + out_temp->data_size = 0; + *pkt_in = i; + ret = ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); + if (ret != 0) + break; + + if ((out_temp->data_size + out_data_offset * sizeof(u32)) > out_mem_size) { + ubctl_err(ucdev, "port info size = %u, total size = %u, offset size = %lu.\n", + out_temp->data_size, out_mem_size, + out_data_offset * sizeof(u32)); + ret = -ENOMEM; + break; + } + + memcpy(&out->data[out_data_offset], out_temp->data, out_temp->data_size); + data_size += out_temp->data_size; + out_data_offset += UBCTL_DL_LINK_STATUS_LEN / sizeof(u32); + port_num++; + } + + query_cmd_param->out = out; + out->data_size = data_size; + out->data[0] = port_num; + + return ret; +} + +static int ubctl_query_iodie_info_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_PORT_NUM_DFX, UBCTL_IO_DIE_INFO_LEN, UBCTL_READ, NULL, 0 }, + }; + u32 port_bitmap; + int ret; + + ret = ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); + if (ret != 0) + return ret; + + if (query_cmd_param->out->data_size < sizeof(u32)) + return -ENOMEM; + port_bitmap = *query_cmd_param->out->data; + + return ubctl_query_port_infos(ucdev, query_cmd_param, query_func, port_bitmap); +} + static struct ubctl_func_dispatch g_ubctl_query_func[] = { { UTOOL_CMD_QUERY_DL_LINK_TRACE, ubctl_query_dl_trace_data, ubctl_trace_data_deal }, + { UTOOL_CMD_QUERY_SCC_VERSION, 
ubctl_query_scc_data, ubctl_scc_version_deal}, + { UTOOL_CMD_QUERY_SCC_LOG, ubctl_query_scc_data, ubctl_scc_log_deal }, + + { UTOOL_CMD_QUERY_IO_DIE_PORT_INFO, ubctl_query_iodie_info_data, + ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index 32323a4c0bbb..be46f852ca0d 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -39,12 +39,14 @@ #define UBCTL_QUERY_QOS_DFX 0xA00D +#define UBCTL_QUERY_SCC_DFX 0xA00E #define UBCTL_QUERY_SCC_DEBUG_DFX 0xA011 #define UBCTL_QUERY_QUEUE_DFX 0xA01B #define UBCTL_QUERY_UBOMMU_DFX 0xA016 +#define UBCTL_QUERY_PORT_NUM_DFX 0xA017 #define UBCTL_QUERY_PORT_INFO_DFX 0xA018 #define UBCTL_QUERY_ECC_2B_DFX 0xA019 @@ -87,10 +89,13 @@ #define UBCTL_QOS_LEN 284 +#define UBCTL_SCC_LEN 24 #define UBCTL_SCC_DEBUG_EN_LEN 24 #define UBCTL_QUEUE_LEN 120 +#define UBCTL_IO_DIE_INFO_LEN 24 + #define UBCTL_PORT_INFO_LEN 56 #define UBCTL_UBOMMU_LEN 56 diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index cde0a09b85e2..5debf2f14134 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -26,6 +26,8 @@ dev_info(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ ##__VA_ARGS__) +#define UBCTL_GET_PHY_ADDR(high, low) ((((u64)(high)) << 32) | (low)) + struct ubctl_dev { struct fwctl_device fwctl; DECLARE_KFIFO_PTR(ioctl_fifo, unsigned long); diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index 411d8849d43b..e2d412212102 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -73,12 +73,15 @@ enum ub_fwctl_cmdrpc_type { UTOOL_CMD_QUERY_QOS = 0x0051, + UTOOL_CMD_QUERY_SCC_VERSION = 0x0061, + UTOOL_CMD_QUERY_SCC_LOG = 0x0062, UTOOL_CMD_QUERY_SCC_DEBUG_EN = 0x0063, UTOOL_CMD_CONF_SCC_DEBUG_EN = 0x0064, UTOOL_CMD_QUERY_QUEUE = 0x0073, UTOOL_CMD_QUERY_PORT_INFO = 0x0081, + UTOOL_CMD_QUERY_IO_DIE_PORT_INFO = 0x0082, UTOOL_CMD_QUERY_UBOMMU = 0x0091, @@ -109,4 +112,8 @@ struct fwctl_pkt_in_port { __u32 port_id; }; +struct fwctl_pkt_in_index { + __u32 index; +}; + #endif -- Gitee From 351519ea00d4313210bc103b93c109a14c4dd885 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Thu, 21 Aug 2025 19:54:00 +0800 Subject: [PATCH 037/243] ub: ub_fwctl: query the MSG queue information and entry details within UB. commit 7ed154d74ca3de460e8ba97cc133c90501ae9d81 openEuler Query the MSG(message) queue information and entry details within UB 1. The dump corresponds to the physical registers under MSGQ, including queue pointer, queue depth, status, interrupt status, and other registers. 2. The dump corresponds to a specified entry of MSGQ, where the entry idx falls within the queue depth range. The dump covers the complete entries of SQE and CQE. Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd.c | 305 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 2 + drivers/fwctl/ub/ub_common.h | 2 + include/uapi/fwctl/ub_fwctl.h | 2 + 4 files changed, 311 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c index da37575bb36f..86fae2dfd82c 100644 --- a/drivers/fwctl/ub/ub_cmd.c +++ b/drivers/fwctl/ub/ub_cmd.c @@ -3,9 +3,12 @@ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ +#include + #include "ub_cmdq.h" #include "ub_cmd.h" +#define UBCTL_CQE_SIZE 16 #define UBCTL_SCC_SZ_1M 0x100000 struct ubctl_query_trace { @@ -23,6 +26,69 @@ struct ubctl_scc_data { u32 rsv[3]; }; +struct ubctl_msgq_to_user { + u32 sq_pi; + u32 sq_ci; + u32 sq_dep; + u32 sq_status; + u32 sq_int_mask; + u32 sq_int_status; + u32 sq_int_ro; + + u32 rq_pi; + u32 rq_ci; + u32 rq_dep; + u32 rq_entry_block_size; + u32 rq_status; + + u32 cq_pi; + u32 cq_ci; + u32 cq_dep; + u32 cq_status; + u32 cq_int_mask; + u32 cq_int_status; + u32 cq_int_ro; + + u32 rsvd[5]; +}; + +struct ubctl_msgq { + u32 sq_base_addr_low; + u32 sq_base_addr_high; + u32 sq_pi; + u32 sq_ci; + u32 sq_dep; + u32 sq_status; + u32 sq_int_mask; + u32 sq_int_status; + u32 sq_int_ro; + + u32 rq_base_addr_low; + u32 rq_base_addr_high; + u32 rq_pi; + u32 rq_ci; + u32 rq_dep; + u32 rq_entry_block_size; + u32 rq_status; + + u32 cq_base_addr_low; + u32 cq_base_addr_high; + u32 cq_pi; + u32 cq_ci; + u32 cq_dep; + u32 cq_status; + u32 cq_int_mask; + u32 cq_int_status; + u32 cq_int_ro; + + u32 resv[5]; +}; + +struct ubctl_msgq_phy_addr { + u64 sq_entry_phy_addr; + u64 cq_entry_phy_addr; +}; + static int ubctl_trace_data_deal(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_cmd *cmd, u32 out_len, u32 offset) @@ -347,6 +413,240 @@ static int ubctl_query_iodie_info_data(struct ubctl_dev *ucdev, return ubctl_query_port_infos(ucdev, query_cmd_param, query_func, port_bitmap); } +static int ubctl_msgq_que_data_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ + struct ubctl_msgq *msgq_que_info = (struct ubctl_msgq *)cmd->out_data; + struct ubctl_msgq_to_user *user_msgq_info = NULL; + u32 msgq_que_size = query_cmd_param->out_len; + + if (cmd->out_len != out_len || + msgq_que_size != sizeof(struct ubctl_msgq_to_user)) + return -EINVAL; + + user_msgq_info = (struct ubctl_msgq_to_user *)(query_cmd_param->out->data); + + user_msgq_info->sq_pi = msgq_que_info->sq_pi; + user_msgq_info->sq_ci = msgq_que_info->sq_ci; + user_msgq_info->sq_dep = msgq_que_info->sq_dep; + user_msgq_info->sq_status = msgq_que_info->sq_status; + user_msgq_info->sq_int_mask = msgq_que_info->sq_int_mask; + user_msgq_info->sq_int_status = msgq_que_info->sq_int_status; + user_msgq_info->sq_int_ro = msgq_que_info->sq_int_ro; + + user_msgq_info->rq_pi = msgq_que_info->rq_pi; + user_msgq_info->rq_ci = msgq_que_info->rq_ci; + user_msgq_info->rq_dep = msgq_que_info->rq_dep; + user_msgq_info->rq_entry_block_size = msgq_que_info->rq_entry_block_size; + user_msgq_info->rq_status = msgq_que_info->rq_status; + + user_msgq_info->cq_pi = msgq_que_info->cq_pi; + user_msgq_info->cq_ci = msgq_que_info->cq_ci; + user_msgq_info->cq_dep = msgq_que_info->cq_dep; + user_msgq_info->cq_status = msgq_que_info->cq_status; + user_msgq_info->cq_int_mask = msgq_que_info->cq_int_mask; + user_msgq_info->cq_int_status = msgq_que_info->cq_int_status; + user_msgq_info->cq_int_ro = msgq_que_info->cq_int_ro; + + query_cmd_param->out->data_size = msgq_que_size; + + return 0; +} + +static int ubctl_msgq_is_dump(void __iomem *entry_addr) +{ +#define UBCTL_MSGQ_PROTOCOL_OPCODE 5 +#define UBCTL_MSGQ_OPCODE_START 9 +#define UBCTL_MSGQ_OPCODE_END 11 + + u32 first_data = readl(entry_addr); + u32 protocol_op_code = 0; + u32 task_type = 0; + + protocol_op_code = UBCTL_EXTRACT_BITS(first_data, + UBCTL_MSGQ_OPCODE_START, + UBCTL_MSGQ_OPCODE_END); + task_type = UBCTL_EXTRACT_BITS(first_data, 0, 1); + if 
(task_type == 0 && protocol_op_code == UBCTL_MSGQ_PROTOCOL_OPCODE) + return -EINVAL; + + return 0; +} + +static int ubctl_msgq_entry_move_data(struct ubctl_query_cmd_param *query_cmd_param, + u32 offset, u32 block_size, + void __iomem *entry_addr) +{ + u32 msgq_entry_data_size = block_size + offset * sizeof(u32); + u32 *data_offset = query_cmd_param->out->data + offset; + u32 i; + + if (msgq_entry_data_size > query_cmd_param->out_len) + return -EINVAL; + + for (i = 0; i < block_size / sizeof(u32); i++) + data_offset[i] = readl(entry_addr + i); + + return 0; +} + +static int ubctl_msgq_check_index(struct ubctl_dev *ucdev, u32 entry_index, + struct ubctl_msgq *entry_info) +{ + if (entry_index >= entry_info->sq_dep || + entry_index >= entry_info->cq_dep) { + ubctl_err(ucdev, "index is illegal, index = %u.\n", entry_index); + return -EINVAL; + } + + return 0; +} + +static int ubctl_msgq_all_get_phy_addr(struct ubctl_dev *ucdev, u32 entry_index, + struct ubctl_msgq_phy_addr *entry_phy_addr, + struct ubctl_msgq *entry_info) +{ +#define UBCTL_SQE_SIZE 16 + + u64 base_addr; + int ret; + + ret = ubctl_msgq_check_index(ucdev, entry_index, entry_info); + if (ret) + return ret; + + base_addr = UBCTL_GET_PHY_ADDR(entry_info->sq_base_addr_high, + entry_info->sq_base_addr_low); + if (!base_addr) { + ubctl_err(ucdev, "sqe msgq not initialized.\n"); + return -EINVAL; + } + + entry_phy_addr->sq_entry_phy_addr = base_addr + + entry_index * UBCTL_SQE_SIZE; + + base_addr = UBCTL_GET_PHY_ADDR(entry_info->cq_base_addr_high, + entry_info->cq_base_addr_low); + if (!base_addr) { + ubctl_err(ucdev, "cqe msgq not initialized.\n"); + return -EINVAL; + } + + entry_phy_addr->cq_entry_phy_addr = base_addr + + entry_index * UBCTL_CQE_SIZE; + + return 0; +} + +static int ubctl_msgq_sq_entry_data_deal(struct ubctl_dev *ucdev, + u64 sq_entry_phy_addr, + struct ubctl_query_cmd_param *query_cmd_param) +{ +#define UBCTL_SQE_TO_USER_SIZE 8 + + void __iomem *sq_entry_addr; + int ret = 0; + + sq_entry_addr = memremap(sq_entry_phy_addr, UBCTL_SQE_TO_USER_SIZE, MEMREMAP_WB); + if (!sq_entry_addr) + return -EFAULT; + + ret = ubctl_msgq_is_dump(sq_entry_addr); + if (ret) { + ubctl_err(ucdev, "this entry cannot be dumped, sqe is SPDM verified msg.\n"); + goto err_exec; + } + + ret = ubctl_msgq_entry_move_data(query_cmd_param, 0, + UBCTL_SQE_TO_USER_SIZE, sq_entry_addr); + if (ret) + ubctl_err(ucdev, "move sqe data failed.\n"); + +err_exec: + memunmap(sq_entry_addr); + return ret; +} + +static int ubctl_msgq_cq_entry_data_deal(struct ubctl_dev *ucdev, + u64 cq_entry_phy_addr, + struct ubctl_query_cmd_param *query_cmd_param) +{ +#define UBCTL_CQE_OFFSET 2 + + void __iomem *cq_entry_addr; + int ret = 0; + + cq_entry_addr = memremap(cq_entry_phy_addr, UBCTL_CQE_SIZE, MEMREMAP_WB); + if (!cq_entry_addr) + return -EFAULT; + + ret = ubctl_msgq_is_dump(cq_entry_addr); + if (ret) { + ubctl_err(ucdev, "this entry cannot be dumped, cqe is SPDM verified msg.\n"); + goto err_exec; + } + + ret = ubctl_msgq_entry_move_data(query_cmd_param, UBCTL_CQE_OFFSET, + UBCTL_CQE_SIZE, cq_entry_addr); + if (ret) + ubctl_err(ucdev, "move cqe data failed.\n"); + +err_exec: + memunmap(cq_entry_addr); + return ret; +} + +static int ubctl_msgq_entry_data_deal(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_cmd *cmd, u32 out_len, u32 offset) +{ + struct ubctl_msgq *entry_info = (struct ubctl_msgq *)cmd->out_data; + u32 msgq_entry_max_len = query_cmd_param->out_len; + struct ubctl_msgq_phy_addr entry_phy_addr = {}; + u32 
entry_index = 0; + int ret = 0; + + if (query_cmd_param->in->data_size != sizeof(struct fwctl_pkt_in_index)) { + ubctl_err(ucdev, "user data of msgq is invalid.\n"); + return -EINVAL; + } + entry_index = ((struct fwctl_pkt_in_index *)query_cmd_param->in->data)->index; + + ret = ubctl_msgq_all_get_phy_addr(ucdev, entry_index, &entry_phy_addr, + entry_info); + if (ret) + return ret; + + ret = ubctl_msgq_sq_entry_data_deal(ucdev, + entry_phy_addr.sq_entry_phy_addr, + query_cmd_param); + if (ret) + return ret; + + ret = ubctl_msgq_cq_entry_data_deal(ucdev, + entry_phy_addr.cq_entry_phy_addr, + query_cmd_param); + if (ret) + return ret; + + query_cmd_param->out->data_size = msgq_entry_max_len; + + return ret; +} + +static int ubctl_query_msgq_que_stats_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_MSGQ_DFX, UBCTL_MSGQ_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static struct ubctl_func_dispatch g_ubctl_query_func[] = { { UTOOL_CMD_QUERY_DL_LINK_TRACE, ubctl_query_dl_trace_data, ubctl_trace_data_deal }, @@ -354,6 +654,11 @@ static struct ubctl_func_dispatch g_ubctl_query_func[] = { { UTOOL_CMD_QUERY_SCC_VERSION, ubctl_query_scc_data, ubctl_scc_version_deal}, { UTOOL_CMD_QUERY_SCC_LOG, ubctl_query_scc_data, ubctl_scc_log_deal }, + { UTOOL_CMD_QUERY_MSGQ_QUE_STATS, ubctl_query_msgq_que_stats_data, + ubctl_msgq_que_data_deal }, + { UTOOL_CMD_QUERY_MSGQ_ENTRY, ubctl_query_msgq_que_stats_data, + ubctl_msgq_entry_data_deal }, + { UTOOL_CMD_QUERY_IO_DIE_PORT_INFO, ubctl_query_iodie_info_data, ubctl_query_data_deal }, diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index be46f852ca0d..2855cb8dff00 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -42,6 +42,7 @@ #define UBCTL_QUERY_SCC_DFX 0xA00E #define UBCTL_QUERY_SCC_DEBUG_DFX 0xA011 +#define UBCTL_QUERY_MSGQ_DFX 0xA00F #define UBCTL_QUERY_QUEUE_DFX 0xA01B #define UBCTL_QUERY_UBOMMU_DFX 0xA016 @@ -92,6 +93,7 @@ #define UBCTL_SCC_LEN 24 #define UBCTL_SCC_DEBUG_EN_LEN 24 +#define UBCTL_MSGQ_LEN 120 #define UBCTL_QUEUE_LEN 120 #define UBCTL_IO_DIE_INFO_LEN 24 diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index 5debf2f14134..ab6761ffaad8 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -27,6 +27,8 @@ ##__VA_ARGS__) #define UBCTL_GET_PHY_ADDR(high, low) ((((u64)(high)) << 32) | (low)) +#define UBCTL_EXTRACT_BITS(value, start, end) \ + (((value) >> (start)) & ((1UL << ((end) - (start) + 1)) - 1)) struct ubctl_dev { struct fwctl_device fwctl; diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index e2d412212102..139a413bf94a 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -78,6 +78,8 @@ enum ub_fwctl_cmdrpc_type { UTOOL_CMD_QUERY_SCC_DEBUG_EN = 0x0063, UTOOL_CMD_CONF_SCC_DEBUG_EN = 0x0064, + UTOOL_CMD_QUERY_MSGQ_QUE_STATS = 0x0071, + UTOOL_CMD_QUERY_MSGQ_ENTRY = 0x0072, UTOOL_CMD_QUERY_QUEUE = 0x0073, UTOOL_CMD_QUERY_PORT_INFO = 0x0081, -- Gitee From 2d14d90e5109419b8774ccb523d51cdebbf393d8 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Thu, 21 Aug 2025 20:14:11 +0800 Subject: [PATCH 038/243] ub: ub_fwctl: support ummu data processing commit d44bcbc048ceacc0fdd3a4fb5bcb3b808c395582 openEuler ub_fwctl add ummu(UB Memory Management Unit) module cmd, support query all ummu 
register and support query or config sync_timeout_open register. Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/ub_cmd.c | 428 ++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 126 ++++++++++ include/uapi/fwctl/ub_fwctl.h | 9 + 3 files changed, 563 insertions(+) diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c index 86fae2dfd82c..5b3895107b31 100644 --- a/drivers/fwctl/ub/ub_cmd.c +++ b/drivers/fwctl/ub/ub_cmd.c @@ -4,6 +4,7 @@ */ #include +#include #include "ub_cmdq.h" #include "ub_cmd.h" @@ -11,6 +12,133 @@ #define UBCTL_CQE_SIZE 16 #define UBCTL_SCC_SZ_1M 0x100000 +static u32 g_ubctl_ummu_reg_addr[] = { + // KCMD + UBCTL_UMMU_SWIF_KCMDQ_DFX_KCMD_STATUS, + UBCTL_UMMU_SWIF_KCMDQ_DFX_KCMD_ERR_STATUS, + // CMD_CTRL + UBCTL_UMMU_SWIF_KCMDQ_DFX_SNP_ERR_CNT, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_0, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_1, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_2, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_3, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_4, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_5, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_6, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_7, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_8, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_9, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_10, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_11, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_12, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_13, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_14, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_15, + UBCTL_UMMU_SWIF_KCMDQ_DFX_SNP_STATUS, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_CTRL_STATUS1, + UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_CTRL_STATUS2, + UBCTL_UMMU_SYNC_TIMEOUT_INFO, + UBCTL_UMMU_DVM_RECEIVE_REQ_CNT, + UBCTL_UMMU_DVM_SEND_REQ_CNT, + UBCTL_UMMU_DVM_REQ_INFO0, + UBCTL_UMMU_DVM_REQ_INFO1, + // UCMD + UBCTL_UMMU_SWIF_UMCMD_DFX0, + UBCTL_UMMU_SWIF_UMCMD_DFX1, + UBCTL_UMMU_SWIF_UMCMD_DFX2, + UBCTL_UMMU_SWIF_UMCMD_DFX3, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_0, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_1, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_2, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_3, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_4, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_5, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_6, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX1, + UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX2, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX1, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX2, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX3, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX4, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX5, + UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX6, + // EVENT + UBCTL_UMMU_SWIF_EVENTQ_DFX_DROP_CNT, + UBCTL_UMMU_GLB_INT_EN, + UBCTL_UMMU_PMCG_INT_EN, + UBCTL_UMMU_INT_MASK, + UBCTL_UMMU_CTRL1, + UBCTL_UMMU_SPEC_DEF_DFX, + UBCTL_UMMU_TECT_BASE_CFG, + UBCTL_UMMU_ERR_STATUS_0, + UBCTL_UMMU_ROOT_GPF_FAR_L, + UBCTL_UMMU_ROOT_GPF_FAR_H, + UBCTL_UMMU_EVENT_QUE_PI, + UBCTL_UMMU_EVENT_QUE_CI, + // UBIF + UBCTL_UMMU_UBIF_DFX0, + UBCTL_UMMU_UBIF_DFX1, + UBCTL_UMMU_UBIF_DSTEID_DFX, + UBCTL_UMMU_UBIF_SYNC_DFX, + UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX0, + UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX1, + UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX2, + UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX3, + UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX4, + // TBU + UBCTL_UMMU_TBU_TLB_LKUP_PROC, + UBCTL_UMMU_TBU_TLB_STAT, + UBCTL_UMMU_TBU_TLB_FAULT_CNT, + UBCTL_UMMU_TBU_PLB_LKUP_PROC, + UBCTL_UMMU_TBU_PLB_STAT, + UBCTL_UMMU_TBU_PLB_FAULT_CNT, + UBCTL_UMMU_TBU_INVLD_MG_INFO, + UBCTL_UMMU_TBU_RAB_STAT, + UBCTL_UMMU_TBU_CNT, + 
UBCTL_UMMU_DFX_TBU_PERM_ERR_CNT, + UBCTL_UMMU_TBU_DFX0, + UBCTL_UMMU_TBU_DFX1, + UBCTL_UMMU_TBU_RAB_ENTRY_INFO_0_7_15, + // TCU + UBCTL_UMMU_TCU_PTW_QUEUE_STAT_0_47, + UBCTL_UMMU_TCU_PPTW_QUEUE_STAT_0_39, + // CFG + UBCTL_UMMU_DFX_ECC_MONITOR_0, + UBCTL_UMMU_DFX_ECC_MONITOR_1, + UBCTL_UMMU_CFG_DFX_CFGBUS_STATUS, + // GPC + UBCTL_UMMU_GPC_QUEUE_STAT_0_15, + // SKY + UBCTL_UMMU_SKY_QUEUE_STAT3_SP_0_63, + // MCMD + UBCTL_UMMU_MCMD_QUE_PI_0, + UBCTL_UMMU_MCMD_QUE_PI_1, + UBCTL_UMMU_MCMD_QUE_PI_2, + UBCTL_UMMU_MCMD_QUE_PI_3, + UBCTL_UMMU_MCMD_QUE_PI_4, + UBCTL_UMMU_MCMD_QUE_PI_5, + UBCTL_UMMU_MCMD_QUE_PI_6, + UBCTL_UMMU_MCMD_QUE_PI_7, + UBCTL_UMMU_MCMD_QUE_CI_0, + UBCTL_UMMU_MCMD_QUE_CI_1, + UBCTL_UMMU_MCMD_QUE_CI_2, + UBCTL_UMMU_MCMD_QUE_CI_3, + UBCTL_UMMU_MCMD_QUE_CI_4, + UBCTL_UMMU_MCMD_QUE_CI_5, + UBCTL_UMMU_MCMD_QUE_CI_6, + UBCTL_UMMU_MCMD_QUE_CI_7, + // UMMU_EN + UBCTL_UMMU_CTRL0, + // OTHER + UBCTL_UMMU_SYNC_TIMEOUT_OPEN, +}; + +struct ubctl_ummu_relation { + u32 reg_addr; + u32 reg_config_addr; + u32 reg_count; +}; + struct ubctl_query_trace { u32 port_id; u32 index; @@ -647,6 +775,302 @@ static int ubctl_query_msgq_que_stats_data(struct ubctl_dev *ucdev, query_dp, ARRAY_SIZE(query_dp)); } +static int compare_resources(const void *a, const void *b) +{ + const struct resource *ra = *(const struct resource **)a; + const struct resource *rb = *(const struct resource **)b; + + if (ra->start < rb->start) + return -1; + if (ra->start > rb->start) + return 1; + return 0; +} + +static struct resource *ubctl_find_and_sort_resources(struct ubctl_dev *ucdev, + struct resource *root, + const char *name_substr, + u32 ummu_id) +{ +#define UBCL_MAX_UMMU_NUM 32U + + struct resource *entry_arr[UBCL_MAX_UMMU_NUM] = {}; + struct resource *p; + u32 count = 0; + + /* + * To traverse the UMMU memory subtree, only need to traverse the child + * subtree of the root node. 
+ */ + for (p = root->child; p; p = p->sibling) { + if (!p->name || !strstr(p->name, name_substr)) + continue; + if (count >= UBCL_MAX_UMMU_NUM) { + ubctl_err(ucdev, "ummu resources is more than max num = %u.\n", + UBCL_MAX_UMMU_NUM); + return NULL; + } + entry_arr[count] = p; + count++; + } + + if (ummu_id >= count) { + ubctl_err(ucdev, "ummuid = %u out of range, current count = %u\n", + ummu_id, count); + return NULL; + } + + sort(entry_arr, count, sizeof(struct resource *), compare_resources, NULL); + + return entry_arr[ummu_id]; +} + +static inline u32 ubctl_ummu_get_register_offset(u32 index) +{ + return g_ubctl_ummu_reg_addr[index] - UBCTL_UMMU_REGISTER_BASE; +} + +static inline u32 ubctl_ummu_get_reg_count(void) +{ +#define UBCTL_UMMU_REPEAT_REG_TYPE_COUNT 5U + + return ARRAY_SIZE(g_ubctl_ummu_reg_addr) + UBCTL_UMMU_GPC_QUEUE_COUNT + + UBCTL_UMMU_SKY_QUEUE_COUNT + UBCTL_UMMU_TCU_PTW_QUEUE_COUNT + + UBCTL_UMMU_TCU_PPTW_QUEUE_COUNT + UBCTL_UMMU_ENTRY_NUM * + UBCTL_UMMU_BANK_NUM - UBCTL_UMMU_REPEAT_REG_TYPE_COUNT; +} + +struct ubctl_reg_pro_cmd { + struct ubctl_dev *ucdev; + u32 reg_index; + void __iomem *map_addr; + u32 *ummu_data; + u32 map_length; + u32 *index_offset; +}; + +static int ubctl_ummu_normal_read(struct ubctl_reg_pro_cmd *cmd) +{ + u32 ummu_reg_cnt = ubctl_ummu_get_reg_count(); + u32 reg_addr_offset; + + reg_addr_offset = ubctl_ummu_get_register_offset(cmd->reg_index); + if ((reg_addr_offset >= cmd->map_length) || (*cmd->index_offset >= ummu_reg_cnt)) { + ubctl_err(cmd->ucdev, "ummu reg offset is bigger than map length, index=%u, reg offset=%u, map length=%u.\n", + *cmd->index_offset, reg_addr_offset, cmd->map_length); + return -EFAULT; + } + cmd->ummu_data[*cmd->index_offset] = readl(cmd->map_addr + reg_addr_offset); + (*cmd->index_offset)++; + + return 0; +} + +static int ubctl_ummu_process_repeat_reg(struct ubctl_reg_pro_cmd *cmd) +{ + static struct ubctl_ummu_relation ummu_relation[] = { + { UBCTL_UMMU_GPC_QUEUE_STAT_0_15, UBCTL_UMMU_GPC_QUEUE_POINTER, + UBCTL_UMMU_GPC_QUEUE_COUNT }, + { UBCTL_UMMU_SKY_QUEUE_STAT3_SP_0_63, UBCTL_UMMU_SKY_QUEUE_POINTER_SP, + UBCTL_UMMU_SKY_QUEUE_COUNT }, + { UBCTL_UMMU_TCU_PTW_QUEUE_STAT_0_47, UBCTL_UMMU_TCU_PTW_QUEUE_POINTER, + UBCTL_UMMU_TCU_PTW_QUEUE_COUNT }, + { UBCTL_UMMU_TCU_PPTW_QUEUE_STAT_0_39, UBCTL_UMMU_TCU_PPTW_QUEUE_POINTER, + UBCTL_UMMU_TCU_PPTW_QUEUE_COUNT } + }; + + u32 read_reg_offset, set_reg_offset, write_count, i, j; + u32 ummu_reg_cnt = ubctl_ummu_get_reg_count(); + + for (i = 0; i < ARRAY_SIZE(ummu_relation); i++) { + if (ummu_relation[i].reg_addr != g_ubctl_ummu_reg_addr[cmd->reg_index]) + continue; + write_count = ummu_relation[i].reg_count; + set_reg_offset = ummu_relation[i].reg_config_addr - + UBCTL_UMMU_REGISTER_BASE; + read_reg_offset = ummu_relation[i].reg_addr - + UBCTL_UMMU_REGISTER_BASE; + if ((set_reg_offset >= cmd->map_length) || + (read_reg_offset >= cmd->map_length)) { + ubctl_err(cmd->ucdev, "ummu set or read reg offset is bigger than map length, set offset=%u, read offset=%u, map length=%u.\n", + set_reg_offset, read_reg_offset, cmd->map_length); + return -EFAULT; + } + + for (j = 0; j < write_count; j++, (*cmd->index_offset)++) { + writel(j, cmd->map_addr + set_reg_offset); + if (*cmd->index_offset >= ummu_reg_cnt) { + ubctl_err(cmd->ucdev, "index offset is bigger than ummu reg count, index offset=%u, ummu reg count=%u.\n", + *cmd->index_offset, ummu_reg_cnt); + return -EFAULT; + } + cmd->ummu_data[*cmd->index_offset] = readl(cmd->map_addr + + read_reg_offset); + } + return 0; + } + + return 
ubctl_ummu_normal_read(cmd);
+}
+
+static int ubctl_ummu_process_reg(struct ubctl_reg_pro_cmd *cmd)
+{
+#define UBCTL_TBU_MASK 0xFFFFFC00U
+#define UBCTL_BANK_OFFSET 6
+
+	u32 read_reg_offset, set_reg_offset, origin_value, value, i, j;
+	u32 ummu_reg_cnt = ubctl_ummu_get_reg_count();
+
+	if (g_ubctl_ummu_reg_addr[cmd->reg_index] != UBCTL_UMMU_TBU_RAB_ENTRY_INFO_0_7_15)
+		return ubctl_ummu_process_repeat_reg(cmd);
+
+	set_reg_offset = UBCTL_UMMU_TBU_RAB_FUNC_EN - UBCTL_UMMU_REGISTER_BASE;
+	read_reg_offset = UBCTL_UMMU_TBU_RAB_ENTRY_INFO_0_7_15 -
+			  UBCTL_UMMU_REGISTER_BASE;
+	if ((set_reg_offset >= cmd->map_length) ||
+	    (read_reg_offset >= cmd->map_length)) {
+		ubctl_err(cmd->ucdev, "ummu set or read reg offset is bigger than map length, set offset=%u, read offset=%u, map length=%u.\n",
+			  set_reg_offset, read_reg_offset, cmd->map_length);
+		return -EFAULT;
+	}
+
+	origin_value = readl(cmd->map_addr + set_reg_offset);
+	origin_value &= UBCTL_TBU_MASK;
+	for (i = 0; i < UBCTL_UMMU_BANK_NUM; i++) {
+		for (j = 0; j < UBCTL_UMMU_ENTRY_NUM; j++, (*cmd->index_offset)++) {
+			value = (i << UBCTL_BANK_OFFSET) | j | origin_value;
+			writel(value, cmd->map_addr + set_reg_offset);
+			if (*cmd->index_offset >= ummu_reg_cnt) {
+				ubctl_err(cmd->ucdev, "index offset is bigger than ummu reg count, index offset=%u, ummu reg count=%u.\n",
+					  *cmd->index_offset, ummu_reg_cnt);
+				return -EFAULT;
+			}
+			cmd->ummu_data[*cmd->index_offset] = readl(cmd->map_addr +
+								   read_reg_offset);
+		}
+	}
+	return 0;
+}
+
+static int ubctl_ummu_copy_data(struct ubctl_dev *ucdev,
+				struct ubctl_query_cmd_param *query_cmd_param,
+				void __iomem *map_addr, u32 map_length)
+{
+	u32 ummu_array_cnt = ARRAY_SIZE(g_ubctl_ummu_reg_addr);
+	u32 ummu_reg_cnt = ubctl_ummu_get_reg_count();
+	u32 *ummu_data = query_cmd_param->out->data;
+	u32 index_offset = 0;
+	int ret;
+	u32 i;
+
+	struct ubctl_reg_pro_cmd reg_pro_cmd = {
+		.ucdev = ucdev,
+		.reg_index = 0,
+		.map_addr = map_addr,
+		.ummu_data = ummu_data,
+		.map_length = map_length,
+		.index_offset = &index_offset,
+	};
+
+	if (ummu_reg_cnt * sizeof(u32) > query_cmd_param->out_len) {
+		ubctl_err(ucdev, "ummu reg size is bigger than out len, reg size=%lu, out len=%u.\n",
+			  ummu_reg_cnt * sizeof(u32), query_cmd_param->out_len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ummu_array_cnt; i++) {
+		reg_pro_cmd.reg_index = i;
+		ret = ubctl_ummu_process_reg(&reg_pro_cmd);
+		if (ret) {
+			ubctl_err(ucdev, "ummu process reg failed, ret=%d.\n", ret);
+			return ret;
+		}
+	}
+	query_cmd_param->out->data_size = ummu_reg_cnt * sizeof(u32);
+
+	return 0;
+}
+
+static int ubctl_ummu_proc_all_data(struct ubctl_dev *ucdev, struct resource *res,
+				    struct ubctl_query_cmd_param *query_cmd_param)
+{
+	u32 map_length = UBCTL_UMMU_REGISTER_MAX_ADDR - UBCTL_UMMU_REGISTER_BASE;
+	void __iomem *vaddr;
+	int ret;
+
+	vaddr = ioremap(res->start + UBCTL_UMMU_REGISTER_BASE, map_length);
+	if (!vaddr) {
+		ubctl_err(ucdev, "ioremap ummu reg base failed, map length = %u.\n",
+			  map_length);
+		return -ENOMEM;
+	}
+	ret = ubctl_ummu_copy_data(ucdev, query_cmd_param, vaddr, map_length);
+	iounmap(vaddr);
+
+	return ret;
+}
+
+static int ubctl_ummu_proc_sync_data(struct resource *res,
+				     struct ubctl_query_cmd_param *query_cmd_param,
+				     struct fwctl_pkt_in_ummuid_value *ummu_data,
+				     bool is_query)
+{
+	u32 *out_data = query_cmd_param->out->data;
+	u32 map_length = sizeof(u32);
+	void __iomem *vaddr;
+
+	if (sizeof(u32) > query_cmd_param->out_len)
+		return -EINVAL;
+
+	vaddr = ioremap(res->start + UBCTL_UMMU_SYNC_TIMEOUT_OPEN, map_length);
+	if 
(!vaddr) + return -ENOMEM; + + if (is_query) { + *out_data = readl(vaddr); + } else { + *out_data = ummu_data->value; + writel(*out_data, vaddr); + } + + query_cmd_param->out->data_size = sizeof(u32); + iounmap(vaddr); + + return 0; +} + +static int ubctl_ummu_process_data(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ +#define UMMU_NAME_STR "ummu." + + struct fwctl_pkt_in_ummuid_value *ummu_data; + struct resource *root = &iomem_resource; + struct resource *res; + + if (query_cmd_param->in->data_size != sizeof(*ummu_data)) { + ubctl_err(ucdev, "invalid ummuid value size = %u.\n", + query_cmd_param->in->data_size); + return -EINVAL; + } + + ummu_data = (struct fwctl_pkt_in_ummuid_value *)(query_cmd_param->in->data); + res = ubctl_find_and_sort_resources(ucdev, root, UMMU_NAME_STR, + ummu_data->ummu_id); + if (!res) + return -EINVAL; + + if (query_func->rpc_cmd == UTOOL_CMD_QUERY_UMMU_ALL) + return ubctl_ummu_proc_all_data(ucdev, res, query_cmd_param); + if (query_func->rpc_cmd == UTOOL_CMD_QUERY_UMMU_SYNC) + return ubctl_ummu_proc_sync_data(res, query_cmd_param, ummu_data, true); + if (query_func->rpc_cmd == UTOOL_CMD_CONFIG_UMMU_SYNC) + return ubctl_ummu_proc_sync_data(res, query_cmd_param, ummu_data, false); + + return -EINVAL; +} + static struct ubctl_func_dispatch g_ubctl_query_func[] = { { UTOOL_CMD_QUERY_DL_LINK_TRACE, ubctl_query_dl_trace_data, ubctl_trace_data_deal }, @@ -662,6 +1086,10 @@ static struct ubctl_func_dispatch g_ubctl_query_func[] = { { UTOOL_CMD_QUERY_IO_DIE_PORT_INFO, ubctl_query_iodie_info_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_UMMU_ALL, ubctl_ummu_process_data, NULL }, + { UTOOL_CMD_QUERY_UMMU_SYNC, ubctl_ummu_process_data, NULL }, + { UTOOL_CMD_CONFIG_UMMU_SYNC, ubctl_ummu_process_data, NULL }, + { UTOOL_CMD_QUERY_MAX, NULL, NULL } }; diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index 2855cb8dff00..a8a4e63c42e3 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -106,4 +106,130 @@ #define UBCTL_QUERY_DEBUG_EN 24 +#define UBCTL_UMMU_CTRL0 0x0030 +#define UBCTL_UMMU_CTRL1 0x0038 +#define UBCTL_UMMU_TECT_BASE_CFG 0x0078 +#define UBCTL_UMMU_MCMD_QUE_PI_0 0x0108 +#define UBCTL_UMMU_MCMD_QUE_CI_0 0x010C +#define UBCTL_UMMU_MCMD_QUE_PI_1 0x0118 +#define UBCTL_UMMU_MCMD_QUE_CI_1 0x011C +#define UBCTL_UMMU_MCMD_QUE_PI_2 0x0128 +#define UBCTL_UMMU_MCMD_QUE_CI_2 0x012C +#define UBCTL_UMMU_MCMD_QUE_PI_3 0x0138 +#define UBCTL_UMMU_MCMD_QUE_CI_3 0x013C +#define UBCTL_UMMU_MCMD_QUE_PI_4 0x0148 +#define UBCTL_UMMU_MCMD_QUE_CI_4 0x014C +#define UBCTL_UMMU_MCMD_QUE_PI_5 0x0158 +#define UBCTL_UMMU_MCMD_QUE_CI_5 0x015C +#define UBCTL_UMMU_MCMD_QUE_PI_6 0x0168 +#define UBCTL_UMMU_MCMD_QUE_CI_6 0x016C +#define UBCTL_UMMU_MCMD_QUE_PI_7 0x0178 +#define UBCTL_UMMU_MCMD_QUE_CI_7 0x017C +#define UBCTL_UMMU_EVENT_QUE_PI 0x1108 +#define UBCTL_UMMU_EVENT_QUE_CI 0x110C +#define UBCTL_UMMU_EVENT_QUE_USI_ADDR0 0x1110 +#define UBCTL_UMMU_EVENT_QUE_USI_ADDR1 0x1114 +#define UBCTL_UMMU_GLB_INT_EN 0x1130 +#define UBCTL_UMMU_GLB_ERR_INT_USI_ADDR0 0x1140 +#define UBCTL_UMMU_GLB_ERR_INT_USI_ADDR1 0x1144 +#define UBCTL_UMMU_ERR_STATUS_0 0x2010 +#define UBCTL_UMMU_INT_MASK 0x3404 +#define UBCTL_UMMU_SYNC_TIMEOUT_OPEN 0x3410 +#define UBCTL_UMMU_SYNC_TIMEOUT_INFO 0x3418 +#define UBCTL_UMMU_SKY_QUEUE_STAT3_SP_0_63 0x4558 +#define UBCTL_UMMU_DFX_ECC_MONITOR_0 0x4D18 +#define UBCTL_UMMU_DFX_ECC_MONITOR_1 0x4D1C +#define UBCTL_UMMU_SPEC_DEF_DFX 0x4D60 +#define 
UBCTL_UMMU_DVM_RECEIVE_REQ_CNT 0x4D70 +#define UBCTL_UMMU_DVM_SEND_REQ_CNT 0x4D74 +#define UBCTL_UMMU_DVM_REQ_INFO0 0x4D78 +#define UBCTL_UMMU_DVM_REQ_INFO1 0x4D7C +#define UBCTL_UMMU_PMCG_INT_EN 0x5018 +#define UBCTL_UMMU_CFG_DFX_CFGBUS_STATUS 0x6000 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_SNP_ERR_CNT 0x6200 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_0 0x6204 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_1 0x6208 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_2 0x620C +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_3 0x6210 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_4 0x6214 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_5 0x6218 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_6 0x621C +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_7 0x6220 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_8 0x6224 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_9 0x6228 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_10 0x622C +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_11 0x6230 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_12 0x6234 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_13 0x6238 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_14 0x623C +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_ENTRY_STATUS_15 0x6240 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_SNP_STATUS 0x6280 +#define UBCTL_UMMU_SWIF_EVENTQ_DFX_DROP_CNT 0x6284 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_CTRL_STATUS1 0x6288 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_CMD_CTRL_STATUS2 0x628C +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_KCMD_STATUS 0x6290 +#define UBCTL_UMMU_SWIF_KCMDQ_DFX_KCMD_ERR_STATUS 0x6294 +#define UBCTL_UMMU_SWIF_UMCMD_DFX0 0x6300 +#define UBCTL_UMMU_SWIF_UMCMD_DFX1 0x6304 +#define UBCTL_UMMU_SWIF_UMCMD_DFX2 0x6308 +#define UBCTL_UMMU_SWIF_UMCMD_DFX3 0x630C +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_0 0x6310 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_1 0x6314 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_2 0x6318 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_3 0x631C +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_4 0x6320 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_5 0x6324 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX0_6 0x6328 +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX1 0x632C +#define UBCTL_UMMU_SWIF_UMCMD_RR_WIN_DFX2 0x6330 +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX1 0x6334 +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX2 0x6338 +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX3 0x633C +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX4 0x6340 +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX5 0x6344 +#define UBCTL_UMMU_SWIF_UMCMD_CACHE_DFX6 0x6348 +#define UBCTL_UMMU_UBIF_DFX0 0x6400 +#define UBCTL_UMMU_UBIF_DFX1 0x6404 +#define UBCTL_UMMU_UBIF_DSTEID_DFX 0x640C +#define UBCTL_UMMU_UBIF_SYNC_DFX 0x6410 +#define UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX0 0x641C +#define UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX1 0x6420 +#define UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX2 0x6424 +#define UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX3 0x6428 +#define UBCTL_UMMU_UBIF_KV_CACHE_NS_NSE_MISMATCH_DFX4 0x642C +#define UBCTL_UMMU_TBU_TLB_LKUP_PROC 0x6600 +#define UBCTL_UMMU_TBU_TLB_STAT 0x6604 +#define UBCTL_UMMU_TBU_TLB_FAULT_CNT 0x6608 +#define UBCTL_UMMU_TBU_PLB_LKUP_PROC 0x660C +#define UBCTL_UMMU_TBU_PLB_STAT 0x6610 +#define UBCTL_UMMU_TBU_PLB_FAULT_CNT 0x6614 +#define UBCTL_UMMU_TBU_INVLD_MG_INFO 0x6618 +#define UBCTL_UMMU_TBU_RAB_STAT 0x661C +#define UBCTL_UMMU_TBU_RAB_ENTRY_INFO_0_7_15 0x6624 +#define UBCTL_UMMU_TBU_CNT 0x662C +#define UBCTL_UMMU_DFX_TBU_PERM_ERR_CNT 0x6634 +#define UBCTL_UMMU_TBU_DFX0 0x6638 +#define 
UBCTL_UMMU_TBU_DFX1 0x663C
+#define UBCTL_UMMU_TCU_PTW_QUEUE_STAT_0_47 0x6804
+#define UBCTL_UMMU_TCU_PPTW_QUEUE_STAT_0_39 0x680C
+#define UBCTL_UMMU_GPC_QUEUE_STAT_0_15 0x6814
+#define UBCTL_UMMU_ROOT_GPF_FAR_L 0x10028
+#define UBCTL_UMMU_ROOT_GPF_FAR_H 0x1002C
+
+#define UBCTL_UMMU_GPC_QUEUE_POINTER 0x6810
+#define UBCTL_UMMU_SKY_QUEUE_POINTER_SP 0x4540
+#define UBCTL_UMMU_TCU_PTW_QUEUE_POINTER 0x6800
+#define UBCTL_UMMU_TCU_PPTW_QUEUE_POINTER 0x6808
+#define UBCTL_UMMU_TBU_RAB_FUNC_EN 0x6620
+
+#define UBCTL_UMMU_BANK_NUM 8
+#define UBCTL_UMMU_ENTRY_NUM 16
+#define UBCTL_UMMU_GPC_QUEUE_COUNT 16
+#define UBCTL_UMMU_TCU_PPTW_QUEUE_COUNT 40
+#define UBCTL_UMMU_TCU_PTW_QUEUE_COUNT 48
+#define UBCTL_UMMU_SKY_QUEUE_COUNT 64
+
+#define UBCTL_UMMU_REGISTER_BASE 0
+#define UBCTL_UMMU_REGISTER_MAX_ADDR (UBCTL_UMMU_ROOT_GPF_FAR_H + 4U)
+
 #endif
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index 139a413bf94a..05d7be4d7f8f 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -87,6 +87,10 @@ enum ub_fwctl_cmdrpc_type {
 
 	UTOOL_CMD_QUERY_UBOMMU = 0x0091,
 
+	UTOOL_CMD_QUERY_UMMU_ALL = 0x00A1,
+	UTOOL_CMD_QUERY_UMMU_SYNC = 0x00A2,
+	UTOOL_CMD_CONFIG_UMMU_SYNC = 0x00A3,
+
 	UTOOL_CMD_QUERY_ECC_2B = 0x00B1,
 
 	UTOOL_CMD_QUERY_LOOPBACK = 0x00D1,
@@ -118,4 +122,9 @@ struct fwctl_pkt_in_index {
 	__u32 index;
 };
 
+struct fwctl_pkt_in_ummuid_value {
+	__u32 ummu_id;
+	__u32 value;
+};
+
 #endif
--
Gitee

From 2f3e214d24f1c6c96f01011c2d2493fd179dd038 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng
Date: Wed, 19 Nov 2025 17:11:45 +0800
Subject: [PATCH 039/243] fwctl: Change the CONFIG_FWCTL in the config options
 to m

commit 730e6c76af4ce310e9b3469b85a05b036bb61713 openEuler

Change the CONFIG_FWCTL in the config options to m.

Fixes: aabc3d653349 ("ub: ub_fwctl: Add the ub_fwctl driver and its basic features.")
Signed-off-by: Jiaqi Cheng
Signed-off-by: huwentao
---
 arch/arm64/configs/tencent.config | 2 +-
 arch/x86/configs/tencent.config   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config
index d122f864bc43..9601389859b6 100644
--- a/arch/arm64/configs/tencent.config
+++ b/arch/arm64/configs/tencent.config
@@ -1865,7 +1865,7 @@ CONFIG_VFIO_DEVICE_CDEV=y
 # end of IOMMUFD
 
 # fwctl
-CONFIG_FWCTL=y
+CONFIG_FWCTL=m
 
 # UB_FWCTL
 CONFIG_FWCTL_UB=m
diff --git a/arch/x86/configs/tencent.config b/arch/x86/configs/tencent.config
index 126020c405a7..2444a388a8c1 100644
--- a/arch/x86/configs/tencent.config
+++ b/arch/x86/configs/tencent.config
@@ -2028,4 +2028,4 @@ CONFIG_TEST_BPF=m
 CONFIG_EXT4_FS=y
 
 # fwctl
-CONFIG_FWCTL=y
+CONFIG_FWCTL=m
--
Gitee

From 959cf5420cdb2fa32efb1cf90afa4be9ad49d7b2 Mon Sep 17 00:00:00 2001
From: Jiaqi Cheng
Date: Wed, 19 Nov 2025 17:26:41 +0800
Subject: [PATCH 040/243] ub: ub_fwctl: Release rpc_out when kernel state
 returns an error

commit d222718375d8c0deb1c74d3152c3662c0d49907d openEuler

ub_fwctl encountered an error while executing in kernel mode and did
not release rpc_out before returning to fwctl. Fix this issue now.
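For reference, the corrected flow follows the usual kernel convention
for functions that return a pointer: free the buffer locally on
failure and hand the error back via ERR_PTR(). A minimal sketch of the
pattern follows; do_operation() is a hypothetical helper standing in
for the RPC call, not part of the driver:

  #include <linux/err.h>
  #include <linux/slab.h>

  static void *run_and_return(size_t len)
  {
  	void *buf = kvzalloc(len, GFP_KERNEL);
  	int ret;

  	if (!buf)
  		return ERR_PTR(-ENOMEM);

  	ret = do_operation(buf, len);	/* hypothetical operation */
  	if (ret) {
  		kvfree(buf);		/* do not leak buf on failure */
  		return ERR_PTR(ret);	/* caller checks with IS_ERR() */
  	}

  	return buf;
  }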
Fixes: aabc3d653349 ("ub: ub_fwctl: Add the ub_fwctl driver and its basic features.") Signed-off-by: Jiaqi Cheng Signed-off-by: huwentao --- drivers/fwctl/ub/main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/fwctl/ub/main.c b/drivers/fwctl/ub/main.c index e96ccf5afa55..6b1f619dc0a4 100644 --- a/drivers/fwctl/ub/main.c +++ b/drivers/fwctl/ub/main.c @@ -147,6 +147,11 @@ static void *ubctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, ubctl_dbg(ucdev, "cmdif: opcode 0x%x retval %d\n", opcode, ret); + if (ret) { + kvfree(rpc_out); + return ERR_PTR(ret); + } + return rpc_out; } -- Gitee From 4fc9fd4158af64bce2683eb7859ba88618da404c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 31 Oct 2025 15:33:14 +0800 Subject: [PATCH 041/243] ub: udma: Support loading and unloading driver. commit 05c3cda9c33a875b228a3f70b9f18f4346b68461 openEuler This patch adds the function of loading and unloading the driver. In driver loading process, udma queries the required hardware information from ubase and stores it in the internal structure of the driver. At the same time, udma also provides ubcore with ops for querying device status and create rc table. This patch also adds the function of mailbox and software table for udma driver. Signed-off-by: Wei Qin Signed-off-by: zhaoweibo Signed-off-by: zhaolichang <943677312@qq.com> --- arch/arm64/configs/tencent.config | 3 + drivers/ub/Kconfig | 1 + drivers/ub/Makefile | 1 + drivers/ub/urma/hw/udma/Kconfig | 12 + drivers/ub/urma/hw/udma/Makefile | 6 + drivers/ub/urma/hw/udma/udma_cmd.c | 208 ++++++ drivers/ub/urma/hw/udma/udma_cmd.h | 241 +++++++ drivers/ub/urma/hw/udma/udma_common.c | 125 ++++ drivers/ub/urma/hw/udma/udma_common.h | 28 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 16 + drivers/ub/urma/hw/udma/udma_def.h | 97 +++ drivers/ub/urma/hw/udma/udma_dev.h | 147 ++++ drivers/ub/urma/hw/udma/udma_main.c | 892 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_rct.c | 207 ++++++ drivers/ub/urma/hw/udma/udma_rct.h | 57 ++ 15 files changed, 2041 insertions(+) create mode 100644 drivers/ub/urma/hw/udma/Kconfig create mode 100644 drivers/ub/urma/hw/udma/Makefile create mode 100644 drivers/ub/urma/hw/udma/udma_cmd.c create mode 100644 drivers/ub/urma/hw/udma/udma_cmd.h create mode 100644 drivers/ub/urma/hw/udma/udma_common.c create mode 100644 drivers/ub/urma/hw/udma/udma_common.h create mode 100644 drivers/ub/urma/hw/udma/udma_ctrlq_tp.h create mode 100644 drivers/ub/urma/hw/udma/udma_def.h create mode 100644 drivers/ub/urma/hw/udma/udma_dev.h create mode 100644 drivers/ub/urma/hw/udma/udma_main.c create mode 100644 drivers/ub/urma/hw/udma/udma_rct.c create mode 100644 drivers/ub/urma/hw/udma/udma_rct.h diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 9601389859b6..7ca1fa88f877 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1837,6 +1837,9 @@ CONFIG_UB_UNIC_DCB=y # UB CDMA driver CONFIG_UB_CDMA=m + +# UB UDMA driver +CONFIG_UB_UDMA=m # end of unified bus # UMMU diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig index 6197483bd71e..5aaa3bcc014a 100644 --- a/drivers/ub/Kconfig +++ b/drivers/ub/Kconfig @@ -19,6 +19,7 @@ source "drivers/ub/ubase/Kconfig" source "drivers/ub/cdma/Kconfig" source "drivers/ub/obmm/Kconfig" source "drivers/ub/sentry/Kconfig" +source "drivers/ub/urma/hw/udma/Kconfig" config UB_URMA tristate "Unified Bus (UB) urma support" default m diff --git a/drivers/ub/Makefile b/drivers/ub/Makefile index 2a40689dafac..1725f006d197 100644 
--- a/drivers/ub/Makefile +++ b/drivers/ub/Makefile @@ -5,5 +5,6 @@ obj-y += ubfi/ obj-$(CONFIG_UB_URMA) += urma/ obj-$(CONFIG_UB_UBASE) += ubase/ obj-$(CONFIG_UB_CDMA) += cdma/ +obj-$(CONFIG_UB_UDMA) += urma/hw/udma/ obj-y += obmm/ obj-$(CONFIG_UB_SENTRY) += sentry/ diff --git a/drivers/ub/urma/hw/udma/Kconfig b/drivers/ub/urma/hw/udma/Kconfig new file mode 100644 index 000000000000..fd5d27ef9813 --- /dev/null +++ b/drivers/ub/urma/hw/udma/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. + +menuconfig UB_UDMA + default n + tristate "UB UDMA Driver" + depends on UB_UBASE && UB_URMA && UB_UMMU_CORE + help + UDMA driver support for Hisilicon UBUS engine + in Hisilicon SoC. To compile this driver, + choose Y here: if UB_UDMA is m, this module + will be called udma. diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile new file mode 100644 index 000000000000..2cd71b916ec9 --- /dev/null +++ b/drivers/ub/urma/hw/udma/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0+ + +udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o \ + udma_rct.o + +obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_cmd.c b/drivers/ub/urma/hw/udma/udma_cmd.c new file mode 100644 index 000000000000..244646f86a42 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_cmd.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include "udma_cmd.h" + +bool debug_switch = true; + +int udma_cmd_init(struct udma_dev *udma_dev) +{ + sema_init(&udma_dev->mb_cmd.poll_sem, 1); + udma_dev->mb_cmd.pool = dma_pool_create("udma_cmd", udma_dev->dev, + UDMA_MAILBOX_SIZE, + UDMA_MAILBOX_SIZE, 0); + if (!udma_dev->mb_cmd.pool) { + dev_err(udma_dev->dev, "failed to dma_pool_create.\n"); + return -ENOMEM; + } + + init_rwsem(&udma_dev->mb_cmd.udma_mb_rwsem); + + return 0; +} + +void udma_cmd_cleanup(struct udma_dev *udma_dev) +{ + down_write(&udma_dev->mb_cmd.udma_mb_rwsem); + dma_pool_destroy(udma_dev->mb_cmd.pool); + up_write(&udma_dev->mb_cmd.udma_mb_rwsem); +} + +struct ubase_cmd_mailbox *udma_alloc_cmd_mailbox(struct udma_dev *dev) +{ + struct ubase_cmd_mailbox *mailbox; + + mailbox = kzalloc(sizeof(*mailbox), GFP_KERNEL); + if (!mailbox) + goto failed_alloc_mailbox; + + down_read(&dev->mb_cmd.udma_mb_rwsem); + mailbox->buf = dma_pool_zalloc(dev->mb_cmd.pool, GFP_KERNEL, + &mailbox->dma); + if (!mailbox->buf) { + dev_err(dev->dev, "failed to alloc buffer of mailbox.\n"); + goto failed_alloc_mailbox_buf; + } + + return mailbox; + +failed_alloc_mailbox_buf: + up_read(&dev->mb_cmd.udma_mb_rwsem); + kfree(mailbox); +failed_alloc_mailbox: + return NULL; +} + +void udma_free_cmd_mailbox(struct udma_dev *dev, + struct ubase_cmd_mailbox *mailbox) +{ + if (!mailbox) { + dev_err(dev->dev, "Invalid mailbox.\n"); + return; + } + + dma_pool_free(dev->mb_cmd.pool, mailbox->buf, mailbox->dma); + up_read(&dev->mb_cmd.udma_mb_rwsem); + kfree(mailbox); +} + +static bool udma_op_ignore_eagain(uint8_t op, void *buf) +{ + struct udma_mbx_op_match matches[] = { + { UDMA_CMD_CREATE_JFS_CONTEXT, false }, + { UDMA_CMD_MODIFY_JFS_CONTEXT, true }, + { UDMA_CMD_DESTROY_JFS_CONTEXT, true }, + { UDMA_CMD_QUERY_JFS_CONTEXT, true }, + { UDMA_CMD_CREATE_JFC_CONTEXT, false }, + { UDMA_CMD_MODIFY_JFC_CONTEXT, true }, + { UDMA_CMD_DESTROY_JFC_CONTEXT, true }, + { UDMA_CMD_QUERY_JFC_CONTEXT, 
true }, + { UDMA_CMD_CREATE_JFR_CONTEXT, false }, + { UDMA_CMD_MODIFY_JFR_CONTEXT, true }, + { UDMA_CMD_DESTROY_JFR_CONTEXT, true }, + { UDMA_CMD_QUERY_JFR_CONTEXT, true }, + { UDMA_CMD_QUERY_TP_CONTEXT, true }, + { UDMA_CMD_CREATE_JETTY_GROUP_CONTEXT, false }, + { UDMA_CMD_MODIFY_JETTY_GROUP_CONTEXT, true }, + { UDMA_CMD_DESTROY_JETTY_GROUP_CONTEXT, true }, + { UDMA_CMD_QUERY_JETTY_GROUP_CONTEXT, true }, + { UDMA_CMD_CREATE_RC_CONTEXT, false }, + { UDMA_CMD_MODIFY_RC_CONTEXT, true }, + { UDMA_CMD_DESTROY_RC_CONTEXT, true }, + { UDMA_CMD_QUERY_RC_CONTEXT, true }, + { UDMA_CMD_READ_SEID_UPI, true }, + }; + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(matches); i++) { + if (op == matches[i].op) + return matches[i].ignore_ret; + } + + return false; +} + +int udma_post_mbox(struct udma_dev *dev, struct ubase_cmd_mailbox *mailbox, + struct ubase_mbx_attr *attr) +{ + int ret; + + if (debug_switch) + dev_info_ratelimited(dev->dev, + "Send cmd mailbox, data: %08x %04x%04x.\n", + attr->tag, attr->op, attr->mbx_ue_id); + + ret = ubase_hw_upgrade_ctx_ex(dev->comdev.adev, attr, mailbox); + + return (ret == -EAGAIN && + udma_op_ignore_eagain(attr->op, mailbox->buf)) ? 0 : ret; +} + +int udma_config_ctx_buf_to_hw(struct udma_dev *udma_dev, + struct udma_buf *ctx_buf, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox mailbox; + int ret; + + mailbox.dma = ctx_buf->addr; + ret = udma_post_mbox(udma_dev, &mailbox, attr); + if (ret) + dev_err(udma_dev->dev, + "failed to config ctx_buf to hw, ret = %d.\n", ret); + + return ret; +} + +int udma_cmd_query_hw_resource(struct udma_dev *udma_dev, void *out_addr) +{ + struct ubase_cmd_buf out = {}; + struct ubase_cmd_buf in = {}; + + udma_fill_buf(&in, UDMA_CMD_QUERY_UE_RES, true, 0, NULL); + udma_fill_buf(&out, UDMA_CMD_QUERY_UE_RES, true, + sizeof(struct udma_cmd_ue_resource), out_addr); + + return ubase_cmd_send_inout(udma_dev->comdev.adev, &in, &out); +} + +int post_mailbox_update_ctx(struct udma_dev *udma_dev, void *ctx, uint32_t size, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(udma_dev); + if (!mailbox) { + dev_err(udma_dev->dev, + "failed to alloc mailbox for opcode 0x%x.\n", attr->op); + return -ENOMEM; + } + + if (ctx) + memcpy(mailbox->buf, ctx, size); + + ret = udma_post_mbox(udma_dev, mailbox, attr); + if (ret) + dev_err(udma_dev->dev, + "failed to post mailbox, opcode = 0x%x, ret = %d.\n", attr->op, + ret); + + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +struct ubase_cmd_mailbox *udma_mailbox_query_ctx(struct udma_dev *udma_dev, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(udma_dev); + if (!mailbox) { + dev_err(udma_dev->dev, + "failed to alloc mailbox query ctx, opcode = %u, id = %u.\n", + attr->op, attr->tag); + return NULL; + } + + ret = udma_post_mbox(udma_dev, mailbox, attr); + if (ret) { + dev_err(udma_dev->dev, + "failed to post mailbox query ctx, opcode = %u, id = %u, ret = %d.\n", + attr->op, attr->tag, ret); + udma_free_cmd_mailbox(udma_dev, mailbox); + return NULL; + } + + return mailbox; +} + +module_param(debug_switch, bool, 0444); +MODULE_PARM_DESC(debug_switch, "set debug print ON, default: true"); diff --git a/drivers/ub/urma/hw/udma/udma_cmd.h b/drivers/ub/urma/hw/udma/udma_cmd.h new file mode 100644 index 000000000000..3dd27765fb56 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_cmd.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 
2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_CMD_H__ +#define __UDMA_CMD_H__ + +#include +#include +#include +#include "udma_dev.h" + +extern bool debug_switch; + +#define UDMA_MAILBOX_SIZE 4096 + +#define SPEED_200G 200000 +#define SPEED_400G 400000 +#define SPEED_100G 100000 +#define SPEED_50G 50000 +#define SPEED_25G 25000 + +struct udma_ctrlq_eid_info { + uint32_t eid_idx; + union ubcore_eid eid; + uint32_t upi; +} __packed; + +enum udma_cmd_opcode_type { + UDMA_CMD_QUERY_UE_RES = 0x0002, + UDMA_CMD_QUERY_UE_INDEX = 0x241d, + UDMA_CMD_CFG_CONG_PARAM = 0x3003, + UDMA_CMD_CHANGE_ACTIVE_PORT = 0x3102, + UDMA_CMD_DEBUGFS_TP_INFO = 0x3110, + UDMA_CMD_DEBUGFS_TA_INFO = 0x4210, + UDMA_CMD_GET_CQE_AUX_INFO = 0x4213, + UDMA_CMD_GET_AE_AUX_INFO = 0x4214, + UDMA_CMD_QUERY_PORT_INFO = 0x6200, + UDMA_CMD_WQEBB_VA_INFO = 0xa01f, +}; + +struct udma_cmd { + uint32_t opcode; + void *in_buf; + uint32_t in_len; + void *out_buf; + uint32_t out_len; +}; + +enum { + /* JFS CMDS */ + UDMA_CMD_WRIET_JFS_CONTEXT_VA = 0x00, + UDMA_CMD_READ_JFS_CONTEXT_VA = 0x01, + UDMA_CMD_DESTROY_JFS_CONTEXT_VA = 0x02, + UDMA_CMD_CREATE_JFS_CONTEXT = 0x04, + UDMA_CMD_MODIFY_JFS_CONTEXT = 0x05, + UDMA_CMD_QUERY_JFS_CONTEXT = 0x06, + UDMA_CMD_DESTROY_JFS_CONTEXT = 0x07, + + /* RC CMDS */ + UDMA_CMD_WRITE_RC_CONTEXT_VA = 0x10, + UDMA_CMD_READ_RC_CONTEXT_VA = 0x11, + UDMA_CMD_DESTROY_RC_CONTEXT_VA = 0x12, + UDMA_CMD_CREATE_RC_CONTEXT = 0x14, + UDMA_CMD_MODIFY_RC_CONTEXT = 0x15, + UDMA_CMD_QUERY_RC_CONTEXT = 0X16, + UDMA_CMD_DESTROY_RC_CONTEXT = 0x17, + + /* JFC CMDS */ + UDMA_CMD_WRITE_JFC_CONTEXT_VA = 0x20, + UDMA_CMD_READ_JFC_CONTEXT_VA = 0x21, + UDMA_CMD_DESTROY_JFC_CONTEXT_VA = 0x22, + UDMA_CMD_CREATE_JFC_CONTEXT = 0x24, + UDMA_CMD_MODIFY_JFC_CONTEXT = 0x25, + UDMA_CMD_QUERY_JFC_CONTEXT = 0x26, + UDMA_CMD_DESTROY_JFC_CONTEXT = 0x27, + + /* CEQ CMDS */ + UDMA_CMD_CREATE_CEQ_CONTEXT = 0x44, + UDMA_CMD_MODIFY_CEQ_CONTEXT = 0x45, + UDMA_CMD_QUERY_CEQ_CONTEXT = 0x46, + UDMA_CMD_DESTROY_CEQ_CONTEXT = 0x47, + + /* JFR CMDS */ + UDMA_CMD_WRITE_JFR_CONTEXT_VA = 0x50, + UDMA_CMD_READ_JFR_CONTEXT_VA = 0x51, + UDMA_CMD_DESTROY_JFR_CONTEXT_VA = 0x52, + UDMA_CMD_CREATE_JFR_CONTEXT = 0x54, + UDMA_CMD_MODIFY_JFR_CONTEXT = 0x55, + UDMA_CMD_QUERY_JFR_CONTEXT = 0x56, + UDMA_CMD_DESTROY_JFR_CONTEXT = 0x57, + + /* JETTY CMDS */ + UDMA_CMD_WRITE_JETTY_GROUP_CONTEXT_VA = 0x60, + UDMA_CMD_READ_JETTY_GROUP_CONTEXT_VA = 0x61, + UDMA_CMD_DESTROY_JETTY_GROUP_CONTEXT_VA = 0x62, + UDMA_CMD_CREATE_JETTY_GROUP_CONTEXT = 0x64, + UDMA_CMD_MODIFY_JETTY_GROUP_CONTEXT = 0x65, + UDMA_CMD_QUERY_JETTY_GROUP_CONTEXT = 0x66, + UDMA_CMD_DESTROY_JETTY_GROUP_CONTEXT = 0x67, + + /* TP CMDS */ + UDMA_CMD_QUERY_TP_CONTEXT = 0x86, + + /* SEID_UPI CMDS */ + UDMA_CMD_READ_SEID_UPI = 0xb5, +}; + +struct udma_mbx_op_match { + uint32_t op; + bool ignore_ret; + uint32_t entry_size; +}; + +struct cap_info { + uint16_t ar_en : 1; + uint16_t jfc_per_wr : 1; + uint16_t stride_up : 1; + uint16_t load_store_op : 1; + uint16_t jfc_inline : 1; + uint16_t non_pin : 1; + uint16_t selective_retrans : 1; + uint16_t rsvd : 9; + uint16_t rsvd1; +}; + +struct udma_cmd_ue_resource { + /* BD0 */ + uint16_t jfs_num_shift : 4; + uint16_t jfr_num_shift : 4; + uint16_t jfc_num_shift : 4; + uint16_t jetty_num_shift : 4; + + uint16_t jetty_grp_num; + + uint16_t jfs_depth_shift : 4; + uint16_t jfr_depth_shift : 4; + uint16_t jfc_depth_shift : 4; + uint16_t cqe_size_shift : 4; + + uint16_t jfs_sge : 5; + uint16_t jfr_sge : 5; + uint16_t jfs_rsge : 6; + + uint16_t 
max_jfs_inline_sz; + uint16_t max_jfc_inline_sz; + uint32_t cap_info; + + uint16_t trans_mode : 5; + uint16_t ue_num : 8; + uint16_t virtualization : 1; + uint16_t dcqcn_sw_en : 1; + uint16_t rsvd0 : 1; + + uint16_t ue_cnt; + uint8_t ue_id; + uint8_t default_cong_alg; + uint8_t cons_ctrl_alg; + uint8_t cc_priority_cnt; + + /* BD1 */ + uint16_t src_addr_tbl_sz; + uint16_t src_addr_tbl_num; + uint16_t dest_addr_tbl_sz; + uint16_t dest_addr_tbl_num; + uint16_t seid_upi_tbl_sz; + uint16_t seid_upi_tbl_num; + uint16_t tpm_tbl_sz; + uint16_t tpm_tbl_num; + uint32_t tp_range; + uint8_t port_num; + uint8_t port_id; + uint8_t rsvd1[2]; + uint16_t rc_queue_num; + uint16_t rc_depth; + uint8_t rc_entry; + uint8_t rsvd2[3]; + + /* BD2 */ + uint16_t well_known_jetty_start; + uint16_t well_known_jetty_num; + uint16_t ccu_jetty_start; + uint16_t ccu_jetty_num; + uint16_t drv_jetty_start; + uint16_t drv_jetty_num; + uint16_t cache_lock_jetty_start; + uint16_t cache_lock_jetty_num; + uint16_t normal_jetty_start; + uint16_t normal_jetty_num; + uint16_t standard_jetty_start; + uint16_t standard_jetty_num; + uint32_t rsvd3[2]; + + /* BD3 */ + uint32_t max_write_size; + uint32_t max_read_size; + uint32_t max_cas_size; + uint32_t max_fetch_and_add_size; + uint32_t atomic_feat; + uint32_t rsvd4[3]; +}; + +struct udma_cmd_port_info { + uint32_t speed; + uint8_t rsv[10]; + uint8_t lanes; + uint8_t rsv2[9]; +}; + +struct udma_cmd_wqebb_va { + uint64_t va_start; + uint64_t va_size; + uint32_t die_num; + uint32_t ue_num; +}; + +static inline void udma_fill_buf(struct ubase_cmd_buf *buf, u16 opcode, + bool is_read, u32 data_size, void *data) +{ + buf->opcode = opcode; + buf->is_read = is_read; + buf->data_size = data_size; + buf->data = data; +} + +int udma_cmd_init(struct udma_dev *udma_dev); +void udma_cmd_cleanup(struct udma_dev *udma_dev); +struct ubase_cmd_mailbox *udma_alloc_cmd_mailbox(struct udma_dev *dev); +void udma_free_cmd_mailbox(struct udma_dev *dev, + struct ubase_cmd_mailbox *mailbox); +int udma_post_mbox(struct udma_dev *dev, struct ubase_cmd_mailbox *mailbox, + struct ubase_mbx_attr *attr); +int udma_cmd_query_hw_resource(struct udma_dev *udma_dev, void *out_addr); +int udma_config_ctx_buf_to_hw(struct udma_dev *udma_dev, + struct udma_buf *ctx_buf, + struct ubase_mbx_attr *attr); +int post_mailbox_update_ctx(struct udma_dev *udma_dev, void *ctx, uint32_t size, + struct ubase_mbx_attr *attr); +struct ubase_cmd_mailbox *udma_mailbox_query_ctx(struct udma_dev *udma_dev, + struct ubase_mbx_attr *attr); + +#endif /* __UDMA_CMD_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c new file mode 100644 index 000000000000..cb8e6b6f4e90 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "udma_dev.h" +#include "udma_cmd.h" +#include "udma_common.h" + +static void udma_init_ida_table(struct udma_ida *ida_table, uint32_t max, uint32_t min) +{ + ida_init(&ida_table->ida); + spin_lock_init(&ida_table->lock); + ida_table->max = max; + ida_table->min = min; + ida_table->next = min; +} + +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min) +{ + udma_init_ida_table(&table->ida_table, max, min); + xa_init(&table->xa); +} + +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex) +{ + xa_init(table); + mutex_init(udma_mutex); +} + +void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, + const char *table_name) +{ + if (!ida_is_empty(&table->ida_table.ida)) + dev_err(dev->dev, "IDA not empty in clean up %s table.\n", + table_name); + ida_destroy(&table->ida_table.ida); + + if (!xa_empty(&table->xa)) + dev_err(dev->dev, "%s not empty.\n", table_name); + xa_destroy(&table->xa); +} + +static void udma_clear_eid_table(struct udma_dev *udma_dev) +{ + struct udma_ctrlq_eid_info *eid_entry = NULL; + unsigned long index = 0; + eid_t ummu_eid = 0; + guid_t guid = {}; + + if (!xa_empty(&udma_dev->eid_table)) { + xa_for_each(&udma_dev->eid_table, index, eid_entry) { + xa_erase(&udma_dev->eid_table, index); + if (!udma_dev->is_ue) { + (void)memcpy(&ummu_eid, eid_entry->eid.raw, sizeof(ummu_eid)); + ummu_core_del_eid(&guid, ummu_eid, EID_NONE); + } + kfree(eid_entry); + eid_entry = NULL; + } + } +} + +void udma_destroy_eid_table(struct udma_dev *udma_dev) +{ + udma_clear_eid_table(udma_dev); + xa_destroy(&udma_dev->eid_table); + mutex_destroy(&udma_dev->eid_mutex); +} + +void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr) +{ + struct iova_slot *slot; + uint32_t npage; + size_t sizep; + int ret; + + slot = dma_alloc_iova(udma_dev->dev, memory_size, 0, addr, &sizep); + if (IS_ERR_OR_NULL(slot)) { + dev_err(udma_dev->dev, + "failed to dma alloc iova, size = %lu, ret = %ld.\n", + memory_size, PTR_ERR(slot)); + return NULL; + } + + npage = sizep >> PAGE_SHIFT; + ret = ummu_fill_pages(slot, *addr, npage); + if (ret) { + dev_err(udma_dev->dev, + "ummu fill pages failed, npage = %u, ret = %d", npage, ret); + dma_free_iova(slot); + return NULL; + } + + return (void *)slot; +} + +void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, + dma_addr_t addr) +{ + size_t aligned_memory_size; + struct iova_slot *slot; + uint32_t npage; + int ret; + + aligned_memory_size = PAGE_ALIGN(memory_size); + npage = aligned_memory_size >> PAGE_SHIFT; + slot = (struct iova_slot *)kva_or_slot; + ret = ummu_drain_pages(slot, addr, npage); + if (ret) + dev_err(udma_dev->dev, + "ummu drain pages failed, npage = %u, ret = %d.\n", + npage, ret); + + dma_free_iova(slot); +} diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h new file mode 100644 index 000000000000..4f843356c755 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef __UDMA_COMM_H__ +#define __UDMA_COMM_H__ + +#include +#include +#include "udma_dev.h" + +struct udma_umem_param { + struct ubcore_device *ub_dev; + uint64_t va; + uint64_t len; + union ubcore_umem_flag flag; + bool is_kernel; +}; + +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); +void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, + const char *table_name); +void udma_destroy_eid_table(struct udma_dev *udma_dev); +void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); +void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, + dma_addr_t addr); + +#endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h new file mode 100644 index 000000000000..93898a153a98 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_CTRLQ_TP_H__ +#define __UDMA_CTRLQ_TP_H__ + +#include "udma_common.h" + +#define UDMA_UE_NUM 64 + +struct udma_ue_idx_table { + uint32_t num; + uint8_t ue_idx[UDMA_UE_NUM]; +}; + +#endif /* __UDMA_CTRLQ_TP_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h new file mode 100644 index 000000000000..14d747c3fb8f --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_DEF_H__ +#define __UDMA_DEF_H__ + +#include +#include +#include +#include + +enum { + UDMA_CAP_FEATURE_AR = BIT(0), + UDMA_CAP_FEATURE_JFC_INLINE = BIT(4), + UDMA_CAP_FEATURE_DIRECT_WQE = BIT(11), + UDMA_CAP_FEATURE_CONG_CTRL = BIT(16), + UDMA_CAP_FEATURE_REDUCE = BIT(17), + UDMA_CAP_FEATURE_UE_RX_CLOSE = BIT(18), + UDMA_CAP_FEATURE_RNR_RETRY = BIT(19), +}; + +struct udma_res { + uint32_t max_cnt; + uint32_t start_idx; + uint32_t next_idx; + uint32_t depth; +}; + +struct udma_tbl { + uint32_t max_cnt; + uint32_t size; +}; + +struct udma_caps { + unsigned long init_flag; + struct udma_res jfs; + struct udma_res jfr; + struct udma_res jfc; + struct udma_res jetty; + struct udma_res jetty_grp; + uint32_t jetty_in_grp; + uint32_t jfs_sge; + uint32_t jfr_sge; + uint32_t jfs_rsge; + uint32_t jfs_inline_sz; + uint32_t comp_vector_cnt; + uint16_t ue_cnt; + uint8_t ue_id; + uint32_t trans_mode; + uint32_t max_msg_len; + uint32_t feature; + uint32_t rsvd_jetty_cnt; + uint32_t max_read_size; + uint32_t max_write_size; + uint32_t max_cas_size; + uint32_t max_fetch_and_add_size; + uint32_t atomic_feat; + struct udma_res ccu_jetty; + struct udma_res hdc_jetty; + struct udma_res stars_jetty; + struct udma_res public_jetty; + struct udma_res user_ctrl_normal_jetty; + uint16_t rc_queue_num; + uint16_t rc_queue_depth; + uint8_t rc_entry_size; + uint8_t ack_queue_num; + uint8_t port_num; + uint8_t cqe_size; + struct udma_tbl seid; +}; + +struct udma_buf { + dma_addr_t addr; + union { + void *kva; /* used for kernel mode */ + struct iova_slot *slot; + void *kva_or_slot; + }; + void *aligned_va; + struct ubcore_umem *umem; + uint32_t entry_size; + uint32_t entry_cnt; + uint32_t cnt_per_page_shift; + struct xarray id_table_xa; + struct mutex id_table_mutex; +}; + +enum num_elem_in_grp { + NUM_TP_PER_GROUP = 16, + 
NUM_JETTY_PER_GROUP = 32, +}; + +enum { + RCT_INIT_FLAG, +}; + +#endif /* __UDMA_DEF_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h new file mode 100644 index 000000000000..941dd2a0540e --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_DEV_H__ +#define __UDMA_DEV_H__ + +#include +#include +#include +#include "udma_def.h" + +#define UBCORE_MAX_DEV_NAME 64 + +#define MAX_JETTY_IN_JETTY_GRP 32 + +#define MAX_WQEBB_IN_SQE 4 + +#define UDMA_HW_PAGE_SHIFT 12 +#define UDMA_HW_PAGE_SIZE (1 << UDMA_HW_PAGE_SHIFT) + +#define UDMA_DEV_UE_NUM 47 + +#define UDMA_MAX_SL_NUM 16 +#define UDMA_DEFAULT_SL_NUM 0 + +#define UDMA_CQE_SIZE 64 + +#define UDMA_MAX_GRANT_SIZE 0xFFFFFFFFF000 + +enum udma_status { + UDMA_NORMAL, + UDMA_SUSPEND, +}; + +struct udma_ida { + struct ida ida; + uint32_t min; /* Lowest ID to allocate. */ + uint32_t max; /* Highest ID to allocate. */ + uint32_t next; /* Next ID to allocate. */ + spinlock_t lock; +}; + +struct udma_group_bitmap { + uint32_t min; + uint32_t max; + uint32_t grp_next; + uint32_t n_bits; + uint32_t *bit; + uint32_t bitmap_cnt; + spinlock_t lock; +}; + +struct udma_group_table { + struct xarray xa; + struct udma_group_bitmap bitmap_table; +}; + +struct udma_table { + struct xarray xa; + struct udma_ida ida_table; +}; + +struct udma_mailbox_cmd { + struct dma_pool *pool; + struct semaphore poll_sem; + struct rw_semaphore udma_mb_rwsem; +}; + +struct udma_dev { + struct ubase_adev_com comdev; + struct ubcore_device ub_dev; + struct device *dev; + struct udma_caps caps; + uint16_t adev_id; + uint32_t chip_id; + uint32_t die_id; + uint32_t port_id; + uint32_t port_logic_id; + bool is_ue; + char dev_name[UBCORE_MAX_DEV_NAME]; + struct udma_mailbox_cmd mb_cmd; + struct udma_table jfr_table; + struct udma_group_table jetty_table; + struct udma_table jfc_table; + struct udma_table jetty_grp_table; + struct udma_ida rsvd_jetty_ida_table; + struct udma_table rc_table; + struct xarray crq_nb_table; + struct xarray npu_nb_table; + struct mutex npu_nb_mutex; + struct xarray tpn_ue_idx_table; + resource_size_t db_base; + void __iomem *k_db_base; + struct xarray ksva_table; + struct mutex ksva_mutex; + struct xarray eid_table; + struct mutex eid_mutex; + uint32_t tid; + struct iommu_sva *ksva; + uint32_t status; + uint32_t ue_num; + uint32_t ue_id; + struct page *db_page; + u8 udma_tp_sl_num; + u8 udma_ctp_sl_num; + u8 unic_sl_num; + u8 udma_total_sl_num; + u8 udma_tp_resp_vl_off; + u8 udma_tp_sl[UDMA_MAX_SL_NUM]; + u8 udma_ctp_sl[UDMA_MAX_SL_NUM]; + u8 unic_sl[UDMA_MAX_SL_NUM]; + u8 udma_sl[UDMA_MAX_SL_NUM]; + int disable_ue_rx_count; + struct mutex disable_ue_rx_mutex; +}; + +#define UDMA_ERR_MSG_LEN 128 +struct udma_func_map { + char err_msg[UDMA_ERR_MSG_LEN]; + int (*init_func)(struct udma_dev *udma_dev); + void (*uninit_func)(struct udma_dev *udma_dev); +}; + +static inline struct udma_dev *get_udma_dev(struct auxiliary_device *adev) +{ + return (struct udma_dev *)dev_get_drvdata(&adev->dev); +} + +static inline struct udma_dev *to_udma_dev(struct ubcore_device *ub_device) +{ + return container_of(ub_device, struct udma_dev, ub_dev); +} + +static inline void udma_id_free(struct udma_ida *ida_table, int idx) +{ + ida_free(&ida_table->ida, idx); +} + +void udma_destroy_tables(struct udma_dev *udma_dev); +int udma_init_tables(struct udma_dev *udma_dev); +int 
udma_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id); +void udma_remove(struct auxiliary_device *adev); +void udma_reset_init(struct auxiliary_device *adev); +void udma_reset_uninit(struct auxiliary_device *adev); +void udma_reset_down(struct auxiliary_device *adev); + +#endif /* __UDMA_DEV_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c new file mode 100644 index 000000000000..0224b6d248d0 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -0,0 +1,892 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "udma_dev.h" +#include "udma_cmd.h" +#include "udma_rct.h" +#include "udma_common.h" +#include "udma_ctrlq_tp.h" + +bool is_rmmod; +static DEFINE_MUTEX(udma_reset_mutex); + +static const struct auxiliary_device_id udma_id_table[] = { + { + .name = UBASE_ADEV_NAME ".udma", + }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, udma_id_table); + +static int udma_set_eth_device_speed(struct ubcore_device *dev, + struct ubcore_device_status *dev_status, uint32_t speed) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + + switch (speed) { + case SPEED_400G: + dev_status->port_status[0].active_speed = UBCORE_SP_400G; + break; + case SPEED_200G: + dev_status->port_status[0].active_speed = UBCORE_SP_200G; + break; + case SPEED_100G: + dev_status->port_status[0].active_speed = UBCORE_SP_100G; + break; + case SPEED_50G: + dev_status->port_status[0].active_speed = UBCORE_SP_50G; + break; + case SPEED_25G: + dev_status->port_status[0].active_speed = UBCORE_SP_25G; + break; + default: + dev_err(udma_dev->dev, "invalid port speed(%u) in UBOE mode.\n", speed); + return -EINVAL; + } + + return 0; +} + +static int udma_query_device_status(struct ubcore_device *dev, + struct ubcore_device_status *dev_status) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct udma_cmd_port_info port_info; + struct ubase_cmd_buf in, out; + int ret; + + dev_status->port_status[0].state = UBCORE_PORT_ACTIVE; + dev_status->port_status[0].active_mtu = UBCORE_MTU_4096; + + udma_fill_buf(&in, UDMA_CMD_QUERY_PORT_INFO, true, 0, NULL); + udma_fill_buf(&out, UDMA_CMD_QUERY_PORT_INFO, true, + sizeof(port_info), (void *)&port_info); + ret = ubase_cmd_send_inout(udma_dev->comdev.adev, &in, &out); + if (ret) { + dev_err(udma_dev->dev, "failed to query speed, ret = %d.\n", ret); + return -EINVAL; + } + + dev_status->port_status[0].active_width = (enum ubcore_link_width)port_info.lanes; + + if (!ubase_adev_ubl_supported(udma_dev->comdev.adev)) + return udma_set_eth_device_speed(dev, dev_status, port_info.speed); + + if (port_info.speed == SPEED_200G) { + dev_status->port_status[0].active_speed = UBCORE_SP_200G; + } else if (port_info.speed == SPEED_400G) { + dev_status->port_status[0].active_speed = UBCORE_SP_400G; + } else { + dev_err(udma_dev->dev, "invalid port speed = %u.\n", port_info.speed); + ret = -EINVAL; + } + + return ret; +} + +static void udma_set_dev_caps(struct ubcore_device_attr *attr, struct udma_dev *udma_dev) +{ + attr->dev_cap.max_jfs_depth = udma_dev->caps.jfs.depth; + attr->dev_cap.max_jfr_depth = udma_dev->caps.jfr.depth; + attr->dev_cap.max_jfc_depth = udma_dev->caps.jfc.depth; + attr->dev_cap.max_jfs = udma_dev->caps.jfs.max_cnt + + udma_dev->caps.public_jetty.max_cnt 
+ + udma_dev->caps.user_ctrl_normal_jetty.max_cnt; + attr->dev_cap.max_jfr = udma_dev->caps.jfr.max_cnt; + attr->dev_cap.max_jfc = udma_dev->caps.jfc.max_cnt; + attr->dev_cap.max_jetty = udma_dev->caps.jetty.max_cnt + + udma_dev->caps.public_jetty.max_cnt + + udma_dev->caps.user_ctrl_normal_jetty.max_cnt; + attr->dev_cap.max_jetty_grp = udma_dev->caps.jetty_grp.max_cnt; + attr->dev_cap.max_jetty_in_jetty_grp = udma_dev->caps.jetty_in_grp; + attr->dev_cap.max_jfs_rsge = udma_dev->caps.jfs_rsge; + attr->dev_cap.max_jfs_sge = udma_dev->caps.jfs_sge; + attr->dev_cap.max_jfs_inline_size = udma_dev->caps.jfs_inline_sz; + attr->dev_cap.max_jfr_sge = udma_dev->caps.jfr_sge; + attr->dev_cap.max_msg_size = udma_dev->caps.max_msg_len; + attr->dev_cap.trans_mode = udma_dev->caps.trans_mode; + attr->port_cnt = udma_dev->caps.port_num; + attr->dev_cap.ceq_cnt = udma_dev->caps.comp_vector_cnt; + attr->dev_cap.max_ue_cnt = udma_dev->caps.ue_cnt; + attr->dev_cap.max_rc = udma_dev->caps.rc_queue_num; + attr->dev_cap.max_rc_depth = udma_dev->caps.rc_queue_depth; + attr->dev_cap.max_eid_cnt = udma_dev->caps.seid.max_cnt; + attr->dev_cap.feature.bs.jfc_inline = (udma_dev->caps.feature & + UDMA_CAP_FEATURE_JFC_INLINE) ? 1 : 0; + attr->dev_cap.max_read_size = udma_dev->caps.max_read_size; + attr->dev_cap.max_write_size = udma_dev->caps.max_write_size; + attr->dev_cap.max_cas_size = udma_dev->caps.max_cas_size; + attr->dev_cap.max_fetch_and_add_size = udma_dev->caps.max_fetch_and_add_size; + attr->dev_cap.atomic_feat.value = udma_dev->caps.atomic_feat; +} + +static int udma_query_device_attr(struct ubcore_device *dev, + struct ubcore_device_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + + udma_set_dev_caps(attr, udma_dev); + attr->ue_idx = udma_dev->caps.ue_id; + attr->port_attr[0].max_mtu = UBCORE_MTU_4096; + attr->reserved_jetty_id_max = udma_dev->caps.public_jetty.max_cnt - 1; + + return 0; +} + +static struct ubcore_ops g_dev_ops = { + .owner = THIS_MODULE, + .abi_version = 0, + .query_device_attr = udma_query_device_attr, + .query_device_status = udma_query_device_status, + .config_device = udma_config_device, +}; + +static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) +{ + if (!xa_empty(&table->xa)) + dev_err(dev->dev, "table is not empty.\n"); + xa_destroy(&table->xa); + + vfree(table->bitmap_table.bit); + table->bitmap_table.bit = NULL; +} + +static void udma_destroy_tp_ue_idx_table(struct udma_dev *udma_dev) +{ + struct udma_ue_idx_table *tp_ue_idx_info; + unsigned long index = 0; + + xa_lock(&udma_dev->tpn_ue_idx_table); + if (!xa_empty(&udma_dev->tpn_ue_idx_table)) { + xa_for_each(&udma_dev->tpn_ue_idx_table, index, tp_ue_idx_info) { + __xa_erase(&udma_dev->tpn_ue_idx_table, index); + kfree(tp_ue_idx_info); + tp_ue_idx_info = NULL; + } + } + + xa_unlock(&udma_dev->tpn_ue_idx_table); + xa_destroy(&udma_dev->tpn_ue_idx_table); +} + +void udma_destroy_tables(struct udma_dev *udma_dev) +{ + udma_destroy_eid_table(udma_dev); + mutex_destroy(&udma_dev->disable_ue_rx_mutex); + if (!ida_is_empty(&udma_dev->rsvd_jetty_ida_table.ida)) + dev_err(udma_dev->dev, + "IDA not empty in clean up rsvd jetty id table.\n"); + ida_destroy(&udma_dev->rsvd_jetty_ida_table.ida); + + if (!xa_empty(&udma_dev->crq_nb_table)) + dev_err(udma_dev->dev, "crq nb table is not empty.\n"); + xa_destroy(&udma_dev->crq_nb_table); + + udma_destroy_tp_ue_idx_table(udma_dev); + + if (!xa_empty(&udma_dev->ksva_table)) + dev_err(udma_dev->dev, "ksva table is not empty.\n"); + 
xa_destroy(&udma_dev->ksva_table); + mutex_destroy(&udma_dev->ksva_mutex); + udma_destroy_udma_table(udma_dev, &udma_dev->jetty_grp_table, "JettyGroup"); + udma_destroy_udma_table(udma_dev, &udma_dev->jfc_table, "JFC"); + udma_destroy_udma_table(udma_dev, &udma_dev->jfr_table, "JFR"); + udma_uninit_group_table(udma_dev, &udma_dev->jetty_table); +} + +static int udma_init_group_table(struct udma_dev *udma_dev, struct udma_group_table *table, + uint32_t max, uint32_t min, uint32_t num_per_group) +{ + struct udma_group_bitmap *bitmap_table; + int i; + + bitmap_table = &table->bitmap_table; + if (max < min) { + dev_err(udma_dev->dev, + "max value is less than min value when init group bitmap.\n"); + return -EINVAL; + } + + bitmap_table->max = max; + bitmap_table->min = min; + bitmap_table->grp_next = min; + bitmap_table->n_bits = max - min + 1; + bitmap_table->bitmap_cnt = ALIGN(bitmap_table->n_bits, num_per_group) / + num_per_group; + bitmap_table->bit = vmalloc(bitmap_table->bitmap_cnt * sizeof(uint32_t)); + if (!bitmap_table->bit) { + dev_err(udma_dev->dev, "failed to alloc jetty bitmap.\n"); + return -ENOMEM; + } + + for (i = 0; i < bitmap_table->bitmap_cnt; ++i) + bitmap_table->bit[i] = ~(0U); + + spin_lock_init(&bitmap_table->lock); + xa_init(&table->xa); + + return 0; +} + +static void udma_init_managed_by_ctrl_cpu_table(struct udma_dev *udma_dev) +{ + mutex_init(&udma_dev->eid_mutex); + xa_init(&udma_dev->eid_table); +} + +int udma_init_tables(struct udma_dev *udma_dev) +{ + int ret; + + ret = udma_init_group_table(udma_dev, &udma_dev->jetty_table, + udma_dev->caps.jetty.max_cnt + + udma_dev->caps.jetty.start_idx - 1, + udma_dev->caps.jetty.start_idx, + NUM_JETTY_PER_GROUP); + if (ret) { + dev_err(udma_dev->dev, + "failed to init jetty table when start_idx = %u, and max_cnt = %u.\n", + udma_dev->caps.jetty.start_idx, udma_dev->caps.jetty.max_cnt); + return ret; + } + + udma_init_udma_table(&udma_dev->jfr_table, udma_dev->caps.jfr.max_cnt + + udma_dev->caps.jfr.start_idx - 1, udma_dev->caps.jfr.start_idx); + udma_init_udma_table(&udma_dev->jfc_table, udma_dev->caps.jfc.max_cnt + + udma_dev->caps.jfc.start_idx - 1, udma_dev->caps.jfc.start_idx); + udma_init_udma_table(&udma_dev->jetty_grp_table, udma_dev->caps.jetty_grp.max_cnt + + udma_dev->caps.jetty_grp.start_idx - 1, + udma_dev->caps.jetty_grp.start_idx); + udma_init_udma_table_mutex(&udma_dev->ksva_table, &udma_dev->ksva_mutex); + udma_init_udma_table_mutex(&udma_dev->npu_nb_table, &udma_dev->npu_nb_mutex); + xa_init(&udma_dev->tpn_ue_idx_table); + xa_init(&udma_dev->crq_nb_table); + ida_init(&udma_dev->rsvd_jetty_ida_table.ida); + mutex_init(&udma_dev->disable_ue_rx_mutex); + udma_init_managed_by_ctrl_cpu_table(udma_dev); + + return 0; +} + +static void udma_free_rct(struct udma_dev *udev) +{ + uint32_t min = udev->rc_table.ida_table.min; + uint32_t max = udev->rc_table.ida_table.max; + uint32_t i; + + if (test_and_clear_bit(RCT_INIT_FLAG, &udev->caps.init_flag)) + for (i = min; i < max; i++) + udma_free_rc_queue(udev, i); +} + +static void udma_unset_ubcore_dev(struct udma_dev *udma_dev) +{ + struct ubcore_device *ub_dev = &udma_dev->ub_dev; + + ubcore_unregister_device(ub_dev); + udma_free_rct(udma_dev); +} + +static int udma_set_ubcore_dev(struct udma_dev *udma_dev) +{ + struct ubcore_device *ub_dev = &udma_dev->ub_dev; + int ret; + + ub_dev->transport_type = UBCORE_TRANSPORT_UB; + ub_dev->ops = &g_dev_ops; + ub_dev->dev.parent = udma_dev->dev; + ub_dev->dma_dev = ub_dev->dev.parent; + ub_dev->attr.dev_cap.feature.value = 
udma_dev->caps.feature; + + scnprintf(udma_dev->dev_name, UBCORE_MAX_DEV_NAME, "udma%hu", udma_dev->adev_id); + strscpy(ub_dev->dev_name, udma_dev->dev_name, UBCORE_MAX_DEV_NAME); + scnprintf(ub_dev->ops->driver_name, UBCORE_MAX_DRIVER_NAME, "udma"); + + ret = ubcore_register_device(ub_dev); + if (ret) + dev_err(udma_dev->dev, "failed to register udma_dev to ubcore, ret is %d.\n", ret); + + return ret; +} + +static void udma_dump_jetty_id_range(struct udma_dev *udma_dev) +{ +#define UDMA_JETTY_CNT 6 + const char *jetty_name[UDMA_JETTY_CNT] = { + "public", + "ccu", + "hdc", + "cache_lock", + "user_ctrl_normal", + "urma_normal", + }; + struct udma_res *jetty_res_list[UDMA_JETTY_CNT] = { + &udma_dev->caps.public_jetty, + &udma_dev->caps.ccu_jetty, + &udma_dev->caps.hdc_jetty, + &udma_dev->caps.stars_jetty, + &udma_dev->caps.user_ctrl_normal_jetty, + &udma_dev->caps.jetty, + }; + uint32_t i; + + for (i = 0; i < UDMA_JETTY_CNT; i++) + dev_info(udma_dev->dev, "%s jetty start_idx=%u, max_cnt=%u\n", + jetty_name[i], jetty_res_list[i]->start_idx, + jetty_res_list[i]->max_cnt); +} + +static void udma_get_jetty_id_range(struct udma_dev *udma_dev, + struct udma_cmd_ue_resource *cmd) +{ + udma_dev->caps.public_jetty.start_idx = cmd->well_known_jetty_start; + udma_dev->caps.public_jetty.max_cnt = cmd->well_known_jetty_num; + + udma_dev->caps.ccu_jetty.start_idx = cmd->ccu_jetty_start; + udma_dev->caps.ccu_jetty.max_cnt = cmd->ccu_jetty_num; + udma_dev->caps.ccu_jetty.next_idx = udma_dev->caps.ccu_jetty.start_idx; + + udma_dev->caps.hdc_jetty.start_idx = cmd->drv_jetty_start; + udma_dev->caps.hdc_jetty.max_cnt = cmd->drv_jetty_num; + + udma_dev->caps.stars_jetty.start_idx = cmd->cache_lock_jetty_start; + udma_dev->caps.stars_jetty.max_cnt = cmd->cache_lock_jetty_num; + udma_dev->caps.stars_jetty.next_idx = udma_dev->caps.stars_jetty.start_idx; + + udma_dev->caps.user_ctrl_normal_jetty.start_idx = cmd->normal_jetty_start; + udma_dev->caps.user_ctrl_normal_jetty.max_cnt = cmd->normal_jetty_num; + udma_dev->caps.user_ctrl_normal_jetty.next_idx = + udma_dev->caps.user_ctrl_normal_jetty.start_idx; + + udma_dev->caps.jetty.start_idx = cmd->standard_jetty_start; + udma_dev->caps.jetty.max_cnt = cmd->standard_jetty_num; + + udma_dev->caps.rsvd_jetty_cnt = udma_dev->caps.public_jetty.max_cnt + + udma_dev->caps.ccu_jetty.max_cnt + + udma_dev->caps.hdc_jetty.max_cnt + + udma_dev->caps.stars_jetty.max_cnt + + udma_dev->caps.user_ctrl_normal_jetty.max_cnt; + + if (debug_switch) + udma_dump_jetty_id_range(udma_dev); +} + +static int query_caps_from_firmware(struct udma_dev *udma_dev) +{ +#define RC_QUEUE_ENTRY_SIZE 128 + struct udma_cmd_ue_resource cmd = {}; + int ret; + + ret = udma_cmd_query_hw_resource(udma_dev, (void *)&cmd); + if (ret) { + dev_err(udma_dev->dev, "fail to query hw resource from FW %d\n", ret); + return ret; + } + + udma_dev->caps.jfs_sge = cmd.jfs_sge; + udma_dev->caps.jfs_rsge = cmd.jfs_rsge; + udma_dev->caps.jfr_sge = cmd.jfr_sge; + udma_dev->caps.jfs_inline_sz = cmd.max_jfs_inline_sz; + udma_dev->caps.jetty_grp.max_cnt = cmd.jetty_grp_num; + udma_dev->caps.trans_mode = cmd.trans_mode; + udma_dev->caps.seid.size = cmd.seid_upi_tbl_sz; + udma_dev->caps.seid.max_cnt = cmd.seid_upi_tbl_num; + udma_dev->caps.port_num = cmd.port_num; + udma_dev->caps.max_read_size = cmd.max_read_size; + udma_dev->caps.max_write_size = cmd.max_write_size; + udma_dev->caps.max_cas_size = cmd.max_cas_size; + udma_dev->caps.max_fetch_and_add_size = cmd.max_fetch_and_add_size; + udma_dev->caps.atomic_feat = 
cmd.atomic_feat; + + udma_get_jetty_id_range(udma_dev, &cmd); + + udma_dev->caps.rc_queue_num = cmd.rc_queue_num; + udma_dev->caps.rc_queue_depth = cmd.rc_depth; + udma_dev->caps.rc_entry_size = RC_QUEUE_ENTRY_SIZE; + + udma_dev->caps.feature = cmd.cap_info; + udma_dev->caps.ue_cnt = cmd.ue_cnt >= UDMA_DEV_UE_NUM ? + UDMA_DEV_UE_NUM - 1 : cmd.ue_cnt; + udma_dev->caps.ue_id = cmd.ue_id; + udma_dev->is_ue = !!(cmd.ue_id); + + return 0; +} + +static void get_dev_caps_from_ubase(struct udma_dev *udma_dev) +{ + struct ubase_caps *ubase_caps; + + ubase_caps = ubase_get_dev_caps(udma_dev->comdev.adev); + + udma_dev->caps.comp_vector_cnt = ubase_caps->num_ceq_vectors; + udma_dev->caps.ack_queue_num = ubase_caps->ack_queue_num; + + udma_dev->chip_id = ubase_caps->chip_id; + udma_dev->die_id = ubase_caps->die_id; + udma_dev->port_id = ubase_caps->io_port_id; + udma_dev->port_logic_id = ubase_caps->io_port_logic_id; + udma_dev->ue_id = ubase_caps->ue_id; +} + +static int udma_construct_qos_param(struct udma_dev *dev) +{ + struct ubase_adev_qos *qos_info; + uint8_t i; + + qos_info = ubase_get_adev_qos(dev->comdev.adev); + if (!qos_info) { + dev_err(dev->dev, "cannot get qos information from ubase.\n"); + return -EINVAL; + } + + dev->udma_tp_sl_num = qos_info->tp_sl_num; + dev->udma_ctp_sl_num = qos_info->ctp_sl_num; + dev->unic_sl_num = qos_info->nic_sl_num; + dev->udma_tp_resp_vl_off = qos_info->tp_resp_vl_offset; + dev->udma_total_sl_num = dev->udma_tp_sl_num + dev->udma_ctp_sl_num; + if (dev->udma_total_sl_num > UDMA_MAX_SL_NUM) { + dev_err(dev->dev, + "total sl num is invalid, tp sl num is %u, ctp sl num is %u.\n", + dev->udma_tp_sl_num, dev->udma_ctp_sl_num); + return -EINVAL; + } + + (void)memcpy(dev->udma_tp_sl, + qos_info->tp_sl, sizeof(u8) * qos_info->tp_sl_num); + (void)memcpy(dev->udma_ctp_sl, + qos_info->ctp_sl, sizeof(u8) * qos_info->ctp_sl_num); + (void)memcpy(dev->unic_sl, + qos_info->nic_sl, sizeof(u8) * qos_info->nic_sl_num); + (void)memcpy(dev->udma_sl, + qos_info->tp_sl, sizeof(u8) * qos_info->tp_sl_num); + + for (i = 0; i < qos_info->ctp_sl_num; i++) + dev->udma_sl[qos_info->tp_sl_num + i] = qos_info->ctp_sl[i]; + + return 0; +} + +static int udma_set_hw_caps(struct udma_dev *udma_dev) +{ +#define MAX_MSG_LEN 0x10000 + struct ubase_adev_caps *a_caps; + uint32_t jetty_grp_cnt; + int ret; + + get_dev_caps_from_ubase(udma_dev); + + ret = query_caps_from_firmware(udma_dev); + if (ret) + return ret; + + a_caps = ubase_get_udma_caps(udma_dev->comdev.adev); + udma_dev->caps.jfs.max_cnt = a_caps->jfs.max_cnt; + udma_dev->caps.jfs.depth = a_caps->jfs.depth / MAX_WQEBB_IN_SQE; + udma_dev->caps.jfs.start_idx = a_caps->jfs.start_idx; + udma_dev->caps.jfr.max_cnt = a_caps->jfr.max_cnt; + udma_dev->caps.jfr.depth = a_caps->jfr.depth; + udma_dev->caps.jfr.start_idx = a_caps->jfr.start_idx; + udma_dev->caps.jfc.max_cnt = a_caps->jfc.max_cnt; + udma_dev->caps.jfc.depth = a_caps->jfc.depth; + udma_dev->caps.jfc.start_idx = a_caps->jfc.start_idx; + udma_dev->caps.jetty.max_cnt = a_caps->jfs.max_cnt; + udma_dev->caps.jetty.depth = a_caps->jfs.depth; + udma_dev->caps.jetty.start_idx = a_caps->jfs.start_idx; + udma_dev->caps.jetty.next_idx = udma_dev->caps.jetty.start_idx; + udma_dev->caps.cqe_size = UDMA_CQE_SIZE; + ret = udma_construct_qos_param(udma_dev); + if (ret) + return ret; + + udma_dev->caps.max_msg_len = MAX_MSG_LEN; + udma_dev->caps.jetty_in_grp = MAX_JETTY_IN_JETTY_GRP; + + if (udma_dev->caps.jetty_in_grp) { + jetty_grp_cnt = udma_dev->caps.jetty.max_cnt / udma_dev->caps.jetty_in_grp; + 
udma_dev->caps.jetty_grp.max_cnt = + jetty_grp_cnt < udma_dev->caps.jetty_grp.max_cnt ? + jetty_grp_cnt : udma_dev->caps.jetty_grp.max_cnt; + } + + return 0; +} + +static int udma_init_dev_param(struct udma_dev *udma_dev) +{ + struct auxiliary_device *adev = udma_dev->comdev.adev; + struct ubase_resource_space *mem_base = ubase_get_mem_base(adev); + int ret; + + udma_dev->dev = adev->dev.parent; + udma_dev->db_base = mem_base->addr_unmapped; + udma_dev->k_db_base = mem_base->addr; + udma_dev->adev_id = udma_dev->comdev.adev->id; + + ret = udma_set_hw_caps(udma_dev); + if (ret) { + dev_err(udma_dev->dev, "failed to query hw caps, ret = %d\n", ret); + return ret; + } + + ret = udma_init_tables(udma_dev); + if (ret) { + dev_err(udma_dev->dev, + "Failed to init tables, ret = %d\n", ret); + return ret; + } + + dev_set_drvdata(&adev->dev, udma_dev); + + return 0; +} + +static void udma_uninit_dev_param(struct udma_dev *udma_dev) +{ + dev_set_drvdata(&udma_dev->comdev.adev->dev, NULL); + udma_destroy_tables(udma_dev); +} + +static int udma_alloc_dev_tid(struct udma_dev *udma_dev) +{ + struct ummu_seg_attr seg_attr = {.token = NULL, .e_bit = UMMU_EBIT_ON}; + struct ummu_param param = {.mode = MAPT_MODE_TABLE}; + int ret; + + ret = iommu_dev_enable_feature(udma_dev->dev, IOMMU_DEV_FEAT_KSVA); + if (ret) { + dev_err(udma_dev->dev, "enable ksva failed, ret = %d.\n", ret); + return ret; + } + + ret = iommu_dev_enable_feature(udma_dev->dev, IOMMU_DEV_FEAT_SVA); + if (ret) { + dev_err(udma_dev->dev, "enable sva failed, ret = %d.\n", ret); + goto err_sva_enable_dev; + } + + udma_dev->ksva = ummu_ksva_bind_device(udma_dev->dev, ¶m); + if (!udma_dev->ksva) { + dev_err(udma_dev->dev, "ksva bind device failed.\n"); + ret = -EINVAL; + goto err_ksva_bind_device; + } + + ret = ummu_get_tid(udma_dev->dev, udma_dev->ksva, &udma_dev->tid); + if (ret) { + dev_err(udma_dev->dev, "Failed to get tid for udma device.\n"); + goto err_get_tid; + } + + ret = ummu_sva_grant_range(udma_dev->ksva, 0, UDMA_MAX_GRANT_SIZE, + UMMU_DEV_WRITE | UMMU_DEV_READ, &seg_attr); + if (ret) { + dev_err(udma_dev->dev, "Failed to sva grant range for udma device.\n"); + goto err_sva_grant_range; + } + + return ret; + +err_sva_grant_range: +err_get_tid: + ummu_ksva_unbind_device(udma_dev->ksva); +err_ksva_bind_device: + if (iommu_dev_disable_feature(udma_dev->dev, IOMMU_DEV_FEAT_SVA)) + dev_warn(udma_dev->dev, "disable sva failed.\n"); +err_sva_enable_dev: + if (iommu_dev_disable_feature(udma_dev->dev, IOMMU_DEV_FEAT_KSVA)) + dev_warn(udma_dev->dev, "disable ksva failed.\n"); + return ret; +} + +static void udma_free_dev_tid(struct udma_dev *udma_dev) +{ + struct iommu_sva *ksva = NULL; + size_t token_id; + int ret; + + ret = ummu_sva_ungrant_range(udma_dev->ksva, 0, UDMA_MAX_GRANT_SIZE, NULL); + if (ret) + dev_warn(udma_dev->dev, + "sva ungrant range for udma device failed, ret = %d.\n", + ret); + + mutex_lock(&udma_dev->ksva_mutex); + xa_for_each(&udma_dev->ksva_table, token_id, ksva) { + __xa_erase(&udma_dev->ksva_table, token_id); + ummu_ksva_unbind_device(ksva); + } + mutex_unlock(&udma_dev->ksva_mutex); + + ummu_ksva_unbind_device(udma_dev->ksva); + + ret = iommu_dev_disable_feature(udma_dev->dev, IOMMU_DEV_FEAT_SVA); + if (ret) + dev_warn(udma_dev->dev, "disable sva failed, ret = %d.\n", ret); + + ret = iommu_dev_disable_feature(udma_dev->dev, IOMMU_DEV_FEAT_KSVA); + if (ret) + dev_warn(udma_dev->dev, "disable ksva failed, ret = %d.\n", ret); +} + +static int udma_create_db_page(struct udma_dev *udev) +{ + udev->db_page = 
alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!udev->db_page)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void udma_destroy_db_page(struct udma_dev *udev)
+{
+	put_page(udev->db_page);
+	udev->db_page = NULL;
+}
+
+/* init funcs run in array order on create; uninit funcs run in reverse. */
+static const struct udma_func_map udma_dev_func_map[] = {
+	{"dev param", udma_init_dev_param, udma_uninit_dev_param},
+	{"cmd", udma_cmd_init, udma_cmd_cleanup},
+	{"dev tid", udma_alloc_dev_tid, udma_free_dev_tid},
+	{"db page", udma_create_db_page, udma_destroy_db_page},
+};
+
+static void udma_destroy_dev(struct udma_dev *udev)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(udma_dev_func_map) - 1; i >= 0; i--)
+		if (udma_dev_func_map[i].uninit_func)
+			udma_dev_func_map[i].uninit_func(udev);
+	kfree(udev);
+}
+
+static struct udma_dev *udma_create_dev(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+	int ret, i;
+
+	udma_dev = kzalloc(sizeof(struct udma_dev), GFP_KERNEL);
+	if (!udma_dev)
+		return NULL;
+
+	udma_dev->comdev.adev = adev;
+
+	for (i = 0; i < ARRAY_SIZE(udma_dev_func_map); i++) {
+		if (!udma_dev_func_map[i].init_func)
+			continue;
+
+		ret = udma_dev_func_map[i].init_func(udma_dev);
+		if (ret) {
+			dev_err(udma_dev->dev, "Failed to init %s, ret = %d\n",
+				udma_dev_func_map[i].err_msg, ret);
+			goto err_init;
+		}
+	}
+
+	return udma_dev;
+
+err_init:
+	for (i -= 1; i >= 0; i--)
+		if (udma_dev_func_map[i].uninit_func)
+			udma_dev_func_map[i].uninit_func(udma_dev);
+
+	kfree(udma_dev);
+	return NULL;
+}
+
+static bool udma_is_need_probe(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+
+	if (is_rmmod) {
+		dev_info(&adev->dev,
+			 "udma driver is uninstalling, not allowed to create dev(%s.%u).\n",
+			 adev->name, adev->id);
+		return false;
+	}
+
+	udma_dev = get_udma_dev(adev);
+	if (udma_dev) {
+		dev_info(&adev->dev,
+			 "dev(%s.%u) already exists, bypass probe.\n",
+			 adev->name, adev->id);
+		return false;
+	}
+
+	return true;
+}
+
+static void udma_reset_handler(struct auxiliary_device *adev,
+			       enum ubase_reset_stage stage)
+{
+	switch (stage) {
+	case UBASE_RESET_STAGE_DOWN:
+		udma_reset_down(adev);
+		break;
+	case UBASE_RESET_STAGE_UNINIT:
+		udma_reset_uninit(adev);
+		break;
+	case UBASE_RESET_STAGE_INIT:
+		udma_reset_init(adev);
+		break;
+	default:
+		break;
+	}
+}
+
+static int udma_init_dev(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+	int ret;
+
+	mutex_lock(&udma_reset_mutex);
+	dev_info(&adev->dev, "udma init dev called, matched aux dev(%s.%u).\n",
+		 adev->name, adev->id);
+	if (!udma_is_need_probe(adev)) {
+		mutex_unlock(&udma_reset_mutex);
+		return 0;
+	}
+
+	udma_dev = udma_create_dev(adev);
+	if (!udma_dev)
+		goto err_create;
+
+	ret = udma_set_ubcore_dev(udma_dev);
+	if (ret) {
+		dev_err(udma_dev->dev, "failed to set ubcore dev, ret is %d.\n", ret);
+		/* tear down the partially initialized device before bailing out */
+		udma_destroy_dev(udma_dev);
+		goto err_create;
+	}
+
+	udma_dev->status = UDMA_NORMAL;
+	mutex_unlock(&udma_reset_mutex);
+	dev_info(udma_dev->dev, "init udma successfully.\n");
+
+	return 0;
+
+err_create:
+	mutex_unlock(&udma_reset_mutex);
+
+	return -EINVAL;
+}
+
+void udma_reset_down(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+
+	mutex_lock(&udma_reset_mutex);
+	udma_dev = get_udma_dev(adev);
+	if (!udma_dev) {
+		mutex_unlock(&udma_reset_mutex);
+		dev_info(&adev->dev, "udma device does not exist.\n");
+		return;
+	}
+
+	if (udma_dev->status != UDMA_NORMAL) {
+		mutex_unlock(&udma_reset_mutex);
+		dev_info(&adev->dev, "udma device status(%u).\n", udma_dev->status);
+		return;
+	}
+
+	udma_dev->status = UDMA_SUSPEND;
+
+	udma_unset_ubcore_dev(udma_dev);
+	mutex_unlock(&udma_reset_mutex);
+}
+
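+/*
+ * Reset flow summary (derived from the handlers below): ubase invokes
+ * udma_reset_handler() once per stage, and a full reset is expected to
+ * arrive as
+ *
+ *   UBASE_RESET_STAGE_DOWN   -> udma_reset_down():   mark the device
+ *                               UDMA_SUSPEND and unregister it from ubcore
+ *   UBASE_RESET_STAGE_UNINIT -> udma_reset_uninit(): destroy the suspended
+ *                               udma_dev
+ *   UBASE_RESET_STAGE_INIT   -> udma_reset_init():   re-create the device
+ *
+ * Each stage takes udma_reset_mutex and re-checks drvdata and the status
+ * field, so a stage that arrives out of order, or races with udma_remove(),
+ * degrades to a logged no-op rather than a double free.
+ */
+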
+void udma_reset_uninit(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+
+	mutex_lock(&udma_reset_mutex);
+	udma_dev = get_udma_dev(adev);
+	if (!udma_dev) {
+		dev_info(&adev->dev, "udma device does not exist.\n");
+		mutex_unlock(&udma_reset_mutex);
+		return;
+	}
+
+	if (udma_dev->status != UDMA_SUSPEND) {
+		dev_info(&adev->dev, "udma device status(%u).\n", udma_dev->status);
+		mutex_unlock(&udma_reset_mutex);
+		return;
+	}
+
+	udma_destroy_dev(udma_dev);
+	mutex_unlock(&udma_reset_mutex);
+}
+
+void udma_reset_init(struct auxiliary_device *adev)
+{
+	udma_init_dev(adev);
+}
+
+int udma_probe(struct auxiliary_device *adev,
+	       const struct auxiliary_device_id *id)
+{
+	if (udma_init_dev(adev))
+		return -EINVAL;
+
+	ubase_reset_register(adev, udma_reset_handler);
+	return 0;
+}
+
+void udma_remove(struct auxiliary_device *adev)
+{
+	struct udma_dev *udma_dev;
+
+	udma_dev = get_udma_dev(adev);
+	if (!udma_dev) {
+		dev_info(&adev->dev, "udma device does not exist.\n");
+		ubase_reset_unregister(adev);
+		return;
+	}
+
+	udma_reset_down(adev);
+	udma_reset_uninit(adev);
+
+	ubase_reset_unregister(adev);
+}
+
+static struct auxiliary_driver udma_drv = {
+	.name = "udma",
+	.probe = udma_probe,
+	.remove = udma_remove,
+	.id_table = udma_id_table,
+};
+
+static int __init udma_init(void)
+{
+	int ret;
+
+	ret = auxiliary_driver_register(&udma_drv);
+	if (ret)
+		pr_err("failed to register auxiliary_driver\n");
+
+	return ret;
+}
+
+static void __exit udma_exit(void)
+{
+	is_rmmod = true;
+	auxiliary_driver_unregister(&udma_drv);
+}
+
+module_init(udma_init);
+module_exit(udma_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/ub/urma/hw/udma/udma_rct.c b/drivers/ub/urma/hw/udma/udma_rct.c
new file mode 100644
index 000000000000..149b9b6f27b4
--- /dev/null
+++ b/drivers/ub/urma/hw/udma/udma_rct.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */
+
+#define dev_fmt(fmt) "UDMA: " fmt
+
+#include
+#include "udma_cmd.h"
+#include "udma_rct.h"
+
+static int udma_create_rc_queue_ctx(struct udma_dev *dev, struct udma_rc_queue *rcq)
+{
+	struct ubase_mbx_attr attr = {};
+	struct udma_rc_ctx ctx = {};
+
+	ctx.type = RC_TYPE;
+	ctx.state = RC_READY_STATE;
+	ctx.rce_token_id_l = dev->tid & (uint32_t)RCE_TOKEN_ID_L_MASK;
+	ctx.rce_token_id_h = dev->tid >> RCE_TOKEN_ID_H_OFFSET;
+	ctx.rce_base_addr_l = (rcq->buf.addr >> RCE_ADDR_L_OFFSET) &
+			      (uint32_t)RCE_ADDR_L_MASK;
+	ctx.rce_base_addr_h = rcq->buf.addr >> RCE_ADDR_H_OFFSET;
+	ctx.rce_shift = ilog2(roundup_pow_of_two(rcq->buf.entry_cnt));
+	ctx.avail_sgmt_ost = RC_AVAIL_SGMT_OST;
+
+	attr.tag = rcq->id;
+	attr.op = UDMA_CMD_CREATE_RC_CONTEXT;
+
+	return post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr);
+}
+
+static int udma_destroy_rc_queue_ctx(struct udma_dev *dev, struct udma_rc_queue *rcq)
+{
+	struct ubase_mbx_attr mbox_attr = {};
+	struct ubase_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = udma_alloc_cmd_mailbox(dev);
+	if (!mailbox) {
+		dev_err(dev->dev, "failed to alloc mailbox for rc queue.\n");
+		return -ENOMEM;
+	}
+
+	mbox_attr.tag = rcq->id;
+	mbox_attr.op = UDMA_CMD_DESTROY_RC_CONTEXT;
+	ret = udma_post_mbox(dev, mailbox, &mbox_attr);
+	if (ret)
+		dev_err(dev->dev, "failed to destroy rc queue ctx, ret = %d.\n", ret);
+
+	udma_free_cmd_mailbox(dev, mailbox);
+
+	return ret;
+}
+
+static int udma_alloc_rc_queue(struct udma_dev *dev,
+			       struct ubcore_device_cfg *cfg, int rc_queue_id)
+{
+	uint32_t rcq_entry_size = dev->caps.rc_entry_size;
+	uint32_t rcq_entry_num = cfg->rc_cfg.depth;
+	struct udma_rc_queue *rcq;
+	uint32_t size;
+	int ret;
+
+	rcq = kzalloc(sizeof(struct udma_rc_queue), GFP_KERNEL);
+	if (!rcq)
+		return -ENOMEM;
+	rcq->id = rc_queue_id;
+
+	size = rcq_entry_size * rcq_entry_num;
+	rcq->buf.kva_or_slot = udma_alloc_iova(dev, size, &rcq->buf.addr);
+	if (!rcq->buf.kva_or_slot) {
+		ret = -ENOMEM;
+		dev_err(dev->dev, "failed to alloc rc queue buffer.\n");
+		goto err_alloc_rcq;
+	}
+	rcq->buf.entry_size = rcq_entry_size;
+	rcq->buf.entry_cnt = rcq_entry_num;
+
+	ret = udma_create_rc_queue_ctx(dev, rcq);
+	if (ret) {
+		dev_err(dev->dev,
+			"failed to create rc queue ctx, rcq id %u, ret = %d.\n",
+			rcq->id, ret);
+		goto err_create_rcq_ctx;
+	}
+
+	ret = xa_err(xa_store(&dev->rc_table.xa, rcq->id, rcq, GFP_KERNEL));
+	if (ret) {
+		dev_err(dev->dev,
+			"failed to store rcq id to rc table, rcq id %d.\n",
+			rc_queue_id);
+		goto err_store_rcq_id;
+	}
+
+	return ret;
+
+err_store_rcq_id:
+	if (udma_destroy_rc_queue_ctx(dev, rcq))
+		dev_err(dev->dev,
+			"udma destroy rc queue ctx failed when alloc rc queue.\n");
+err_create_rcq_ctx:
+	udma_free_iova(dev, size, rcq->buf.kva_or_slot, rcq->buf.addr);
+	rcq->buf.kva_or_slot = NULL;
+	rcq->buf.addr = 0;
+err_alloc_rcq:
+	kfree(rcq);
+
+	return ret;
+}
+
+void udma_free_rc_queue(struct udma_dev *dev, int rc_queue_id)
+{
+	struct udma_rc_queue *rcq;
+	int ret;
+
+	rcq = (struct udma_rc_queue *)xa_load(&dev->rc_table.xa, rc_queue_id);
+	if (!rcq) {
+		dev_warn(dev->dev,
+			 "failed to find rcq, id = %d.\n", rc_queue_id);
+		return;
+	}
+
+	xa_erase(&dev->rc_table.xa, rc_queue_id);
+	ret = udma_destroy_rc_queue_ctx(dev, rcq);
+	if (ret)
+		dev_err(dev->dev,
+			"udma destroy rc queue ctx failed, ret = %d.\n", ret);
+
+	udma_free_iova(dev, rcq->buf.entry_size * rcq->buf.entry_cnt,
+		       rcq->buf.kva_or_slot, rcq->buf.addr);
+	rcq->buf.kva_or_slot = NULL;
+	rcq->buf.addr = 0;
+	kfree(rcq);
+}
+
+static int udma_config_rc_table(struct udma_dev *dev,
+				struct ubcore_device_cfg *cfg)
+{
+	uint32_t rc_ctx_num = cfg->rc_cfg.rc_cnt;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < rc_ctx_num; i++) {
+		ret = udma_alloc_rc_queue(dev, cfg, i);
+		if (ret) {
+			dev_err(dev->dev, "failed to alloc rc queue.\n");
+			goto err_alloc_rc_queue;
+		}
+	}
+	dev->rc_table.ida_table.min = 0;
+	dev->rc_table.ida_table.max = rc_ctx_num;
+
+	return ret;
+
+err_alloc_rc_queue:
+	for (i -= 1; i >= 0; i--)
+		udma_free_rc_queue(dev, i);
+
+	return ret;
+}
+
+static int check_and_config_rc_table(struct udma_dev *dev, struct ubcore_device_cfg *cfg)
+{
+	int ret = 0;
+
+	if (!cfg->mask.bs.rc_cnt && !cfg->mask.bs.rc_depth)
+		return 0;
+
+	if (!cfg->mask.bs.rc_cnt || !cfg->mask.bs.rc_depth) {
+		dev_err(dev->dev, "Invalid rc mask, mask = %u.\n", cfg->mask.value);
+		return -EINVAL;
+	}
+
+	if (!cfg->rc_cfg.rc_cnt || !cfg->rc_cfg.depth ||
+	    cfg->rc_cfg.rc_cnt > dev->caps.rc_queue_num ||
+	    cfg->rc_cfg.rc_cnt <= dev->caps.ack_queue_num) {
+		dev_err(dev->dev,
+			"Invalid rc param, rc cnt = %u, rc depth = %u, rc num = %u, ack queue num = %u.\n",
+			cfg->rc_cfg.rc_cnt, cfg->rc_cfg.depth,
+			dev->caps.rc_queue_num, dev->caps.ack_queue_num);
+		return -EINVAL;
+	}
+
+	if (!test_and_set_bit_lock(RCT_INIT_FLAG, &dev->caps.init_flag))
+		ret = udma_config_rc_table(dev, cfg);
+
+	return ret;
+}
+
+int udma_config_device(struct ubcore_device *ubcore_dev,
+		       struct ubcore_device_cfg *cfg)
+{
+	struct udma_dev *dev = to_udma_dev(ubcore_dev);
+	int ret;
+
+	if ((cfg->mask.bs.reserved_jetty_id_min && cfg->reserved_jetty_id_min != 0) ||
+	    (cfg->mask.bs.reserved_jetty_id_max && cfg->reserved_jetty_id_max !=
+	     dev->caps.public_jetty.max_cnt - 1)) {
+		dev_err(dev->dev, "public jetty range must be 0-%u.\n",
+			dev->caps.public_jetty.max_cnt - 1);
+		return -EINVAL;
+	}
+
+	ret = check_and_config_rc_table(dev, cfg);
+	if (ret)
+		dev_err(dev->dev, "failed to check device cfg, ret = %d.\n", ret);
+
+	return ret;
+}
diff --git a/drivers/ub/urma/hw/udma/udma_rct.h b/drivers/ub/urma/hw/udma/udma_rct.h
new file mode 100644
index 000000000000..fc1e47d95043
--- /dev/null
+++ b/drivers/ub/urma/hw/udma/udma_rct.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */
+
+#ifndef __UDMA_RCT_H__
+#define __UDMA_RCT_H__
+
+#include "udma_common.h"
+
+#define RC_TYPE 2U
+#define RC_READY_STATE 1U
+#define RC_AVAIL_SGMT_OST 512U
+
+#define RCE_TOKEN_ID_L_MASK GENMASK(11, 0)
+#define RCE_TOKEN_ID_H_OFFSET 12U
+#define RCE_ADDR_L_OFFSET 12U
+#define RCE_ADDR_L_MASK GENMASK(19, 0)
+#define RCE_ADDR_H_OFFSET 32U
+
+struct udma_rc_queue {
+	uint32_t id;
+	struct udma_buf buf;
+};
+
+struct udma_rc_ctx {
+	/* DW0 */
+	uint32_t rsv0 : 5;
+	uint32_t type : 3;
+	uint32_t rce_shift : 4;
+	uint32_t rsv1 : 4;
+	uint32_t state : 3;
+	uint32_t rsv2 : 1;
+	uint32_t rce_token_id_l : 12;
+	/* DW1 */
+	uint32_t rce_token_id_h : 8;
+	uint32_t rsv3 : 4;
+	uint32_t rce_base_addr_l : 20;
+	/* DW2 */
+	uint32_t rce_base_addr_h;
+	/* DW3~DW31 */
+	uint32_t rsv4[28];
+	uint32_t avail_sgmt_ost : 10;
+	uint32_t rsv5 : 22;
+	/* DW32~DW63 */
+	uint32_t rsv6[32];
+};
+
+struct udma_vir_cap {
+	uint8_t ue_idx;
+	uint8_t virtualization : 1;
+	uint8_t rsv : 7;
+};
+
+int udma_config_device(struct ubcore_device *ubcore_dev,
+		       struct ubcore_device_cfg *cfg);
+void udma_free_rc_queue(struct udma_dev *dev, int rc_queue_id);
+
+#endif /* __UDMA_RCT_H__ */
-- 
Gitee

From 761c49d2e3a78fbfaf3667ea14e74d041a6306ec Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 18 Sep 2025 09:56:27 +0800
Subject: [PATCH 042/243] ub: udma: Create and destroy u-context.

commit a1bbc9c41db4be60b93519f2146c09da30eb3298 openEuler

This patch supports creating and destroying u-context. At the same
time, it adds the TID- and doorbell-related functions used during
driver loading. In addition, it adds basic functionality such as ID
allocation and memory pinning.

Signed-off-by: Wei Qin
Signed-off-by: Shengming Shu
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/Makefile      |   4 +-
 drivers/ub/urma/hw/udma/udma_common.c | 455 ++++++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_common.h |  39 +++
 drivers/ub/urma/hw/udma/udma_ctl.h    | 213 ++++++++++
 drivers/ub/urma/hw/udma/udma_ctx.c    | 178 ++++++++++
 drivers/ub/urma/hw/udma/udma_ctx.h    |  42 +++
 drivers/ub/urma/hw/udma/udma_db.c     | 176 ++++++++++
 drivers/ub/urma/hw/udma/udma_db.h     |  16 +
 drivers/ub/urma/hw/udma/udma_def.h    |  38 +++
 drivers/ub/urma/hw/udma/udma_dev.h    |  19 ++
 drivers/ub/urma/hw/udma/udma_jetty.h  |  30 ++
 drivers/ub/urma/hw/udma/udma_jfs.h    |  30 ++
 drivers/ub/urma/hw/udma/udma_main.c   |  23 ++
 drivers/ub/urma/hw/udma/udma_tid.c    | 142 ++++++++
 drivers/ub/urma/hw/udma/udma_tid.h    |  25 ++
 include/uapi/ub/urma/udma/udma_abi.h  | 178 ++++++++++
 include/ub/urma/udma/udma_ctl.h       | 213 ++++++++++++
 17 files changed, 1819 insertions(+), 2 deletions(-)
 create mode 100644 drivers/ub/urma/hw/udma/udma_ctl.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_ctx.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_ctx.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_db.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_db.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_jetty.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_jfs.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_tid.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_tid.h
 create mode 100644 include/uapi/ub/urma/udma/udma_abi.h
 create mode 100644 include/ub/urma/udma/udma_ctl.h

diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile
index 2cd71b916ec9..0561b3f85191 100644
--- a/drivers/ub/urma/hw/udma/Makefile
+++ b/drivers/ub/urma/hw/udma/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0+
-udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o
udma_common.o \ - udma_rct.o +udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ + udma_rct.o udma_tid.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index cb8e6b6f4e90..d313e1d17443 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -12,10 +12,403 @@ #include #include #include +#include #include "udma_dev.h" #include "udma_cmd.h" #include "udma_common.h" +static int udma_verify_input(struct udma_umem_param *param) +{ + struct udma_dev *udma_dev = to_udma_dev(param->ub_dev); + + if (((param->va + param->len) < param->va) || + PAGE_ALIGN(param->va + param->len) < (param->va + param->len)) { + dev_err(udma_dev->dev, "invalid pin_page param, len=%llu.\n", + param->len); + return -EINVAL; + } + return 0; +} + +static void udma_fill_umem(struct ubcore_umem *umem, struct udma_umem_param *param) +{ + umem->ub_dev = param->ub_dev; + umem->va = param->va; + umem->length = param->len; + umem->flag = param->flag; +} + +static struct scatterlist *udma_sg_set_page(struct scatterlist *sg_start, + int pinned, struct page **page_list) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sg_start, sg, pinned, i) + sg_set_page(sg, page_list[i], PAGE_SIZE, 0); + + return sg; +} + +static int udma_pin_pages(uint64_t cur_base, uint64_t npages, + uint32_t gup_flags, struct page **page_list) +{ + return pin_user_pages_fast(cur_base, min_t(unsigned long, (unsigned long)npages, + PAGE_SIZE / sizeof(struct page *)), + gup_flags | FOLL_LONGTERM, page_list); +} + +static uint64_t udma_pin_all_pages(struct udma_dev *udma_dev, struct ubcore_umem *umem, + uint64_t npages, uint32_t gup_flags, + struct page **page_list) +{ + struct scatterlist *sg_list_start = umem->sg_head.sgl; + uint64_t cur_base = umem->va & PAGE_MASK; + uint64_t page_count = npages; + int pinned; + + while (page_count != 0) { + cond_resched(); + pinned = udma_pin_pages(cur_base, page_count, gup_flags, page_list); + if (pinned <= 0) { + dev_err(udma_dev->dev, "failed to pin_user_pages_fast, page_count: %llu, pinned: %d.\n", + page_count, pinned); + return npages - page_count; + } + cur_base += (uint64_t)pinned * PAGE_SIZE; + page_count -= (uint64_t)pinned; + sg_list_start = udma_sg_set_page(sg_list_start, pinned, page_list); + } + return npages; +} + +static uint64_t udma_k_pin_pages(struct udma_dev *dev, struct ubcore_umem *umem, + uint64_t npages) +{ + struct scatterlist *sg_cur = umem->sg_head.sgl; + uint64_t cur_base = umem->va & PAGE_MASK; + struct page *pg; + uint64_t pinned; + + for (pinned = 0; pinned < npages; pinned++) { + if (is_vmalloc_addr((void *)(uintptr_t)cur_base)) + pg = vmalloc_to_page((void *)(uintptr_t)cur_base); + else + pg = kmap_to_page((void *)(uintptr_t)cur_base); + if (!pg) { + dev_err(dev->dev, "vmalloc or kmap to page failed.\n"); + break; + } + get_page(pg); + + cur_base += PAGE_SIZE; + + sg_set_page(sg_cur, pg, PAGE_SIZE, 0); + sg_cur = sg_next(sg_cur); + } + + return pinned; +} + +static void udma_unpin_pages(struct ubcore_umem *umem, uint64_t nents, bool is_kernel) +{ + struct scatterlist *sg; + uint32_t i; + + for_each_sg(umem->sg_head.sgl, sg, nents, i) { + struct page *page = sg_page(sg); + + if (is_kernel) + put_page(page); + else + unpin_user_page(page); + } +} + +static struct ubcore_umem *udma_get_target_umem(struct udma_umem_param *param, + struct page **page_list) +{ + struct udma_dev *udma_dev = to_udma_dev(param->ub_dev); + struct ubcore_umem *umem; + uint32_t 
gup_flags; + uint64_t npages; + uint64_t pinned; + int ret = 0; + + umem = kzalloc(sizeof(*umem), GFP_KERNEL); + if (umem == 0) { + ret = -ENOMEM; + goto out; + } + + udma_fill_umem(umem, param); + + npages = udma_cal_npages(umem->va, umem->length); + if (npages == 0 || npages > UINT_MAX) { + dev_err(udma_dev->dev, + "Invalid npages %llu in getting target umem process.\n", npages); + ret = -EINVAL; + goto umem_kfree; + } + + ret = sg_alloc_table(&umem->sg_head, (unsigned int)npages, GFP_KERNEL); + if (ret) + goto umem_kfree; + + if (param->is_kernel) { + pinned = udma_k_pin_pages(udma_dev, umem, npages); + } else { + gup_flags = (param->flag.bs.writable == 1) ? FOLL_WRITE : 0; + pinned = udma_pin_all_pages(udma_dev, umem, npages, gup_flags, page_list); + } + if (pinned != npages) { + ret = -ENOMEM; + goto umem_release; + } + + goto out; + +umem_release: + udma_unpin_pages(umem, pinned, param->is_kernel); + sg_free_table(&umem->sg_head); +umem_kfree: + kfree(umem); +out: + return ret != 0 ? ERR_PTR(ret) : umem; +} + +struct ubcore_umem *udma_umem_get(struct udma_umem_param *param) +{ + struct ubcore_umem *umem; + struct page **page_list; + int ret; + + ret = udma_verify_input(param); + if (ret < 0) + return ERR_PTR(ret); + + page_list = (struct page **) __get_free_page(GFP_KERNEL); + if (page_list == 0) + return ERR_PTR(-ENOMEM); + + umem = udma_get_target_umem(param, page_list); + + free_page((uintptr_t)page_list); + + return umem; +} + +int pin_queue_addr(struct udma_dev *dev, uint64_t addr, uint32_t len, + struct udma_buf *buf) +{ + struct ubcore_device *ub_dev = &dev->ub_dev; + struct udma_umem_param param; + + param.ub_dev = ub_dev; + param.va = addr; + param.len = len; + param.flag.bs.writable = 1; + param.flag.bs.non_pin = 0; + param.is_kernel = false; + + buf->umem = udma_umem_get(¶m); + if (IS_ERR(buf->umem)) { + dev_err(dev->dev, "failed to pin queue addr.\n"); + return PTR_ERR(buf->umem); + } + + buf->addr = addr; + + return 0; +} + +void unpin_queue_addr(struct ubcore_umem *umem) +{ + udma_umem_release(umem, false); +} + +void udma_umem_release(struct ubcore_umem *umem, bool is_kernel) +{ + if (IS_ERR_OR_NULL(umem)) + return; + + udma_unpin_pages(umem, umem->sg_head.nents, is_kernel); + sg_free_table(&umem->sg_head); + kfree(umem); +} + +int udma_id_alloc_auto_grow(struct udma_dev *udma_dev, struct udma_ida *ida_table, + uint32_t *idx) +{ + int id; + + spin_lock(&ida_table->lock); + id = ida_alloc_range(&ida_table->ida, ida_table->next, ida_table->max, + GFP_ATOMIC); + if (id < 0) { + id = ida_alloc_range(&ida_table->ida, ida_table->min, ida_table->max, + GFP_ATOMIC); + if (id < 0) { + dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); + spin_unlock(&ida_table->lock); + return id; + } + } + + ida_table->next = (uint32_t)id + 1 > ida_table->max ? 
+ ida_table->min : (uint32_t)id + 1; + + *idx = (uint32_t)id; + spin_unlock(&ida_table->lock); + + return 0; +} + +int udma_id_alloc(struct udma_dev *udma_dev, struct udma_ida *ida_table, + uint32_t *idx) +{ + int id; + + id = ida_alloc_range(&ida_table->ida, ida_table->min, ida_table->max, + GFP_ATOMIC); + if (id < 0) { + dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); + return id; + } + + *idx = (uint32_t)id; + + return 0; +} + +int udma_specify_adv_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, + uint32_t user_id) +{ + uint32_t id_bit_idx = (user_id - bitmap_table->min); + uint32_t bit_idx = id_bit_idx % NUM_JETTY_PER_GROUP; + uint32_t block = id_bit_idx / NUM_JETTY_PER_GROUP; + uint32_t *bit = bitmap_table->bit; + + spin_lock(&bitmap_table->lock); + if ((bit[block] & (1U << bit_idx)) == 0) { + dev_err(udma_dev->dev, + "user specify id %u been used.\n", user_id); + spin_unlock(&bitmap_table->lock); + return -ENOMEM; + } + + bit[block] &= ~(1U << bit_idx); + spin_unlock(&bitmap_table->lock); + + return 0; +} + +static int udma_adv_jetty_id_alloc(struct udma_dev *udma_dev, uint32_t *bit, + uint32_t next_bit, uint32_t start_idx, + struct udma_group_bitmap *bitmap_table) +{ + uint32_t bit_idx; + + bit_idx = find_next_bit((unsigned long *)bit, NUM_JETTY_PER_GROUP, next_bit); + if (bit_idx == NUM_JETTY_PER_GROUP) { + dev_err(udma_dev->dev, + "jid is larger than n_bits, bit=0x%x.\n", *bit); + return -ENOMEM; + } + + start_idx += bit_idx; + if (start_idx >= bitmap_table->n_bits) { + dev_err(udma_dev->dev, + "jid is larger than n_bits, id=%u, n_bits=%u.\n", + start_idx, bitmap_table->n_bits); + return -ENOMEM; + } + + *bit &= ~(1U << bit_idx); + return start_idx + bitmap_table->min; +} + +int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, + uint32_t *start_idx, bool is_grp, uint32_t next) +{ + uint32_t next_block = (next - bitmap_table->min) / NUM_JETTY_PER_GROUP; + uint32_t next_bit = (next - bitmap_table->min) % NUM_JETTY_PER_GROUP; + uint32_t bitmap_cnt = bitmap_table->bitmap_cnt; + uint32_t *bit = bitmap_table->bit; + uint32_t i; + int ret; + + spin_lock(&bitmap_table->lock); + + for (i = next_block; + (i < bitmap_cnt && bit[i] == 0) || + (i == next_block && + ((bit[i] & GENMASK(NUM_JETTY_PER_GROUP - 1, next_bit)) == 0)); ++i) + ; + + if (i == bitmap_cnt) { + dev_err(udma_dev->dev, + "all bitmaps have been used, bitmap_cnt = %u.\n", + bitmap_cnt); + spin_unlock(&bitmap_table->lock); + return -ENOMEM; + } + + if (!is_grp) { + ret = udma_adv_jetty_id_alloc(udma_dev, bit + i, next_bit, + i * NUM_JETTY_PER_GROUP, bitmap_table); + + spin_unlock(&bitmap_table->lock); + if (ret >= 0) { + *start_idx = (uint32_t)ret; + return 0; + } + return ret; + } + + for (; i < bitmap_cnt && ~bit[i] != 0; ++i) + ; + if (i == bitmap_cnt || + (i + 1) * NUM_JETTY_PER_GROUP > bitmap_table->n_bits) { + dev_err(udma_dev->dev, + "no completely bitmap for Jetty group.\n"); + spin_unlock(&bitmap_table->lock); + return -ENOMEM; + } + + bit[i] = 0; + *start_idx = i * NUM_JETTY_PER_GROUP + bitmap_table->min; + + spin_unlock(&bitmap_table->lock); + + return 0; +} + +void udma_adv_id_free(struct udma_group_bitmap *bitmap_table, uint32_t start_idx, + bool is_grp) +{ + uint32_t bitmap_num; + uint32_t bit_num; + + start_idx -= bitmap_table->min; + spin_lock(&bitmap_table->lock); + + bitmap_num = start_idx / NUM_JETTY_PER_GROUP; + if (bitmap_num >= bitmap_table->bitmap_cnt) { + spin_unlock(&bitmap_table->lock); + return; + } + + if (is_grp) { + 
bitmap_table->bit[bitmap_num] = ~0U; + } else { + bit_num = start_idx % NUM_JETTY_PER_GROUP; + bitmap_table->bit[bitmap_num] |= (1U << bit_num); + } + + spin_unlock(&bitmap_table->lock); +} + static void udma_init_ida_table(struct udma_ida *ida_table, uint32_t max, uint32_t min) { ida_init(&ida_table->ida); @@ -77,6 +470,68 @@ void udma_destroy_eid_table(struct udma_dev *udma_dev) mutex_destroy(&udma_dev->eid_mutex); } +static struct ubcore_umem *udma_pin_k_addr(struct ubcore_device *ub_dev, uint64_t va, + uint64_t len) +{ + struct udma_umem_param param; + + param.ub_dev = ub_dev; + param.va = va; + param.len = len; + param.flag.bs.writable = true; + param.flag.bs.non_pin = 0; + param.is_kernel = true; + + return udma_umem_get(¶m); +} + +static void udma_unpin_k_addr(struct ubcore_umem *umem) +{ + udma_umem_release(umem, true); +} + +int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, + struct udma_buf *buf) +{ + size_t aligned_memory_size; + int ret; + + aligned_memory_size = memory_size + UDMA_HW_PAGE_SIZE - 1; + buf->aligned_va = vmalloc(aligned_memory_size); + if (!buf->aligned_va) { + dev_err(udma_dev->dev, + "failed to vmalloc kernel buf, size = %lu.", + aligned_memory_size); + return -ENOMEM; + } + + memset(buf->aligned_va, 0, aligned_memory_size); + buf->umem = udma_pin_k_addr(&udma_dev->ub_dev, (uint64_t)buf->aligned_va, + aligned_memory_size); + if (IS_ERR(buf->umem)) { + ret = PTR_ERR(buf->umem); + vfree(buf->aligned_va); + dev_err(udma_dev->dev, "pin kernel buf failed, ret = %d.\n", ret); + return ret; + } + + buf->addr = ((uint64_t)buf->aligned_va + UDMA_HW_PAGE_SIZE - 1) & + ~(UDMA_HW_PAGE_SIZE - 1); + buf->kva = (void *)(uintptr_t)buf->addr; + + return 0; +} + +void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, + struct udma_buf *buf) +{ + udma_unpin_k_addr(buf->umem); + vfree(buf->aligned_va); + buf->aligned_va = NULL; + buf->kva = NULL; + buf->addr = 0; +} + void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr) { struct iova_slot *slot; diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 4f843356c755..f3f32862db0a 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -8,6 +8,37 @@ #include #include "udma_dev.h" +struct udma_jetty_queue { + struct udma_buf buf; + void *kva_curr; + uint32_t id; + void __iomem *db_addr; + void __iomem *dwqe_addr; + uint32_t pi; + uint32_t ci; + uintptr_t *wrid; + spinlock_t lock; + uint32_t max_inline_size; + uint32_t max_sge_num; + uint32_t tid; + bool flush_flag; + uint32_t old_entry_idx; + enum ubcore_transport_mode trans_mode; + struct ubcore_tjetty *rc_tjetty; + bool is_jetty; + uint32_t sqe_bb_cnt; + uint32_t lock_free; /* Support kernel mode lock-free mode */ + uint32_t ta_timeout; /* ms */ + enum ubcore_jetty_state state; + bool non_pin; + struct udma_jetty_grp *jetty_grp; + enum udma_jetty_type jetty_type; +}; + +int pin_queue_addr(struct udma_dev *dev, uint64_t addr, + uint32_t len, struct udma_buf *buf); +void unpin_queue_addr(struct ubcore_umem *umem); + struct udma_umem_param { struct ubcore_device *ub_dev; uint64_t va; @@ -16,13 +47,21 @@ struct udma_umem_param { bool is_kernel; }; +struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); +void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); void 
udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); void udma_destroy_eid_table(struct udma_dev *udma_dev); +int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); +void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); +static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len) +{ + return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; +} #endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctl.h b/drivers/ub/urma/hw/udma/udma_ctl.h new file mode 100644 index 000000000000..da1d082aeec4 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctl.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ +#define _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ + +#include + +#define UDMA_BUS_INSTANCE_SEID_SIZE 4 +#define UDMA_EID_PAIRS_COUNT 8 + +union udma_k_jfs_flag { + struct { + uint32_t sq_cstm : 1; + uint32_t db_cstm : 1; + uint32_t db_ctl_cstm : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +struct udma_que_cfg_ex { + uint32_t buff_size; + void *buff; +}; + +struct udma_jfs_cstm_cfg { + struct udma_que_cfg_ex sq; /* PA; should be converted by phys_to_virt. */ +}; + +struct udma_jfs_cfg_ex { + struct ubcore_jfs_cfg base_cfg; + struct ubcore_udata udata; + struct udma_jfs_cstm_cfg cstm_cfg; + ubcore_event_callback_t jfae_handler; +}; + +struct udma_jfc_cstm_cfg { + struct udma_que_cfg_ex cq; /* PA; should be using stars hw register addr. 
*/ +}; + +struct udma_jfc_cfg_ex { + struct ubcore_jfc_cfg base_cfg; + struct ubcore_udata udata; + struct udma_jfc_cstm_cfg cstm_cfg; + ubcore_comp_callback_t jfce_handler; + ubcore_event_callback_t jfae_handler; +}; + +enum udma_jfc_type { + UDMA_NORMAL_JFC_TYPE, + UDMA_STARS_JFC_TYPE, + UDMA_CCU_JFC_TYPE, + UDMA_KERNEL_STARS_JFC_TYPE, + UDMA_JFC_TYPE_NUM, +}; + +struct udma_set_cqe_ex { + uint64_t addr; + uint32_t len; + enum udma_jfc_type jfc_type; +}; + +struct udma_ue_info_ex { + uint16_t ue_id; + uint32_t chip_id; + uint32_t die_id; + uint32_t offset_len; + resource_size_t db_base_addr; + resource_size_t dwqe_addr; + resource_size_t register_base_addr; +}; + +struct udma_tp_sport_in { + uint32_t tpn; +}; + +struct udma_tp_sport_out { + uint32_t data_udp_srcport; + uint32_t ack_udp_srcport; +}; + +struct udma_cqe_info_in { + enum ubcore_cr_status status; + uint8_t s_r; +}; + +enum udma_cqe_aux_info_type { + TPP2TQEM_WR_CNT, + DEVICE_RAS_STATUS_2, + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TWP_AE_DFX_FOR_CQE, + PA_OUT_PKT_ERR_CNT, + TP_DAM_AXI_ALARM, + TP_DAM_VFT_BT_ALARM, + TP_EUM_AXI_ALARM, + TP_EUM_VFT_BT_ALARM, + TP_TPMM_AXI_ALARM, + TP_TPMM_VFT_BT_ALARM, + TP_TPGCM_AXI_ALARM, + TP_TPGCM_VFT_BT_ALARM, + TWP_ALM, + TP_RWP_INNER_ALM_FOR_CQE, + TWP_DFX21, + LQC_TA_RNR_TANACK_CNT, + FVT, + RQMT0, + RQMT1, + RQMT2, + RQMT3, + RQMT4, + RQMT5, + RQMT6, + RQMT7, + RQMT8, + RQMT9, + RQMT10, + RQMT11, + RQMT12, + RQMT13, + RQMT14, + RQMT15, + PROC_ERROR_ALM, + LQC_TA_TIMEOUT_TAACK_CNT, + TP_RRP_ERR_FLG_0_FOR_CQE, + MAX_CQE_AUX_INFO_TYPE_NUM +}; + +struct udma_cqe_aux_info_out { + enum udma_cqe_aux_info_type *aux_info_type; + uint32_t *aux_info_value; + uint32_t aux_info_num; +}; + +struct udma_ae_info_in { + uint32_t event_type; +}; + +enum udma_ae_aux_info_type { + TP_RRP_FLUSH_TIMER_PKT_CNT, + TPP_DFX5, + TWP_AE_DFX_FOR_AE, + TP_RRP_ERR_FLG_0_FOR_AE, + TP_RRP_ERR_FLG_1, + TP_RWP_INNER_ALM_FOR_AE, + TP_RCP_INNER_ALM_FOR_AE, + LQC_TA_TQEP_WQE_ERR, + LQC_TA_CQM_CQE_INNER_ALARM, + MAX_AE_AUX_INFO_TYPE_NUM +}; + +struct udma_ae_aux_info_out { + enum udma_ae_aux_info_type *aux_info_type; + uint32_t *aux_info_value; + uint32_t aux_info_num; +}; + +enum udma_user_ctl_opcode { + UDMA_USER_CTL_CREATE_JFS_EX, + UDMA_USER_CTL_DELETE_JFS_EX, + UDMA_USER_CTL_CREATE_JFC_EX, + UDMA_USER_CTL_DELETE_JFC_EX, + UDMA_USER_CTL_SET_CQE_ADDR, + UDMA_USER_CTL_QUERY_UE_INFO, + UDMA_USER_CTL_GET_DEV_RES_RATIO, + UDMA_USER_CTL_NPU_REGISTER_INFO_CB, + UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB, + UDMA_USER_CTL_QUERY_TP_SPORT, + UDMA_USER_CTL_QUERY_CQE_AUX_INFO, + UDMA_USER_CTL_QUERY_AE_AUX_INFO, + UDMA_USER_CTL_QUERY_UBMEM_INFO, + UDMA_USER_CTL_QUERY_PAIR_DEVNUM, + UDMA_USER_CTL_MAX, +}; + +struct udma_ctrlq_event_nb { + uint8_t opcode; + int (*crq_handler)(struct ubcore_device *dev, void *data, uint16_t len); +}; + +struct udma_dev_pair_info { + uint32_t peer_dev_id; + uint32_t slot_id; + uint32_t pair_num; + struct { + uint32_t local_eid[UDMA_BUS_INSTANCE_SEID_SIZE]; + uint32_t remote_eid[UDMA_BUS_INSTANCE_SEID_SIZE]; + uint32_t flag : 16; + uint32_t hop : 4; + uint32_t rsv : 12; + } eid_pairs[UDMA_EID_PAIRS_COUNT]; +}; + +static inline bool udma_check_base_param(uint64_t addr, uint32_t in_len, uint32_t len) +{ + return (addr == 0 || in_len != len); +} + +typedef int (*udma_user_ctl_ops)(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct 
ubcore_user_ctl_out *out); + +int udma_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl); +int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); +int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + +#endif /* _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c new file mode 100644 index 000000000000..985abb19929a --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include "udma_jfs.h" +#include "udma_jetty.h" +#include "udma_ctrlq_tp.h" +#include "udma_ctx.h" + +static int udma_init_ctx_resp(struct udma_dev *dev, struct ubcore_udrv_priv *udrv_data) +{ + struct udma_create_ctx_resp resp; + unsigned long byte; + + if (!udrv_data->out_addr || + udrv_data->out_len < sizeof(resp)) { + dev_err(dev->dev, + "Invalid ctx resp out: len %d or addr is invalid.\n", + udrv_data->out_len); + return -EINVAL; + } + + resp.cqe_size = dev->caps.cqe_size; + resp.dwqe_enable = !!(dev->caps.feature & UDMA_CAP_FEATURE_DIRECT_WQE); + resp.reduce_enable = !!(dev->caps.feature & UDMA_CAP_FEATURE_REDUCE); + resp.ue_id = dev->ue_id; + resp.chip_id = dev->chip_id; + resp.die_id = dev->die_id; + resp.dump_aux_info = dump_aux_info; + resp.jfr_sge = dev->caps.jfr_sge; + + byte = copy_to_user((void *)(uintptr_t)udrv_data->out_addr, &resp, + (uint32_t)sizeof(resp)); + if (byte) { + dev_err(dev->dev, + "copy ctx resp to user failed, byte = %lu.\n", byte); + return -EFAULT; + } + + return 0; +} + +struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, + uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data) +{ + struct udma_dev *dev = to_udma_dev(ub_dev); + struct udma_context *ctx; + int ret; + + ctx = kzalloc(sizeof(struct udma_context), GFP_KERNEL); + if (ctx == NULL) + return NULL; + + ctx->sva = ummu_sva_bind_device(dev->dev, current->mm, NULL); + if (!ctx->sva) { + dev_err(dev->dev, "SVA failed to bind device.\n"); + goto err_free_ctx; + } + + ret = ummu_get_tid(dev->dev, ctx->sva, &ctx->tid); + if (ret) { + dev_err(dev->dev, "Failed to get tid.\n"); + goto err_unbind_dev; + } + + ctx->dev = dev; + INIT_LIST_HEAD(&ctx->pgdir_list); + mutex_init(&ctx->pgdir_mutex); + + ret = udma_init_ctx_resp(dev, udrv_data); + if (ret) { + dev_err(dev->dev, "Init ctx resp failed.\n"); + goto err_init_ctx_resp; + } + + return &ctx->base; + +err_init_ctx_resp: + mutex_destroy(&ctx->pgdir_mutex); +err_unbind_dev: + ummu_sva_unbind_device(ctx->sva); +err_free_ctx: + kfree(ctx); + return NULL; +} + +int udma_free_ucontext(struct ubcore_ucontext *ucontext) +{ + struct udma_dev *udma_dev = to_udma_dev(ucontext->ub_dev); + struct udma_context *ctx; + int ret; + + ctx = to_udma_context(ucontext); + + ret = ummu_core_invalidate_cfg_table(ctx->tid); + if (ret) + dev_err(udma_dev->dev, "invalidate cfg_table failed, ret=%d.\n", ret); + + mutex_destroy(&ctx->pgdir_mutex); + ummu_sva_unbind_device(ctx->sva); + + kfree(ctx); + + return 0; +} + +int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) +{ +#define JFC_DB_UNMAP_BOUND 1 + struct udma_dev *udma_dev = to_udma_dev(uctx->ub_dev); + struct ubcore_ucontext *jetty_uctx; + struct 
udma_jetty_queue *sq; + resource_size_t db_addr; + uint64_t address; + uint64_t j_id; + uint32_t cmd; + + if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { + dev_err(udma_dev->dev, + "mmap failed, unexpected vm area size.\n"); + return -EINVAL; + } + + db_addr = udma_dev->db_base; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + cmd = get_mmap_cmd(vma); + switch (cmd) { + case UDMA_MMAP_JFC_PAGE: + if (io_remap_pfn_range(vma, vma->vm_start, + jfc_arm_mode > JFC_DB_UNMAP_BOUND ? + (uint64_t)db_addr >> PAGE_SHIFT : + page_to_pfn(udma_dev->db_page), + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + break; + case UDMA_MMAP_JETTY_DSQE: + j_id = get_mmap_idx(vma); + xa_lock(&udma_dev->jetty_table.xa); + sq = xa_load(&udma_dev->jetty_table.xa, j_id); + if (!sq) { + dev_err(udma_dev->dev, + "mmap failed, j_id: %llu not exist\n", j_id); + xa_unlock(&udma_dev->jetty_table.xa); + return -EINVAL; + } + + if (sq->is_jetty) + jetty_uctx = to_udma_jetty_from_queue(sq)->ubcore_jetty.uctx; + else + jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; + + if (jetty_uctx != uctx) { + dev_err(udma_dev->dev, + "mmap failed, j_id: %llu, uctx invalid\n", j_id); + xa_unlock(&udma_dev->jetty_table.xa); + return -EINVAL; + } + xa_unlock(&udma_dev->jetty_table.xa); + + address = (uint64_t)db_addr + JETTY_DSQE_OFFSET + j_id * UDMA_HW_PAGE_SIZE; + + if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + break; + default: + dev_err(udma_dev->dev, + "mmap failed, cmd(%u) not support\n", cmd); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctx.h b/drivers/ub/urma/hw/udma/udma_ctx.h new file mode 100644 index 000000000000..a93aab94c1e9 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctx.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_CTX_H__ +#define __UDMA_CTX_H__ + +#include +#include +#include "udma_dev.h" + +struct udma_context { + struct ubcore_ucontext base; + struct udma_dev *dev; + uint32_t uasid; + struct list_head pgdir_list; + struct mutex pgdir_mutex; + struct iommu_sva *sva; + uint32_t tid; +}; + +static inline struct udma_context *to_udma_context(struct ubcore_ucontext *uctx) +{ + return container_of(uctx, struct udma_context, base); +} + +static inline uint64_t get_mmap_idx(struct vm_area_struct *vma) +{ + return (vma->vm_pgoff >> MAP_INDEX_SHIFT) & MAP_INDEX_MASK; +} + +static inline int get_mmap_cmd(struct vm_area_struct *vma) +{ + return (vma->vm_pgoff & MAP_COMMAND_MASK); +} + +struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, + uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data); +int udma_free_ucontext(struct ubcore_ucontext *ucontext); +int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma); + +#endif /* __UDMA_CTX_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_db.c b/drivers/ub/urma/hw/udma/udma_db.c new file mode 100644 index 000000000000..ea7b5d98ee6b --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_db.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
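*/
+
+/*
+ * Editor's note (illustrative, not part of the original patch): this file
+ * keeps software doorbell (sw db) records in two flavours.  User-mode
+ * records pin the PAGE_SIZE page containing db_addr and refcount it per
+ * context on ctx->pgdir_list (udma_pin_sw_db()/udma_unpin_sw_db()).
+ * Kernel-mode records carve UDMA_DB_SIZE (64-byte) slots out of
+ * bitmap-managed pages on dev->db_list[type], so a 4 KiB page holds
+ * PAGE_SIZE / UDMA_DB_SIZE = 64 doorbell records.  A minimal kernel-mode
+ * usage sketch, with a hypothetical caller that already holds a
+ * struct udma_dev *dev:
+ *
+ *	struct udma_sw_db db;
+ *
+ *	if (udma_alloc_sw_db(dev, &db, UDMA_JFC_TYPE_DB))
+ *		return -ENOMEM;
+ *	// ... program db.db_addr into the hardware context ...
+ *	udma_free_sw_db(dev, &db);	// freeing the last slot releases the page
+ */
+
+/* software doorbell management 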
*/
+
+#define dev_fmt(fmt) "UDMA: " fmt
+
+#include
+#include
+#include "udma_common.h"
+#include "udma_db.h"
+
+int udma_pin_sw_db(struct udma_context *ctx, struct udma_sw_db *db)
+{
+	uint64_t page_addr = db->db_addr & PAGE_MASK;
+	struct udma_sw_db_page *page;
+	struct udma_umem_param param;
+	uint32_t offset = 0;
+	int ret = 0;
+
+	param.ub_dev = &ctx->dev->ub_dev;
+	param.va = page_addr;
+	param.len = PAGE_SIZE;
+	param.flag.bs.writable = 1;
+	param.flag.bs.non_pin = 0;
+	param.is_kernel = false;
+	offset = db->db_addr - page_addr;
+
+	mutex_lock(&ctx->pgdir_mutex);
+
+	list_for_each_entry(page, &ctx->pgdir_list, list) {
+		if (page->user_virt == page_addr)
+			goto found;
+	}
+
+	page = kmalloc(sizeof(*page), GFP_KERNEL);
+	if (!page) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	refcount_set(&page->refcount, 1);
+	page->user_virt = page_addr;
+	page->umem = udma_umem_get(&param);
+	if (IS_ERR(page->umem)) {
+		ret = PTR_ERR(page->umem);
+		dev_err(ctx->dev->dev, "Failed to get umem, ret: %d.\n", ret);
+		kfree(page);
+		goto out;
+	}
+
+	list_add(&page->list, &ctx->pgdir_list);
+	db->page = page;
+	db->virt_addr = (char *)sg_virt(page->umem->sg_head.sgl) + offset;
+	mutex_unlock(&ctx->pgdir_mutex);
+	return 0;
+found:
+	db->page = page;
+	db->virt_addr = (char *)sg_virt(page->umem->sg_head.sgl) + offset;
+	refcount_inc(&page->refcount);
+out:
+	mutex_unlock(&ctx->pgdir_mutex);
+	return ret;
+}
+
+void udma_unpin_sw_db(struct udma_context *ctx, struct udma_sw_db *db)
+{
+	mutex_lock(&ctx->pgdir_mutex);
+
+	if (refcount_dec_and_test(&db->page->refcount)) {
+		list_del(&db->page->list);
+		udma_umem_release(db->page->umem, false);
+		kfree(db->page);
+	}
+
+	mutex_unlock(&ctx->pgdir_mutex);
+}
+
+static int udma_alloc_db_from_page(struct udma_k_sw_db_page *page,
+				   struct udma_sw_db *db, enum udma_db_type type)
+{
+	uint32_t index;
+
+	index = find_first_bit(page->bitmap, page->num_db);
+	if (index >= page->num_db)
+		return -ENOMEM;
+
+	clear_bit(index, page->bitmap);
+
+	db->index = index;
+	db->kpage = page;
+	db->type = type;
+	db->db_addr = page->db_buf.addr + db->index * UDMA_DB_SIZE;
+	db->db_record = page->db_buf.kva + db->index * UDMA_DB_SIZE;
+	*db->db_record = 0;
+
+	return 0;
+}
+
+static struct udma_k_sw_db_page *udma_alloc_db_page(struct udma_dev *dev,
+						    enum udma_db_type type)
+{
+	struct udma_k_sw_db_page *page;
+	int ret;
+
+	page = kzalloc(sizeof(*page), GFP_KERNEL);
+	if (!page)
+		return NULL;
+
+	page->num_db = PAGE_SIZE / UDMA_DB_SIZE;
+
+	page->bitmap = bitmap_alloc(page->num_db, GFP_KERNEL);
+	if (!page->bitmap) {
+		dev_err(dev->dev, "Failed to alloc db bitmap, db type is %u.\n", type);
+		goto err_bitmap;
+	}
+
+	bitmap_fill(page->bitmap, page->num_db);
+
+	ret = udma_k_alloc_buf(dev, PAGE_SIZE, &page->db_buf);
+	if (ret) {
+		dev_err(dev->dev, "Failed to alloc db page buf, ret is %d.\n", ret);
+		goto err_kva;
+	}
+
+	return page;
+err_kva:
+	bitmap_free(page->bitmap);
+err_bitmap:
+	kfree(page);
+	return NULL;
+}
+
+int udma_alloc_sw_db(struct udma_dev *dev, struct udma_sw_db *db,
+		     enum udma_db_type type)
+{
+	struct udma_k_sw_db_page *page;
+	int ret = 0;
+
+	mutex_lock(&dev->db_mutex);
+
+	list_for_each_entry(page, &dev->db_list[type], list)
+		if (!udma_alloc_db_from_page(page, db, type))
+			goto out;
+
+	page = udma_alloc_db_page(dev, type);
+	if (!page) {
+		ret = -ENOMEM;
+		dev_err(dev->dev, "Failed to alloc sw db page, db_type = %u.\n", type);
+		goto out;
+	}
+
+	list_add(&page->list, &dev->db_list[type]);
+
+	/* This should never fail */
+	(void)udma_alloc_db_from_page(page, db, type);
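+	/*
+	 * Editor's note: the allocation above cannot fail, because the page
+	 * was just created with a fully free bitmap (bitmap_fill() in
+	 * udma_alloc_db_page()), so find_first_bit() always finds a slot.
+	 */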
+out: + mutex_unlock(&dev->db_mutex); + + return ret; +} + +void udma_free_sw_db(struct udma_dev *dev, struct udma_sw_db *db) +{ + mutex_lock(&dev->db_mutex); + + set_bit(db->index, db->kpage->bitmap); + + if (bitmap_full(db->kpage->bitmap, db->kpage->num_db)) { + udma_k_free_buf(dev, PAGE_SIZE, &db->kpage->db_buf); + bitmap_free(db->kpage->bitmap); + list_del(&db->kpage->list); + kfree(db->kpage); + db->kpage = NULL; + } + + mutex_unlock(&dev->db_mutex); +} diff --git a/drivers/ub/urma/hw/udma/udma_db.h b/drivers/ub/urma/hw/udma/udma_db.h new file mode 100644 index 000000000000..0fe018304149 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_db.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_DB_H__ +#define __UDMA_DB_H__ + +#include "udma_ctx.h" +#include "udma_dev.h" + +int udma_pin_sw_db(struct udma_context *ctx, struct udma_sw_db *db); +void udma_unpin_sw_db(struct udma_context *ctx, struct udma_sw_db *db); +int udma_alloc_sw_db(struct udma_dev *dev, struct udma_sw_db *db, + enum udma_db_type type); +void udma_free_sw_db(struct udma_dev *dev, struct udma_sw_db *db); + +#endif /* __UDMA_DB_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h index 14d747c3fb8f..c45c69cd0271 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -69,6 +69,13 @@ struct udma_caps { struct udma_tbl seid; }; +struct udma_sw_db_page { + struct list_head list; + struct ubcore_umem *umem; + uint64_t user_virt; + refcount_t refcount; +}; + struct udma_buf { dma_addr_t addr; union { @@ -85,6 +92,37 @@ struct udma_buf { struct mutex id_table_mutex; }; +struct udma_k_sw_db_page { + struct list_head list; + uint32_t num_db; + unsigned long *bitmap; + struct udma_buf db_buf; +}; + +struct udma_sw_db { + struct udma_sw_db_page *page; + struct udma_k_sw_db_page *kpage; + uint32_t index; + uint32_t type; + uint64_t db_addr; + uint32_t *db_record; + void *virt_addr; +}; + +struct udma_req_msg { + uint8_t dst_ue_idx; + uint8_t resp_code; + uint16_t rsv; + struct ubcore_req req; +}; + +struct udma_resp_msg { + uint8_t dst_ue_idx; + uint8_t resp_code; + uint16_t rsv; + struct ubcore_resp resp; +}; + enum num_elem_in_grp { NUM_TP_PER_GROUP = 16, NUM_JETTY_PER_GROUP = 32, diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 941dd2a0540e..a4df1f114d23 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -8,6 +8,11 @@ #include #include #include "udma_def.h" +#include +#include + +extern uint32_t jfc_arm_mode; +extern bool dump_aux_info; #define UBCORE_MAX_DEV_NAME 64 @@ -15,6 +20,8 @@ #define MAX_WQEBB_IN_SQE 4 +#define JETTY_DSQE_OFFSET 0x1000 + #define UDMA_HW_PAGE_SHIFT 12 #define UDMA_HW_PAGE_SIZE (1 << UDMA_HW_PAGE_SHIFT) @@ -97,6 +104,8 @@ struct udma_dev { struct mutex eid_mutex; uint32_t tid; struct iommu_sva *ksva; + struct list_head db_list[UDMA_DB_TYPE_NUM]; + struct mutex db_mutex; uint32_t status; uint32_t ue_num; uint32_t ue_id; @@ -136,6 +145,16 @@ static inline void udma_id_free(struct udma_ida *ida_table, int idx) ida_free(&ida_table->ida, idx); } +int udma_id_alloc_auto_grow(struct udma_dev *udma_dev, struct udma_ida *ida_table, + uint32_t *idx); +int udma_id_alloc(struct udma_dev *udma_dev, struct udma_ida *ida_table, + uint32_t *idx); +int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, + uint32_t *start_idx, bool 
is_grp, uint32_t next); +void udma_adv_id_free(struct udma_group_bitmap *bitmap_table, uint32_t start_idx, + bool is_grp); +int udma_specify_adv_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, + uint32_t user_id); void udma_destroy_tables(struct udma_dev *udma_dev); int udma_init_tables(struct udma_dev *udma_dev); int udma_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h new file mode 100644 index 000000000000..e213278bcca3 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_JETTY_H__ +#define __UDMA_JETTY_H__ + +#include "udma_common.h" + +struct udma_jetty { + struct ubcore_jetty ubcore_jetty; + struct udma_jfr *jfr; + struct udma_jetty_queue sq; + uint64_t jetty_addr; + refcount_t ae_refcount; + struct completion ae_comp; + bool pi_type; + bool ue_rx_closed; +}; + +static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) +{ + return container_of(jetty, struct udma_jetty, ubcore_jetty); +} + +static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queue *queue) +{ + return container_of(queue, struct udma_jetty, sq); +} + +#endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h new file mode 100644 index 000000000000..39a7b5d1bfc4 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_JFS_H__ +#define __UDMA_JFS_H__ + +#include "udma_common.h" + +struct udma_jfs { + struct ubcore_jfs ubcore_jfs; + struct udma_jetty_queue sq; + uint64_t jfs_addr; + refcount_t ae_refcount; + struct completion ae_comp; + uint32_t mode; + bool pi_type; + bool ue_rx_closed; +}; + +static inline struct udma_jfs *to_udma_jfs(struct ubcore_jfs *jfs) +{ + return container_of(jfs, struct udma_jfs, ubcore_jfs); +} + +static inline struct udma_jfs *to_udma_jfs_from_queue(struct udma_jetty_queue *queue) +{ + return container_of(queue, struct udma_jfs, sq); +} + +#endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 0224b6d248d0..77ade2c69f4a 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -18,12 +18,16 @@ #include #include "udma_dev.h" #include "udma_cmd.h" +#include "udma_ctx.h" #include "udma_rct.h" +#include "udma_tid.h" #include "udma_common.h" #include "udma_ctrlq_tp.h" bool is_rmmod; static DEFINE_MUTEX(udma_reset_mutex); +uint32_t jfc_arm_mode; +bool dump_aux_info; static const struct auxiliary_device_id udma_id_table[] = { { @@ -154,6 +158,11 @@ static struct ubcore_ops g_dev_ops = { .query_device_attr = udma_query_device_attr, .query_device_status = udma_query_device_status, .config_device = udma_config_device, + .alloc_ucontext = udma_alloc_ucontext, + .free_ucontext = udma_free_ucontext, + .mmap = udma_mmap, + .alloc_token_id = udma_alloc_tid, + .free_token_id = udma_free_tid, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) @@ -532,6 +541,7 @@ static int udma_init_dev_param(struct udma_dev *udma_dev) struct auxiliary_device *adev = udma_dev->comdev.adev; struct ubase_resource_space *mem_base = 
ubase_get_mem_base(adev);
 	int ret;
+	int i;
 
 	udma_dev->dev = adev->dev.parent;
 	udma_dev->db_base = mem_base->addr_unmapped;
@@ -553,11 +563,16 @@ static int udma_init_dev_param(struct udma_dev *udma_dev)
 
 	dev_set_drvdata(&adev->dev, udma_dev);
 
+	mutex_init(&udma_dev->db_mutex);
+	for (i = 0; i < UDMA_DB_TYPE_NUM; i++)
+		INIT_LIST_HEAD(&udma_dev->db_list[i]);
+
 	return 0;
 }
 
 static void udma_uninit_dev_param(struct udma_dev *udma_dev)
 {
+	mutex_destroy(&udma_dev->db_mutex);
 	dev_set_drvdata(&udma_dev->comdev.adev->dev, NULL);
 	udma_destroy_tables(udma_dev);
 }
@@ -890,3 +905,11 @@ static void __exit udma_exit(void)
 module_init(udma_init);
 module_exit(udma_exit);
 MODULE_LICENSE("GPL");
+
+module_param(jfc_arm_mode, uint, 0444);
+MODULE_PARM_DESC(jfc_arm_mode,
+		 "Set the ARM mode of the JFC, default: 0 (0: Always ARM, other: NO ARM).");
+
+module_param(dump_aux_info, bool, 0644);
+MODULE_PARM_DESC(dump_aux_info,
+		 "Set whether to dump aux info, default: false (false: not print, true: print)");
diff --git a/drivers/ub/urma/hw/udma/udma_tid.c b/drivers/ub/urma/hw/udma/udma_tid.c
new file mode 100644
index 000000000000..c5b5b9037162
--- /dev/null
+++ b/drivers/ub/urma/hw/udma/udma_tid.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */
+
+#define dev_fmt(fmt) "UDMA: " fmt
+
+#include
+#include "udma_tid.h"
+
+static int udma_get_key_id_from_user(struct udma_dev *udma_dev,
+				     struct ubcore_udata *udata,
+				     struct udma_tid *udma_tid)
+{
+	unsigned long byte;
+	uint32_t tid;
+
+	if (!udata->udrv_data || !udata->udrv_data->in_addr) {
+		dev_err(udma_dev->dev, "udrv_data or in_addr is null.\n");
+		return -EINVAL;
+	}
+
+	byte = copy_from_user(&tid, (void *)(uintptr_t)udata->udrv_data->in_addr,
+			      min(udata->udrv_data->in_len,
+				  (uint32_t)sizeof(tid)));
+	if (byte) {
+		dev_err(udma_dev->dev, "get user data failed, byte = %lu.\n", byte);
+		return -EFAULT;
+	}
+
+	udma_tid->core_key_id.token_id = tid;
+	udma_tid->tid = tid >> UDMA_TID_SHIFT;
+
+	return 0;
+}
+
+static int udma_alloc_k_tid(struct udma_dev *udma_dev,
+			    struct udma_tid *udma_tid)
+{
+	struct ummu_param param = {.mode = MAPT_MODE_TABLE};
+	struct iommu_sva *ksva;
+	uint32_t tid;
+	int ret;
+
+	ksva = ummu_ksva_bind_device(udma_dev->dev, &param);
+	if (!ksva) {
+		dev_err(udma_dev->dev, "ksva bind device failed.\n");
+		return -ENOMEM;
+	}
+
+	ret = ummu_get_tid(udma_dev->dev, ksva, &tid);
+	if (ret) {
+		dev_err(udma_dev->dev, "get tid from ummu failed, ret = %d.\n", ret);
+		goto err_get_tid;
+	}
+
+	if (tid > UDMA_MAX_TID) {
+		dev_err(udma_dev->dev, "tid overflow.\n");
+		ret = -EINVAL;
+		goto err_get_tid;
+	}
+
+	mutex_lock(&udma_dev->ksva_mutex);
+	ret = xa_err(__xa_store(&udma_dev->ksva_table, tid, ksva, GFP_KERNEL));
+	mutex_unlock(&udma_dev->ksva_mutex);
+	if (ret) {
+		dev_err(udma_dev->dev, "save ksva failed, ret = %d.\n", ret);
+		goto err_get_tid;
+	}
+
+	udma_tid->core_key_id.token_id = tid << UDMA_TID_SHIFT;
+	udma_tid->tid = tid;
+	udma_tid->kernel_mode = true;
+
+	return ret;
+
+err_get_tid:
+	ummu_ksva_unbind_device(ksva);
+
+	return ret;
+}
+
+struct ubcore_token_id *udma_alloc_tid(struct ubcore_device *ub_dev,
+				       union ubcore_token_id_flag flag,
+				       struct ubcore_udata *udata)
+{
+	struct udma_dev *udma_dev = to_udma_dev(ub_dev);
+	struct udma_tid *udma_tid;
+	int ret;
+
+	udma_tid = kzalloc(sizeof(*udma_tid), GFP_KERNEL);
+	if (!udma_tid)
+		return NULL;
+
+	if (udata) {
+		ret = udma_get_key_id_from_user(udma_dev, udata, udma_tid);
+		if (ret) {
+			dev_err(udma_dev->dev, 
"get user key id failed, ret = %d.\n", ret); + goto err_get_key_id; + } + return &udma_tid->core_key_id; + } + + if (udma_alloc_k_tid(udma_dev, udma_tid)) + goto err_get_key_id; + + return &udma_tid->core_key_id; + +err_get_key_id: + kfree(udma_tid); + return NULL; +} + +int udma_free_tid(struct ubcore_token_id *token_id) +{ + struct udma_dev *udma_dev = to_udma_dev(token_id->ub_dev); + struct udma_tid *udma_tid = to_udma_tid(token_id); + struct iommu_sva *ksva; + int ret; + + ret = ummu_core_invalidate_cfg_table(udma_tid->tid); + if (ret) + dev_err(udma_dev->dev, "invalidate cfg_table failed, ret=%d.\n", ret); + + if (!udma_tid->kernel_mode) + goto out; + + mutex_lock(&udma_dev->ksva_mutex); + ksva = (struct iommu_sva *)xa_load(&udma_dev->ksva_table, udma_tid->tid); + if (!ksva) { + mutex_unlock(&udma_dev->ksva_mutex); + dev_warn(udma_dev->dev, + "unable to get ksva while free tid, token maybe is free.\n"); + goto out; + } + ummu_ksva_unbind_device(ksva); + __xa_erase(&udma_dev->ksva_table, udma_tid->tid); + mutex_unlock(&udma_dev->ksva_mutex); + +out: + kfree(udma_tid); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_tid.h b/drivers/ub/urma/hw/udma/udma_tid.h new file mode 100644 index 000000000000..72bf436fc23f --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_tid.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_TID_H__ +#define __UDMA_TID_H__ + +#include "udma_dev.h" + +struct udma_tid { + struct ubcore_token_id core_key_id; + bool kernel_mode; + uint32_t tid; +}; + +static inline struct udma_tid *to_udma_tid(struct ubcore_token_id *token_id) +{ + return container_of(token_id, struct udma_tid, core_key_id); +} + +struct ubcore_token_id *udma_alloc_tid(struct ubcore_device *dev, + union ubcore_token_id_flag flag, + struct ubcore_udata *udata); +int udma_free_tid(struct ubcore_token_id *token_id); + +#endif /* __UDMA_TID_H__ */ diff --git a/include/uapi/ub/urma/udma/udma_abi.h b/include/uapi/ub/urma/udma/udma_abi.h new file mode 100644 index 000000000000..02440d162c8d --- /dev/null +++ b/include/uapi/ub/urma/udma/udma_abi.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef _UAPI_UB_UMDK_URMA_UDMA_UDMA_ABI_H_ +#define _UAPI_UB_UMDK_URMA_UDMA_UDMA_ABI_H_ + +#include + +#define MAP_COMMAND_MASK 0xff +#define MAP_INDEX_MASK 0xffffff +#define MAP_INDEX_SHIFT 8 + +#define UDMA_SEGMENT_ACCESS_GUARD (1UL << 5) + +#define UDMA_CQE_COALESCE_SHIFT 10 +#define UDMA_CQE_COALESCE_CNT_MAX (1 << UDMA_CQE_COALESCE_SHIFT) + +#define UDMA_CQE_PERIOD_0 0 +#define UDMA_CQE_PERIOD_4 4 +#define UDMA_CQE_PERIOD_16 16 +#define UDMA_CQE_PERIOD_64 64 +#define UDMA_CQE_PERIOD_256 256 +#define UDMA_CQE_PERIOD_1024 1024 +#define UDMA_CQE_PERIOD_4096 4096 +#define UDMA_CQE_PERIOD_16384 16384 + +#define UDMA_JFC_HW_DB_OFFSET 0x40 + +#define UDMA_DOORBELL_OFFSET 0x80 + +#define UDMA_JETTY_DSQE_OFFSET 0x1000 + +#define UDMA_DB_SIZE 64U + +#define UDMA_SRC_IDX_SHIFT 16 +#define UDMA_IMM_DATA_SHIFT 32 +#define UDMA_JFC_DB_VALID_OWNER_M 1 +#define UDMA_ADDR_SHIFT 32 + +#define UDMA_INTER_ERR 1 +#define UDMA_CQE_DEFAULT_SUBSTATUS 0 + +#define UDMA_MAX_GRANT_SIZE 0xFFFFFFFFF000 + +#define UDMA_TID_SHIFT 8U +#define UDMA_MAX_TID 0xFFFFFU + +enum udma_jetty_type { + UDMA_CACHE_LOCK_DWQE_JETTY_TYPE, + UDMA_CCU_JETTY_TYPE, + UDMA_NORMAL_JETTY_TYPE, + UDMA_URMA_NORMAL_JETTY_TYPE, + UDMA_JETTY_TYPE_MAX +}; + +enum cr_direct { + CR_SEND, + CR_RECV, +}; + +enum cr_jetty { + CR_IS_NOT_JETTY, + CR_IS_JETTY, +}; + +struct udma_create_jetty_ucmd { + __aligned_u64 buf_addr; + __u32 buf_len; + __u32 jfr_id; + __aligned_u64 db_addr; + __aligned_u64 idx_addr; + __u32 idx_len; + __u32 sqe_bb_cnt; + __aligned_u64 jetty_addr; + __u32 pi_type : 1; + __u32 non_pin : 1; + __u32 rsv : 30; + __u32 jetty_type; + __aligned_u64 jfr_sleep_buf; + __u32 jfs_id; + __u32 rsv1; +}; + +struct udma_create_jfc_ucmd { + __aligned_u64 buf_addr; + __u32 buf_len; + __u32 mode; /* 0: normal, 1: user stars, 2: kernel stars */ + __aligned_u64 db_addr; +}; + +struct udma_create_ctx_resp { + __u32 cqe_size : 8; + __u32 dwqe_enable : 1; + __u32 reduce_enable : 1; + __u32 dump_aux_info : 1; + __u32 rsv : 21; + __u32 ue_id; + __u32 chip_id; + __u32 die_id; + __u32 jfr_sge; + __u32 rsv1; +}; + +struct udma_create_jfr_resp { + __u32 jfr_caps; + __u32 rsv; +}; + +enum db_mmap_type { + UDMA_MMAP_JFC_PAGE, + UDMA_MMAP_JETTY_DSQE, +}; + +enum { + UDMA_CQ_DB, + UDMA_CQ_ARM_DB, +}; + +struct udma_jfc_db { + __u32 ci : 24; + __u32 notify : 1; + __u32 arm_sn : 2; + __u32 type : 1; + __u32 rsv1 : 4; + __u32 jfcn : 20; + __u32 rsv2 : 12; +}; + +enum udma_db_type { + UDMA_JFR_TYPE_DB, + UDMA_JFC_TYPE_DB, + UDMA_JFR_PAYLOAD, + UDMA_DB_TYPE_NUM, +}; + +enum jfc_poll_state { + JFC_OK, + JFC_EMPTY, + JFC_POLL_ERR, +}; + +enum { + CQE_FOR_SEND, + CQE_FOR_RECEIVE, +}; + +enum { + UDMA_CQE_SUCCESS = 0x00, + UDMA_CQE_UNSUPPORTED_OPCODE = 0x01, + UDMA_CQE_LOCAL_OP_ERR = 0x02, + UDMA_CQE_REMOTE_OP_ERR = 0x03, + UDMA_CQE_TRANSACTION_RETRY_COUNTER_ERR = 0x04, + UDMA_CQE_TRANSACTION_ACK_TIMEOUT_ERR = 0x05, + UDMA_JETTY_WORK_REQUEST_FLUSH = 0x06, +}; + +enum { + UDMA_CQE_LOCAL_LENGTH_ERR = 0x01, + UDMA_CQE_LOCAL_ACCESS_ERR = 0x02, + UDMA_CQE_REM_RSP_LENGTH_ERR = 0x03, + UDMA_CQE_LOCAL_DATA_POISON = 0x04, +}; + +enum { + UDMA_CQE_REM_UNSUPPORTED_REQ_ERR = 0x01, + UDMA_CQE_REM_ACCESS_ERR = 0x02, + UDMA_CQE_REM_DATA_POISON = 0x04, +}; + +enum hw_cqe_opcode { + HW_CQE_OPC_SEND = 0x00, + HW_CQE_OPC_SEND_WITH_IMM = 0x01, + HW_CQE_OPC_SEND_WITH_INV = 0x02, + HW_CQE_OPC_WRITE_WITH_IMM = 0x03, + HW_CQE_OPC_ERR = 0xff, +}; + +#endif /* _UAPI_UB_UMDK_URMA_UDMA_UDMA_ABI_H_ */ diff --git a/include/ub/urma/udma/udma_ctl.h b/include/ub/urma/udma/udma_ctl.h new file mode 
100644 index 000000000000..19898d33c14b --- /dev/null +++ b/include/ub/urma/udma/udma_ctl.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ +#define _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ + +#include + +#define UDMA_BUS_INSTANCE_SEID_SIZE 4 +#define UDMA_EID_PAIRS_COUNT 8 + +union udma_k_jfs_flag { + struct { + uint32_t sq_cstm : 1; + uint32_t db_cstm : 1; + uint32_t db_ctl_cstm : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +struct udma_que_cfg_ex { + uint32_t buff_size; + void *buff; +}; + +struct udma_jfs_cstm_cfg { + struct udma_que_cfg_ex sq; /* PA; should be converted by phys_to_virt. */ +}; + +struct udma_jfs_cfg_ex { + struct ubcore_jfs_cfg base_cfg; + struct ubcore_udata udata; + struct udma_jfs_cstm_cfg cstm_cfg; + ubcore_event_callback_t jfae_handler; +}; + +struct udma_jfc_cstm_cfg { + struct udma_que_cfg_ex cq; /* PA; should be using stars hw register addr. */ +}; + +struct udma_jfc_cfg_ex { + struct ubcore_jfc_cfg base_cfg; + struct ubcore_udata udata; + struct udma_jfc_cstm_cfg cstm_cfg; + ubcore_comp_callback_t jfce_handler; + ubcore_event_callback_t jfae_handler; +}; + +enum udma_jfc_type { + UDMA_NORMAL_JFC_TYPE, + UDMA_STARS_JFC_TYPE, + UDMA_CCU_JFC_TYPE, + UDMA_KERNEL_STARS_JFC_TYPE, + UDMA_JFC_TYPE_NUM, +}; + +struct udma_set_cqe_ex { + uint64_t addr; + uint32_t len; + enum udma_jfc_type jfc_type; +}; + +struct udma_ue_info_ex { + uint16_t ue_id; + uint32_t chip_id; + uint32_t die_id; + uint32_t offset_len; + resource_size_t db_base_addr; + resource_size_t dwqe_addr; + resource_size_t register_base_addr; +}; + +struct udma_tp_sport_in { + uint32_t tpn; +}; + +struct udma_tp_sport_out { + uint32_t data_udp_srcport; + uint32_t ack_udp_srcport; +}; + +struct udma_cqe_info_in { + enum ubcore_cr_status status; + uint8_t s_r; +}; + +enum udma_cqe_aux_info_type { + TPP2TQEM_WR_CNT, + DEVICE_RAS_STATUS_2, + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TWP_AE_DFX_FOR_CQE, + PA_OUT_PKT_ERR_CNT, + TP_DAM_AXI_ALARM, + TP_DAM_VFT_BT_ALARM, + TP_EUM_AXI_ALARM, + TP_EUM_VFT_BT_ALARM, + TP_TPMM_AXI_ALARM, + TP_TPMM_VFT_BT_ALARM, + TP_TPGCM_AXI_ALARM, + TP_TPGCM_VFT_BT_ALARM, + TWP_ALM, + TP_RWP_INNER_ALM_FOR_CQE, + TWP_DFX21, + LQC_TA_RNR_TANACK_CNT, + FVT, + RQMT0, + RQMT1, + RQMT2, + RQMT3, + RQMT4, + RQMT5, + RQMT6, + RQMT7, + RQMT8, + RQMT9, + RQMT10, + RQMT11, + RQMT12, + RQMT13, + RQMT14, + RQMT15, + PROC_ERROR_ALM, + LQC_TA_TIMEOUT_TAACK_CNT, + TP_RRP_ERR_FLG_0_FOR_CQE, + MAX_CQE_AUX_INFO_TYPE_NUM +}; + +struct udma_cqe_aux_info_out { + enum udma_cqe_aux_info_type *aux_info_type; + uint32_t *aux_info_value; + uint32_t aux_info_num; +}; + +struct udma_ae_info_in { + uint32_t event_type; +}; + +enum udma_ae_aux_info_type { + TP_RRP_FLUSH_TIMER_PKT_CNT, + TPP_DFX5, + TWP_AE_DFX_FOR_AE, + TP_RRP_ERR_FLG_0_FOR_AE, + TP_RRP_ERR_FLG_1, + TP_RWP_INNER_ALM_FOR_AE, + TP_RCP_INNER_ALM_FOR_AE, + LQC_TA_TQEP_WQE_ERR, + LQC_TA_CQM_CQE_INNER_ALARM, + MAX_AE_AUX_INFO_TYPE_NUM +}; + +struct udma_ae_aux_info_out { + enum udma_ae_aux_info_type *aux_info_type; + uint32_t *aux_info_value; + uint32_t aux_info_num; +}; + +enum udma_user_ctl_opcode { + UDMA_USER_CTL_CREATE_JFS_EX, + UDMA_USER_CTL_DELETE_JFS_EX, + UDMA_USER_CTL_CREATE_JFC_EX, + UDMA_USER_CTL_DELETE_JFC_EX, + UDMA_USER_CTL_SET_CQE_ADDR, + UDMA_USER_CTL_QUERY_UE_INFO, + 
UDMA_USER_CTL_GET_DEV_RES_RATIO,
+	UDMA_USER_CTL_NPU_REGISTER_INFO_CB,
+	UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB,
+	UDMA_USER_CTL_QUERY_TP_SPORT,
+	UDMA_USER_CTL_QUERY_CQE_AUX_INFO,
+	UDMA_USER_CTL_QUERY_AE_AUX_INFO,
+	UDMA_USER_CTL_QUERY_UBMEM_INFO,
+	UDMA_USER_CTL_QUERY_PAIR_DEVNUM,
+	UDMA_USER_CTL_MAX,
+};
+
+struct udma_ctrlq_event_nb {
+	uint8_t opcode;
+	int (*crq_handler)(struct ubcore_device *dev, void *data, uint16_t len);
+};
+
+struct udma_dev_pair_info {
+	uint32_t peer_dev_id;
+	uint32_t slot_id;
+	uint32_t pair_num;
+	struct {
+		uint32_t local_eid[UDMA_BUS_INSTANCE_SEID_SIZE];
+		uint32_t remote_eid[UDMA_BUS_INSTANCE_SEID_SIZE];
+		uint32_t flag : 16;
+		uint32_t hop : 4;
+		uint32_t rsv : 12;
+	} eid_pairs[UDMA_EID_PAIRS_COUNT];
+};
+
+static inline bool udma_check_base_param(uint64_t addr, uint32_t in_len, uint32_t len)
+{
+	return (addr == 0 || in_len != len);
+}
+
+typedef int (*udma_user_ctl_ops)(struct ubcore_device *dev, struct ubcore_ucontext *uctx,
+				 struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out);
+
+int udma_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl);
+int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx,
+			    struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out);
+int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx,
+			   struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out);
+
+#endif /* _UB_UMDK_URMA_UDMA_UDMA_CTL_H_ */
-- 
Gitee

From fef27a3d1407ec2a40ee7efa9ff05ac950073c97 Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 18 Sep 2025 10:17:46 +0800
Subject: [PATCH 043/243] ub: udma: Register and unregister debugfs.

commit a94ef92be17600ecdaa3f210265523a2b9049847 openEuler

This patch adds the ability to register and unregister debugfs.
During driver loading, UDMA registers its debugfs entries; during
driver unloading, UDMA unregisters them.

Signed-off-by: Wei Qin
Signed-off-by: Chunzhi Hu
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/Makefile       |   2 +-
 drivers/ub/urma/hw/udma/udma_debugfs.c | 262 +++++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_debugfs.h |  83 ++++++++
 drivers/ub/urma/hw/udma/udma_dev.h     |   1 +
 drivers/ub/urma/hw/udma/udma_main.c    |   9 +-
 5 files changed, 355 insertions(+), 2 deletions(-)
 create mode 100644 drivers/ub/urma/hw/udma/udma_debugfs.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_debugfs.h

diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile
index 0561b3f85191..416fff14aa6a 100644
--- a/drivers/ub/urma/hw/udma/Makefile
+++ b/drivers/ub/urma/hw/udma/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0+
 udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \
-		udma_rct.o udma_tid.o
+		udma_rct.o udma_tid.o udma_debugfs.o
 
 obj-m := udma.o
 
diff --git a/drivers/ub/urma/hw/udma/udma_debugfs.c b/drivers/ub/urma/hw/udma/udma_debugfs.c
new file mode 100644
index 000000000000..efc94a3d7e9e
--- /dev/null
+++ b/drivers/ub/urma/hw/udma/udma_debugfs.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
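*/
+
+/*
+ * Editor's note (illustrative, not part of the original patch): with
+ * debugfs mounted at /sys/kernel/debug, this file creates
+ *
+ *	/sys/kernel/debug/udma/<dev_name>/ta/mrd
+ *	/sys/kernel/debug/udma/<dev_name>/tp/rxtx
+ *
+ * Reading a file issues the matching query command via
+ * ubase_cmd_send_inout() and dumps the returned counters to the kernel
+ * log, e.g.:
+ *
+ *	cat /sys/kernel/debug/udma/<dev_name>/ta/mrd; dmesg | tail
+ */
+
+/* udma debugfs support 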
*/ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include "udma_cmd.h" +#include "udma_debugfs.h" + +static struct dentry *g_udma_dbgfs_root; + +static struct udma_debugfs_file_info g_ta_dfx_mod[TA_MAX_SIZE] = { + {"mrd", RDONLY, UDMA_CMD_DEBUGFS_TA_INFO, UDMA_TA_MRD}, +}; + +static struct udma_debugfs_file_info g_tp_dfx_mod[TP_MAX_SIZE] = { + {"rxtx", RDONLY, UDMA_CMD_DEBUGFS_TP_INFO, UDMA_TP_RXTX}, +}; + +static void show_ta_mrd_dfx(struct udma_query_mrd_dfx *data) +{ + pr_info("****************** ta_mrd_dfx ******************\n"); + pr_info("mrd_dsqe_issue_cnt\t0x%08x\n", data->mrd_dsqe_issue_cnt); + pr_info("mrd_dsqe_exec_cnt\t0x%08x\n", data->mrd_dsqe_exec_cnt); + pr_info("mrd_dsqe_drop_cnt\t0x%08x\n", data->mrd_dsqe_drop_cnt); + pr_info("mrd_jfsdb_issue_cnt\t0x%08x\n", data->mrd_jfsdb_issue_cnt); + pr_info("mrd_jfsdb_exec_cnt\t0x%08x\n", data->mrd_jfsdb_exec_cnt); + pr_info("mrd_mb_issue_cnt\t\t0x%08x\n", data->mrd_mb_issue_cnt); + pr_info("mrd_mb_exec_cnt\t\t0x%08x\n", data->mrd_mb_exec_cnt); + pr_info("mrd_eqdb_issue_cnt\t0x%08x\n", data->mrd_eqdb_issue_cnt); + pr_info("mrd_mb_buff_full\t\t0x%08x\n", data->mrd_mb_buff_full); + pr_info("mrd_mb_buff_empty\t0x%08x\n", data->mrd_mb_buff_empty); + pr_info("mrd_mem_ecc_err_1b\t0x%08x\n", data->mrd_mem_ecc_err_1b); + pr_info("mrd_mem_ecc_1b_info\t0x%08x\n", data->mrd_mem_ecc_1b_info); + pr_info("mrd_mb_state\t\t0x%08x\n", data->mrd_mb_state); + pr_info("mrd_eqdb_exec_cnt\t0x%08x\n", data->mrd_eqdb_exec_cnt); + pr_info("****************** ta_mrd_dfx ******************\n"); +} + +static void show_tp_rxtx_dfx(struct udma_query_rxtx_dfx *data) +{ + pr_info("****************** tp_rxtx_dfx ******************\n"); + pr_info("tpp2_txdma_hdr_um_pkt_cnt\t0x%016llx\n", data->tpp2_txdma_hdr_um_pkt_cnt); + pr_info("tpp2_txdma_ctp_rm_pkt_cnt\t0x%016llx\n", data->tpp2_txdma_ctp_rm_pkt_cnt); + pr_info("tpp2_txdma_ctp_rc_pkt_cnt\t0x%016llx\n", data->tpp2_txdma_ctp_rc_pkt_cnt); + pr_info("tpp2_txdma_tp_rm_pkt_cnt\t\t0x%016llx\n", data->tpp2_txdma_tp_rm_pkt_cnt); + pr_info("tpp2_txdma_tp_rc_pkt_cnt\t\t0x%016llx\n", data->tpp2_txdma_tp_rc_pkt_cnt); + pr_info("rhp_glb_rm_pkt_cnt\t\t0x%016llx\n", data->rhp_glb_rm_pkt_cnt); + pr_info("rhp_glb_rc_pkt_cnt\t\t0x%016llx\n", data->rhp_glb_rc_pkt_cnt); + pr_info("rhp_clan_rm_pkt_cnt\t\t0x%016llx\n", data->rhp_clan_rm_pkt_cnt); + pr_info("rhp_clan_rc_pkt_cnt\t\t0x%016llx\n", data->rhp_clan_rc_pkt_cnt); + pr_info("rhp_ud_pkt_cnt\t\t\t0x%016llx\n", data->rhp_ud_pkt_cnt); + pr_info("****************** tp_rxtx_dfx ******************\n"); +} + +static int udma_query_mrd_dfx(struct file_private_data *private_data) +{ + struct udma_query_mrd_dfx out_regs; + struct ubase_cmd_buf in, out; + int ret; + + out_regs.sub_module = private_data->sub_opcode; + udma_fill_buf(&in, private_data->opcode, true, + sizeof(struct udma_query_mrd_dfx), &out_regs); + udma_fill_buf(&out, private_data->opcode, true, + sizeof(struct udma_query_mrd_dfx), &out_regs); + + ret = ubase_cmd_send_inout(private_data->udma_dev->comdev.adev, &in, &out); + if (ret) { + dev_err(private_data->udma_dev->dev, "failed to query mrd DFX, ret = %d.\n", ret); + return ret; + } + + show_ta_mrd_dfx(&out_regs); + + return 0; +} + +static int udma_query_rxtx_dfx(struct file_private_data *private_data) +{ + struct udma_query_rxtx_dfx out_regs; + struct ubase_cmd_buf in, out; + int ret; + + out_regs.sub_module = private_data->sub_opcode; + udma_fill_buf(&in, private_data->opcode, true, sizeof(struct 
udma_query_rxtx_dfx),
+		      &out_regs);
+	udma_fill_buf(&out, private_data->opcode, true,
+		      sizeof(struct udma_query_rxtx_dfx), &out_regs);
+
+	ret = ubase_cmd_send_inout(private_data->udma_dev->comdev.adev, &in, &out);
+	if (ret) {
+		dev_err(private_data->udma_dev->dev, "failed to query rxtx DFX, ret = %d.\n", ret);
+		return ret;
+	}
+
+	show_tp_rxtx_dfx(&out_regs);
+
+	return 0;
+}
+
+static inline int udma_debugfs_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t udma_debugfs_read(struct file *filp, char __user *buf,
+				 size_t size, loff_t *ppos)
+{
+	struct file_private_data *private_data = filp->private_data;
+	int ret;
+
+	switch (private_data->sub_opcode) {
+	case UDMA_TA_MRD:
+		ret = udma_query_mrd_dfx(private_data);
+		break;
+	case UDMA_TP_RXTX:
+		ret = udma_query_rxtx_dfx(private_data);
+		break;
+	default:
+		dev_err(private_data->udma_dev->dev, "invalid type %u.\n",
+			private_data->sub_opcode);
+		return -EFAULT;
+	}
+
+	return ret;
+}
+
+static const struct file_operations udma_debugfs_rd_fops = {
+	.owner = THIS_MODULE,
+	.read = udma_debugfs_read,
+	.open = udma_debugfs_open,
+};
+
+static const uint16_t file_mod[FILE_MOD_SIZE] = {
+	0400, 0200,
+};
+
+static int udma_debugfs_create_files(struct udma_dev *udma_dev, struct udma_dev_debugfs *dbgfs)
+{
+	struct file_private_data *private_data;
+	struct file_private_data *cur_p;
+	struct dentry *entry;
+	int i;
+
+	private_data = kzalloc(sizeof(struct file_private_data) * (TA_MAX_SIZE + TP_MAX_SIZE),
+			       GFP_KERNEL);
+	if (!private_data)
+		return -ENOMEM;
+
+	for (i = 0; i < TA_MAX_SIZE; ++i) {
+		cur_p = private_data + i;
+		cur_p->udma_dev = udma_dev;
+		cur_p->opcode = g_ta_dfx_mod[i].opcode;
+		cur_p->sub_opcode = g_ta_dfx_mod[i].sub_opcode;
+		entry = debugfs_create_file(g_ta_dfx_mod[i].name, file_mod[g_ta_dfx_mod[i].fmod],
+					    dbgfs->ta_root, cur_p, &udma_debugfs_rd_fops);
+		if (IS_ERR(entry)) {
+			dev_err(udma_dev->dev, "create %s failed.\n", g_ta_dfx_mod[i].name);
+			kfree(private_data);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < TP_MAX_SIZE; ++i) {
+		cur_p = private_data + i + TA_MAX_SIZE;
+		cur_p->udma_dev = udma_dev;
+		cur_p->opcode = g_tp_dfx_mod[i].opcode;
+		cur_p->sub_opcode = g_tp_dfx_mod[i].sub_opcode;
+		entry = debugfs_create_file(g_tp_dfx_mod[i].name, file_mod[g_tp_dfx_mod[i].fmod],
+					    dbgfs->tp_root, cur_p, &udma_debugfs_rd_fops);
+		if (IS_ERR(entry)) {
+			dev_err(udma_dev->dev, "create %s failed.\n", g_tp_dfx_mod[i].name);
+			kfree(private_data);
+			return -EINVAL;
+		}
+	}
+
+	dbgfs->private_data = private_data;
+	dbgfs->private_data_size = TA_MAX_SIZE + TP_MAX_SIZE;
+
+	return 0;
+}
+
+void udma_register_debugfs(struct udma_dev *udma_dev)
+{
+	struct udma_dev_debugfs *dbgfs;
+
+	if (IS_ERR_OR_NULL(g_udma_dbgfs_root)) {
+		dev_err(udma_dev->dev, "Debugfs root path does not exist.\n");
+		goto create_error;
+	}
+
+	dbgfs = kzalloc(sizeof(*dbgfs), GFP_KERNEL);
+	if (!dbgfs)
+		goto create_error;
+
+	dbgfs->root = debugfs_create_dir(udma_dev->dev_name, g_udma_dbgfs_root);
+	if (IS_ERR(dbgfs->root)) {
+		dev_err(udma_dev->dev, "Debugfs create dev path failed.\n");
+		goto create_dev_error;
+	}
+
+	dbgfs->ta_root = debugfs_create_dir("ta", dbgfs->root);
+	if (IS_ERR(dbgfs->ta_root)) {
+		dev_err(udma_dev->dev, "Debugfs create ta path failed.\n");
+		goto create_path_error;
+	}
+
+	dbgfs->tp_root = debugfs_create_dir("tp", dbgfs->root);
+	if (IS_ERR(dbgfs->tp_root)) {
+		dev_err(udma_dev->dev, "Debugfs create tp path failed.\n");
+		goto create_path_error;
+	}
+
+	if 
(udma_debugfs_create_files(udma_dev, dbgfs)) { + dev_err(udma_dev->dev, "Debugfs create files failed.\n"); + goto create_path_error; + } + + udma_dev->dbgfs = dbgfs; + + return; + +create_path_error: + debugfs_remove_recursive(dbgfs->root); +create_dev_error: + kfree(dbgfs); +create_error: + udma_dev->dbgfs = NULL; +} + +void udma_unregister_debugfs(struct udma_dev *udma_dev) +{ + if (IS_ERR_OR_NULL(g_udma_dbgfs_root)) + return; + + if (!udma_dev->dbgfs) + return; + + debugfs_remove_recursive(udma_dev->dbgfs->root); + kfree(udma_dev->dbgfs->private_data); + kfree(udma_dev->dbgfs); + udma_dev->dbgfs = NULL; +} + +void udma_init_debugfs(void) +{ + g_udma_dbgfs_root = debugfs_create_dir("udma", NULL); +} + +void udma_uninit_debugfs(void) +{ + debugfs_remove_recursive(g_udma_dbgfs_root); + g_udma_dbgfs_root = NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_debugfs.h b/drivers/ub/urma/hw/udma/udma_debugfs.h new file mode 100644 index 000000000000..d49440251dab --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_debugfs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_DEBUGFS_H__ +#define __UDMA_DEBUGFS_H__ + +#include "udma_dev.h" +#include "udma_cmd.h" + +#define TA_MAX_SIZE 1 +#define TP_MAX_SIZE 1 +#define FILE_MOD_SIZE 2 + +enum udma_dfx_sub_opcode { + UDMA_TA_MRD, + UDMA_TP_RXTX, +}; + +enum udma_debugfs_file_mod { + RDONLY, +}; + +struct udma_debugfs_file_info { + const char *name; + enum udma_debugfs_file_mod fmod; + enum udma_cmd_opcode_type opcode; + enum udma_dfx_sub_opcode sub_opcode; +}; + +struct udma_query_rxtx_dfx { + uint32_t sub_module; + uint64_t tpp2_txdma_hdr_um_pkt_cnt; + uint64_t tpp2_txdma_ctp_rm_pkt_cnt; + uint64_t tpp2_txdma_ctp_rc_pkt_cnt; + uint64_t tpp2_txdma_tp_rm_pkt_cnt; + uint64_t tpp2_txdma_tp_rc_pkt_cnt; + uint64_t rhp_glb_rm_pkt_cnt; + uint64_t rhp_glb_rc_pkt_cnt; + uint64_t rhp_clan_rm_pkt_cnt; + uint64_t rhp_clan_rc_pkt_cnt; + uint64_t rhp_ud_pkt_cnt; + uint32_t rsvd[16]; +}; + +struct udma_query_mrd_dfx { + uint32_t sub_module; + uint32_t mrd_dsqe_issue_cnt; + uint32_t mrd_dsqe_exec_cnt; + uint32_t mrd_dsqe_drop_cnt; + uint32_t mrd_jfsdb_issue_cnt; + uint32_t mrd_jfsdb_exec_cnt; + uint32_t mrd_mb_issue_cnt; + uint32_t mrd_mb_exec_cnt; + uint32_t mrd_eqdb_issue_cnt; + uint32_t mrd_mb_buff_full; + uint32_t mrd_mb_buff_empty; + uint32_t mrd_mem_ecc_err_1b; + uint32_t mrd_mem_ecc_1b_info; + uint32_t mrd_mb_state; + uint32_t mrd_eqdb_exec_cnt; + uint32_t rsvd[7]; +}; + +struct udma_dev_debugfs { + struct dentry *root; + struct dentry *ta_root; + struct dentry *tp_root; + struct file_private_data *private_data; + uint32_t private_data_size; +}; + +struct file_private_data { + struct udma_dev *udma_dev; + enum udma_cmd_opcode_type opcode; + enum udma_dfx_sub_opcode sub_opcode; +}; + +void udma_init_debugfs(void); +void udma_uninit_debugfs(void); +void udma_unregister_debugfs(struct udma_dev *udma_dev); +void udma_register_debugfs(struct udma_dev *udma_dev); + +#endif /* __UDMA_DEBUGFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index a4df1f114d23..c12e390c962a 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -107,6 +107,7 @@ struct udma_dev { struct list_head db_list[UDMA_DB_TYPE_NUM]; struct mutex db_mutex; uint32_t status; + struct udma_dev_debugfs *dbgfs; uint32_t ue_num; uint32_t ue_id; struct page *db_page; diff --git a/drivers/ub/urma/hw/udma/udma_main.c 
b/drivers/ub/urma/hw/udma/udma_main.c
index 77ade2c69f4a..15088884080c 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -21,6 +21,7 @@
 #include "udma_ctx.h"
 #include "udma_rct.h"
 #include "udma_tid.h"
+#include "udma_debugfs.h"
 #include "udma_common.h"
 #include "udma_ctrlq_tp.h"
 
@@ -788,6 +789,7 @@ static int udma_init_dev(struct auxiliary_device *adev)
 		goto err_create;
 	}
 
+	udma_register_debugfs(udma_dev);
 	udma_dev->status = UDMA_NORMAL;
 	mutex_unlock(&udma_reset_mutex);
 	dev_info(udma_dev->dev, "init udma successfully.\n");
@@ -820,6 +822,7 @@ void udma_reset_down(struct auxiliary_device *adev)
 
 	udma_dev->status = UDMA_SUSPEND;
 
+	udma_unregister_debugfs(udma_dev);
 	udma_unset_ubcore_dev(udma_dev);
 	mutex_unlock(&udma_reset_mutex);
 }
@@ -889,9 +892,12 @@ static int __init udma_init(void)
 {
 	int ret;
 
+	udma_init_debugfs();
 	ret = auxiliary_driver_register(&udma_drv);
-	if (ret)
+	if (ret) {
 		pr_err("failed to register auxiliary_driver\n");
+		udma_uninit_debugfs();
+	}
 
 	return ret;
 }
@@ -900,6 +906,7 @@ static void __exit udma_exit(void)
 {
 	is_rmmod = true;
 	auxiliary_driver_unregister(&udma_drv);
+	udma_uninit_debugfs();
 }
 
 module_init(udma_init);
-- 
Gitee

From 1aea6167efb735ae8a33d1a47c4af727f9f53f8e Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 18 Sep 2025 10:25:32 +0800
Subject: [PATCH 044/243] ub: udma: Support register and unregister ae event.

commit 73d92d483ecd28324f902da53838bb7c1e183a5f openEuler

This patch adds the ability to register and unregister TA-related AE
(async event) handling. During driver loading, UDMA registers its AE
event notifiers; during driver unloading, UDMA unregisters them.

Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/Makefile      |   2 +-
 drivers/ub/urma/hw/udma/udma_common.h |  11 +
 drivers/ub/urma/hw/udma/udma_dev.h    |   1 +
 drivers/ub/urma/hw/udma/udma_eq.c     | 281 ++++++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_eq.h     |  10 +
 drivers/ub/urma/hw/udma/udma_jetty.h  |   5 +
 drivers/ub/urma/hw/udma/udma_jfc.h    |  35 ++++
 drivers/ub/urma/hw/udma/udma_jfr.h    |  43 ++++
 drivers/ub/urma/hw/udma/udma_main.c   |  28 ++-
 9 files changed, 414 insertions(+), 2 deletions(-)
 create mode 100644 drivers/ub/urma/hw/udma/udma_eq.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_eq.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_jfc.h
 create mode 100644 drivers/ub/urma/hw/udma/udma_jfr.h

diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile
index 416fff14aa6a..ac9f9885b1e8 100644
--- a/drivers/ub/urma/hw/udma/Makefile
+++ b/drivers/ub/urma/hw/udma/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0+
 udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \
-		udma_rct.o udma_tid.o udma_debugfs.o
+		udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o
 
 obj-m := udma.o
 
diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h
index f3f32862db0a..c6e83a0d84c3 100644
--- a/drivers/ub/urma/hw/udma/udma_common.h
+++ b/drivers/ub/urma/hw/udma/udma_common.h
@@ -8,6 +8,17 @@
 #include
 #include "udma_dev.h"
 
+struct udma_jetty_grp {
+	struct ubcore_jetty_group ubcore_jetty_grp;
+	uint32_t start_jetty_id;
+	uint32_t next_jetty_id;
+	uint32_t jetty_grp_id;
+	uint32_t valid;
+	struct mutex valid_lock;
+	refcount_t ae_refcount;
+	struct completion ae_comp;
+};
+
 struct udma_jetty_queue {
 	struct udma_buf buf;
 	void *kva_curr;
diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h
index 
c12e390c962a..cc073ec8d97d 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -96,6 +96,7 @@ struct udma_dev { struct xarray npu_nb_table; struct mutex npu_nb_mutex; struct xarray tpn_ue_idx_table; + struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; resource_size_t db_base; void __iomem *k_db_base; struct xarray ksva_table; diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c new file mode 100644 index 000000000000..8ac14585ec6c --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include "udma_ctrlq_tp.h" +#include "udma_dev.h" +#include "udma_cmd.h" +#include "udma_jfs.h" +#include "udma_jfr.h" +#include "udma_jfc.h" +#include "udma_jetty.h" +#include +#include "udma_eq.h" + +static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_num) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_jetty *ubcore_jetty; + struct udma_jetty_queue *udma_sq; + struct udma_jetty *udma_jetty; + struct ubcore_jfs *ubcore_jfs; + struct udma_jfs *udma_jfs; + struct ubcore_event ae; + + xa_lock(&udma_dev->jetty_table.xa); + udma_sq = (struct udma_jetty_queue *)xa_load(&udma_dev->jetty_table.xa, queue_num); + if (!udma_sq) { + dev_warn(udma_dev->dev, + "async event for bogus queue number = %u.\n", queue_num); + xa_unlock(&udma_dev->jetty_table.xa); + return -EINVAL; + } + + if (udma_sq->is_jetty) { + udma_jetty = to_udma_jetty_from_queue(udma_sq); + ubcore_jetty = &udma_jetty->ubcore_jetty; + if (ubcore_jetty->jfae_handler) { + refcount_inc(&udma_jetty->ae_refcount); + xa_unlock(&udma_dev->jetty_table.xa); + ae.ub_dev = ubcore_jetty->ub_dev; + ae.element.jetty = ubcore_jetty; + ae.event_type = UBCORE_EVENT_JETTY_ERR; + ubcore_jetty->jfae_handler(&ae, ubcore_jetty->uctx); + if (refcount_dec_and_test(&udma_jetty->ae_refcount)) + complete(&udma_jetty->ae_comp); + } else { + xa_unlock(&udma_dev->jetty_table.xa); + } + } else { + udma_jfs = to_udma_jfs_from_queue(udma_sq); + ubcore_jfs = &udma_jfs->ubcore_jfs; + if (ubcore_jfs->jfae_handler) { + refcount_inc(&udma_jfs->ae_refcount); + xa_unlock(&udma_dev->jetty_table.xa); + ae.ub_dev = ubcore_jfs->ub_dev; + ae.element.jfs = ubcore_jfs; + ae.event_type = UBCORE_EVENT_JFS_ERR; + ubcore_jfs->jfae_handler(&ae, ubcore_jfs->uctx); + if (refcount_dec_and_test(&udma_jfs->ae_refcount)) + complete(&udma_jfs->ae_comp); + } else { + xa_unlock(&udma_dev->jetty_table.xa); + } + } + + return 0; +} + +static int udma_ae_jfr_check_err(struct auxiliary_device *adev, uint32_t queue_num, + enum ubcore_event_type ubcore_etype) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_jfr *ubcore_jfr; + struct udma_jfr *udma_jfr; + struct ubcore_event ae; + + xa_lock(&udma_dev->jfr_table.xa); + udma_jfr = (struct udma_jfr *)xa_load(&udma_dev->jfr_table.xa, queue_num); + if (!udma_jfr) { + dev_warn(udma_dev->dev, + "async event for bogus jfr number = %u.\n", queue_num); + xa_unlock(&udma_dev->jfr_table.xa); + return -EINVAL; + } + + ubcore_jfr = &udma_jfr->ubcore_jfr; + if (ubcore_jfr->jfae_handler) { + refcount_inc(&udma_jfr->ae_refcount); + xa_unlock(&udma_dev->jfr_table.xa); + ae.ub_dev = ubcore_jfr->ub_dev; + ae.element.jfr = ubcore_jfr; + ae.event_type = ubcore_etype; + 
ubcore_jfr->jfae_handler(&ae, ubcore_jfr->uctx); + if (refcount_dec_and_test(&udma_jfr->ae_refcount)) + complete(&udma_jfr->ae_comp); + } else { + xa_unlock(&udma_dev->jfr_table.xa); + } + + return 0; +} + +static int udma_ae_jfc_check_err(struct auxiliary_device *adev, uint32_t queue_num) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_jfc *ubcore_jfc; + struct udma_jfc *udma_jfc; + struct ubcore_event ae; + unsigned long flags; + + xa_lock_irqsave(&udma_dev->jfc_table.xa, flags); + udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, queue_num); + if (!udma_jfc) { + dev_warn(udma_dev->dev, + "async event for bogus jfc number = %u.\n", queue_num); + xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); + return -EINVAL; + } + + ubcore_jfc = &udma_jfc->base; + if (ubcore_jfc->jfae_handler) { + refcount_inc(&udma_jfc->event_refcount); + xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); + ae.ub_dev = ubcore_jfc->ub_dev; + ae.element.jfc = ubcore_jfc; + ae.event_type = UBCORE_EVENT_JFC_ERR; + ubcore_jfc->jfae_handler(&ae, ubcore_jfc->uctx); + if (refcount_dec_and_test(&udma_jfc->event_refcount)) + complete(&udma_jfc->event_comp); + } else { + xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); + } + + return 0; +} + +static int udma_ae_jetty_group_check_err(struct auxiliary_device *adev, uint32_t queue_num) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_jetty_group *ubcore_jetty_grp; + struct udma_jetty_grp *udma_jetty_grp; + struct ubcore_event ae; + + xa_lock(&udma_dev->jetty_grp_table.xa); + udma_jetty_grp = (struct udma_jetty_grp *)xa_load(&udma_dev->jetty_grp_table.xa, queue_num); + if (!udma_jetty_grp) { + dev_warn(udma_dev->dev, + "async event for bogus jetty group number = %u.\n", queue_num); + xa_unlock(&udma_dev->jetty_grp_table.xa); + return -EINVAL; + } + + ubcore_jetty_grp = &udma_jetty_grp->ubcore_jetty_grp; + if (ubcore_jetty_grp->jfae_handler) { + refcount_inc(&udma_jetty_grp->ae_refcount); + xa_unlock(&udma_dev->jetty_grp_table.xa); + ae.ub_dev = ubcore_jetty_grp->ub_dev; + ae.element.jetty_grp = ubcore_jetty_grp; + ae.event_type = UBCORE_EVENT_JETTY_GRP_ERR; + ubcore_jetty_grp->jfae_handler(&ae, ubcore_jetty_grp->uctx); + if (refcount_dec_and_test(&udma_jetty_grp->ae_refcount)) + complete(&udma_jetty_grp->ae_comp); + } else { + xa_unlock(&udma_dev->jetty_grp_table.xa); + } + + return 0; +} + +static int udma_ae_jetty_level_error(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct ubase_event_nb *ev_nb = container_of(nb, struct ubase_event_nb, nb); + struct auxiliary_device *adev = (struct auxiliary_device *)ev_nb->back; + struct ubase_aeq_notify_info *info = data; + uint32_t queue_num; + + queue_num = info->aeqe->event.queue_event.num; + + dev_warn(&adev->dev, + "trigger jetty level ae, event type is %d, sub type is %d, queue_num is %u.\n", + info->event_type, info->sub_type, queue_num); + + if (info->event_type == UBASE_EVENT_TYPE_JFR_LIMIT_REACHED) + return udma_ae_jfr_check_err(adev, queue_num, UBCORE_EVENT_JFR_LIMIT_REACHED); + + switch (info->sub_type) { + case UBASE_SUBEVENT_TYPE_JFS_CHECK_ERROR: + return udma_ae_jfs_check_err(adev, queue_num); + case UBASE_SUBEVENT_TYPE_JFR_CHECK_ERROR: + return udma_ae_jfr_check_err(adev, queue_num, UBCORE_EVENT_JFR_ERR); + case UBASE_SUBEVENT_TYPE_JFC_CHECK_ERROR: + return udma_ae_jfc_check_err(adev, queue_num); + case UBASE_SUBEVENT_TYPE_JETTY_GROUP_CHECK_ERROR: + return udma_ae_jetty_group_check_err(adev, queue_num); + default: + dev_warn(&adev->dev, 
+ "udma get unsupported async event.\n"); + return -EINVAL; + } +} + +struct ae_operation { + uint32_t op_code; + notifier_fn_t call; +}; + +static struct ae_operation udma_ae_opts[] = { + {UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR, udma_ae_jetty_level_error}, + {UBASE_EVENT_TYPE_JFR_LIMIT_REACHED, udma_ae_jetty_level_error}, +}; + +void udma_unregister_ae_event(struct auxiliary_device *adev) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + int i; + + for (i = 0; i < UBASE_EVENT_TYPE_MAX; i++) { + if (udma_dev->ae_event_addr[i]) { + ubase_event_unregister(adev, udma_dev->ae_event_addr[i]); + kfree(udma_dev->ae_event_addr[i]); + udma_dev->ae_event_addr[i] = NULL; + } + } +} + +static int +udma_event_register(struct auxiliary_device *adev, enum ubase_event_type event_type, + int (*call)(struct notifier_block *nb, + unsigned long action, void *data)) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubase_event_nb *cb; + int ret = 0; + + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; + + cb->drv_type = UBASE_DRV_UDMA; + cb->event_type = event_type; + cb->back = (void *)adev; + cb->nb.notifier_call = call; + + ret = ubase_event_register(adev, cb); + if (ret) { + dev_err(&adev->dev, + "failed to register async event, event type = %u, ret = %d.\n", + cb->event_type, ret); + kfree(cb); + return ret; + } + udma_dev->ae_event_addr[event_type] = cb; + + return 0; +} + +/* thanks to drivers/infiniband/hw/erdma/erdma_eq.c */ +int udma_register_ae_event(struct auxiliary_device *adev) +{ + uint32_t i, opt_num; + int ret; + + opt_num = sizeof(udma_ae_opts) / sizeof(struct ae_operation); + for (i = 0; i < opt_num; ++i) { + ret = udma_event_register(adev, udma_ae_opts[i].op_code, udma_ae_opts[i].call); + if (ret) { + udma_unregister_ae_event(adev); + break; + } + } + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_eq.h b/drivers/ub/urma/hw/udma/udma_eq.h new file mode 100644 index 000000000000..f771483e168f --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_eq.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_EQ_H__ +#define __UDMA_EQ_H__ + +int udma_register_ae_event(struct auxiliary_device *adev); +void udma_unregister_ae_event(struct auxiliary_device *adev); + +#endif /* __UDMA_EQ_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index e213278bcca3..00a3c41b39b6 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -22,6 +22,11 @@ static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) return container_of(jetty, struct udma_jetty, ubcore_jetty); } +static inline struct udma_jetty_grp *to_udma_jetty_grp(struct ubcore_jetty_group *jetty_grp) +{ + return container_of(jetty_grp, struct udma_jetty_grp, ubcore_jetty_grp); +} + static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queue *queue) { return container_of(queue, struct udma_jetty, sq); diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h new file mode 100644 index 000000000000..3841049c28a0 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef __UDMA_JFC_H__ +#define __UDMA_JFC_H__ + +#include "udma_dev.h" +#include "udma_ctx.h" + +struct udma_jfc { + struct ubcore_jfc base; + uint32_t jfcn; + uint32_t ceqn; + uint32_t tid; + struct udma_buf buf; + struct udma_sw_db db; + uint32_t ci; + uint32_t arm_sn; /* only kernel mode use */ + spinlock_t lock; + refcount_t event_refcount; + struct completion event_comp; + uint32_t lock_free; + uint32_t inline_en; + uint32_t mode; + uint64_t stars_chnl_addr; + bool stars_en; + uint32_t cq_shift; +}; + +static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) +{ + return container_of(jfc, struct udma_jfc, base); +} + +#endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h new file mode 100644 index 000000000000..cb1ecbaf3572 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#ifndef __UDMA_JFR_H__ +#define __UDMA_JFR_H__ + +#include "udma_dev.h" +#include "udma_ctx.h" +#include "udma_common.h" + +struct udma_jfr_idx_que { + struct udma_buf buf; + struct udma_table jfr_idx_table; +}; + +struct udma_jfr { + struct ubcore_jfr ubcore_jfr; + struct udma_jetty_queue rq; + struct udma_jfr_idx_que idx_que; + struct udma_sw_db sw_db; + struct udma_sw_db jfr_sleep_buf; + struct udma_context *udma_ctx; + uint32_t rx_threshold; + uint32_t wqe_cnt; + uint64_t jetty_addr; + enum ubcore_jfr_state state; + uint32_t max_sge; + spinlock_t lock; + refcount_t ae_refcount; + struct completion ae_comp; +}; + +static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *jfr) +{ + return container_of(jfr, struct udma_jfr, ubcore_jfr); +} + +static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *queue) +{ + return container_of(queue, struct udma_jfr, rq); +} + +#endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 15088884080c..2088da125e0b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -17,6 +17,7 @@ #include #include #include "udma_dev.h" +#include "udma_eq.h" #include "udma_cmd.h" #include "udma_ctx.h" #include "udma_rct.h" @@ -726,6 +727,22 @@ static struct udma_dev *udma_create_dev(struct auxiliary_device *adev) return NULL; } +static int udma_register_event(struct auxiliary_device *adev) +{ + int ret; + + ret = udma_register_ae_event(adev); + if (ret) + return ret; + + return 0; +} + +static void udma_unregister_event(struct auxiliary_device *adev) +{ + udma_unregister_ae_event(adev); +} + static bool udma_is_need_probe(struct auxiliary_device *adev) { struct udma_dev *udma_dev; @@ -783,10 +800,14 @@ static int udma_init_dev(struct auxiliary_device *adev) if (!udma_dev) goto err_create; + ret = udma_register_event(adev); + if (ret) + goto err_event_register; + ret = udma_set_ubcore_dev(udma_dev); if (ret) { dev_err(udma_dev->dev, "failed to set ubcore dev, ret is %d.\n", ret); - goto err_create; + goto err_set_ubcore_dev; } udma_register_debugfs(udma_dev); @@ -796,6 +817,10 @@ static int udma_init_dev(struct auxiliary_device *adev) return 0; +err_set_ubcore_dev: + udma_unregister_event(adev); +err_event_register: + udma_destroy_dev(udma_dev); err_create: mutex_unlock(&udma_reset_mutex); @@ -824,6 +849,7 @@ void udma_reset_down(struct auxiliary_device *adev) udma_unregister_debugfs(udma_dev); udma_unset_ubcore_dev(udma_dev); + 
udma_unregister_event(adev); mutex_unlock(&udma_reset_mutex); } -- Gitee From 7b0dbd3feb9ac3bb065f7278820a5ac5faa97626 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 18 Sep 2025 10:35:27 +0800 Subject: [PATCH 045/243] ub: udma: Support registering and unregistering CE and CRQ events. commit 354915ac2f4aad3d40da1d5e0a70ef8482d8de37 openEuler This patch adds support for registering and unregistering CE and CRQ events. During driver loading, udma registers the CE and CRQ event handlers; during driver unloading, it unregisters them. In addition, this patch adds functions to register and unregister the activate workqueue. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 2 +- drivers/ub/urma/hw/udma/udma_cmd.c | 55 +++++ drivers/ub/urma/hw/udma/udma_cmd.h | 4 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 + drivers/ub/urma/hw/udma/udma_dev.h | 1 + drivers/ub/urma/hw/udma/udma_eq.c | 273 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_eq.h | 15 ++ drivers/ub/urma/hw/udma/udma_jfc.c | 52 +++++ drivers/ub/urma/hw/udma/udma_jfc.h | 3 + drivers/ub/urma/hw/udma/udma_main.c | 104 ++++++++- 10 files changed, 511 insertions(+), 9 deletions(-) create mode 100644 drivers/ub/urma/hw/udma/udma_jfc.c diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index ac9f9885b1e8..3a3e3f18467f 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ - udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o + udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_cmd.c b/drivers/ub/urma/hw/udma/udma_cmd.c index 244646f86a42..6e4c66af0537 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.c +++ b/drivers/ub/urma/hw/udma/udma_cmd.c @@ -204,5 +204,60 @@ struct ubase_cmd_mailbox *udma_mailbox_query_ctx(struct udma_dev *udma_dev, return mailbox; } +int udma_close_ue_rx(struct udma_dev *dev, bool check_feature_enable, bool check_ta_flush, + bool is_reset, uint32_t tp_num) +{ + int ret = 0; + + if (check_ta_flush) + return ret; + + if (check_feature_enable && !(dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) + return ret; + + mutex_lock(&dev->disable_ue_rx_mutex); + if (dev->disable_ue_rx_count == 0 && !is_reset) { + ret = ubase_deactivate_dev(dev->comdev.adev); + if (ret) { + dev_err(dev->dev, "failed to close ue rx, ret = %d.\n", ret); + goto out; + } + } + if (tp_num) + dev->disable_ue_rx_count += tp_num; + else + dev->disable_ue_rx_count++; +out: + mutex_unlock(&dev->disable_ue_rx_mutex); + + return ret; +} + +int udma_open_ue_rx(struct udma_dev *dev, bool check_feature_enable, bool check_ta_flush, + bool is_reset, uint32_t tp_num) +{ + int ret = 0; + + if (check_ta_flush) + return ret; + + if (check_feature_enable && !(dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) + return ret; + + mutex_lock(&dev->disable_ue_rx_mutex); + if (tp_num) + dev->disable_ue_rx_count -= tp_num; + else + dev->disable_ue_rx_count--; + if (dev->disable_ue_rx_count == 0 && !is_reset) { + ret = ubase_activate_dev(dev->comdev.adev); + if (ret) + dev_err(dev->dev, "failed to open ue rx, ret = %d.\n", ret); + } + mutex_unlock(&dev->disable_ue_rx_mutex); + + return ret; +} + module_param(debug_switch, bool, 0444); MODULE_PARM_DESC(debug_switch, "set debug print ON, default: true"); diff --git
a/drivers/ub/urma/hw/udma/udma_cmd.h b/drivers/ub/urma/hw/udma/udma_cmd.h index 3dd27765fb56..fb1476350e07 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.h +++ b/drivers/ub/urma/hw/udma/udma_cmd.h @@ -237,5 +237,9 @@ int post_mailbox_update_ctx(struct udma_dev *udma_dev, void *ctx, uint32_t size, struct ubase_mbx_attr *attr); struct ubase_cmd_mailbox *udma_mailbox_query_ctx(struct udma_dev *udma_dev, struct ubase_mbx_attr *attr); +int udma_close_ue_rx(struct udma_dev *dev, bool check_feature_enable, bool check_ta_flush, + bool is_reset, uint32_t tp_num); +int udma_open_ue_rx(struct udma_dev *dev, bool check_feature_enable, bool check_ta_flush, + bool is_reset, uint32_t tp_num); #endif /* __UDMA_CMD_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 93898a153a98..560f96a17919 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -8,6 +8,17 @@ #define UDMA_UE_NUM 64 +enum udma_cmd_ue_opcode { + UDMA_CMD_UBCORE_COMMAND = 0x1, + UDMA_CMD_NOTIFY_MUE_SAVE_TP = 0x2, + UDMA_CMD_NOTIFY_UE_FLUSH_DONE = 0x3, +}; + +struct udma_ue_tp_info { + uint32_t tp_cnt : 8; + uint32_t start_tpn : 24; +}; + struct udma_ue_idx_table { uint32_t num; uint8_t ue_idx[UDMA_UE_NUM]; diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index cc073ec8d97d..fd71dfe71ed1 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -99,6 +99,7 @@ struct udma_dev { struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; resource_size_t db_base; void __iomem *k_db_base; + struct workqueue_struct *act_workq; struct xarray ksva_table; struct mutex ksva_mutex; struct xarray eid_table; diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 8ac14585ec6c..90c949c14375 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -279,3 +279,276 @@ int udma_register_ae_event(struct auxiliary_device *adev) return ret; } + +/* thanks to drivers/infiniband/hw/erdma/erdma_eq.c */ +int udma_register_ce_event(struct auxiliary_device *adev) +{ + int ret; + + ret = ubase_comp_register(adev, udma_jfc_completion); + if (ret) + dev_err(&adev->dev, + "failed to register ce event, ret: %d.\n", ret); + + return ret; +} + +static inline bool udma_check_tpn_ue_idx(struct udma_ue_idx_table *tp_ue_idx_info, + uint8_t ue_idx) +{ + int i; + + for (i = 0; i < tp_ue_idx_info->num; i++) { + if (tp_ue_idx_info->ue_idx[i] == ue_idx) + return true; + } + + return false; +} + +static int udma_save_tpn_ue_idx_info(struct udma_dev *udma_dev, uint8_t ue_idx, + uint32_t tpn) +{ + struct udma_ue_idx_table *tp_ue_idx_info; + int ret; + + xa_lock(&udma_dev->tpn_ue_idx_table); + tp_ue_idx_info = xa_load(&udma_dev->tpn_ue_idx_table, tpn); + if (tp_ue_idx_info) { + if (tp_ue_idx_info->num >= UDMA_UE_NUM) { + dev_err(udma_dev->dev, + "num exceeds the maximum value.\n"); + xa_unlock(&udma_dev->tpn_ue_idx_table); + + return -EINVAL; + } + + if (!udma_check_tpn_ue_idx(tp_ue_idx_info, ue_idx)) + tp_ue_idx_info->ue_idx[tp_ue_idx_info->num++] = ue_idx; + + xa_unlock(&udma_dev->tpn_ue_idx_table); + + return 0; + } + xa_unlock(&udma_dev->tpn_ue_idx_table); + + tp_ue_idx_info = kzalloc(sizeof(*tp_ue_idx_info), GFP_KERNEL); + if (!tp_ue_idx_info) + return -ENOMEM; + + tp_ue_idx_info->ue_idx[tp_ue_idx_info->num++] = ue_idx; + ret = xa_err(xa_store(&udma_dev->tpn_ue_idx_table, tpn, tp_ue_idx_info, + GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, + 
"store tpn ue idx table failed, ret is %d.\n", ret); + goto err_store_ue_id; + } + + return ret; + +err_store_ue_id: + kfree(tp_ue_idx_info); + return ret; +} + +static void udma_delete_tpn_ue_idx_info(struct udma_dev *udma_dev, uint32_t tpn) +{ + struct udma_ue_idx_table *tp_ue_idx_info; + + xa_lock(&udma_dev->tpn_ue_idx_table); + tp_ue_idx_info = xa_load(&udma_dev->tpn_ue_idx_table, tpn); + if (tp_ue_idx_info) { + tp_ue_idx_info->num--; + if (tp_ue_idx_info->num == 0) { + __xa_erase(&udma_dev->tpn_ue_idx_table, tpn); + kfree(tp_ue_idx_info); + } + } + xa_unlock(&udma_dev->tpn_ue_idx_table); +} + +static int udma_save_tp_info(struct udma_dev *udma_dev, struct udma_ue_tp_info *info, + uint8_t ue_idx) +{ +#define UDMA_RSP_TP_MUL 2 + uint32_t tpn; + int ret = 0; + int i; + + for (i = 0; i < info->tp_cnt * UDMA_RSP_TP_MUL; i++) { + tpn = info->start_tpn + i; + ret = udma_save_tpn_ue_idx_info(udma_dev, ue_idx, tpn); + if (ret) { + dev_err(udma_dev->dev, "save tpn info fail, ret = %d, tpn = %u.\n", + ret, tpn); + goto err_save_ue_id; + } + } + + return ret; + +err_save_ue_id: + for (i--; i >= 0; i--) { + tpn = info->start_tpn + i; + udma_delete_tpn_ue_idx_info(udma_dev, tpn); + } + + return ret; +} + +static int udma_crq_recv_req_msg(void *dev, void *data, uint32_t len) +{ + struct udma_dev *udma_dev = get_udma_dev((struct auxiliary_device *)dev); + struct udma_ue_tp_info *info; + struct udma_req_msg *req; + + if (len < sizeof(*req) + sizeof(*info)) { + dev_err(udma_dev->dev, "len of crq req is too small, len = %u.\n", len); + return -EINVAL; + } + req = (struct udma_req_msg *)data; + + if (req->resp_code != UDMA_CMD_NOTIFY_MUE_SAVE_TP) { + dev_err(udma_dev->dev, "ue to mue opcode error, opcode = %u.\n", + req->resp_code); + return -EINVAL; + } + info = (struct udma_ue_tp_info *)req->req.data; + + return udma_save_tp_info(udma_dev, info, req->dst_ue_idx); +} + +static void udma_activate_dev_work(struct work_struct *work) +{ + struct udma_flush_work *flush_work = container_of(work, struct udma_flush_work, work); + struct udma_dev *udev = flush_work->udev; + int ret; + + ret = udma_open_ue_rx(udev, true, false, false, 0); + if (ret) + dev_err(udev->dev, "udma open ue rx failed, ret = %d.\n", ret); + + kfree(flush_work); +} + +static int udma_crq_recv_resp_msg(void *dev, void *data, uint32_t len) +{ + struct udma_dev *udma_dev = get_udma_dev((struct auxiliary_device *)dev); + struct udma_flush_work *flush_work; + struct udma_resp_msg *udma_resp; + + if (len < sizeof(*udma_resp)) { + dev_err(udma_dev->dev, "len of crq resp is too small, len = %u.\n", len); + return -EINVAL; + } + udma_resp = (struct udma_resp_msg *)data; + if (udma_resp->resp_code != UDMA_CMD_NOTIFY_UE_FLUSH_DONE) { + dev_err(udma_dev->dev, "mue to ue opcode err, opcode = %u.\n", + udma_resp->resp_code); + return -EINVAL; + } + + flush_work = kzalloc(sizeof(*flush_work), GFP_ATOMIC); + if (!flush_work) + return -ENOMEM; + + flush_work->udev = udma_dev; + INIT_WORK(&flush_work->work, udma_activate_dev_work); + queue_work(udma_dev->act_workq, &flush_work->work); + + return 0; +} + +static struct ubase_crq_event_nb udma_crq_opts[] = { + {UBASE_OPC_UE_TO_MUE, NULL, udma_crq_recv_req_msg}, + {UBASE_OPC_MUE_TO_UE, NULL, udma_crq_recv_resp_msg}, +}; + +void udma_unregister_crq_event(struct auxiliary_device *adev) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubase_crq_event_nb *nb = NULL; + size_t index; + + xa_for_each(&udma_dev->crq_nb_table, index, nb) { + xa_erase(&udma_dev->crq_nb_table, index); + 
ubase_unregister_crq_event(adev, nb->opcode); + kfree(nb); + nb = NULL; + } +} + +static int udma_register_one_crq_event(struct auxiliary_device *adev, + struct ubase_crq_event_nb *crq_nb, + uint32_t index) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubase_crq_event_nb *nb; + int ret; + + nb = kzalloc(sizeof(*nb), GFP_KERNEL); + if (!nb) + return -ENOMEM; + + nb->opcode = crq_nb->opcode; + nb->back = adev; + nb->crq_handler = crq_nb->crq_handler; + ret = ubase_register_crq_event(adev, nb); + if (ret) { + dev_err(udma_dev->dev, + "register crq event failed, opcode is %u, ret is %d.\n", + nb->opcode, ret); + goto err_register_crq_event; + } + + ret = xa_err(xa_store(&udma_dev->crq_nb_table, index, nb, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, + "save crq nb entry failed, opcode is %u, ret is %d.\n", + nb->opcode, ret); + goto err_store_crq_nb; + } + + return ret; + +err_store_crq_nb: + ubase_unregister_crq_event(adev, nb->opcode); +err_register_crq_event: + kfree(nb); + return ret; +} + +int udma_register_crq_event(struct auxiliary_device *adev) +{ + uint32_t opt_num = sizeof(udma_crq_opts) / sizeof(struct ubase_crq_event_nb); + uint32_t index; + int ret = 0; + + for (index = 0; index < opt_num; ++index) { + ret = udma_register_one_crq_event(adev, &udma_crq_opts[index], index); + if (ret) { + udma_unregister_crq_event(adev); + break; + } + } + + return ret; +} + +int udma_register_activate_workqueue(struct udma_dev *udma_dev) +{ + udma_dev->act_workq = alloc_workqueue("udma_activate_workq", WQ_UNBOUND, 0); + if (!udma_dev->act_workq) { + dev_err(udma_dev->dev, "failed to create activate workqueue.\n"); + return -ENOMEM; + } + + return 0; +} + +void udma_unregister_activate_workqueue(struct udma_dev *udma_dev) +{ + flush_workqueue(udma_dev->act_workq); + destroy_workqueue(udma_dev->act_workq); +} diff --git a/drivers/ub/urma/hw/udma/udma_eq.h b/drivers/ub/urma/hw/udma/udma_eq.h index f771483e168f..336a8544cb9d 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.h +++ b/drivers/ub/urma/hw/udma/udma_eq.h @@ -6,5 +6,20 @@ int udma_register_ae_event(struct auxiliary_device *adev); void udma_unregister_ae_event(struct auxiliary_device *adev); +int udma_register_ce_event(struct auxiliary_device *adev); +void udma_unregister_crq_event(struct auxiliary_device *adev); +int udma_register_crq_event(struct auxiliary_device *adev); +int udma_register_activate_workqueue(struct udma_dev *udma_dev); +void udma_unregister_activate_workqueue(struct udma_dev *udma_dev); + +static inline void udma_unregister_ce_event(struct auxiliary_device *adev) +{ + ubase_comp_unregister(adev); +} + +struct udma_flush_work { + struct udma_dev *udev; + struct work_struct work; +}; #endif /* __UDMA_EQ_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c new file mode 100644 index 000000000000..ee223bb923f6 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include "udma_cmd.h" +#include "udma_common.h" +#include "udma_jetty.h" +#include "udma_jfr.h" +#include "udma_jfs.h" +#include "udma_ctx.h" +#include "udma_db.h" +#include +#include "udma_jfc.h" + +int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, + void *data) +{ + struct auxiliary_device *adev = (struct auxiliary_device *)data; + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_jfc *ubcore_jfc; + struct udma_jfc *udma_jfc; + + xa_lock(&udma_dev->jfc_table.xa); + udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, jfcn); + if (!udma_jfc) { + dev_warn(udma_dev->dev, + "Completion event for bogus jfcn %lu.\n", jfcn); + xa_unlock(&udma_dev->jfc_table.xa); + return -EINVAL; + } + + ++udma_jfc->arm_sn; + + ubcore_jfc = &udma_jfc->base; + if (ubcore_jfc->jfce_handler) { + refcount_inc(&udma_jfc->event_refcount); + xa_unlock(&udma_dev->jfc_table.xa); + ubcore_jfc->jfce_handler(ubcore_jfc); + if (refcount_dec_and_test(&udma_jfc->event_refcount)) + complete(&udma_jfc->event_comp); + } else { + xa_unlock(&udma_dev->jfc_table.xa); + } + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 3841049c28a0..e225efdece4c 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -32,4 +32,7 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) return container_of(jfc, struct udma_jfc, base); } +int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, + void *data); + #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2088da125e0b..0393cc0ba18b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -735,11 +735,28 @@ static int udma_register_event(struct auxiliary_device *adev) if (ret) return ret; + ret = udma_register_ce_event(adev); + if (ret) + goto err_ce_register; + + ret = udma_register_crq_event(adev); + if (ret) + goto err_crq_register; + return 0; + +err_crq_register: + udma_unregister_ce_event(adev); +err_ce_register: + udma_unregister_ae_event(adev); + + return ret; } static void udma_unregister_event(struct auxiliary_device *adev) { + udma_unregister_crq_event(adev); + udma_unregister_ce_event(adev); udma_unregister_ae_event(adev); } @@ -765,6 +782,26 @@ static bool udma_is_need_probe(struct auxiliary_device *adev) return true; } +static void udma_report_reset_event(enum ubcore_event_type event_type, + struct udma_dev *udma_dev) +{ + struct ubcore_event ae = {}; + + ae.ub_dev = &udma_dev->ub_dev; + ae.event_type = event_type; + + if (event_type == UBCORE_EVENT_ELR_ERR) + dev_info(udma_dev->dev, + "udma report reset event elr_err, matched udma dev(%s).\n", + udma_dev->dev_name); + else if (event_type == UBCORE_EVENT_ELR_DONE) + dev_info(udma_dev->dev, + "udma report reset event elr_done, matched udma dev(%s).\n", + udma_dev->dev_name); + + ubcore_dispatch_async_event(&ae); +} + static void udma_reset_handler(struct auxiliary_device *adev, enum ubase_reset_stage stage) { @@ -804,6 +841,12 @@ static int udma_init_dev(struct auxiliary_device *adev) if (ret) goto err_event_register; + ret = udma_register_activate_workqueue(udma_dev); + if (ret) { + dev_err(udma_dev->dev, "UDMA activate workqueue failed.\n"); + goto err_register_act_init; + } + ret = udma_set_ubcore_dev(udma_dev); if (ret) { dev_err(udma_dev->dev, "failed to set ubcore 
dev, ret is %d.\n", ret); @@ -818,6 +861,8 @@ static int udma_init_dev(struct auxiliary_device *adev) return 0; err_set_ubcore_dev: + udma_unregister_activate_workqueue(udma_dev); +err_register_act_init: udma_unregister_event(adev); err_event_register: udma_destroy_dev(udma_dev); @@ -827,6 +872,24 @@ static int udma_init_dev(struct auxiliary_device *adev) return -EINVAL; } +static void check_and_wait_flush_done(struct udma_dev *udma_dev) +{ +#define WAIT_MAX_TIMES 15 + uint32_t wait_times = 0; + + while (true) { + if (udma_dev->disable_ue_rx_count == 1) + break; + + if (wait_times > WAIT_MAX_TIMES) { + dev_warn(udma_dev->dev, "wait flush done timeout.\n"); + break; + } + msleep(1 << wait_times); + wait_times++; + } +} + void udma_reset_down(struct auxiliary_device *adev) { struct udma_dev *udma_dev; @@ -845,11 +908,15 @@ void udma_reset_down(struct auxiliary_device *adev) return; } - udma_dev->status = UDMA_SUSPEND; + ubcore_stop_requests(&udma_dev->ub_dev); + if (udma_close_ue_rx(udma_dev, false, false, true, 0)) { + mutex_unlock(&udma_reset_mutex); + dev_err(&adev->dev, "udma close ue rx failed in reset down process.\n"); + return; + } - udma_unregister_debugfs(udma_dev); - udma_unset_ubcore_dev(udma_dev); - udma_unregister_event(adev); + udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); + udma_dev->status = UDMA_SUSPEND; mutex_unlock(&udma_reset_mutex); } @@ -871,6 +938,11 @@ void udma_reset_uninit(struct auxiliary_device *adev) return; } + udma_unset_ubcore_dev(udma_dev); + udma_unregister_debugfs(udma_dev); + udma_unregister_activate_workqueue(udma_dev); + udma_open_ue_rx(udma_dev, false, false, true, 0); + udma_unregister_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); } @@ -894,17 +966,33 @@ void udma_remove(struct auxiliary_device *adev) { struct udma_dev *udma_dev; + mutex_lock(&udma_reset_mutex); + ubase_reset_unregister(adev); udma_dev = get_udma_dev(adev); if (!udma_dev) { + mutex_unlock(&udma_reset_mutex); dev_info(&adev->dev, "udma device is not exist.\n"); - ubase_reset_unregister(adev); return; } - udma_reset_down(adev); - udma_reset_uninit(adev); + ubcore_stop_requests(&udma_dev->ub_dev); + if (udma_close_ue_rx(udma_dev, false, false, false, 0)) { + mutex_unlock(&udma_reset_mutex); + dev_err(&adev->dev, "udma close ue rx failed in remove process.\n"); + return; + } - ubase_reset_unregister(adev); + udma_dev->status = UDMA_SUSPEND; + udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); + + udma_unset_ubcore_dev(udma_dev); + udma_unregister_debugfs(udma_dev); + udma_unregister_activate_workqueue(udma_dev); + check_and_wait_flush_done(udma_dev); + (void)ubase_activate_dev(adev); + udma_unregister_event(adev); + udma_destroy_dev(udma_dev); + mutex_unlock(&udma_reset_mutex); } static struct auxiliary_driver udma_drv = { -- Gitee From 30f966046d963cbecda44cd40b30a4b932d2dd4c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 18 Sep 2025 10:49:46 +0800 Subject: [PATCH 046/243] ub: udma: Support registering and unregistering ctrlq events. commit af4f2c7d6ea264e66f6e3410cbb1b8b2e9ea59a3 openEuler This patch adds support for registering and unregistering ctrlq events. During driver loading, udma registers the ctrlq event handlers; during driver unloading, it unregisters them. In addition, this patch adds support for querying AE aux info from ubase.
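For reference, a minimal sketch of the registration pattern used here, assuming only the struct ubase_ctrlq_event_nb fields and the ubase_ctrlq_register_crq_event()/ubase_ctrlq_unregister_crq_event() calls that appear in this patch; the handler name and opcode below are illustrative placeholders, not part of the patch:

static int demo_ctrlq_handler(struct auxiliary_device *adev, uint8_t service_ver,
			      void *data, uint16_t len, uint16_t seq)
{
	/* validate len, decode data, and, for opcodes that expect a reply,
	 * answer via ubase_ctrlq_send_msg() with is_resp = 1 and
	 * resp_seq = seq, as udma_ctrlq_check_tp_active() does below
	 */
	return 0;
}

static int demo_register_ctrlq_handler(struct auxiliary_device *adev)
{
	struct ubase_ctrlq_event_nb *nb;
	int ret;

	nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL;
	nb->opcode = 0x2a;	/* illustrative opcode, not a real command */
	nb->back = adev;
	nb->crq_handler = demo_ctrlq_handler;

	ret = ubase_ctrlq_register_crq_event(adev, nb);
	/* udma_register_one_ctrlq_event() below frees nb right after
	 * registering, which suggests ubase keeps its own copy of the
	 * registration; this sketch mirrors that behavior
	 */
	kfree(nb);

	return ret;
}

Teardown is symmetric: ubase_ctrlq_unregister_crq_event(adev, service_type, opcode), as udma_unregister_ctrlq_event() does for every entry of udma_ctrlq_opts[].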
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 3 +- drivers/ub/urma/hw/udma/udma_cmd.h | 46 ++++ drivers/ub/urma/hw/udma/udma_common.c | 8 + drivers/ub/urma/hw/udma/udma_common.h | 9 + drivers/ub/urma/hw/udma/udma_ctl.c | 210 +++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 147 ++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 80 +++++++ drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_eid.c | 98 ++++++++ drivers/ub/urma/hw/udma/udma_eid.h | 13 ++ drivers/ub/urma/hw/udma/udma_eq.c | 295 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_eq.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 30 +++ 13 files changed, 942 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_ctl.c create mode 100644 drivers/ub/urma/hw/udma/udma_ctrlq_tp.c create mode 100644 drivers/ub/urma/hw/udma/udma_eid.c create mode 100644 drivers/ub/urma/hw/udma/udma_eid.h diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index 3a3e3f18467f..2739d92c115e 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0+ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ - udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o + udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ + udma_ctrlq_tp.o udma_eid.o udma_ctl.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_cmd.h b/drivers/ub/urma/hw/udma/udma_cmd.h index fb1476350e07..6ec531913033 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.h +++ b/drivers/ub/urma/hw/udma/udma_cmd.h @@ -19,12 +19,44 @@ extern bool debug_switch; #define SPEED_50G 50000 #define SPEED_25G 25000 +#define UDMA_CTRLQ_SEID_NUM 64 + struct udma_ctrlq_eid_info { uint32_t eid_idx; union ubcore_eid eid; uint32_t upi; } __packed; +struct udma_ctrlq_eid_in_query { + uint32_t cmd : 8; + uint32_t rsv : 24; +}; + +struct udma_ctrlq_eid_out_query { + uint32_t seid_num : 8; + uint32_t rsv : 24; + struct udma_ctrlq_eid_info eids[UDMA_CTRLQ_SEID_NUM]; +} __packed; + +struct udma_ctrlq_eid_out_update { + struct udma_ctrlq_eid_info eid_info; + uint32_t op_type : 4; + uint32_t rsv : 28; +} __packed; + +enum udma_ctrlq_eid_update_op { + UDMA_CTRLQ_EID_ADD = 0, + UDMA_CTRLQ_EID_DEL, +}; + +enum udma_ctrlq_dev_mgmt_opcode { + UDMA_CTRLQ_GET_SEID_INFO = 0x1, + UDMA_CTRLQ_UPDATE_SEID_INFO = 0x2, + UDMA_CTRLQ_GET_DEV_RESOURCE_COUNT = 0x11, + UDMA_CTRLQ_GET_DEV_RESOURCE_RATIO = 0x12, + UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO = 0x13, +}; + enum udma_cmd_opcode_type { UDMA_CMD_QUERY_UE_RES = 0x0002, UDMA_CMD_QUERY_UE_INDEX = 0x241d, @@ -213,6 +245,20 @@ struct udma_cmd_wqebb_va { uint32_t ue_num; }; +struct udma_cmd_query_cqe_aux_info { + uint32_t status : 8; + uint32_t is_client : 1; + uint32_t rsvd : 23; + uint32_t cqe_aux_info[MAX_CQE_AUX_INFO_TYPE_NUM]; +}; + +struct udma_cmd_query_ae_aux_info { + uint32_t event_type : 8; + uint32_t sub_type : 8; + uint32_t rsvd : 16; + uint32_t ae_aux_info[MAX_AE_AUX_INFO_TYPE_NUM]; +}; + static inline void udma_fill_buf(struct ubase_cmd_buf *buf, u16 opcode, bool is_read, u32 data_size, void *data) { diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index d313e1d17443..4b4ccc22124a 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -578,3 +578,11 @@ void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_ 
dma_free_iova(slot); } + +void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size) +{ + uint32_t i; + + for (i = 0; i < res_size; i++) + res[i] = arr[res_size - i - 1]; +} diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index c6e83a0d84c3..3cec74f9ec8e 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -46,6 +46,13 @@ struct udma_jetty_queue { enum udma_jetty_type jetty_type; }; +enum tp_state { + TP_INVALID = 0x0, + TP_VALID = 0x1, + TP_RTS = 0x3, + TP_ERROR = 0x6, +}; + int pin_queue_addr(struct udma_dev *dev, uint64_t addr, uint32_t len, struct udma_buf *buf); void unpin_queue_addr(struct ubcore_umem *umem); @@ -75,4 +82,6 @@ static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len) return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; } +void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size); + #endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c new file mode 100644 index 000000000000..8b709dc10a20 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include "udma_common.h" +#include "udma_dev.h" +#include +#include "udma_cmd.h" +#include "udma_jetty.h" +#include "udma_jfs.h" +#include "udma_jfc.h" +#include "udma_db.h" +#include "udma_ctrlq_tp.h" +#include +#include "udma_def.h" + +static int to_hw_ae_event_type(struct udma_dev *udma_dev, uint32_t event_type, + struct udma_cmd_query_ae_aux_info *info) +{ + switch (event_type) { + case UBCORE_EVENT_TP_FLUSH_DONE: + info->event_type = UBASE_EVENT_TYPE_TP_FLUSH_DONE; + break; + case UBCORE_EVENT_TP_ERR: + info->event_type = UBASE_EVENT_TYPE_TP_LEVEL_ERROR; + break; + case UBCORE_EVENT_JFS_ERR: + case UBCORE_EVENT_JETTY_ERR: + info->event_type = UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR; + info->sub_type = UBASE_SUBEVENT_TYPE_JFS_CHECK_ERROR; + break; + case UBCORE_EVENT_JFC_ERR: + info->event_type = UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR; + info->sub_type = UBASE_SUBEVENT_TYPE_JFC_CHECK_ERROR; + break; + default: + dev_err(udma_dev->dev, "Invalid event type %u.\n", event_type); + return -EINVAL; + } + + return 0; +} + +static int send_cmd_query_ae_aux_info(struct udma_dev *udma_dev, + struct udma_cmd_query_ae_aux_info *info) +{ + struct ubase_cmd_buf cmd_in, cmd_out; + int ret; + + udma_fill_buf(&cmd_in, UDMA_CMD_GET_AE_AUX_INFO, true, + sizeof(struct udma_cmd_query_ae_aux_info), info); + udma_fill_buf(&cmd_out, UDMA_CMD_GET_AE_AUX_INFO, true, + sizeof(struct udma_cmd_query_ae_aux_info), info); + + ret = ubase_cmd_send_inout(udma_dev->comdev.adev, &cmd_in, &cmd_out); + if (ret) + dev_err(udma_dev->dev, + "failed to query ae aux info, ret = %d.\n", ret); + + return ret; +} + +static void free_kernel_ae_aux_info(struct udma_ae_aux_info_out *user_aux_info_out, + struct udma_ae_aux_info_out *aux_info_out) +{ + if (!user_aux_info_out->aux_info_type) + return; + + kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; + + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_value = NULL; +} + +static int copy_out_ae_data_from_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_ae_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_ae_aux_info_out 
*user_aux_info_out) +{ + if (out->addr != 0 && out->len == sizeof(struct udma_ae_aux_info_out)) { + memcpy(aux_info_out, (void *)(uintptr_t)out->addr, + sizeof(struct udma_ae_aux_info_out)); + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + if (aux_info_out->aux_info_num > MAX_AE_AUX_INFO_TYPE_NUM) { + dev_err(udma_dev->dev, + "invalid ae aux info num %u.\n", + aux_info_out->aux_info_num); + return -EINVAL; + } + + user_aux_info_out->aux_info_type = aux_info_out->aux_info_type; + user_aux_info_out->aux_info_value = aux_info_out->aux_info_value; + aux_info_out->aux_info_type = + kcalloc(aux_info_out->aux_info_num, + sizeof(enum udma_ae_aux_info_type), GFP_KERNEL); + if (!aux_info_out->aux_info_type) + return -ENOMEM; + + aux_info_out->aux_info_value = + kcalloc(aux_info_out->aux_info_num, + sizeof(uint32_t), GFP_KERNEL); + if (!aux_info_out->aux_info_value) { + kfree(aux_info_out->aux_info_type); + return -ENOMEM; + } + } + } + + return 0; +} + +static int copy_out_ae_data_to_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_ae_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_ae_aux_info_out *user_aux_info_out) +{ + unsigned long byte; + + if (out->addr != 0 && out->len == sizeof(struct udma_ae_aux_info_out)) { + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_type, + (void *)aux_info_out->aux_info_type, + aux_info_out->aux_info_num * + sizeof(enum udma_ae_aux_info_type)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info type failed, byte = %lu.\n", byte); + return -EFAULT; + } + + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_value, + (void *)aux_info_out->aux_info_value, + aux_info_out->aux_info_num * + sizeof(uint32_t)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info value failed, byte = %lu.\n", byte); + return -EFAULT; + } + + kfree(aux_info_out->aux_info_type); + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_type = user_aux_info_out->aux_info_type; + aux_info_out->aux_info_value = user_aux_info_out->aux_info_value; + } + memcpy((void *)(uintptr_t)out->addr, aux_info_out, + sizeof(struct udma_ae_aux_info_out)); + } + + return 0; +} + +int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out) +{ + struct udma_ae_aux_info_out user_aux_info_out = {}; + struct udma_ae_aux_info_out aux_info_out = {}; + struct udma_dev *udma_dev = to_udma_dev(dev); + struct udma_cmd_query_ae_aux_info info = {}; + struct udma_ae_info_in ae_info_in = {}; + int ret; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_ae_info_in))) { + dev_err(udma_dev->dev, "parameter invalid in query ae aux info, in_len = %u.\n", + in->len); + return -EINVAL; + } + memcpy(&ae_info_in, (void *)(uintptr_t)in->addr, + sizeof(struct udma_ae_info_in)); + ret = to_hw_ae_event_type(udma_dev, ae_info_in.event_type, &info); + if (ret) + return ret; + + ret = copy_out_ae_data_from_user(udma_dev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udma_dev->dev, + "copy out data from user failed, ret = %d.\n", ret); + return ret; + } + + ret = send_cmd_query_ae_aux_info(udma_dev, &info); + if (ret) { + dev_err(udma_dev->dev, + "send cmd query aux info failed, ret = 
%d.\n", + ret); + free_kernel_ae_aux_info(&user_aux_info_out, &aux_info_out); + return ret; + } + + ret = copy_out_ae_data_to_user(udma_dev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udma_dev->dev, + "copy out data to user failed, ret = %d.\n", ret); + free_kernel_ae_aux_info(&user_aux_info_out, &aux_info_out); + } + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c new file mode 100644 index 000000000000..af1732e1629b --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include "udma_cmd.h" +#include +#include "udma_common.h" +#include "udma_ctrlq_tp.h" + +static void udma_ctrlq_set_tp_msg(struct ubase_ctrlq_msg *msg, void *in, + uint16_t in_len, void *out, uint16_t out_len) +{ + msg->service_ver = UBASE_CTRLQ_SER_VER_01; + msg->service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg->need_resp = 1; + msg->is_resp = 0; + msg->in_size = in_len; + msg->in = in; + msg->out_size = out_len; + msg->out = out; +} + +int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status) +{ + struct udma_ctrlq_remove_single_tp_req_data tp_cfg_req = {}; + struct ubase_ctrlq_msg msg = {}; + int r_status = 0; + int ret; + + tp_cfg_req.tpn = tpn; + tp_cfg_req.tp_status = (uint32_t)status; + msg.opcode = UDMA_CMD_CTRLQ_REMOVE_SINGLE_TP; + udma_ctrlq_set_tp_msg(&msg, (void *)&tp_cfg_req, + sizeof(tp_cfg_req), &r_status, sizeof(int)); + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "remove single tp %u failed, ret %d status %d.\n", + tpn, ret, r_status); + + return ret; +} + +static int udma_send_req_to_ue(struct udma_dev *udma_dev, uint8_t ue_idx) +{ + struct ubcore_resp *ubcore_req; + int ret; + + ubcore_req = kzalloc(sizeof(*ubcore_req), GFP_KERNEL); + if (!ubcore_req) + return -ENOMEM; + + ret = send_resp_to_ue(udma_dev, ubcore_req, ue_idx, + UDMA_CMD_NOTIFY_UE_FLUSH_DONE); + if (ret) + dev_err(udma_dev->dev, "fail to notify ue the tp flush done, ret %d.\n", ret); + + kfree(ubcore_req); + + return ret; +} + +static struct udma_ue_idx_table *udma_find_ue_idx_by_tpn(struct udma_dev *udev, + uint32_t tpn) +{ + struct udma_ue_idx_table *tp_ue_idx_info; + + xa_lock(&udev->tpn_ue_idx_table); + tp_ue_idx_info = xa_load(&udev->tpn_ue_idx_table, tpn); + if (!tp_ue_idx_info) { + dev_warn(udev->dev, "ue idx info not exist, tpn %u.\n", tpn); + xa_unlock(&udev->tpn_ue_idx_table); + + return NULL; + } + + __xa_erase(&udev->tpn_ue_idx_table, tpn); + xa_unlock(&udev->tpn_ue_idx_table); + + return tp_ue_idx_info; +} + +int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn) +{ + struct udma_ctrlq_tp_flush_done_req_data tp_cfg_req = {}; + struct udma_ue_idx_table *tp_ue_idx_info; + struct ubase_ctrlq_msg msg = {}; + int ret = 0; + uint32_t i; + + tp_ue_idx_info = udma_find_ue_idx_by_tpn(udev, tpn); + if (tp_ue_idx_info) { + for (i = 0; i < tp_ue_idx_info->num; i++) + (void)udma_send_req_to_ue(udev, tp_ue_idx_info->ue_idx[i]); + + kfree(tp_ue_idx_info); + } else { + ret = udma_open_ue_rx(udev, true, false, false, 0); + if (ret) + dev_err(udev->dev, "udma open ue rx failed in tp flush done.\n"); + } + + tp_cfg_req.tpn = tpn; + msg.opcode = UDMA_CMD_CTRLQ_TP_FLUSH_DONE; + udma_ctrlq_set_tp_msg(&msg, (void *)&tp_cfg_req, sizeof(tp_cfg_req), NULL, 0); + ret = 
ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "tp flush done ctrlq tp %u failed, ret %d.\n", tpn, ret); + + return ret; +} + +int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, + uint8_t dst_ue_idx, uint16_t opcode) +{ + struct udma_resp_msg *udma_req; + struct ubase_cmd_buf in; + uint32_t msg_len; + int ret; + + msg_len = sizeof(*udma_req) + req_host->len; + udma_req = kzalloc(msg_len, GFP_KERNEL); + if (!udma_req) + return -ENOMEM; + + udma_req->dst_ue_idx = dst_ue_idx; + udma_req->resp_code = opcode; + + (void)memcpy(&udma_req->resp, req_host, sizeof(*req_host)); + (void)memcpy(udma_req->resp.data, req_host->data, req_host->len); + + udma_fill_buf(&in, UBASE_OPC_MUE_TO_UE, false, msg_len, udma_req); + + ret = ubase_cmd_send_in(udma_dev->comdev.adev, &in); + if (ret) + dev_err(udma_dev->dev, + "send resp msg cmd failed, ret is %d.\n", ret); + + kfree(udma_req); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 560f96a17919..6672f8ea01ec 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -6,8 +6,79 @@ #include "udma_common.h" +#define UDMA_EID_SIZE 16 +#define UDMA_CNA_SIZE 16 #define UDMA_UE_NUM 64 +enum udma_ctrlq_cmd_code_type { + UDMA_CMD_CTRLQ_REMOVE_SINGLE_TP = 0x13, + UDMA_CMD_CTRLQ_TP_FLUSH_DONE, + UDMA_CMD_CTRLQ_CHECK_TP_ACTIVE, + UDMA_CMD_CTRLQ_GET_TP_LIST = 0x21, + UDMA_CMD_CTRLQ_ACTIVE_TP, + UDMA_CMD_CTRLQ_DEACTIVE_TP, + UDMA_CMD_CTRLQ_SET_TP_ATTR, + UDMA_CMD_CTRLQ_GET_TP_ATTR, + UDMA_CMD_CTRLQ_MAX +}; + +enum udma_ctrlq_trans_type { + UDMA_CTRLQ_TRANS_TYPE_TP_RM = 0, + UDMA_CTRLQ_TRANS_TYPE_CTP, + UDMA_CTRLQ_TRANS_TYPE_TP_UM, + UDMA_CTRLQ_TRANS_TYPE_TP_RC = 4, + UDMA_CTRLQ_TRANS_TYPE_MAX +}; + +enum udma_ctrlq_tpid_status { + UDMA_CTRLQ_TPID_IN_USE = 0, + UDMA_CTRLQ_TPID_EXITED, + UDMA_CTRLQ_TPID_IDLE, +}; + +struct udma_ctrlq_tp_flush_done_req_data { + uint32_t tpn : 24; + uint32_t rsv : 8; +}; + +struct udma_ctrlq_remove_single_tp_req_data { + uint32_t tpn : 24; + uint32_t tp_status : 8; +}; + +struct udma_ctrlq_tpn_data { + uint32_t tpg_flag : 8; + uint32_t rsv : 24; + uint32_t tpgn : 24; + uint32_t rsv1 : 8; + uint32_t tpn_cnt : 8; + uint32_t start_tpn : 24; +}; + +struct udma_ctrlq_check_tp_active_req_data { + uint32_t tp_id : 24; + uint32_t rsv : 8; + uint32_t pid_flag : 24; + uint32_t rsv1 : 8; +}; + +struct udma_ctrlq_check_tp_active_req_info { + uint32_t num : 8; + uint32_t rsv : 24; + struct udma_ctrlq_check_tp_active_req_data data[]; +}; + +struct udma_ctrlq_check_tp_active_rsp_data { + uint32_t tp_id : 24; + uint32_t result : 8; +}; + +struct udma_ctrlq_check_tp_active_rsp_info { + uint32_t num : 8; + uint32_t rsv : 24; + struct udma_ctrlq_check_tp_active_rsp_data data[]; +}; + enum udma_cmd_ue_opcode { UDMA_CMD_UBCORE_COMMAND = 0x1, UDMA_CMD_NOTIFY_MUE_SAVE_TP = 0x2, @@ -24,4 +95,13 @@ struct udma_ue_idx_table { uint8_t ue_idx[UDMA_UE_NUM]; }; +struct udma_notify_flush_done { + uint32_t tpn; +}; + +int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn); +int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status); +int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, + uint8_t dst_ue_idx, uint16_t opcode); + #endif /* __UDMA_CTRLQ_TP_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index fd71dfe71ed1..89b91ff08e79 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ 
b/drivers/ub/urma/hw/udma/udma_dev.h @@ -27,6 +27,8 @@ extern bool dump_aux_info; #define UDMA_DEV_UE_NUM 47 +#define SEID_TABLE_SIZE 1024 + #define UDMA_MAX_SL_NUM 16 #define UDMA_DEFAULT_SL_NUM 0 diff --git a/drivers/ub/urma/hw/udma/udma_eid.c b/drivers/ub/urma/hw/udma/udma_eid.c new file mode 100644 index 000000000000..ad88d7eec976 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_eid.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include "udma_dev.h" +#include "udma_cmd.h" +#include "udma_common.h" +#include +#include "udma_eid.h" + +static void udma_dispatch_eid_event(struct udma_dev *udma_dev, + struct udma_ctrlq_eid_info *eid_entry, + enum ubcore_mgmt_event_type type) +{ + struct ubcore_mgmt_event event = {}; + struct ubcore_eid_info info = {}; + + udma_swap_endian(eid_entry->eid.raw, info.eid.raw, sizeof(union ubcore_eid)); + info.eid_index = eid_entry->eid_idx; + + event.ub_dev = &udma_dev->ub_dev; + event.element.eid_info = &info; + event.event_type = type; + ubcore_dispatch_mgmt_event(&event); +} + +int udma_add_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info) +{ + struct udma_ctrlq_eid_info *eid_entry; + eid_t ummu_eid = 0; + guid_t guid = {}; + int ret; + + eid_entry = kzalloc(sizeof(struct udma_ctrlq_eid_info), GFP_KERNEL); + if (!eid_entry) + return -ENOMEM; + + memcpy(eid_entry, eid_info, sizeof(struct udma_ctrlq_eid_info)); + ret = xa_err(xa_store(&udma_dev->eid_table, eid_info->eid_idx, eid_entry, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, + "save eid entry failed, ret = %d, eid index = %u.\n", + ret, eid_info->eid_idx); + goto store_err; + } + + if (!udma_dev->is_ue) { + (void)memcpy(&ummu_eid, eid_info->eid.raw, sizeof(ummu_eid)); + ret = ummu_core_add_eid(&guid, ummu_eid, EID_NONE); + if (ret) { + dev_err(udma_dev->dev, + "set ummu eid entry failed, ret is %d.\n", ret); + goto err_add_ummu_eid; + } + } + udma_dispatch_eid_event(udma_dev, eid_entry, UBCORE_MGMT_EVENT_EID_ADD); + + return ret; +err_add_ummu_eid: + xa_erase(&udma_dev->eid_table, eid_info->eid_idx); +store_err: + kfree(eid_entry); + + return ret; +} + +int udma_del_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info) +{ + struct udma_ctrlq_eid_info *eid_entry; + uint32_t index = eid_info->eid_idx; + eid_t ummu_eid = 0; + guid_t guid = {}; + + eid_entry = (struct udma_ctrlq_eid_info *)xa_load(&udma_dev->eid_table, index); + if (!eid_entry) { + dev_err(udma_dev->dev, "get eid entry failed, eid index = %u.\n", + index); + return -EINVAL; + } + if (memcmp(eid_entry->eid.raw, eid_info->eid.raw, sizeof(eid_entry->eid.raw))) { + dev_err(udma_dev->dev, "eid is not match, index = %u.\n", index); + return -EINVAL; + } + xa_erase(&udma_dev->eid_table, index); + + if (!udma_dev->is_ue) { + (void)memcpy(&ummu_eid, eid_entry->eid.raw, sizeof(ummu_eid)); + ummu_core_del_eid(&guid, ummu_eid, EID_NONE); + } + udma_dispatch_eid_event(udma_dev, eid_entry, UBCORE_MGMT_EVENT_EID_RMV); + kfree(eid_entry); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_eid.h b/drivers/ub/urma/hw/udma/udma_eid.h new file mode 100644 index 000000000000..0e9e676524bc --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_eid.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef __UDMA_EID_H__ +#define __UDMA_EID_H__ + +#include +#include "udma_cmd.h" + +int udma_add_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); +int udma_del_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); + +#endif /* __UDMA_EID_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 90c949c14375..53dfb2bdebd5 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -16,9 +16,61 @@ #include "udma_jfr.h" #include "udma_jfc.h" #include "udma_jetty.h" +#include "udma_eid.h" #include #include "udma_eq.h" +static inline int udma_ae_tp_ctrlq_msg_deal(struct udma_dev *udma_dev, + struct ubase_aeq_notify_info *info, + uint32_t queue_num) +{ + switch (info->event_type) { + case UBASE_EVENT_TYPE_TP_FLUSH_DONE: + return udma_ctrlq_tp_flush_done(udma_dev, queue_num); + case UBASE_EVENT_TYPE_TP_LEVEL_ERROR: + return udma_ctrlq_remove_single_tp(udma_dev, queue_num, TP_ERROR); + default: + dev_warn(udma_dev->dev, "udma get unsupported async event.\n"); + return 0; + } +} + +static void dump_ae_aux_info(struct udma_dev *dev, uint8_t event_type) +{ + struct ubcore_user_ctl_out out = {}; + struct ubcore_user_ctl_in in = {}; + struct udma_ae_info_in info_in; + + if (!dump_aux_info) + return; + + info_in.event_type = event_type; + in.addr = (uint64_t)&info_in; + in.len = sizeof(struct udma_ae_info_in); + in.opcode = UDMA_USER_CTL_QUERY_AE_AUX_INFO; + + (void)udma_query_ae_aux_info(&dev->ub_dev, NULL, &in, &out); +} + +static int udma_ae_tp_level_error(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct ubase_event_nb *ev_nb = container_of(nb, struct ubase_event_nb, nb); + struct auxiliary_device *adev = (struct auxiliary_device *)ev_nb->back; + struct ubase_aeq_notify_info *info = data; + struct udma_dev *udma_dev; + uint32_t queue_num; + + queue_num = info->aeqe->event.queue_event.num; + udma_dev = get_udma_dev(adev); + + dev_warn(udma_dev->dev, + "trigger tp level ae, event type is %d, sub type is %d, queue_num is %u.\n", + info->event_type, info->sub_type, queue_num); + + return udma_ae_tp_ctrlq_msg_deal(udma_dev, info, queue_num); +} + static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_num) { struct udma_dev *udma_dev = get_udma_dev(adev); @@ -47,6 +99,7 @@ static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_n ae.ub_dev = ubcore_jetty->ub_dev; ae.element.jetty = ubcore_jetty; ae.event_type = UBCORE_EVENT_JETTY_ERR; + dump_ae_aux_info(udma_dev, ae.event_type); ubcore_jetty->jfae_handler(&ae, ubcore_jetty->uctx); if (refcount_dec_and_test(&udma_jetty->ae_refcount)) complete(&udma_jetty->ae_comp); @@ -62,6 +115,7 @@ static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_n ae.ub_dev = ubcore_jfs->ub_dev; ae.element.jfs = ubcore_jfs; ae.event_type = UBCORE_EVENT_JFS_ERR; + dump_ae_aux_info(udma_dev, ae.event_type); ubcore_jfs->jfae_handler(&ae, ubcore_jfs->uctx); if (refcount_dec_and_test(&udma_jfs->ae_refcount)) complete(&udma_jfs->ae_comp); @@ -131,6 +185,7 @@ static int udma_ae_jfc_check_err(struct auxiliary_device *adev, uint32_t queue_n ae.ub_dev = ubcore_jfc->ub_dev; ae.element.jfc = ubcore_jfc; ae.event_type = UBCORE_EVENT_JFC_ERR; + dump_ae_aux_info(udma_dev, ae.event_type); ubcore_jfc->jfae_handler(&ae, ubcore_jfc->uctx); if (refcount_dec_and_test(&udma_jfc->event_refcount)) complete(&udma_jfc->event_comp); @@ -215,6 +270,8 @@ struct ae_operation { static struct 
ae_operation udma_ae_opts[] = { {UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR, udma_ae_jetty_level_error}, {UBASE_EVENT_TYPE_JFR_LIMIT_REACHED, udma_ae_jetty_level_error}, + {UBASE_EVENT_TYPE_TP_LEVEL_ERROR, udma_ae_tp_level_error}, + {UBASE_EVENT_TYPE_TP_FLUSH_DONE, udma_ae_tp_level_error}, }; void udma_unregister_ae_event(struct auxiliary_device *adev) @@ -536,6 +593,244 @@ int udma_register_crq_event(struct auxiliary_device *adev) return ret; } +static int udma_ctrlq_send_eid_update_response(struct udma_dev *udma_dev, uint16_t seq, int ret_val) +{ + struct ubase_ctrlq_msg msg = {}; + int inbuf = 0; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.opcode = UDMA_CTRLQ_UPDATE_SEID_INFO; + msg.need_resp = 0; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + msg.in = (void *)&inbuf; + msg.in_size = sizeof(inbuf); + + ret = ubase_ctrlq_send_msg(udma_dev->comdev.adev, &msg); + if (ret) + dev_err(udma_dev->dev, "send eid update response failed, ret = %d, ret_val = %d.\n", + ret, ret_val); + return ret; +} + +static int udma_ctrlq_eid_update(struct auxiliary_device *adev, uint8_t service_ver, + void *data, uint16_t len, uint16_t seq) +{ + struct udma_ctrlq_eid_out_update eid_entry = {}; + struct udma_dev *udma_dev; + int ret; + + if (adev == NULL || data == NULL) { + pr_err("adev or data is NULL.\n"); + return -EINVAL; + } + + udma_dev = get_udma_dev(adev); + if (len < sizeof(struct udma_ctrlq_eid_out_update)) { + dev_err(udma_dev->dev, "msg len(%u) is invalid.\n", len); + return udma_ctrlq_send_eid_update_response(udma_dev, seq, -EINVAL); + } + memcpy(&eid_entry, data, sizeof(eid_entry)); + if (eid_entry.op_type != UDMA_CTRLQ_EID_ADD && eid_entry.op_type != UDMA_CTRLQ_EID_DEL) { + dev_err(udma_dev->dev, "update eid op type(%u) is invalid.\n", eid_entry.op_type); + return udma_ctrlq_send_eid_update_response(udma_dev, seq, -EINVAL); + } + if (eid_entry.eid_info.eid_idx >= SEID_TABLE_SIZE) { + dev_err(udma_dev->dev, "update invalid eid_idx = %u.\n", + eid_entry.eid_info.eid_idx); + return udma_ctrlq_send_eid_update_response(udma_dev, seq, -EINVAL); + } + mutex_lock(&udma_dev->eid_mutex); + if (eid_entry.op_type == UDMA_CTRLQ_EID_ADD) + ret = udma_add_one_eid(udma_dev, &(eid_entry.eid_info)); + else + ret = udma_del_one_eid(udma_dev, &(eid_entry.eid_info)); + if (ret) + dev_err(udma_dev->dev, "update eid failed, op = %u, index = %u, ret = %d.\n", + eid_entry.op_type, eid_entry.eid_info.eid_idx, ret); + mutex_unlock(&udma_dev->eid_mutex); + + return udma_ctrlq_send_eid_update_response(udma_dev, seq, ret); +} + +static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, + uint16_t len, uint32_t tp_num, + struct udma_ctrlq_check_tp_active_rsp_info *rsp_info) +{ + struct udma_ctrlq_check_tp_active_req_info *req_info = NULL; + uint32_t req_info_len = 0; + int i; + + req_info_len = sizeof(uint32_t) + + sizeof(struct udma_ctrlq_check_tp_active_req_data) * tp_num; + if (len < req_info_len) { + dev_err(udev->dev, "msg param num(%u) is invalid.\n", tp_num); + return -EINVAL; + } + req_info = kzalloc(req_info_len, GFP_KERNEL); + if (!req_info) + return -ENOMEM; + memcpy(req_info, data, req_info_len); + + rcu_read_lock(); + for (i = 0; i < req_info->num; i++) { + if (find_vpid(req_info->data[i].pid_flag)) + rsp_info->data[i].result = UDMA_CTRLQ_TPID_IN_USE; + else + rsp_info->data[i].result = UDMA_CTRLQ_TPID_EXITED; + + rsp_info->data[i].tp_id = req_info->data[i].tp_id; + } + rsp_info->num = tp_num; + 
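+ /* the find_vpid() lookups above are only valid inside this RCU read-side critical section */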
rcu_read_unlock(); + kfree(req_info); + + return 0; +} + +static int udma_ctrlq_check_param(struct udma_dev *udev, void *data, uint16_t len) +{ +#define UDMA_CTRLQ_HDR_LEN 12 +#define UDMA_CTRLQ_MAX_BB 32 +#define UDMA_CTRLQ_BB_LEN 32 + + if (data == NULL) { + dev_err(udev->dev, "data is NULL.\n"); + return -EINVAL; + } + + if ((len < UDMA_CTRLQ_BB_LEN - UDMA_CTRLQ_HDR_LEN) || + len > (UDMA_CTRLQ_BB_LEN * UDMA_CTRLQ_MAX_BB - UDMA_CTRLQ_HDR_LEN)) { + dev_err(udev->dev, "msg data len(%u) is invalid.\n", len); + return -EINVAL; + } + + return 0; +} + +static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, + uint8_t service_ver, void *data, + uint16_t len, uint16_t seq) +{ +#define UDMA_CTRLQ_CHECK_TP_OFFSET 0xFF + struct udma_ctrlq_check_tp_active_rsp_info *rsp_info = NULL; + struct udma_dev *udev = get_udma_dev(adev); + struct ubase_ctrlq_msg msg = {}; + uint32_t rsp_info_len = 0; + uint32_t tp_num = 0; + int ret_val; + int ret; + + ret_val = udma_ctrlq_check_param(udev, data, len); + if (ret_val == 0) { + tp_num = *((uint32_t *)data) & UDMA_CTRLQ_CHECK_TP_OFFSET; + rsp_info_len = sizeof(uint32_t) + + sizeof(struct udma_ctrlq_check_tp_active_rsp_data) * tp_num; + rsp_info = kzalloc(rsp_info_len, GFP_KERNEL); + if (!rsp_info) { + dev_err(udev->dev, "check tp msg malloc failed.\n"); + return -ENOMEM; + } + + ret_val = udma_ctrlq_check_tp_status(udev, data, len, tp_num, rsp_info); + if (ret_val) + dev_err(udev->dev, "check tp status failed, ret_val(%d).\n", ret_val); + } + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UDMA_CMD_CTRLQ_CHECK_TP_ACTIVE; + msg.need_resp = 0; + msg.is_resp = 1; + msg.in_size = (uint16_t)rsp_info_len; + msg.in = (void *)rsp_info; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + + ret = ubase_ctrlq_send_msg(adev, &msg); + if (ret) { + kfree(rsp_info); + dev_err(udev->dev, "send check tp active ctrlq msg failed, ret(%d).\n", ret); + return ret; + } + kfree(rsp_info); + + return (ret_val) ?
ret_val : 0; +} + +static struct ubase_ctrlq_event_nb udma_ctrlq_opts[] = { + {UBASE_CTRLQ_SER_TYPE_TP_ACL, UDMA_CMD_CTRLQ_CHECK_TP_ACTIVE, NULL, + udma_ctrlq_check_tp_active}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UDMA_CTRLQ_UPDATE_SEID_INFO, NULL, + udma_ctrlq_eid_update}, +}; + +static int udma_register_one_ctrlq_event(struct auxiliary_device *adev, + struct ubase_ctrlq_event_nb *ctrlq_nb, + uint32_t index) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubase_ctrlq_event_nb *nb; + int ret; + + nb = kzalloc(sizeof(*nb), GFP_KERNEL); + if (nb == NULL) + return -ENOMEM; + + nb->service_type = ctrlq_nb->service_type; + nb->opcode = ctrlq_nb->opcode; + nb->back = adev; + nb->crq_handler = ctrlq_nb->crq_handler; + ret = ubase_ctrlq_register_crq_event(adev, nb); + if (ret) + dev_err(udma_dev->dev, + "ubase register ctrlq event failed, opcode = %u, ret is %d.\n", + nb->opcode, ret); + + kfree(nb); + + return ret; +} + +void udma_unregister_ctrlq_event(struct auxiliary_device *adev) +{ + int opt_num; + int index; + + opt_num = ARRAY_SIZE(udma_ctrlq_opts); + for (index = 0; index < opt_num; ++index) + ubase_ctrlq_unregister_crq_event(adev, udma_ctrlq_opts[index].service_type, + udma_ctrlq_opts[index].opcode); +} + +int udma_register_ctrlq_event(struct auxiliary_device *adev) +{ + int opt_num; + int index; + int ret; + + opt_num = ARRAY_SIZE(udma_ctrlq_opts); + for (index = 0; index < opt_num; ++index) { + ret = udma_register_one_ctrlq_event(adev, &udma_ctrlq_opts[index], index); + if (ret) + goto err_register_one_ctrlq_event; + } + + return ret; + +err_register_one_ctrlq_event: + for (index--; index >= 0; index--) { + ubase_ctrlq_unregister_crq_event(adev, + udma_ctrlq_opts[index].service_type, + udma_ctrlq_opts[index].opcode); + } + + return ret; +} + int udma_register_activate_workqueue(struct udma_dev *udma_dev) { udma_dev->act_workq = alloc_workqueue("udma_activate_workq", WQ_UNBOUND, 0); diff --git a/drivers/ub/urma/hw/udma/udma_eq.h b/drivers/ub/urma/hw/udma/udma_eq.h index 336a8544cb9d..c0d727de78b6 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.h +++ b/drivers/ub/urma/hw/udma/udma_eq.h @@ -9,6 +9,8 @@ void udma_unregister_ae_event(struct auxiliary_device *adev); int udma_register_ce_event(struct auxiliary_device *adev); void udma_unregister_crq_event(struct auxiliary_device *adev); int udma_register_crq_event(struct auxiliary_device *adev); +int udma_register_ctrlq_event(struct auxiliary_device *adev); +void udma_unregister_ctrlq_event(struct auxiliary_device *adev); int udma_register_activate_workqueue(struct udma_dev *udma_dev); void udma_unregister_activate_workqueue(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 0393cc0ba18b..5ebfeccf454e 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -727,6 +727,26 @@ static struct udma_dev *udma_create_dev(struct auxiliary_device *adev) return NULL; } +static void udma_port_handler(struct auxiliary_device *adev, bool link_up) +{ + struct udma_dev *udma_dev = get_udma_dev(adev); + struct ubcore_event ae = {}; + + ae.ub_dev = &udma_dev->ub_dev; + + if (link_up) + ae.event_type = UBCORE_EVENT_PORT_ACTIVE; + else + ae.event_type = UBCORE_EVENT_PORT_DOWN; + + ae.element.port_id = udma_dev->port_id; + dev_info(udma_dev->dev, + "udma report port event %s, matched udma dev(%s).\n", + link_up ? 
"ACTIVE" : "DOWN", udma_dev->dev_name); + + ubcore_dispatch_async_event(&ae); +} + static int udma_register_event(struct auxiliary_device *adev) { int ret; @@ -743,8 +763,16 @@ static int udma_register_event(struct auxiliary_device *adev) if (ret) goto err_crq_register; + ret = udma_register_ctrlq_event(adev); + if (ret) + goto err_ctrlq_register; + + ubase_port_register(adev, udma_port_handler); + return 0; +err_ctrlq_register: + udma_unregister_crq_event(adev); err_crq_register: udma_unregister_ce_event(adev); err_ce_register: @@ -755,6 +783,8 @@ static int udma_register_event(struct auxiliary_device *adev) static void udma_unregister_event(struct auxiliary_device *adev) { + ubase_port_unregister(adev); + udma_unregister_ctrlq_event(adev); udma_unregister_crq_event(adev); udma_unregister_ce_event(adev); udma_unregister_ae_event(adev); -- Gitee From 33846e8ecdebccac879e59088901228a5d0521d6 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 09:45:26 +0800 Subject: [PATCH 047/243] ub: udma: Support set eid function. commit b2eb67d690c6c4ba5ce5d7e651ef8c60e4dc33b1 openEuler This patch adds the ability to set eid in driver loading process. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_eid.c | 72 +++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_eid.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 19 ++++++++ 3 files changed, 92 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_eid.c b/drivers/ub/urma/hw/udma/udma_eid.c index ad88d7eec976..3ff649f343be 100644 --- a/drivers/ub/urma/hw/udma/udma_eid.c +++ b/drivers/ub/urma/hw/udma/udma_eid.c @@ -96,3 +96,75 @@ int udma_del_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_ return 0; } + +static int udma_send_query_eid_cmd(struct udma_dev *udma_dev, + struct udma_ctrlq_eid_out_query *eid_out_query) +{ +#define UDMA_CMD_CTRLQ_QUERY_SEID 0xb5 + struct udma_ctrlq_eid_in_query eid_in_query = {}; + struct ubase_ctrlq_msg msg = {}; + int ret; + + msg.opcode = UDMA_CTRLQ_GET_SEID_INFO; + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.need_resp = 1; + msg.is_resp = 0; + msg.in_size = sizeof(eid_in_query); + msg.in = &eid_in_query; + msg.out_size = sizeof(*eid_out_query); + msg.out = eid_out_query; + eid_in_query.cmd = UDMA_CMD_CTRLQ_QUERY_SEID; + + ret = ubase_ctrlq_send_msg(udma_dev->comdev.adev, &msg); + if (ret) + dev_err(udma_dev->dev, + "query seid from ctrl cpu failed, ret = %d.\n", ret); + + return ret; +} + +int udma_query_eid_from_ctrl_cpu(struct udma_dev *udma_dev) +{ + struct udma_ctrlq_eid_out_query eid_out_query = {}; + int ret, ret_tmp, i; + + ret = udma_send_query_eid_cmd(udma_dev, &eid_out_query); + if (ret) { + dev_err(udma_dev->dev, "query eid failed, ret = %d.\n", ret); + return ret; + } + + if (eid_out_query.seid_num > UDMA_CTRLQ_SEID_NUM) { + dev_err(udma_dev->dev, "Invalid param: seid num is %u.\n", eid_out_query.seid_num); + return -EINVAL; + } + + mutex_lock(&udma_dev->eid_mutex); + for (i = 0; i < (int)eid_out_query.seid_num; i++) { + if (eid_out_query.eids[i].eid_idx >= SEID_TABLE_SIZE) { + dev_err(udma_dev->dev, "invalid eid_idx = %u.\n", + eid_out_query.eids[i].eid_idx); + goto err_add_ummu_eid; + } + ret = udma_add_one_eid(udma_dev, &(eid_out_query.eids[i])); + if (ret) { + dev_err(udma_dev->dev, "Add eid failed, ret = %d, eid_idx = %u.\n", + ret, eid_out_query.eids[i].eid_idx); + goto err_add_ummu_eid; + } + } + mutex_unlock(&udma_dev->eid_mutex); + + return 0; 
+err_add_ummu_eid: + for (i--; i >= 0; i--) { + ret_tmp = udma_del_one_eid(udma_dev, &eid_out_query.eids[i]); + if (ret_tmp) + dev_err(udma_dev->dev, "Del eid failed, ret = %d, idx = %u.\n", + ret_tmp, eid_out_query.eids[i].eid_idx); + } + mutex_unlock(&udma_dev->eid_mutex); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_eid.h b/drivers/ub/urma/hw/udma/udma_eid.h index 0e9e676524bc..dc9b9bb47270 100644 --- a/drivers/ub/urma/hw/udma/udma_eid.h +++ b/drivers/ub/urma/hw/udma/udma_eid.h @@ -9,5 +9,6 @@ int udma_add_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); int udma_del_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); +int udma_query_eid_from_ctrl_cpu(struct udma_dev *udma_dev); #endif /* __UDMA_EID_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5ebfeccf454e..2da4ef51ec67 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -22,6 +22,7 @@ #include "udma_ctx.h" #include "udma_rct.h" #include "udma_tid.h" +#include "udma_eid.h" #include "udma_debugfs.h" #include "udma_common.h" #include "udma_ctrlq_tp.h" @@ -850,6 +851,17 @@ static void udma_reset_handler(struct auxiliary_device *adev, } } +static int udma_init_eid_table(struct udma_dev *udma_dev) +{ + int ret; + + ret = udma_query_eid_from_ctrl_cpu(udma_dev); + if (ret) + dev_err(udma_dev->dev, "query eid info failed, ret = %d.\n", ret); + + return ret; +} + static int udma_init_dev(struct auxiliary_device *adev) { struct udma_dev *udma_dev; @@ -883,6 +895,11 @@ static int udma_init_dev(struct auxiliary_device *adev) goto err_set_ubcore_dev; } + ret = udma_init_eid_table(udma_dev); + if (ret) { + dev_err(udma_dev->dev, "init eid table failed.\n"); + goto err_init_eid; + } udma_register_debugfs(udma_dev); udma_dev->status = UDMA_NORMAL; mutex_unlock(&udma_reset_mutex); @@ -890,6 +907,8 @@ static int udma_init_dev(struct auxiliary_device *adev) return 0; +err_init_eid: + udma_unset_ubcore_dev(udma_dev); err_set_ubcore_dev: udma_unregister_activate_workqueue(udma_dev); err_register_act_init: -- Gitee From 7b87b814ba551c16dc9abab0d2b38a3844c540ab Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 18 Sep 2025 11:43:28 +0800 Subject: [PATCH 048/243] ub: udma: Support register and unregister segment function. commit 257c03e42137bb3c00ad742d1703e8674eed9d30 openEuler This patch adds the ability to register and unregister segments. In addition, this patch adds the functions for importing and unimporting segments.
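For kernel-mode callers these entry points are reached through the ubcore_ops table wired up in udma_main.c below. The following is only a hedged sketch of that round trip: the buffer, the pre-allocated token-id object (type name assumed here to be struct ubcore_token_id), and the ub_dev->ops indirection are illustrative assumptions, not part of this patch.

	/* Hedged sketch: kernel-mode segment register/unregister round trip.
	 * Passing udata == NULL selects the kernel-mode (ksva grant) path.
	 */
	static int example_seg_roundtrip(struct ubcore_device *ub_dev,
					 struct ubcore_token_id *tid,
					 void *buf, uint64_t len)
	{
		struct ubcore_seg_cfg cfg = {};
		struct ubcore_target_seg *tseg;

		cfg.va = (uint64_t)(uintptr_t)buf;	/* assumed kernel buffer */
		cfg.len = len;
		cfg.token_id = tid;			/* e.g. from .alloc_token_id */
		cfg.flag.bs.access = UBCORE_ACCESS_WRITE;
		cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE;

		tseg = ub_dev->ops->register_seg(ub_dev, &cfg, NULL);
		if (!tseg)
			return -ENOMEM;

		return ub_dev->ops->unregister_seg(tseg);
	}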
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 2 +- drivers/ub/urma/hw/udma/udma_eid.h | 6 + drivers/ub/urma/hw/udma/udma_main.c | 5 + drivers/ub/urma/hw/udma/udma_segment.c | 251 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_segment.h | 32 ++++ 5 files changed, 295 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_segment.c create mode 100644 drivers/ub/urma/hw/udma/udma_segment.h diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index 2739d92c115e..a5358afd8e47 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -2,6 +2,6 @@ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ - udma_ctrlq_tp.o udma_eid.o udma_ctl.o + udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_eid.h b/drivers/ub/urma/hw/udma/udma_eid.h index dc9b9bb47270..0bb5d626503c 100644 --- a/drivers/ub/urma/hw/udma/udma_eid.h +++ b/drivers/ub/urma/hw/udma/udma_eid.h @@ -7,6 +7,12 @@ #include #include "udma_cmd.h" +struct udma_seid_upi { + union ubcore_eid seid; + uint32_t upi; + uint32_t rsvd0[3]; +}; + int udma_add_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); int udma_del_one_eid(struct udma_dev *udma_dev, struct udma_ctrlq_eid_info *eid_info); int udma_query_eid_from_ctrl_cpu(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2da4ef51ec67..8ae10ba7fbaa 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -18,6 +18,7 @@ #include #include "udma_dev.h" #include "udma_eq.h" +#include "udma_segment.h" #include "udma_cmd.h" #include "udma_ctx.h" #include "udma_rct.h" @@ -166,6 +167,10 @@ static struct ubcore_ops g_dev_ops = { .mmap = udma_mmap, .alloc_token_id = udma_alloc_tid, .free_token_id = udma_free_tid, + .register_seg = udma_register_seg, + .unregister_seg = udma_unregister_seg, + .import_seg = udma_import_seg, + .unimport_seg = udma_unimport_seg, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) diff --git a/drivers/ub/urma/hw/udma/udma_segment.c b/drivers/ub/urma/hw/udma/udma_segment.c new file mode 100644 index 000000000000..69098a2f3a67 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_segment.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include +#include "udma_common.h" +#include "udma_cmd.h" +#include "udma_eid.h" +#include "udma_tid.h" +#include "udma_segment.h" + +static int udma_pin_segment(struct ubcore_device *ub_dev, struct ubcore_seg_cfg *cfg, + struct udma_segment *seg) +{ + struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_umem_param param; + int ret = 0; + + if (cfg->flag.bs.non_pin) + return 0; + + param.ub_dev = ub_dev; + param.va = cfg->va; + param.len = cfg->len; + + param.flag.bs.writable = + !!(cfg->flag.bs.access & UBCORE_ACCESS_WRITE); + param.flag.bs.non_pin = cfg->flag.bs.non_pin; + param.is_kernel = seg->kernel_mode; + + seg->umem = udma_umem_get(¶m); + if (IS_ERR(seg->umem)) { + ret = PTR_ERR(seg->umem); + dev_err(udma_dev->dev, + "failed to get segment umem, ret = %d.\n", ret); + } + + return ret; +} + +static void udma_unpin_seg(struct udma_segment *seg) +{ + if (!seg->core_tseg.seg.attr.bs.non_pin) + udma_umem_release(seg->umem, seg->kernel_mode); +} + +static int udma_check_seg_cfg(struct udma_dev *udma_dev, struct ubcore_seg_cfg *cfg) +{ + if (!cfg->token_id || cfg->flag.bs.access >= UDMA_SEGMENT_ACCESS_GUARD || + cfg->flag.bs.token_policy >= UBCORE_TOKEN_SIGNED) { + dev_err(udma_dev->dev, + "error segment input, access = %d, token_policy = %d, or null key_id.\n", + cfg->flag.bs.access, cfg->flag.bs.token_policy); + return -EINVAL; + } + + return 0; +} + +static void udma_init_seg_cfg(struct udma_segment *seg, struct ubcore_seg_cfg *cfg) +{ + seg->core_tseg.token_id = cfg->token_id; + seg->core_tseg.seg.token_id = cfg->token_id->token_id; + seg->core_tseg.seg.attr.value = cfg->flag.value; + seg->token_value = cfg->token_value.token; + seg->token_value_valid = cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE; +} + +static int udma_u_get_seg_perm(struct ubcore_seg_cfg *cfg) +{ + if (cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_ONLY || + cfg->flag.bs.access & UBCORE_ACCESS_ATOMIC) + return UMMU_DEV_ATOMIC | UMMU_DEV_WRITE | UMMU_DEV_READ; + + if (cfg->flag.bs.access & UBCORE_ACCESS_WRITE) + return UMMU_DEV_WRITE | UMMU_DEV_READ; + + return UMMU_DEV_READ; +} + +static int udma_sva_grant(struct ubcore_seg_cfg *cfg, struct iommu_sva *ksva) +{ +#define UDMA_TOKEN_VALUE_INPUT 0 + struct ummu_seg_attr seg_attr = {.token = NULL, .e_bit = UMMU_EBIT_ON}; + struct ummu_token_info token_info; + int perm; + int ret; + + perm = udma_u_get_seg_perm(cfg); + seg_attr.e_bit = (enum ummu_ebit_state)cfg->flag.bs.access & + UBCORE_ACCESS_LOCAL_ONLY; + + if (cfg->flag.bs.token_policy == UBCORE_TOKEN_NONE) { + return ummu_sva_grant_range(ksva, (void *)(uintptr_t)cfg->va, cfg->len, + perm, (void *)&seg_attr); + } else { + seg_attr.token = &token_info; + token_info.input = UDMA_TOKEN_VALUE_INPUT; + token_info.tokenVal = cfg->token_value.token; + ret = ummu_sva_grant_range(ksva, (void *)(uintptr_t)cfg->va, cfg->len, + perm, (void *)&seg_attr); + token_info.tokenVal = 0; + + return ret; + } +} + +struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_tid *udma_tid; + struct udma_segment *seg; + struct iommu_sva *ksva; + int ret = 0; + + ret = udma_check_seg_cfg(udma_dev, cfg); + if (ret) + return NULL; + + seg = kzalloc(sizeof(*seg), GFP_KERNEL); + if (!seg) + return NULL; + + seg->kernel_mode = udata == NULL; + udma_init_seg_cfg(seg, cfg); + + ret = udma_pin_segment(ub_dev, cfg, seg); + if (ret) { + 
dev_err(udma_dev->dev, "pin segment failed, ret = %d.\n", ret); + goto err_pin_seg; + } + + if (udata) + return &seg->core_tseg; + + udma_tid = to_udma_tid(seg->core_tseg.token_id); + + mutex_lock(&udma_dev->ksva_mutex); + ksva = (struct iommu_sva *)xa_load(&udma_dev->ksva_table, udma_tid->tid); + if (!ksva) { + dev_err(udma_dev->dev, + "unable to get ksva while register segment, token maybe is free.\n"); + goto err_load_ksva; + } + + ret = udma_sva_grant(cfg, ksva); + if (ret) { + dev_err(udma_dev->dev, + "ksva grant failed with token policy %d, ret = %d.\n", + cfg->flag.bs.token_policy, ret); + goto err_load_ksva; + } + mutex_unlock(&udma_dev->ksva_mutex); + + return &seg->core_tseg; + +err_load_ksva: + mutex_unlock(&udma_dev->ksva_mutex); + udma_unpin_seg(seg); +err_pin_seg: + kfree(seg); + return NULL; +} + +int udma_unregister_seg(struct ubcore_target_seg *ubcore_seg) +{ + struct udma_tid *udma_tid = to_udma_tid(ubcore_seg->token_id); + struct udma_dev *udma_dev = to_udma_dev(ubcore_seg->ub_dev); + struct udma_segment *seg = to_udma_seg(ubcore_seg); + struct ummu_token_info token_info; + struct iommu_sva *ksva; + int ret; + + if (!seg->kernel_mode) + goto common_process; + + mutex_lock(&udma_dev->ksva_mutex); + ksva = (struct iommu_sva *)xa_load(&udma_dev->ksva_table, udma_tid->tid); + + if (!ksva) { + dev_warn(udma_dev->dev, + "unable to get ksva while unregister segment, token maybe is free.\n"); + } else { + if (seg->token_value_valid) { + token_info.tokenVal = seg->token_value; + ret = ummu_sva_ungrant_range(ksva, + (void *)(uintptr_t)ubcore_seg->seg.ubva.va, + ubcore_seg->seg.len, &token_info); + token_info.tokenVal = 0; + } else { + ret = ummu_sva_ungrant_range(ksva, + (void *)(uintptr_t)ubcore_seg->seg.ubva.va, + ubcore_seg->seg.len, NULL); + } + if (ret) { + mutex_unlock(&udma_dev->ksva_mutex); + dev_err(udma_dev->dev, "unregister segment failed, ret = %d.\n", ret); + return ret; + } + } + mutex_unlock(&udma_dev->ksva_mutex); + +common_process: + udma_unpin_seg(seg); + seg->token_value = 0; + kfree(seg); + + return 0; +} + +struct ubcore_target_seg *udma_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct udma_segment *seg; + + if (cfg->seg.attr.bs.token_policy > UBCORE_TOKEN_PLAIN_TEXT) { + dev_err(udma_dev->dev, "invalid token policy = %d.\n", + cfg->seg.attr.bs.token_policy); + return NULL; + } + + seg = kzalloc(sizeof(*seg), GFP_KERNEL); + if (!seg) + return NULL; + + if (cfg->seg.attr.bs.token_policy != UBCORE_TOKEN_NONE) { + seg->token_value = cfg->token_value.token; + seg->token_value_valid = true; + } + + seg->tid = cfg->seg.token_id >> UDMA_TID_SHIFT; + + return &seg->core_tseg; +} + +int udma_unimport_seg(struct ubcore_target_seg *tseg) +{ + struct udma_segment *seg; + + seg = to_udma_seg(tseg); + seg->token_value = 0; + kfree(seg); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_segment.h b/drivers/ub/urma/hw/udma/udma_segment.h new file mode 100644 index 000000000000..7dba1fbf385e --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_segment.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef __UDMA_SEGMENT_H__ +#define __UDMA_SEGMENT_H__ + +#include "udma_dev.h" + +struct udma_segment { + struct ubcore_target_seg core_tseg; + struct ubcore_umem *umem; + uint32_t token_value; + bool token_value_valid; + bool kernel_mode; + uint32_t tid; +}; + +static inline struct udma_segment *to_udma_seg(struct ubcore_target_seg *seg) +{ + return container_of(seg, struct udma_segment, core_tseg); +} + +struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); +int udma_unregister_seg(struct ubcore_target_seg *seg); +struct ubcore_target_seg *udma_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); +int udma_unimport_seg(struct ubcore_target_seg *tseg); + +#endif /* __UDMA_SEGMENT_H__ */ -- Gitee From c038648bfaa62419dceddc23a21a80f1f725cca6 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:01:20 +0800 Subject: [PATCH 049/243] ub: udma: Support init and uninit dfx function. commit dbb754b5f3278f3143607cc57f734fe273a605b8 openEuler This patch adds the ability to init and uninit dfx function. In addition, this patch enables the dfx function for rc and segment. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 3 +- drivers/ub/urma/hw/udma/udma_common.c | 64 ++++++ drivers/ub/urma/hw/udma/udma_common.h | 6 + drivers/ub/urma/hw/udma/udma_def.h | 33 +++ drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_dfx.c | 299 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_dfx.h | 55 +++++ drivers/ub/urma/hw/udma/udma_main.c | 3 + drivers/ub/urma/hw/udma/udma_rct.c | 6 + drivers/ub/urma/hw/udma/udma_segment.c | 102 +++++++++ 10 files changed, 572 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_dfx.c create mode 100644 drivers/ub/urma/hw/udma/udma_dfx.h diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index a5358afd8e47..a087da421b2e 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -2,6 +2,7 @@ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ - udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o + udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o \ + udma_dfx.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 4b4ccc22124a..375ed4826f6a 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -470,6 +470,55 @@ void udma_destroy_eid_table(struct udma_dev *udma_dev) mutex_destroy(&udma_dev->eid_mutex); } +void udma_dfx_store_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, + uint32_t id, const char *name) +{ + uint32_t *entry; + int ret; + + entry = (uint32_t *)xa_load(&entity->table, id); + if (entry) { + dev_warn(udma_dev->dev, "%s(%u) already exists in DFX.\n", name, id); + return; + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + *entry = id; + + write_lock(&entity->rwlock); + ret = xa_err(xa_store(&entity->table, id, entry, GFP_KERNEL)); + if (ret) { + write_unlock(&entity->rwlock); + dev_err(udma_dev->dev, "store %s to table failed in DFX.\n", name); + kfree(entry); + return; + } + + ++entity->cnt; + write_unlock(&entity->rwlock); +} + +void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, + 
uint32_t id) +{ + void *entry; + + write_lock(&entity->rwlock); + entry = xa_load(&entity->table, id); + if (!entry) { + write_unlock(&entity->rwlock); + return; + } + + xa_erase(&entity->table, id); + kfree(entry); + --entity->cnt; + write_unlock(&entity->rwlock); +} + static struct ubcore_umem *udma_pin_k_addr(struct ubcore_device *ub_dev, uint64_t va, uint64_t len) { @@ -579,6 +628,21 @@ void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_ dma_free_iova(slot); } +void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, uint32_t len, + uint32_t *ctx) +{ + uint32_t i; + + pr_info("*************udma%u %s(%u) CONTEXT INFO *************\n", + udev->adev_id, name, id); + + for (i = 0; i < len; ++i) + pr_info("udma%u %s(%u) CONTEXT(byte%4lu): %08x\n", + udev->adev_id, name, id, (i + 1) * sizeof(uint32_t), ctx[i]); + + pr_info("**************************************************\n"); +} + void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size) { uint32_t i; diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 3cec74f9ec8e..aba7b4afddb3 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -72,6 +72,10 @@ void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); void udma_destroy_eid_table(struct udma_dev *udma_dev); +void udma_dfx_store_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, + uint32_t id, const char *name); +void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, + uint32_t id); int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); @@ -82,6 +86,8 @@ static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len) return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; } +void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, uint32_t len, + uint32_t *ctx); void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size); #endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h index c45c69cd0271..ca107e34a37c 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -69,6 +69,39 @@ struct udma_caps { struct udma_tbl seid; }; +struct udma_dfx_jetty { + uint32_t id; + uint32_t jfs_depth; +}; + +struct udma_dfx_jfs { + uint32_t id; + uint32_t depth; +}; + +struct udma_dfx_seg { + uint32_t id; + struct ubcore_ubva ubva; + uint64_t len; + struct ubcore_token token_value; +}; + +struct udma_dfx_entity { + uint32_t cnt; + struct xarray table; + rwlock_t rwlock; +}; + +struct udma_dfx_info { + struct udma_dfx_entity rc; + struct udma_dfx_entity jetty; + struct udma_dfx_entity jetty_grp; + struct udma_dfx_entity jfs; + struct udma_dfx_entity jfr; + struct udma_dfx_entity jfc; + struct udma_dfx_entity seg; +}; + struct udma_sw_db_page { struct list_head list; struct ubcore_umem *umem; diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 89b91ff08e79..67e8847d66a6 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -11,6 +11,7 @@ #include #include +extern 
bool dfx_switch; extern uint32_t jfc_arm_mode; extern bool dump_aux_info; @@ -110,6 +111,7 @@ struct udma_dev { struct iommu_sva *ksva; struct list_head db_list[UDMA_DB_TYPE_NUM]; struct mutex db_mutex; + struct udma_dfx_info *dfx_info; uint32_t status; struct udma_dev_debugfs *dbgfs; uint32_t ue_num; diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c new file mode 100644 index 000000000000..f6920f879eea --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include "udma_cmd.h" +#include "udma_jfr.h" +#include "udma_jfs.h" +#include "udma_jfc.h" +#include "udma_jetty.h" +#include "udma_rct.h" +#include "udma_dfx.h" + +bool dfx_switch; + +static int udma_query_res_list(struct udma_dev *udma_dev, + struct udma_dfx_entity *entity, + struct ubcore_res_val *val, + const char *name) +{ + struct ubcore_res_list_val *res_list = (struct ubcore_res_list_val *)val->addr; + size_t idx = 0; + uint32_t *id; + + res_list->cnt = 0; + + read_lock(&entity->rwlock); + if (entity->cnt == 0) { + read_unlock(&entity->rwlock); + return 0; + } + + res_list->list = vmalloc(sizeof(uint32_t) * entity->cnt); + if (!res_list->list) { + read_unlock(&entity->rwlock); + dev_err(udma_dev->dev, "failed to vmalloc %s_list, %s_cnt = %u!\n", + name, name, entity->cnt); + return -ENOMEM; + } + + xa_for_each(&entity->table, idx, id) { + if (res_list->cnt >= entity->cnt) { + read_unlock(&entity->rwlock); + vfree(res_list->list); + dev_err(udma_dev->dev, + "failed to query %s_id, %s_cnt = %u!\n", + name, name, entity->cnt); + return -EINVAL; + } + res_list->list[res_list->cnt] = idx; + ++res_list->cnt; + } + read_unlock(&entity->rwlock); + + return 0; +} + +static int udma_query_res_dev_seg(struct udma_dev *udma_dev, + struct ubcore_res_val *val) +{ + struct ubcore_res_seg_val *res_list = (struct ubcore_res_seg_val *)val->addr; + struct ubcore_seg_info *seg_list; + struct udma_dfx_seg *seg = NULL; + size_t idx; + + res_list->seg_cnt = 0; + + read_lock(&udma_dev->dfx_info->seg.rwlock); + if (udma_dev->dfx_info->seg.cnt == 0) { + read_unlock(&udma_dev->dfx_info->seg.rwlock); + return 0; + } + + seg_list = vmalloc(sizeof(*seg_list) * udma_dev->dfx_info->seg.cnt); + if (!seg_list) { + read_unlock(&udma_dev->dfx_info->seg.rwlock); + return -ENOMEM; + } + + xa_for_each(&udma_dev->dfx_info->seg.table, idx, seg) { + if (res_list->seg_cnt >= udma_dev->dfx_info->seg.cnt) { + read_unlock(&udma_dev->dfx_info->seg.rwlock); + vfree(seg_list); + dev_err(udma_dev->dev, + "failed to query seg_list, seg_cnt = %u!\n", + udma_dev->dfx_info->seg.cnt); + return -EINVAL; + } + seg_list[res_list->seg_cnt].token_id = seg->id; + seg_list[res_list->seg_cnt].len = seg->len; + seg_list[res_list->seg_cnt].ubva = seg->ubva; + seg_list[res_list->seg_cnt].ubva.va = 0; + ++res_list->seg_cnt; + } + read_unlock(&udma_dev->dfx_info->seg.rwlock); + + res_list->seg_list = seg_list; + + return 0; +} + +static int udma_query_res_rc(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_rc_val *res_rc = (struct ubcore_res_rc_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_rc_ctx *rcc; + uint32_t *rc_id; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->rc, val, "rc"); + + rc_id = (uint32_t 
*)xa_load(&udma_dev->dfx_info->rc.table, key->key); + if (!rc_id) { + dev_err(udma_dev->dev, "failed to query rc, rc_id = %u.\n", + key->key); + return -EINVAL; + } + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_RC_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + rcc = (struct udma_rc_ctx *)mailbox->buf; + res_rc->rc_id = key->key; + res_rc->depth = 1 << rcc->rce_shift; + res_rc->type = 0; + res_rc->state = 0; + rcc->rce_base_addr_l = 0; + rcc->rce_base_addr_h = 0; + + udma_dfx_ctx_print(udma_dev, "RC", key->key, sizeof(*rcc) / sizeof(uint32_t), + (uint32_t *)rcc); + udma_free_cmd_mailbox(udma_dev, mailbox); + + return 0; +} + +static int udma_query_res_seg(struct udma_dev *udma_dev, struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_seg_val *res_seg = (struct ubcore_res_seg_val *)val->addr; + struct udma_dfx_seg *seg; + + if (key->key_cnt == 0) + return udma_query_res_dev_seg(udma_dev, val); + + read_lock(&udma_dev->dfx_info->seg.rwlock); + seg = (struct udma_dfx_seg *)xa_load(&udma_dev->dfx_info->seg.table, key->key); + if (!seg) { + read_unlock(&udma_dev->dfx_info->seg.rwlock); + dev_err(udma_dev->dev, "failed to query seg, token_id = %u.\n", + key->key); + return -EINVAL; + } + + res_seg->seg_list = vmalloc(sizeof(struct ubcore_seg_info)); + if (!res_seg->seg_list) { + read_unlock(&udma_dev->dfx_info->seg.rwlock); + return -ENOMEM; + } + + res_seg->seg_cnt = 1; + res_seg->seg_list->token_id = seg->id; + res_seg->seg_list->len = seg->len; + res_seg->seg_list->ubva = seg->ubva; + res_seg->seg_list->ubva.va = 0; + read_unlock(&udma_dev->dfx_info->seg.rwlock); + + return 0; +} + +static int udma_query_res_dev_ta(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_dev_ta_val *res_ta = (struct ubcore_res_dev_ta_val *)val->addr; + struct udma_dfx_info *dfx = udma_dev->dfx_info; + struct udma_dfx_entity_cnt udma_dfx_entity_cnt_ta[] = { + {&dfx->rc.rwlock, &dfx->rc, &res_ta->rc_cnt}, + {&dfx->jetty.rwlock, &dfx->jetty, &res_ta->jetty_cnt}, + {&dfx->jetty_grp.rwlock, &dfx->jetty_grp, &res_ta->jetty_group_cnt}, + {&dfx->jfs.rwlock, &dfx->jfs, &res_ta->jfs_cnt}, + {&dfx->jfr.rwlock, &dfx->jfr, &res_ta->jfr_cnt}, + {&dfx->jfc.rwlock, &dfx->jfc, &res_ta->jfc_cnt}, + {&dfx->seg.rwlock, &dfx->seg, &res_ta->seg_cnt}, + }; + + int size = ARRAY_SIZE(udma_dfx_entity_cnt_ta); + int i; + + for (i = 0; i < size; i++) { + read_lock(udma_dfx_entity_cnt_ta[i].rwlock); + *udma_dfx_entity_cnt_ta[i].res_cnt = + udma_dfx_entity_cnt_ta[i].entity->cnt; + read_unlock(udma_dfx_entity_cnt_ta[i].rwlock); + } + + return 0; +} + +typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val); + +static udma_query_res_handler g_udma_query_res_handlers[] = { + [0] = NULL, + [UBCORE_RES_KEY_RC] = udma_query_res_rc, + [UBCORE_RES_KEY_SEG] = udma_query_res_seg, + [UBCORE_RES_KEY_DEV_TA] = udma_query_res_dev_ta, +}; + +int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + + if (!dfx_switch) { + dev_warn(udma_dev->dev, "the dfx_switch is not enabled.\n"); + return -EPERM; + } + + if (key->type < UBCORE_RES_KEY_VTP || key->type > UBCORE_RES_KEY_DEV_TP || + g_udma_query_res_handlers[key->type] == NULL) { + dev_err(udma_dev->dev, "key type(%u) invalid.\n", key->type); + return -EINVAL; + } + + return 
g_udma_query_res_handlers[key->type](udma_dev, key, val); +} + +static void list_lock_init(struct udma_dfx_info *dfx) +{ + struct udma_dfx_entity_initialization udma_dfx_entity_initialization_arr[] = { + {&dfx->rc.rwlock, &dfx->rc.table}, + {&dfx->jetty.rwlock, &dfx->jetty.table}, + {&dfx->jetty_grp.rwlock, &dfx->jetty_grp.table}, + {&dfx->jfs.rwlock, &dfx->jfs.table}, + {&dfx->jfr.rwlock, &dfx->jfr.table}, + {&dfx->jfc.rwlock, &dfx->jfc.table}, + {&dfx->seg.rwlock, &dfx->seg.table}, + }; + int size = ARRAY_SIZE(udma_dfx_entity_initialization_arr); + int i; + + for (i = 0; i < size; i++) { + rwlock_init(udma_dfx_entity_initialization_arr[i].rwlock); + xa_init(udma_dfx_entity_initialization_arr[i].table); + } +} + +int udma_dfx_init(struct udma_dev *udma_dev) +{ + if (!dfx_switch) + return 0; + + udma_dev->dfx_info = kzalloc(sizeof(struct udma_dfx_info), GFP_KERNEL); + if (!udma_dev->dfx_info) + return -ENOMEM; + + list_lock_init(udma_dev->dfx_info); + + return 0; +} + +static void udma_dfx_destroy_xa(struct udma_dev *udma_dev, struct xarray *table, + const char *name) +{ + if (!xa_empty(table)) + dev_err(udma_dev->dev, "%s table is not empty.\n", name); + xa_destroy(table); +} + +static void udma_dfx_table_free(struct udma_dev *dev) +{ + udma_dfx_destroy_xa(dev, &dev->dfx_info->rc.table, "rc"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->jetty.table, "jetty"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->jetty_grp.table, "jetty_grp"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->jfs.table, "jfs"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->jfr.table, "jfr"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->jfc.table, "jfc"); + udma_dfx_destroy_xa(dev, &dev->dfx_info->seg.table, "seg"); +} + +void udma_dfx_uninit(struct udma_dev *udma_dev) +{ + if (!dfx_switch) + return; + + udma_dfx_table_free(udma_dev); + kfree(udma_dev->dfx_info); + udma_dev->dfx_info = NULL; +} + +module_param(dfx_switch, bool, 0444); +MODULE_PARM_DESC(dfx_switch, "Set whether to enable the udma_dfx function, default: 0(0:off, 1:on)"); diff --git a/drivers/ub/urma/hw/udma/udma_dfx.h b/drivers/ub/urma/hw/udma/udma_dfx.h new file mode 100644 index 000000000000..92c0db1aa744 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_dfx.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#ifndef __UDMA_DFX_H__ +#define __UDMA_DFX_H__ + +#include "udma_dev.h" +#include "udma_ctx.h" +#include "udma_jetty.h" + +#define UDMA_RX_THRESHOLD_0 0 +#define UDMA_RX_THRESHOLD_64 64 +#define UDMA_RX_THRESHOLD_512 512 +#define UDMA_RX_THRESHOLD_4096 4096 + +#define UDMA_RX_REQ_EPSN_H_SHIFT 16 + +enum udma_limit_wl { + UDMA_LIMIT_WL_0, + UDMA_LIMIT_WL_64, + UDMA_LIMIT_WL_512, + UDMA_LIMIT_WL_4096, +}; + +struct udma_dfx_entity_initialization { + rwlock_t *rwlock; + struct xarray *table; +}; + +struct udma_dfx_entity_cnt { + rwlock_t *rwlock; + struct udma_dfx_entity *entity; + uint32_t *res_cnt; +}; + +static inline uint32_t to_udma_rx_threshold(uint32_t limit_wl) +{ + switch (limit_wl) { + case UDMA_LIMIT_WL_0: + return UDMA_RX_THRESHOLD_0; + case UDMA_LIMIT_WL_64: + return UDMA_RX_THRESHOLD_64; + case UDMA_LIMIT_WL_512: + return UDMA_RX_THRESHOLD_512; + default: + return UDMA_RX_THRESHOLD_4096; + } +} + +int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); +int udma_dfx_init(struct udma_dev *udma_dev); +void udma_dfx_uninit(struct udma_dev *udma_dev); + +#endif /* __UDMA_DFX_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 8ae10ba7fbaa..5bf8631260a5 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -23,6 +23,7 @@ #include "udma_ctx.h" #include "udma_rct.h" #include "udma_tid.h" +#include "udma_dfx.h" #include "udma_eid.h" #include "udma_debugfs.h" #include "udma_common.h" @@ -161,6 +162,7 @@ static struct ubcore_ops g_dev_ops = { .abi_version = 0, .query_device_attr = udma_query_device_attr, .query_device_status = udma_query_device_status, + .query_res = udma_query_res, .config_device = udma_config_device, .alloc_ucontext = udma_alloc_ucontext, .free_ucontext = udma_free_ucontext, @@ -686,6 +688,7 @@ static const struct udma_func_map udma_dev_func_map[] = { {"dev param", udma_init_dev_param, udma_uninit_dev_param}, {"cmd", udma_cmd_init, udma_cmd_cleanup}, {"dev tid", udma_alloc_dev_tid, udma_free_dev_tid}, + {"dfx", udma_dfx_init, udma_dfx_uninit}, {"db page", udma_create_db_page, udma_destroy_db_page}, }; diff --git a/drivers/ub/urma/hw/udma/udma_rct.c b/drivers/ub/urma/hw/udma/udma_rct.c index 149b9b6f27b4..599c80c118fd 100644 --- a/drivers/ub/urma/hw/udma/udma_rct.c +++ b/drivers/ub/urma/hw/udma/udma_rct.c @@ -91,6 +91,9 @@ static int udma_alloc_rc_queue(struct udma_dev *dev, goto err_store_rcq_id; } + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->rc, rcq->id, "rc"); + return ret; err_store_rcq_id: @@ -125,6 +128,9 @@ void udma_free_rc_queue(struct udma_dev *dev, int rc_queue_id) dev_err(dev->dev, "udma destroy rc queue ctx failed, ret = %d.\n", ret); + if (dfx_switch) + udma_dfx_delete_id(dev, &dev->dfx_info->rc, rc_queue_id); + udma_free_iova(dev, rcq->buf.entry_size * rcq->buf.entry_cnt, rcq->buf.kva_or_slot, rcq->buf.addr); rcq->buf.kva_or_slot = NULL; diff --git a/drivers/ub/urma/hw/udma/udma_segment.c b/drivers/ub/urma/hw/udma/udma_segment.c index 69098a2f3a67..90615d1ae2b4 100644 --- a/drivers/ub/urma/hw/udma/udma_segment.c +++ b/drivers/ub/urma/hw/udma/udma_segment.c @@ -107,6 +107,101 @@ static int udma_sva_grant(struct ubcore_seg_cfg *cfg, struct iommu_sva *ksva) } } +static int udma_set_seg_eid(struct udma_dev *udma_dev, uint32_t eid_index, + union ubcore_eid *eid) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_seid_upi *seid_upi; + int ret; + + if 
(udma_dev->is_ue) { + dev_info(udma_dev->dev, + "The ue does not support the delivery of seid(%u) mailbox.\n", + eid_index); + return 0; + } + + mailbox = udma_alloc_cmd_mailbox(udma_dev); + if (!mailbox) { + dev_err(udma_dev->dev, + "failed to alloc mailbox for get tp seid.\n"); + return -ENOMEM; + } + + mbox_attr.tag = eid_index; + mbox_attr.op = UDMA_CMD_READ_SEID_UPI; + ret = udma_post_mbox(udma_dev, mailbox, &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, + "send tp eid table mailbox cmd failed, ret is %d.\n", ret); + } else { + seid_upi = (struct udma_seid_upi *)mailbox->buf; + *eid = seid_upi->seid; + } + + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +static void udma_dfx_store_seg(struct udma_dev *udma_dev, + struct ubcore_seg_cfg *cfg) +{ + struct udma_dfx_seg *seg; + int ret; + + seg = (struct udma_dfx_seg *)xa_load(&udma_dev->dfx_info->seg.table, + cfg->token_id->token_id); + if (seg) { + dev_warn(udma_dev->dev, "segment already exists in DFX.\n"); + return; + } + + seg = kzalloc(sizeof(*seg), GFP_KERNEL); + if (!seg) + return; + + seg->id = cfg->token_id->token_id; + seg->ubva.va = cfg->va; + seg->len = cfg->len; + seg->token_value = cfg->token_value; + if (udma_set_seg_eid(udma_dev, cfg->eid_index, &seg->ubva.eid)) { + kfree(seg); + return; + } + + write_lock(&udma_dev->dfx_info->seg.rwlock); + ret = xa_err(xa_store(&udma_dev->dfx_info->seg.table, cfg->token_id->token_id, + seg, GFP_KERNEL)); + if (ret) { + write_unlock(&udma_dev->dfx_info->seg.rwlock); + dev_err(udma_dev->dev, "store segment to table failed in DFX.\n"); + kfree(seg); + return; + } + + ++udma_dev->dfx_info->seg.cnt; + write_unlock(&udma_dev->dfx_info->seg.rwlock); +} + +static void udma_dfx_delete_seg(struct udma_dev *udma_dev, uint32_t token_id, + uint64_t va) +{ + struct udma_dfx_seg *seg; + + write_lock(&udma_dev->dfx_info->seg.rwlock); + seg = (struct udma_dfx_seg *)xa_load(&udma_dev->dfx_info->seg.table, + token_id); + if (seg && seg->id == token_id && seg->ubva.va == va) { + xa_erase(&udma_dev->dfx_info->seg.table, token_id); + seg->token_value.token = 0; + kfree(seg); + --udma_dev->dfx_info->seg.cnt; + } + write_unlock(&udma_dev->dfx_info->seg.rwlock); +} + struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev, struct ubcore_seg_cfg *cfg, struct ubcore_udata *udata) @@ -156,6 +251,9 @@ struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev, } mutex_unlock(&udma_dev->ksva_mutex); + if (dfx_switch) + udma_dfx_store_seg(udma_dev, cfg); + return &seg->core_tseg; err_load_ksva: @@ -204,6 +302,10 @@ int udma_unregister_seg(struct ubcore_target_seg *ubcore_seg) } mutex_unlock(&udma_dev->ksva_mutex); + if (dfx_switch) + udma_dfx_delete_seg(udma_dev, ubcore_seg->token_id->token_id, + ubcore_seg->seg.ubva.va); + common_process: udma_unpin_seg(seg); seg->token_value = 0; -- Gitee From 9449d5469b70db915cd5aeab7c288c543ab303c1 Mon Sep 17 00:00:00 2001 From: Lizhi He Date: Tue, 2 Sep 2025 14:14:15 +0800 Subject: [PATCH 050/243] dma-mapping: benchmark: add support for UB devices commit 72204766a0326d01d1c357357038bd74c1e73825 openEuler The current dma_map benchmark only supports platform devices and PCI devices. This patch adds support for UB devices. 
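As with platform and PCI devices, the benchmark should then be usable by binding the dma_map_benchmark driver to a device and running the existing selftest. A hedged usage example for a UB entity follows; the sysfs path and entity name are assumptions derived from the bus name, not taken from this patch:

	# Bind a UB entity to the benchmark driver (path and name illustrative).
	echo <entity> > /sys/bus/ub/drivers/dma_map_benchmark/bind
	# Run the selftest as usual, e.g. 8 threads for 10 seconds:
	./dma_map_benchmark -t 8 -s 10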
Signed-off-by: Xiaofeng Liu Signed-off-by: Li Wentao Signed-off-by: Lizhi He Signed-off-by: zhaolichang <943677312@qq.com> --- kernel/dma/map_benchmark.c | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index cc19a3efea89..23aeba267cfa 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -18,6 +18,9 @@ #include #include #include +#ifdef CONFIG_UB_UBUS +#include +#endif struct map_benchmark_data { struct map_benchmark bparam; @@ -351,6 +354,19 @@ static struct pci_driver map_benchmark_pci_driver = { .probe = map_benchmark_pci_probe, }; +#ifdef CONFIG_UB_UBUS +static int map_benchmark_ub_probe(struct ub_entity *uent, + const struct ub_device_id *id) +{ + return __map_benchmark_probe(&uent->dev); +} + +static struct ub_driver map_benchmark_ub_driver = { + .name = "dma_map_benchmark", + .probe = map_benchmark_ub_probe, +}; +#endif + static int __init map_benchmark_init(void) { int ret; @@ -360,16 +376,30 @@ static int __init map_benchmark_init(void) return ret; ret = platform_driver_register(&map_benchmark_platform_driver); - if (ret) { - pci_unregister_driver(&map_benchmark_pci_driver); - return ret; - } + if (ret) + goto err_reg_platform; + +#ifdef CONFIG_UB_UBUS + ret = ub_register_driver(&map_benchmark_ub_driver); + if (ret) + goto err_reg_ub; +#endif return 0; +#ifdef CONFIG_UB_UBUS +err_reg_ub: + platform_driver_unregister(&map_benchmark_platform_driver); +#endif +err_reg_platform: + pci_unregister_driver(&map_benchmark_pci_driver); + return ret; } static void __exit map_benchmark_cleanup(void) { +#ifdef CONFIG_UB_UBUS + ub_unregister_driver(&map_benchmark_ub_driver); +#endif platform_driver_unregister(&map_benchmark_platform_driver); pci_unregister_driver(&map_benchmark_pci_driver); } -- Gitee From 72f8e0a376961ce85a6adb8e5af2e31f3101ea8c Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Fri, 14 Nov 2025 14:18:53 +0800 Subject: [PATCH 051/243] ub:ubus: call ub_host_probe inside register_ub_manage_subsystem_ops commit 59de5029039dc2a046518ce1cdf5e3bf7bc3ac02 openEuler Call ub_host_probe inside register_ub_manage_subsystem_ops so that ub_host_probe can become a static function; the same applies to ub_host_remove. Query the entity and port NA first, then do the config read in ub_fm_flush_ubc_info, because the config read checks the NA. Also fix some comment description issues.
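With this rework, a vendor module no longer calls ub_host_probe()/ub_host_remove() itself; it only registers its ops, and the core matches ops->vendor against the vendor bits of the controller GUID before probing. A hedged sketch of the resulting vendor-side flow (only the register/unregister helpers and the .vendor field come from this patch; the vendor ID and module boilerplate are illustrative):

	static const struct ub_manage_subsystem_ops example_ops = {
		.vendor = 0x1234,	/* hypothetical; matched against UBC GUID vendor bits */
		/* ... vendor callbacks, e.g. .ras_handler_remove ... */
	};

	static int __init example_ubus_init(void)
	{
		/* ub_host_probe() now runs inside this call on a vendor match */
		return register_ub_manage_subsystem_ops(&example_ops);
	}

	static void __exit example_ubus_exit(void)
	{
		/* ub_host_remove() now runs inside this call */
		unregister_ub_manage_subsystem_ops(&example_ops);
	}

	module_init(example_ubus_init);
	module_exit(example_ubus_exit);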
Signed-off-by: Yahui Liu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/pool.c | 20 ++-- drivers/ub/ubus/ubus.h | 2 - drivers/ub/ubus/ubus_driver.c | 100 +++++++++++-------- drivers/ub/ubus/ubus_driver.h | 2 - drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c | 7 -- include/ub/ubus/ubus.h | 98 +++++++++--------- 6 files changed, 121 insertions(+), 108 deletions(-) diff --git a/drivers/ub/ubus/pool.c b/drivers/ub/ubus/pool.c index 2aeb8d57ee9a..e86b19b58f63 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -456,6 +456,18 @@ int ub_fm_flush_ubc_info(struct ub_bus_controller *ubc) if (!buf) goto out; + ret = ub_query_ent_na(ubc->uent, buf); + if (ret) { + dev_err(dev, "update cluster ubc cna failed, ret=%d\n", ret); + goto free_buf; + } + + ret = ub_query_port_na(ubc->uent, buf); + if (ret) { + dev_err(dev, "update cluster ubc port cna failed, ret=%d\n", ret); + goto free_buf; + } + ret = ub_cfg_read_word(ubc->uent, UB_UPI, &upi); if (ret) { dev_err(dev, "update cluster upi failed, ret=%d\n", ret); @@ -489,14 +501,6 @@ int ub_fm_flush_ubc_info(struct ub_bus_controller *ubc) ubc->uent->fm_cna = fm_cna & UB_FM_CNA_MASK; dev_info(dev, "update cluster ubc fm cna to %#x\n", ubc->uent->fm_cna); - ret = ub_query_ent_na(ubc->uent, buf); - if (ret) { - dev_err(dev, "update cluster ubc cna failed, ret=%d\n", ret); - goto free_buf; - } - - ret = ub_query_port_na(ubc->uent, buf); - free_buf: kfree(buf); out: diff --git a/drivers/ub/ubus/ubus.h b/drivers/ub/ubus/ubus.h index a4b46402e32d..d26e0816b89b 100644 --- a/drivers/ub/ubus/ubus.h +++ b/drivers/ub/ubus/ubus.h @@ -56,8 +56,6 @@ static inline bool ub_entity_test_priv_flag(struct ub_entity *uent, int bit) return test_bit(bit, &uent->priv_flags); } -int ub_host_probe(void); -void ub_host_remove(void); struct ub_bus_controller *ub_find_bus_controller(u32 ctl_no); struct ub_manage_subsystem_ops { diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index b4c6f0d3b760..9431bbccd3b0 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -33,45 +33,12 @@ MODULE_PARM_DESC(entity_flex_en, "Entity Flexible enable: default: 0"); DECLARE_RWSEM(ub_bus_sem); +#define UBC_GUID_VENDOR_SHIFT 48 +#define UBC_GUID_VENDOR_MASK GENMASK(15, 0) + static DEFINE_MUTEX(manage_subsystem_ops_mutex); static const struct ub_manage_subsystem_ops *manage_subsystem_ops; -int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) -{ - if (!ops) - return -EINVAL; - - mutex_lock(&manage_subsystem_ops_mutex); - if (!manage_subsystem_ops) { - manage_subsystem_ops = ops; - mutex_unlock(&manage_subsystem_ops_mutex); - pr_info("ub manage subsystem ops register successfully\n"); - return 0; - } - - pr_warn("ub manage subsystem ops has been registered\n"); - mutex_unlock(&manage_subsystem_ops_mutex); - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(register_ub_manage_subsystem_ops); - -void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) -{ - if (!ops) - return; - - mutex_lock(&manage_subsystem_ops_mutex); - if (manage_subsystem_ops == ops) { - manage_subsystem_ops = NULL; - pr_info("ub manage subsystem ops unregister successfully\n"); - } else { - pr_warn("ub manage subsystem ops is not registered by this vendor\n"); - } - mutex_unlock(&manage_subsystem_ops_mutex); -} -EXPORT_SYMBOL_GPL(unregister_ub_manage_subsystem_ops); - const struct ub_manage_subsystem_ops *get_ub_manage_subsystem_ops(void) { return manage_subsystem_ops; @@ -653,7 +620,7 @@ static void 
ubus_driver_resource_drain(void) ub_static_cluster_instance_drain(); } -int ub_host_probe(void) +static int ub_host_probe(void) { int ret; @@ -724,9 +691,8 @@ int ub_host_probe(void) ub_bus_type_uninit(); return ret; } -EXPORT_SYMBOL_GPL(ub_host_probe); -void ub_host_remove(void) +static void ub_host_remove(void) { message_rx_uninit(); if (manage_subsystem_ops && manage_subsystem_ops->ras_handler_remove) @@ -741,7 +707,61 @@ void ub_host_remove(void) unregister_ub_cfg_ops(); ub_bus_type_uninit(); } -EXPORT_SYMBOL_GPL(ub_host_remove); + +int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) +{ + struct ub_bus_controller *ubc; + int ret; + + if (!ops) { + pr_err("ub manage subsystem ops is NULL\n"); + return -EINVAL; + } + + mutex_lock(&manage_subsystem_ops_mutex); + if (!manage_subsystem_ops) { + list_for_each_entry(ubc, &ubc_list, node) { + if (((ubc->attr.ubc_guid_high >> UBC_GUID_VENDOR_SHIFT) & + UBC_GUID_VENDOR_MASK) == ops->vendor) { + manage_subsystem_ops = ops; + ret = ub_host_probe(); + if (ret) + manage_subsystem_ops = NULL; + else + pr_info("ub manage subsystem ops register successfully\n"); + + mutex_unlock(&manage_subsystem_ops_mutex); + return ret; + } + } + pr_warn("ub manage subsystem ops is not match with any of ub controller\n"); + } else { + pr_warn("ub manage subsystem ops has been registered\n"); + } + mutex_unlock(&manage_subsystem_ops_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(register_ub_manage_subsystem_ops); + +void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) +{ + if (!ops) { + pr_err("ub manage subsystem ops is NULL\n"); + return; + } + + mutex_lock(&manage_subsystem_ops_mutex); + if (manage_subsystem_ops == ops) { + ub_host_remove(); + manage_subsystem_ops = NULL; + pr_info("ub manage subsystem ops unregister successfully\n"); + } else { + pr_warn("ub manage subsystem ops is not registered by this vendor\n"); + } + mutex_unlock(&manage_subsystem_ops_mutex); +} +EXPORT_SYMBOL_GPL(unregister_ub_manage_subsystem_ops); static int __init ubus_driver_init(void) { diff --git a/drivers/ub/ubus/ubus_driver.h b/drivers/ub/ubus/ubus_driver.h index f2bff32bbee9..b2eab906fa31 100644 --- a/drivers/ub/ubus/ubus_driver.h +++ b/drivers/ub/ubus/ubus_driver.h @@ -8,7 +8,5 @@ extern struct rw_semaphore ub_bus_sem; extern struct bus_type ub_service_bus_type; -int ub_host_probe(void); -void ub_host_remove(void); #endif /* __UBUS_DRIVER_H__ */ diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c index be86f055cb34..3627b0e8f018 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c @@ -53,10 +53,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) if (ret) return ret; - ret = ub_host_probe(); - if (ret) - goto host_probe_fail; - ret = platform_driver_register(drv); if (ret) goto platform_driver_register_fail; @@ -64,8 +60,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) return 0; platform_driver_register_fail: - ub_host_remove(); -host_probe_fail: unregister_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops); return ret; } @@ -73,7 +67,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) static void __exit hisi_ubus_driver_unregister(struct platform_driver *drv) { platform_driver_unregister(drv); - ub_host_remove(); unregister_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops); } diff --git a/include/ub/ubus/ubus.h 
b/include/ub/ubus/ubus.h index a81d652a18ff..ca3ba63c226a 100644 --- a/include/ub/ubus/ubus.h +++ b/include/ub/ubus/ubus.h @@ -193,7 +193,7 @@ struct ub_entity { u32 token_id; u32 token_value; - /* mue & ue info */ + /* MUE & UE info */ u8 is_mue; u16 total_ues; u16 num_ues; @@ -205,7 +205,7 @@ struct ub_entity { /* entity topology info */ struct list_head node; struct ub_bus_controller *ubc; - struct ub_entity *pue; /* ue/mue connected to their mue */ + struct ub_entity *pue; /* UE/MUE connected to their MUE */ int topo_rank; /* The levels of Breadth-First Search */ /* entity port info */ @@ -334,10 +334,10 @@ struct ub_dynids { * @shutdown: Hook into reboot_notifier_list (kernel/sys.c). * Intended to stop any idling operations. * @virt_configure: Optional driver callback to allow configuration of - * ues. This function is called to enable or disable ues. + * UEs. This function is called to enable or disable UEs. * @virt_notify: Optional driver callback to notify the driver about - * changes in ue status. This function is called - * when the status of a ue changes. + * changes in UE status. This function is called + * when the status of a UE changes. * @activate: Activate a specific entity. This function is called to * activate an entity by its index. * @deactivate: Deactivate a specific entity. This function is called to @@ -510,14 +510,14 @@ void ub_bus_type_iommu_ops_set(const struct iommu_ops *ops); const struct iommu_ops *ub_bus_type_iommu_ops_get(void); /** - * ub_get_ent_by_eid() - Searching for UB Devices by EID. + * ub_get_ent_by_eid() - Searching for UB entity by EID. * @eid: entity EID. * * Traverse the UB bus device linked list and search for the device with * the target EID. You need to call ub_entity_put() after using it. * * Context: Any context. - * Return: The device found, or NULL if not found. + * Return: The entity found, or NULL if not found. */ struct ub_entity *ub_get_ent_by_eid(unsigned int eid); @@ -566,8 +566,7 @@ struct ub_entity *ub_get_entity(unsigned int vendor, unsigned int entity, * @uent: UB entity. * @enable: Enable or disable. * - * Enables or disables the entity access bus and the path through which - * the bus accesses the entity. + * Enable or disable the communication channel between entity and user host. * * Context: Any context. */ @@ -588,31 +587,31 @@ int ub_set_user_info(struct ub_entity *uent); * ub_unset_user_info() - Deinitialize host information for the entity. * @uent: UB entity. * - * Clearing the Host Information of a entity. + * Clearing the host information of an entity. * * Context: Any context. */ void ub_unset_user_info(struct ub_entity *uent); /** - * ub_enable_entities() - Enable ues of mue in batches. - * @pue: UB mue. + * ub_enable_entities() - Enable UEs of MUE in batches. + * @pue: UB MUE. * @nums: Number of enabled entities. * - * Create ues in batches, initialize them, and add them to the system. + * Create and initialize UEs in batches and add to the system. * * Context: Any context. - * Return: 0 if success, or %-EINVAL if @pue type is not mue or nums over - * mue's total ue nums, or %-ENOMEM if the system is out of memory, + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or nums over + * MUE's total UE nums, or %-ENOMEM if the system is out of memory, * or other failed negative values. */ int ub_enable_entities(struct ub_entity *pue, int nums); /** - * ub_disable_entities() - Disable ues of mue in batches. - * @pue: UB mue. + * ub_disable_entities() - Disable UEs of MUE in batches. + * @pue: UB MUE. 
* - * Remove all enabled ues under the mue from the system. + * Remove all enabled UEs under the MUE from the system. * * Context: Any context. */ @@ -620,29 +619,29 @@ void ub_disable_entities(struct ub_entity *pue); /** * ub_enable_ue() - Enable a single ue. - * @pue: UB mue. + * @pue: UB MUE. * @entity_idx: Number of the entity to be enabled. * - * Create a specified ue under mue, initialize the ue, + * Create a specified UE under MUE, initialize the ue, * and add it to the system. * * Context: Any context. - * Return: 0 if success, or %-EINVAL if @pue type is not mue or @entity_idx - * is no longer in the ue range of mue, or %-EEXIST if entity has been + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or @entity_idx + * is no longer in the UE range of MUE, or %-EEXIST if entity has been * enabled, or other failed negative values. */ int ub_enable_ue(struct ub_entity *pue, int entity_idx); /** * ub_disable_ue() - Disable a single ue. - * @pue: UB mue. + * @pue: UB MUE. * @entity_idx: Number of the entity to be disabled. * - * Remove a specified ue. + * Remove a specified UE. * * Context: Any context. - * Return: 0 if success, or %-EINVAL if @pue type is not mue or @entity_idx - * is no longer in the ue range of mue, or %-ENODEV if entity hasn't + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or @entity_idx + * is no longer in the UE range of MUE, or %-ENODEV if entity hasn't * been enabled. */ int ub_disable_ue(struct ub_entity *pue, int entity_idx); @@ -662,7 +661,7 @@ bool ub_get_entity_flex_en(void); * @uent: UB entity. * * Return the EID of bus instance if the entity has already been bound, - * or controller's EID. + * otherwise return controller's EID. * * Context: Any context. * Return: positive number if success, or %-EINVAL if @dev is %NULL, @@ -743,7 +742,7 @@ void ub_unregister_share_port(struct ub_entity *uent, u16 port_id, * ub_reset_entity() - Function entity level reset. * @ent: UB entity. * - * Reset a single entity without affecting other entities, If you want to reuse + * Reset a single entity without affecting other entities. If you want to reuse * the entity after reset, you need to re-initialize it. * * Context: Any context @@ -757,7 +756,7 @@ int ub_reset_entity(struct ub_entity *ent); * ub_device_reset() - Device level reset. * @ent: UB entity. * - * Reset Device, include all entities under the device, If you want to reuse + * Reset device, including all entities under the device. If you want to reuse * the device after reset, you need to re-initialize it. * * Context: Any context @@ -771,10 +770,10 @@ int ub_device_reset(struct ub_entity *ent); * @uent: UB entity. * @vdm_pld: Vendor private message payload context. * - * Send a vendor private message to the entity. Response will put in - * vdm_pld->rsp_pld, and will fill in vdm->rsp_pld_len. + * Send a vendor private message to the entity. Response will be put in + * vdm_pld->rsp_pld, and response length is stored in vdm->rsp_pld_len. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or %-EINVAL if parameters invalid, * or %-ENOMEM if system out of memory, or other failed negative values. */ @@ -796,10 +795,10 @@ unsigned int ub_irq_calc_affinity_vectors(unsigned int minvec, void ub_disable_intr(struct ub_entity *uent); /** - * ub_intr_vec_count() - Interrupt Vectors Supported by a entity. 
+ * ub_intr_vec_count() - Interrupt Vectors Supported by an entity. * @uent: UB entity. * - * Querying the Number of Interrupt Vectors Supported by a entity. + * Querying the Number of Interrupt Vectors Supported by an entity. * For interrupt type 2. * * Context: Any context. @@ -808,10 +807,10 @@ void ub_disable_intr(struct ub_entity *uent); u32 ub_intr_vec_count(struct ub_entity *uent); /** - * ub_int_type1_vec_count() - Interrupt Vectors Supported by a entity. + * ub_int_type1_vec_count() - Interrupt Vectors Supported by an entity. * @uent: UB entity. * - * Querying the Number of Interrupt Vectors Supported by a entity. + * Querying the Number of Interrupt Vectors Supported by an entity. * For interrupt type 1. * * Context: Any context. @@ -864,7 +863,7 @@ static inline int ub_alloc_irq_vectors(struct ub_entity *uent, int ub_irq_vector(struct ub_entity *uent, unsigned int nr); /** - * ub_irq_get_affinity() - Get a entity interrupt vector affinity + * ub_irq_get_affinity() - Get an entity interrupt vector affinity * @uent: the UB entity to operate on * @nr: entity-relative interrupt vector index (0-based); has different * meanings, depending on interrupt mode: @@ -884,7 +883,7 @@ const struct cpumask *ub_irq_get_affinity(struct ub_entity *uent, int nr); * @uent: UB entity. * @entity_idx: Number of the entity to be activated. * - * Context: Any context, It will take device_trylock()/device_unlock() + * Context: Any context, it will take device_trylock()/device_unlock() * Return: 0 if success, or %-EINVAL if the device doesn't match the driver, * or %-EBUSY if can't get device_trylock(), or other failed negative values. */ @@ -895,7 +894,7 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); * @uent: UB entity. * @entity_idx: Number of the entity to be deactivated. * - * Context: Any context, It will take device_trylock()/device_unlock() + * Context: Any context, it will take device_trylock()/device_unlock() * Return: 0 if success, or %-EINVAL if the entity doesn't match the driver, * or %-EBUSY if can't get device_trylock(), or other failed negative values. */ @@ -910,7 +909,7 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx); * Initiate configuration access to the specified address of the entity * configuration space and read 1 byte. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or negative value if failed. */ int ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val); @@ -925,7 +924,7 @@ int ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val); * Initiate configuration access to the specified address of the entity * configuration space and write 1 byte. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or negative value if failed. */ int ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val); @@ -937,7 +936,7 @@ int ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val); * @uent: UB entity pointer. * * Context: Any context. - * Return: uent, or NULL if @uent is NULL. + * Return: @uent itself, or NULL if @uent is NULL. */ struct ub_entity *ub_entity_get(struct ub_entity *uent); @@ -955,9 +954,9 @@ void ub_entity_put(struct ub_entity *uent); * @max_num: Buffer size. * @real_num: Real entities num. * - * All ub bus controllers in the system are returned. 
Increase the reference
- * counting of all entities by 1. Remember to call ub_put_bus_controller() after
- * using it.
+ * All ub bus controllers in the system are collected in @uents. Increase the
+ * reference counting of all entities by 1. Remember to call
+ * ub_put_bus_controller() after using it.
 *
 * Context: Any context.
 * Return: 0 if success, or %-EINVAL if input parameter is NULL,
@@ -1020,8 +1019,8 @@ void ub_unregister_driver(struct ub_driver *drv);
 * ub_stop_ent() - Stop the entity.
 * @uent: UB entity.
 *
- * Call device_release_driver(), user can't use it again, if it's a mue,
- * will stop all ues under it, if it's entity0, will stop all entity under it.
+ * Call device_release_driver(), user can't use it again. If it's a MUE,
+ * will stop all UEs under it. If it's entity0, will stop all entities under it.
 *
 * Context: Any context.
 */
@@ -1031,8 +1030,9 @@ void ub_stop_ent(struct ub_entity *uent);
 * ub_stop_and_remove_ent() - Stop and remove the entity from system.
 * @uent: UB entity.
 *
- * Call device_release_driver() and device_unregister(), if it's a mue,
- * will remove all ues under it, if it's entity0, will remove all entity under it.
+ * Call device_release_driver() and device_unregister(). If it's a MUE,
+ * will remove all UEs under it. If it's entity0, will remove all entities
+ * under it.
 *
 * Context: Any context.
 */
-- 
Gitee

From ebd7fc0aca8adc064b30c823da4e33420cdc7e75 Mon Sep 17 00:00:00 2001
From: Haibin Lu
Date: Wed, 24 Sep 2025 15:26:09 +0800
Subject: [PATCH 052/243] net: unic: Support driver dump register.

commit 8ca3dec3d53e6bd47e70f9f599ace2a704aaf85e openEuler

Previously, the driver contained only the framework of dump registers
and did not contain the specific register addresses of the BAR space.
This patch completes the function:
1. Add TLV-format data to the registers data.
2. Obtain register information from both the firmware and the BAR space.
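As background for reviewers: the register blob returned through the
ethtool regs interface is a flat sequence of TLV records, each one a
u16 tag plus a u16 len (header and u32 payload, counted in bytes)
followed by the register values. A minimal user-space sketch of walking
such a blob, assuming only the unic_tlv_hdr layout added by this patch
(the function name and the origin of the buffer are illustrative), could
look like:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Mirrors struct unic_tlv_hdr from this patch; len covers the header
   * itself plus the u32 register payload.
   */
  struct tlv_hdr {
  	uint16_t tag;
  	uint16_t len;
  };

  /* Walk a register blob (e.g. one captured via ethtool -d), printing
   * each tag and its register values.
   */
  static void walk_regs_blob(const uint8_t *buf, size_t size)
  {
  	size_t off = 0, i, nregs;
  	struct tlv_hdr hdr;
  	uint32_t val;

  	while (off + sizeof(hdr) <= size) {
  		memcpy(&hdr, buf + off, sizeof(hdr));
  		if (hdr.len < sizeof(hdr) || off + hdr.len > size)
  			break; /* malformed record, stop */

  		nregs = (hdr.len - sizeof(hdr)) / sizeof(uint32_t);
  		printf("tag %u: %zu regs\n", (unsigned int)hdr.tag, nregs);
  		for (i = 0; i < nregs; i++) {
  			memcpy(&val, buf + off + sizeof(hdr) +
  			       i * sizeof(val), sizeof(val));
  			printf("  0x%08x\n", val);
  		}
  		off += hdr.len;
  	}
  }

Because each record is self-describing, new register groups can be
appended later without breaking existing parsers.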
Signed-off-by: Jianqiang Li Signed-off-by: Xiongchuan Zhou Signed-off-by: Haibin Lu Signed-off-by: Junxin Chen Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_dev.h | 17 ++ drivers/net/ub/unic/unic_ethtool.c | 2 + drivers/net/ub/unic/unic_stats.c | 286 +++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_stats.h | 70 +++++++ include/ub/ubase/ubase_comm_cmd.h | 6 + 5 files changed, 381 insertions(+) diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index f0df795ad011..8654aa9a819e 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -262,6 +262,11 @@ static inline bool unic_dev_ubl_supported(struct unic_dev *unic_dev) return ubase_adev_ubl_supported(unic_dev->comdev.adev); } +static inline bool unic_dev_eth_mac_supported(struct unic_dev *unic_dev) +{ + return ubase_adev_eth_mac_supported(unic_dev->comdev.adev); +} + static inline bool unic_dev_ets_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_ETS_B); @@ -327,6 +332,18 @@ static inline bool unic_is_initing_or_resetting(struct unic_dev *unic_dev) return __unic_resetting(unic_dev) || __unic_initing(unic_dev); } +static inline u32 unic_read_reg(struct unic_dev *unic_dev, u32 reg) +{ + struct ubase_resource_space *io_base = ubase_get_io_base(unic_dev->comdev.adev); + u8 __iomem *reg_addr; + + if (!io_base) + return 0; + + reg_addr = READ_ONCE(io_base->addr); + return readl(reg_addr + reg); +} + static inline u32 unic_get_sq_cqe_mask(struct unic_dev *unic_dev) { return unic_dev->channels.sq_cqe_depth - 1; diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 9e50f0332d4e..474626833d89 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -122,6 +122,8 @@ static const struct ethtool_ops unic_ethtool_ops = { .get_link = unic_get_link_status, .get_link_ksettings = unic_get_link_ksettings, .get_drvinfo = unic_get_driver_info, + .get_regs_len = unic_get_regs_len, + .get_regs = unic_get_regs, .get_channels = unic_get_channels, .set_channels = unic_set_channels, .get_ringparam = unic_get_channels_param, diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index 8167bb6a6fca..dcefeab1bb2d 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -14,6 +14,292 @@ #include "unic_netdev.h" #include "unic_stats.h" +static u32 cmdq_regs_addr[] = { + UNIC_TX_CMDQ_DEPTH, + UNIC_TX_CMDQ_TAIL, + UNIC_TX_CMDQ_HEAD, + UNIC_RX_CMDQ_DEPTH, + UNIC_RX_CMDQ_TAIL, + UNIC_RX_CMDQ_HEAD, + UNIC_CMDQ_INT_GEN, + UNIC_CMDQ_INT_SCR, + UNIC_CMDQ_INT_MASK, + UNIC_CMDQ_INT_STS, +}; + +static u32 ctrlq_regs_addr[] = { + UNIC_TX_CTRLQ_DEPTH, + UNIC_TX_CTRLQ_TAIL, + UNIC_TX_CTRLQ_HEAD, + UNIC_RX_CTRLQ_DEPTH, + UNIC_RX_CTRLQ_TAIL, + UNIC_RX_CTRLQ_HEAD, + UNIC_CTRLQ_INT_GEN, + UNIC_CTRLQ_INT_SCR, + UNIC_CTRLQ_INT_MASK, + UNIC_CTRLQ_INT_STS, +}; + +static const struct unic_res_regs_group unic_res_reg_arr[] = { + { + UNIC_TAG_CMDQ, cmdq_regs_addr, ARRAY_SIZE(cmdq_regs_addr), + NULL + }, { + UNIC_TAG_CTRLQ, ctrlq_regs_addr, ARRAY_SIZE(ctrlq_regs_addr), + ubase_adev_ctrlq_supported + }, +}; + +static bool unic_dfx_reg_support(struct unic_dev *unic_dev, u32 property) +{ + if (((property & UBASE_SUP_UBL) && unic_dev_ubl_supported(unic_dev)) || + ((property & UBASE_SUP_ETH) && unic_dev_eth_mac_supported(unic_dev))) + return true; + + return false; +} + +static struct unic_dfx_regs_group unic_dfx_reg_arr[] = { + { + UNIC_REG_NUM_IDX_TA, 
UNIC_TAG_TA, UBASE_OPC_DFX_TA_REG, + UBASE_SUP_UBL_ETH, unic_dfx_reg_support + }, { + UNIC_REG_NUM_IDX_TP, UNIC_TAG_TP, UBASE_OPC_DFX_TP_REG, + UBASE_SUP_UBL_ETH, unic_dfx_reg_support + }, { + UNIC_REG_NUM_IDX_BA, UNIC_TAG_BA, UBASE_OPC_DFX_BA_REG, + UBASE_SUP_UBL_ETH, unic_dfx_reg_support + }, { + UNIC_REG_NUM_IDX_NL, UNIC_TAG_NL, UBASE_OPC_DFX_NL_REG, + UBASE_SUP_UBL_ETH, unic_dfx_reg_support + }, { + UNIC_REG_NUM_IDX_DL, UNIC_TAG_DL, UBASE_OPC_DFX_DL_REG, + UBASE_SUP_UBL, unic_dfx_reg_support + }, +}; + +static int unic_get_dfx_reg_num(struct unic_dev *unic_dev, u32 *reg_num, + u32 reg_arr_size) +{ + struct ubase_cmd_buf in, out; + int ret; + + ubase_fill_inout_buf(&in, UBASE_OPC_DFX_REG_NUM, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_DFX_REG_NUM, true, + reg_arr_size * sizeof(u32), reg_num); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret && ret != -EPERM) + unic_err(unic_dev, + "failed to query dfx reg num, ret = %d.\n", ret); + + return ret; +} + +static int unic_get_res_regs_len(struct unic_dev *unic_dev, + const struct unic_res_regs_group *reg_arr, + u32 reg_arr_size) +{ + u32 i, count = 0; + + for (i = 0; i < reg_arr_size; i++) { + if (reg_arr[i].is_supported && + !reg_arr[i].is_supported(unic_dev->comdev.adev)) + continue; + + count += reg_arr[i].regs_count * sizeof(u32) + + sizeof(struct unic_tlv_hdr); + } + + return count; +} + +static int unic_get_dfx_regs_len(struct unic_dev *unic_dev, + struct unic_dfx_regs_group *reg_arr, + u32 reg_arr_size, u32 *reg_num) +{ + u32 i, count = 0; + + for (i = 0; i < reg_arr_size; i++) { + if (!reg_arr[i].is_supported(unic_dev, reg_arr[i].property)) + continue; + + count += sizeof(struct unic_tlv_hdr) + sizeof(u32) * + reg_num[reg_arr[i].regs_idx]; + } + + return count; +} + +int unic_get_regs_len(struct net_device *netdev) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u32 reg_arr_size; + int count = 0; + u32 *reg_num; + int ret; + + if (unic_resetting(netdev)) + return -EBUSY; + + count += unic_get_res_regs_len(unic_dev, unic_res_reg_arr, + ARRAY_SIZE(unic_res_reg_arr)); + reg_arr_size = ARRAY_SIZE(unic_dfx_reg_arr); + reg_num = kcalloc(reg_arr_size, sizeof(u32), GFP_KERNEL); + if (!reg_num) + return -ENOMEM; + + ret = unic_get_dfx_reg_num(unic_dev, reg_num, reg_arr_size); + if (!ret) { + count += unic_get_dfx_regs_len(unic_dev, unic_dfx_reg_arr, + reg_arr_size, reg_num); + } else if (ret != -EPERM) { + unic_err(unic_dev, + "failed to get dfx regs length, ret = %d.\n", ret); + kfree(reg_num); + + return -EBUSY; + } + + kfree(reg_num); + + return count; +} + +static u16 unic_fetch_res_regs(struct unic_dev *unic_dev, u8 *data, u16 tag, + u32 *regs_addr_arr, u32 reg_num) +{ + struct unic_tlv_hdr *tlv = (struct unic_tlv_hdr *)data; + u32 *reg = (u32 *)(data + sizeof(struct unic_tlv_hdr)); + u32 i; + + tlv->tag = tag; + tlv->len = sizeof(struct unic_tlv_hdr) + reg_num * sizeof(u32); + + for (i = 0; i < reg_num; i++) + *reg++ = unic_read_reg(unic_dev, regs_addr_arr[i]); + + return tlv->len; +} + +static u32 unic_get_res_regs(struct unic_dev *unic_dev, u8 *data) +{ + u32 i, data_len = 0; + + for (i = 0; i < ARRAY_SIZE(unic_res_reg_arr); i++) { + if (unic_res_reg_arr[i].is_supported && + !unic_res_reg_arr[i].is_supported(unic_dev->comdev.adev)) + continue; + + data_len += unic_fetch_res_regs(unic_dev, data + data_len, + unic_res_reg_arr[i].tag, + unic_res_reg_arr[i].regs_addr, + unic_res_reg_arr[i].regs_count); + } + + return data_len; +} + +static int unic_query_regs_data(struct unic_dev *unic_dev, u8 
*data, + u32 reg_num, u16 opcode) +{ + u32 *reg = (u32 *)(data + sizeof(struct unic_tlv_hdr)); + struct ubase_cmd_buf in, out; + u32 *out_regs; + int ret; + u32 i; + + out_regs = kcalloc(reg_num, sizeof(u32), GFP_KERNEL); + if (!out_regs) + return -ENOMEM; + + ubase_fill_inout_buf(&in, opcode, true, 0, NULL); + ubase_fill_inout_buf(&out, opcode, true, reg_num * sizeof(u32), + out_regs); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + unic_err(unic_dev, + "failed to send getting reg cmd(0x%x), ret = %d.\n", + opcode, ret); + goto err_send_cmd; + } + + for (i = 0; i < reg_num; i++) + *reg++ = le32_to_cpu(*(out_regs + i)); + +err_send_cmd: + kfree(out_regs); + + return ret; +} + +static int unic_get_dfx_regs(struct unic_dev *unic_dev, u8 *data, + struct unic_dfx_regs_group *reg_arr, + u32 reg_arr_size, u32 *reg_num) +{ + struct unic_tlv_hdr *tlv; + u16 idx; + int ret; + u32 i; + + for (i = 0; i < reg_arr_size; i++) { + if (!reg_arr[i].is_supported(unic_dev, reg_arr[i].property)) + continue; + + idx = reg_arr[i].regs_idx; + ret = unic_query_regs_data(unic_dev, data, reg_num[idx], + reg_arr[i].opcode); + if (ret) { + unic_err(unic_dev, + "failed to query dfx regs, ret = %d.\n", ret); + return ret; + } + + tlv = (struct unic_tlv_hdr *)data; + tlv->tag = reg_arr[i].tag; + tlv->len = sizeof(*tlv) + reg_num[idx] * sizeof(u32); + data += tlv->len; + } + + return 0; +} + +void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, + void *data) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u8 *pdata = (u8 *)data; + u32 reg_arr_size; + u32 *reg_num; + int ret; + + if (unic_resetting(netdev)) { + unic_err(unic_dev, "dev resetting, could not get regs.\n"); + return; + } + + reg_arr_size = ARRAY_SIZE(unic_dfx_reg_arr); + reg_num = kcalloc(reg_arr_size, sizeof(u32), GFP_KERNEL); + if (!reg_num) { + unic_err(unic_dev, "failed to alloc reg num array.\n"); + return; + } + + pdata += unic_get_res_regs(unic_dev, pdata); + ret = unic_get_dfx_reg_num(unic_dev, reg_num, reg_arr_size); + if (!ret) { + ret = unic_get_dfx_regs(unic_dev, pdata, unic_dfx_reg_arr, + reg_arr_size, reg_num); + if (ret) + unic_err(unic_dev, + "failed to get dfx regs, ret = %d.\n", ret); + } else if (ret != -EPERM) { + unic_err(unic_dev, + "failed to get dfx reg num, ret = %d.\n", ret); + } + + kfree(reg_num); +} + static void unic_get_fec_stats_total(struct unic_dev *unic_dev, u8 stats_flags, struct ethtool_fec_stats *fec_stats) { diff --git a/drivers/net/ub/unic/unic_stats.h b/drivers/net/ub/unic/unic_stats.h index d9d9f0b0ee3b..2a2a8746d838 100644 --- a/drivers/net/ub/unic/unic_stats.h +++ b/drivers/net/ub/unic/unic_stats.h @@ -16,6 +16,76 @@ #define UNIC_FEC_UNCORR_BLOCKS BIT(1) #define UNIC_FEC_CORR_BITS BIT(2) +#define UNIC_TX_CMDQ_DEPTH UBASE_CSQ_DEPTH_REG +#define UNIC_TX_CMDQ_TAIL UBASE_CSQ_TAIL_REG +#define UNIC_TX_CMDQ_HEAD UBASE_CSQ_HEAD_REG +#define UNIC_RX_CMDQ_DEPTH UBASE_CRQ_DEPTH_REG +#define UNIC_RX_CMDQ_TAIL UBASE_CRQ_TAIL_REG +#define UNIC_RX_CMDQ_HEAD UBASE_CRQ_HEAD_REG +#define UNIC_CMDQ_INT_GEN 0x18000 +#define UNIC_CMDQ_INT_SCR 0x18004 +#define UNIC_CMDQ_INT_MASK 0x18008 +#define UNIC_CMDQ_INT_STS 0x1800c + +#define UNIC_TX_CTRLQ_DEPTH UBASE_CTRLQ_CSQ_DEPTH_REG +#define UNIC_TX_CTRLQ_TAIL UBASE_CTRLQ_CSQ_TAIL_REG +#define UNIC_TX_CTRLQ_HEAD UBASE_CTRLQ_CSQ_HEAD_REG +#define UNIC_RX_CTRLQ_DEPTH UBASE_CTRLQ_CRQ_DEPTH_REG +#define UNIC_RX_CTRLQ_TAIL UBASE_CTRLQ_CRQ_TAIL_REG +#define UNIC_RX_CTRLQ_HEAD UBASE_CTRLQ_CRQ_HEAD_REG +#define UNIC_CTRLQ_INT_GEN 0x18010 
+#define UNIC_CTRLQ_INT_SCR 0x18014 +#define UNIC_CTRLQ_INT_MASK 0x18018 +#define UNIC_CTRLQ_INT_STS 0x1801c + +enum unic_reg_num_idx { + UNIC_REG_NUM_IDX_DL = 0, + UNIC_REG_NUM_IDX_NL, + UNIC_REG_NUM_IDX_BA, + UNIC_REG_NUM_IDX_TP, + UNIC_REG_NUM_IDX_TA, + UNIC_REG_NUM_IDX_MAX, +}; + +enum unic_reg_tag { + UNIC_TAG_CMDQ = 0, + UNIC_TAG_CTRLQ, + UNIC_TAG_DL, + UNIC_TAG_NL, + UNIC_TAG_BA, + UNIC_TAG_TP, + UNIC_TAG_TA, + UNIC_TAG_MAX, +}; + +struct unic_res_regs_group { + u16 tag; + u32 *regs_addr; + u32 regs_count; + bool (*is_supported)(struct auxiliary_device *adev); +}; + +struct unic_dump_reg_hdr { + u8 flag; + u8 rsv[3]; +}; + +struct unic_tlv_hdr { + u16 tag; + u16 len; +}; + +struct unic_dfx_regs_group { + u16 regs_idx; + u16 tag; + u16 opcode; + u32 property; + bool (*is_supported)(struct unic_dev *unic_dev, u32 property); +}; + +int unic_get_regs_len(struct net_device *netdev); +void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, + void *data); void unic_get_fec_stats(struct net_device *ndev, struct ethtool_fec_stats *fec_stats); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 311a309d10bd..4eb3c435a8f9 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -36,6 +36,12 @@ enum ubase_opcode_type { UBASE_OPC_CFG_MTU = 0x0033, UBASE_OPC_QUERY_NET_GUID = 0x0035, UBASE_OPC_STATS_MAC_ALL = 0x0038, + UBASE_OPC_DFX_REG_NUM = 0x0039, + UBASE_OPC_DFX_DL_REG = 0x0040, + UBASE_OPC_DFX_NL_REG = 0x0042, + UBASE_OPC_DFX_BA_REG = 0x0043, + UBASE_OPC_DFX_TP_REG = 0x0044, + UBASE_OPC_DFX_TA_REG = 0x0045, UBASE_OPC_QUERY_BUS_EID = 0x0047, UBASE_OPC_QUERY_UBCL_CONFIG = 0x0050, -- Gitee From dedaafc9147117b18adda39b32a8c1ff1f75cb2f Mon Sep 17 00:00:00 2001 From: Junxin Chen Date: Sat, 30 Aug 2025 15:20:46 +0800 Subject: [PATCH 053/243] net: unic: Add debugfs for JFS/JFR/JFC context. commit 615725081a2520ed66c0265c65964fd718f563dd openEuler This patch adds debugfs support for JFS, JFR, and JFC contexts in the UNIC driver. The new debugfs interface allows users to dump and inspect the software and hardware configurations of these contexts, including their states, registers, and related parameters. The implementation includes: 1. New debugfs files (unic_ctx_debugfs.c and unic_ctx_debugfs.h) to handle context dumping. 2. Extended debugfs commands to support JFS, JFR, and JFC contexts in both software and hardware modes. 3. Integration with the existing debugfs framework to expose these new debug features. This enhancement improves the debugging and troubleshooting capabilities for the UNIC driver by providing detailed visibility into the internal states of JFS, JFR, and JFC contexts. 
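Once applied, each context dump surfaces as a read-only seq_file, so
inspecting a table is a plain file read. A minimal user-space sketch
follows; the debugfs path is device-specific and the one shown here is
only an assumed example:

  #include <stdio.h>

  int main(void)
  {
  	/* Assumed path for illustration; the real location depends on
  	 * the ubase debugfs root created for the auxiliary device.
  	 */
  	const char *path =
  		"/sys/kernel/debug/ubase/<dev>/context/jfs_context";
  	char line[256];
  	FILE *f = fopen(path, "r");

  	if (!f) {
  		perror("fopen");
  		return 1;
  	}
  	while (fgets(line, sizeof(line), f))
  		fputs(line, stdout); /* title row, then one row per queue */
  	fclose(f);
  	return 0;
  }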
Signed-off-by: Peng Li
Signed-off-by: Xiongchuan Zhou
Signed-off-by: Haibin Lu
Signed-off-by: Junxin Chen
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/net/ub/unic/Makefile                  |   1 +
 .../net/ub/unic/debugfs/unic_ctx_debugfs.c    | 365 ++++++++++++++++++
 .../net/ub/unic/debugfs/unic_ctx_debugfs.h    |  19 +
 drivers/net/ub/unic/debugfs/unic_debugfs.c    |  62 +++
 drivers/net/ub/unic/debugfs/unic_debugfs.h    |   1 +
 5 files changed, 448 insertions(+)
 create mode 100644 drivers/net/ub/unic/debugfs/unic_ctx_debugfs.c
 create mode 100644 drivers/net/ub/unic/debugfs/unic_ctx_debugfs.h

diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile
index 1419ee569595..67ecd0ad8c11 100644
--- a/drivers/net/ub/unic/Makefile
+++ b/drivers/net/ub/unic/Makefile
@@ -9,4 +9,5 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs
 obj-$(CONFIG_UB_UNIC) += unic.o
 unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o
 unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o
+unic-objs += debugfs/unic_ctx_debugfs.o
 unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o
diff --git a/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.c b/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.c
new file mode 100644
index 000000000000..fc0d19fee038
--- /dev/null
+++ b/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved.
+ *
+ */
+
+#include
+#include
+#include
+#include
+
+#include "unic_ctx_debugfs.h"
+#include "unic_debugfs.h"
+#include "unic_dev.h"
+
+static inline void unic_jfs_ctx_titles_print(struct seq_file *s)
+{
+	seq_puts(s, "SQ_ID SQE_BB_SHIFT STATE JFS_MODE TX_JFCN\n");
+}
+
+static void unic_dump_jfs_ctx_info_sw(struct unic_sq *sq, struct seq_file *s,
+				      u32 index)
+{
+	struct unic_jfs_ctx *ctx = &sq->jfs_ctx;
+
+	seq_printf(s, "%-7u", index);
+	seq_printf(s, "%-14u", ctx->sqe_bb_shift);
+	seq_printf(s, "%-7u", ctx->state);
+	seq_printf(s, "%-10u", ctx->jfs_mode);
+	seq_printf(s, "%-9u\n", ctx->tx_jfcn);
+}
+
+static inline void unic_jfr_ctx_titles_print(struct seq_file *s)
+{
+	seq_puts(s, "RQ_ID STATE RQE_SHIFT RX_JFCN PI CI ");
+	seq_puts(s, "RECORD_DB_EN\n");
+}
+
+static void unic_dump_jfr_ctx_info_sw(struct unic_rq *rq, struct seq_file *s,
+				      u32 index)
+{
+	struct unic_jfr_ctx *ctx = &rq->jfr_ctx;
+	u32 jfcn;
+
+	jfcn = ctx->jfcn_l | (ctx->jfcn_h << UNIC_JFR_JFCN_H_OFFSET);
+
+	seq_printf(s, "%-7u", index);
+	seq_printf(s, "%-7u", ctx->state);
+	seq_printf(s, "%-11u", ctx->rqe_shift);
+	seq_printf(s, "%-9u", jfcn);
+	seq_printf(s, "%-7u", ctx->pi);
+	seq_printf(s, "%-7u", ctx->ci);
+	seq_printf(s, "%-14u\n", ctx->record_db_en);
+}
+
+static inline void unic_jfc_ctx_titles_print(struct seq_file *s)
+{
+	seq_puts(s, "CQ_ID ARM_ST STATE INLINE_EN SHIFT CQE_COAL_CNT ");
+	seq_puts(s, "CEQN RECORD_DB_EN CQE_COAL_PERIOD\n");
+}
+
+static void unic_dump_jfc_ctx_info_sw(struct unic_cq *cq, struct seq_file *s,
+				      u32 index)
+{
+	struct unic_jfc_ctx *ctx = &cq->jfc_ctx;
+
+	seq_printf(s, "%-7u", index);
+	seq_printf(s, "%-8u", ctx->arm_st);
+	seq_printf(s, "%-7u", ctx->state);
+	seq_printf(s, "%-11u", ctx->inline_en);
+	seq_printf(s, "%-7u", ctx->shift);
+	seq_printf(s, "%-14u", ctx->cqe_coalesce_cnt);
+	seq_printf(s, "%-6u", ctx->ceqn);
+	seq_printf(s, "%-14u", ctx->record_db_en);
+	seq_printf(s, "%-18u\n", ctx->cqe_coalesce_period);
+}
+
+static void
unic_get_jfs_ctx_sw(struct unic_channels *channels, + struct seq_file *s, u32 index) +{ + struct unic_channel *channel = &channels->c[index]; + + unic_dump_jfs_ctx_info_sw(channel->sq, s, index); +} + +static void unic_get_jfr_ctx_sw(struct unic_channels *channels, + struct seq_file *s, u32 index) +{ + struct unic_channel *channel = &channels->c[index]; + + unic_dump_jfr_ctx_info_sw(channel->rq, s, index); +} + +static void unic_get_sq_jfc_ctx_sw(struct unic_channels *channels, + struct seq_file *s, u32 index) +{ + struct unic_channel *channel = &channels->c[index]; + + unic_dump_jfc_ctx_info_sw(channel->sq->cq, s, index); +} + +static void unic_get_rq_jfc_ctx_sw(struct unic_channels *channels, + struct seq_file *s, u32 index) +{ + struct unic_channel *channel = &channels->c[index]; + + unic_dump_jfc_ctx_info_sw(channel->rq->cq, s, index); +} + +enum unic_dbg_ctx_type { + UNIC_DBG_JFS_CTX = 0, + UNIC_DBG_JFR_CTX, + UNIC_DBG_SQ_JFC_CTX, + UNIC_DBG_RQ_JFC_CTX, +}; + +static int unic_dbg_dump_ctx_sw(struct seq_file *s, void *data, + enum unic_dbg_ctx_type ctx_type) +{ + struct unic_dbg_context { + void (*print_ctx_titles)(struct seq_file *s); + void (*get_ctx)(struct unic_channels *channels, struct seq_file *s, u32 index); + } dbg_ctx[] = { + {unic_jfs_ctx_titles_print, unic_get_jfs_ctx_sw}, + {unic_jfr_ctx_titles_print, unic_get_jfr_ctx_sw}, + {unic_jfc_ctx_titles_print, unic_get_sq_jfc_ctx_sw}, + {unic_jfc_ctx_titles_print, unic_get_rq_jfc_ctx_sw}, + }; + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + int ret = 0; + u32 i; + + dbg_ctx[ctx_type].print_ctx_titles(s); + + if (!mutex_trylock(&unic_dev->channels.mutex)) + return -EBUSY; + + if (__unic_resetting(unic_dev) || !unic_dev->channels.c) { + ret = -EBUSY; + goto out; + } + + for (i = 0; i < unic_dev->channels.num; i++) + dbg_ctx[ctx_type].get_ctx(&unic_dev->channels, s, i); + +out: + mutex_unlock(&unic_dev->channels.mutex); + + return ret; +} + +int unic_dbg_dump_jfs_ctx_sw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_ctx_sw(s, data, UNIC_DBG_JFS_CTX); +} + +int unic_dbg_dump_jfr_ctx_sw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_ctx_sw(s, data, UNIC_DBG_JFR_CTX); +} + +int unic_dbg_dump_rq_jfc_ctx_sw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_ctx_sw(s, data, UNIC_DBG_RQ_JFC_CTX); +} + +int unic_dbg_dump_sq_jfc_ctx_sw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_ctx_sw(s, data, UNIC_DBG_SQ_JFC_CTX); +} + +struct unic_ctx_info { + u32 start_idx; + u32 ctx_size; + u8 op; + const char *ctx_name; +}; + +static int unic_get_ctx_info(struct unic_dev *unic_dev, + enum unic_dbg_ctx_type ctx_type, + struct unic_ctx_info *ctx_info) +{ + struct ubase_adev_caps *unic_caps = ubase_get_unic_caps(unic_dev->comdev.adev); + + if (!unic_caps) { + unic_err(unic_dev, "failed to get unic caps.\n"); + return -ENODATA; + } + + switch (ctx_type) { + case UNIC_DBG_JFS_CTX: + ctx_info->start_idx = unic_caps->jfs.start_idx; + ctx_info->ctx_size = UBASE_JFS_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_JFS_CONTEXT; + ctx_info->ctx_name = "jfs"; + break; + case UNIC_DBG_JFR_CTX: + ctx_info->start_idx = unic_caps->jfr.start_idx; + ctx_info->ctx_size = UBASE_JFR_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_JFR_CONTEXT; + ctx_info->ctx_name = "jfr"; + break; + case UNIC_DBG_SQ_JFC_CTX: + ctx_info->start_idx = unic_caps->jfc.start_idx; + ctx_info->ctx_size = UBASE_JFC_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_JFC_CONTEXT; + ctx_info->ctx_name = "sq_jfc"; + break; + case UNIC_DBG_RQ_JFC_CTX: + 
ctx_info->start_idx = unic_caps->jfc.start_idx + + unic_dev->channels.num; + ctx_info->ctx_size = UBASE_JFC_CTX_SIZE; + ctx_info->op = UBASE_MB_QUERY_JFC_CONTEXT; + ctx_info->ctx_name = "rq_jfc"; + break; + default: + unic_err(unic_dev, "failed to get ctx info, ctx_type = %u.\n", + ctx_type); + return -ENODATA; + } + + return 0; +} + +static void unic_mask_jfs_ctx_key_words(void *buf) +{ + struct unic_jfs_ctx *jfs = (struct unic_jfs_ctx *)buf; + + jfs->sqe_token_id_l = 0; + jfs->sqe_token_id_h = 0; + jfs->sqe_base_addr_l = 0; + jfs->sqe_base_addr_h = 0; + jfs->sqe_pld_tokenid = 0; + jfs->rmt_tokenid = 0; + jfs->user_data_l = 0; + jfs->user_data_h = 0; +} + +static void unic_mask_jfr_ctx_key_words(void *buf) +{ + struct unic_jfr_ctx *jfr = (struct unic_jfr_ctx *)buf; + + jfr->rqe_token_id_l = 0; + jfr->rqe_token_id_h = 0; + jfr->rqe_base_addr_l = 0; + jfr->rqe_base_addr_h = 0; + jfr->pld_token_id = 0; + jfr->token_value = 0; + jfr->user_data_l = 0; + jfr->user_data_h = 0; + jfr->idx_que_addr_l = 0; + jfr->idx_que_addr_h = 0; + jfr->record_db_addr_l = 0; + jfr->record_db_addr_m = 0; + jfr->record_db_addr_h = 0; +} + +static void unic_mask_jfc_ctx_key_words(void *buf) +{ + struct unic_jfc_ctx *jfc = (struct unic_jfc_ctx *)buf; + + jfc->cqe_base_addr_l = 0; + jfc->cqe_base_addr_h = 0; + jfc->queue_token_id = 0; + jfc->record_db_addr_l = 0; + jfc->record_db_addr_h = 0; + jfc->rmt_token_id = 0; + jfc->remote_token_value = 0; +} + +static void unic_mask_ctx_key_words(void *buf, + enum unic_dbg_ctx_type ctx_type) +{ + switch (ctx_type) { + case UNIC_DBG_JFS_CTX: + unic_mask_jfs_ctx_key_words(buf); + break; + case UNIC_DBG_JFR_CTX: + unic_mask_jfr_ctx_key_words(buf); + break; + case UNIC_DBG_SQ_JFC_CTX: + case UNIC_DBG_RQ_JFC_CTX: + unic_mask_jfc_ctx_key_words(buf); + break; + default: + break; + } +} + +static int unic_dbg_dump_context_hw(struct seq_file *s, void *data, + enum unic_dbg_ctx_type ctx_type) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_ctx_info ctx_info = {0}; + struct ubase_cmd_mailbox *mailbox; + struct ubase_mbx_attr attr = {0}; + int ret = 0; + u32 i; + + if (!mutex_trylock(&unic_dev->channels.mutex)) + return -EBUSY; + + if (__unic_resetting(unic_dev) || + !unic_dev->channels.c) { + ret = -EBUSY; + goto channel_ready_err; + } + + mailbox = ubase_alloc_cmd_mailbox(adev); + if (IS_ERR_OR_NULL(mailbox)) { + unic_err(unic_dev, "failed to alloc mailbox for dump context.\n"); + ret = -ENOMEM; + goto channel_ready_err; + } + + ret = unic_get_ctx_info(unic_dev, ctx_type, &ctx_info); + if (ret) + goto upgrade_ctx_err; + + for (i = 0; i < unic_dev->channels.num; i++) { + ubase_fill_mbx_attr(&attr, i + ctx_info.start_idx, ctx_info.op, + 0); + ret = ubase_hw_upgrade_ctx_ex(adev, &attr, mailbox); + if (ret) { + unic_err(unic_dev, + "failed to post query %s ctx mbx, ret = %d.\n", + ctx_info.ctx_name, ret); + goto upgrade_ctx_err; + } + + seq_printf(s, "offset\t%s", ctx_info.ctx_name); + seq_printf(s, "%u\n", i); + unic_mask_ctx_key_words(mailbox->buf, ctx_type); + ubase_print_context_hw(s, mailbox->buf, ctx_info.ctx_size); + seq_puts(s, "\n"); + } + +upgrade_ctx_err: + ubase_free_cmd_mailbox(adev, mailbox); +channel_ready_err: + mutex_unlock(&unic_dev->channels.mutex); + + return ret; +} + +int unic_dbg_dump_jfs_context_hw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_context_hw(s, data, UNIC_DBG_JFS_CTX); +} + +int unic_dbg_dump_jfr_context_hw(struct seq_file *s, void *data) +{ + return 
unic_dbg_dump_context_hw(s, data, UNIC_DBG_JFR_CTX); +} + +int unic_dbg_dump_sq_jfc_context_hw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_context_hw(s, data, UNIC_DBG_SQ_JFC_CTX); +} + +int unic_dbg_dump_rq_jfc_context_hw(struct seq_file *s, void *data) +{ + return unic_dbg_dump_context_hw(s, data, UNIC_DBG_RQ_JFC_CTX); +} diff --git a/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.h b/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.h new file mode 100644 index 000000000000..cb6b08c80d8e --- /dev/null +++ b/drivers/net/ub/unic/debugfs/unic_ctx_debugfs.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UNIC_CTX_DEBUGFS_H__ +#define __UNIC_CTX_DEBUGFS_H__ + +int unic_dbg_dump_jfs_ctx_sw(struct seq_file *s, void *data); +int unic_dbg_dump_jfr_ctx_sw(struct seq_file *s, void *data); +int unic_dbg_dump_sq_jfc_ctx_sw(struct seq_file *s, void *data); +int unic_dbg_dump_rq_jfc_ctx_sw(struct seq_file *s, void *data); +int unic_dbg_dump_jfs_context_hw(struct seq_file *s, void *data); +int unic_dbg_dump_jfr_context_hw(struct seq_file *s, void *data); +int unic_dbg_dump_sq_jfc_context_hw(struct seq_file *s, void *data); +int unic_dbg_dump_rq_jfc_context_hw(struct seq_file *s, void *data); + +#endif diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 112e64c25a34..29fc8917eaa2 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -11,6 +11,7 @@ #include #include "unic_dev.h" +#include "unic_ctx_debugfs.h" #include "unic_hw.h" #include "unic_debugfs.h" @@ -198,6 +199,11 @@ static bool unic_dbg_dentry_support(struct device *dev, u32 property) } static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { + { + .name = "context", + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + }, /* keep unic at the bottom and add new directory above */ { .name = "unic", @@ -208,6 +214,34 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { { + .name = "jfs_context", + .dentry_index = UNIC_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_jfs_ctx_sw, + }, { + .name = "jfr_context", + .dentry_index = UNIC_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_jfr_ctx_sw, + }, { + .name = "sq_jfc_context", + .dentry_index = UNIC_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_sq_jfc_ctx_sw, + }, { + .name = "rq_jfc_context", + .dentry_index = UNIC_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_rq_jfc_ctx_sw, + }, { .name = "dev_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, @@ -228,6 +262,34 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_page_pool_info, + }, { + .name = "jfs_context_hw", + .dentry_index = UNIC_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = 
unic_dbg_dentry_support,
+		.init = ubase_dbg_seq_file_init,
+		.read_func = unic_dbg_dump_jfs_context_hw,
+	}, {
+		.name = "jfr_context_hw",
+		.dentry_index = UNIC_DBG_DENTRY_CONTEXT,
+		.property = UBASE_SUP_UNIC | UBASE_SUP_UBL,
+		.support = unic_dbg_dentry_support,
+		.init = ubase_dbg_seq_file_init,
+		.read_func = unic_dbg_dump_jfr_context_hw,
+	}, {
+		.name = "sq_jfc_context_hw",
+		.dentry_index = UNIC_DBG_DENTRY_CONTEXT,
+		.property = UBASE_SUP_UNIC | UBASE_SUP_UBL,
+		.support = unic_dbg_dentry_support,
+		.init = ubase_dbg_seq_file_init,
+		.read_func = unic_dbg_dump_sq_jfc_context_hw,
+	}, {
+		.name = "rq_jfc_context_hw",
+		.dentry_index = UNIC_DBG_DENTRY_CONTEXT,
+		.property = UBASE_SUP_UNIC | UBASE_SUP_UBL,
+		.support = unic_dbg_dentry_support,
+		.init = ubase_dbg_seq_file_init,
+		.read_func = unic_dbg_dump_rq_jfc_context_hw,
 	}, {
 		.name = "rss_cfg_hw",
 		.dentry_index = UNIC_DBG_DENTRY_ROOT,
diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h
index 6efd739d2c5b..1afba40382d6 100644
--- a/drivers/net/ub/unic/debugfs/unic_debugfs.h
+++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h
@@ -13,6 +13,7 @@
 #define unic_get_ubase_root_dentry(adev) ubase_diag_debugfs_root(adev)
 
 enum unic_dbg_dentry_type {
+	UNIC_DBG_DENTRY_CONTEXT,
 	/* must be the last entry. */
 	UNIC_DBG_DENTRY_ROOT
 };
-- 
Gitee

From 6c5be793f8518df620f5fc23267555fbfbe241ca Mon Sep 17 00:00:00 2001
From: Haibin Lu
Date: Thu, 21 Aug 2025 21:50:58 +0800
Subject: [PATCH 054/243] net: unic: support querying and configuring coalesce
 parameters.

commit e0ccc63cc72ec634fffe4107cd60e620c462ad0e openEuler

Coalesce parameters determine the interrupt reporting frequency, which
can be adjusted in different scenarios to achieve service objectives.
To adapt to the UB protocol, this patch adds the capability of querying
and configuring coalesce parameters to the UNIC driver. Users can use
ethtool to query or configure the coalesce parameters.
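Note for users: the device only supports a discrete set of CQE
coalescing periods, so rx-usecs/tx-usecs values passed to ethtool -C
must land exactly on one of those steps. A standalone sketch of that
validation, assuming the UNIC_CQE_PERIOD_* constants encode 0 and the
powers of four up to 16384 (as the "must be power of 4" error message
suggests), could be:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Mirrors the table in unic_cqe_period_round_down(): a requested
   * period is usable only if it equals one of the supported steps.
   */
  static bool cqe_period_is_valid(uint16_t usecs)
  {
  	static const uint16_t periods[] = {
  		0, 4, 16, 64, 256, 1024, 4096, 16384,
  	};
  	size_t i;

  	for (i = 0; i < sizeof(periods) / sizeof(periods[0]); i++)
  		if (usecs == periods[i])
  			return true;
  	return false;
  }

With such a check, "ethtool -C ublx rx-usecs 64" passes while
"rx-usecs 100" is rejected with -EINVAL.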
Signed-off-by: Fengyan Mu Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_dev.c | 24 ++++ drivers/net/ub/unic/unic_dev.h | 1 + drivers/net/ub/unic/unic_ethtool.c | 171 +++++++++++++++++++++++++++++ 3 files changed, 196 insertions(+) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index a0ea8edee2ef..f039151844c2 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -293,6 +293,30 @@ static void unic_uninit_channels_attr(struct unic_dev *unic_dev) mutex_destroy(&channels->mutex); } +u16 unic_cqe_period_round_down(u16 cqe_period) +{ + u16 period[] = { + UNIC_CQE_PERIOD_0, + UNIC_CQE_PERIOD_4, + UNIC_CQE_PERIOD_16, + UNIC_CQE_PERIOD_64, + UNIC_CQE_PERIOD_256, + UNIC_CQE_PERIOD_1024, + UNIC_CQE_PERIOD_4096, + UNIC_CQE_PERIOD_16384, + UNIC_CQE_PERIOD_ERR + }; + u16 i; + + for (i = 0; i < ARRAY_SIZE(period) - 1; i++) { + if (cqe_period >= period[i] && + cqe_period < period[i + 1]) + return period[i]; + } + + return UNIC_CQE_PERIOD_ERR; +} + int unic_init_tx(struct unic_dev *unic_dev, u32 num) { struct unic_channel *c; diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 8654aa9a819e..f014164c8e31 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -247,6 +247,7 @@ void unic_start_period_task(struct net_device *netdev); void unic_remove_period_task(struct unic_dev *unic_dev); int unic_init_wq(void); void unic_destroy_wq(void); +u16 unic_cqe_period_round_down(u16 cqe_period); int unic_init_rx(struct unic_dev *unic_dev, u32 num); int unic_init_tx(struct unic_dev *unic_dev, u32 num); void unic_destroy_rx(struct unic_dev *unic_dev, u32 num); diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 474626833d89..eb90121ad6fb 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -109,6 +109,175 @@ static int unic_set_fecparam(struct net_device *ndev, return 0; } +static int unic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_coalesce *tx_coal = &unic_dev->channels.unic_coal.tx_coal; + struct unic_coalesce *rx_coal = &unic_dev->channels.unic_coal.rx_coal; + + if (unic_resetting(netdev)) + return -EBUSY; + + cmd->tx_coalesce_usecs = tx_coal->int_gl; + cmd->rx_coalesce_usecs = rx_coal->int_gl; + + cmd->tx_max_coalesced_frames = tx_coal->int_ql; + cmd->rx_max_coalesced_frames = rx_coal->int_ql; + + return 0; +} + +static int unic_check_gl_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u32 rx_gl, tx_gl; + + if (cmd->rx_coalesce_usecs > unic_dev->caps.max_int_gl) { + unic_err(unic_dev, + "invalid rx-usecs value, rx-usecs range is [0, %u].\n", + unic_dev->caps.max_int_gl); + return -EINVAL; + } + + if (cmd->tx_coalesce_usecs > unic_dev->caps.max_int_gl) { + unic_err(unic_dev, + "invalid tx-usecs value, tx-usecs range is [0, %u].\n", + unic_dev->caps.max_int_gl); + return -EINVAL; + } + + rx_gl = unic_cqe_period_round_down(cmd->rx_coalesce_usecs); + if (rx_gl != cmd->rx_coalesce_usecs) { + unic_err(unic_dev, + "invalid rx_usecs(%u), because it must be power of 4.\n", + cmd->rx_coalesce_usecs); + return -EINVAL; + } + + tx_gl = unic_cqe_period_round_down(cmd->tx_coalesce_usecs); + if (tx_gl != cmd->tx_coalesce_usecs) { + 
unic_err(unic_dev,
+			 "invalid tx_usecs(%u), because it must be power of 4.\n",
+			 cmd->tx_coalesce_usecs);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int unic_check_ql_coalesce_para(struct net_device *netdev,
+				       struct ethtool_coalesce *cmd)
+{
+	struct unic_dev *unic_dev = netdev_priv(netdev);
+
+	if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
+	    !unic_dev->caps.max_int_ql) {
+		unic_err(unic_dev, "coalesced frames is not supported.\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (cmd->tx_max_coalesced_frames > unic_dev->caps.max_int_ql ||
+	    cmd->rx_max_coalesced_frames > unic_dev->caps.max_int_ql) {
+		unic_err(unic_dev,
+			 "invalid coalesced frames value, range is [0, %u].\n",
+			 unic_dev->caps.max_int_ql);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static int
+unic_check_coalesce_para(struct net_device *netdev,
+			 struct ethtool_coalesce *cmd,
+			 struct kernel_ethtool_coalesce *kernel_coal)
+{
+	struct unic_dev *unic_dev = netdev_priv(netdev);
+	int ret;
+
+	if (cmd->use_adaptive_rx_coalesce || cmd->use_adaptive_tx_coalesce) {
+		unic_err(unic_dev,
+			 "not support to enable adaptive coalesce.\n");
+		return -EINVAL;
+	}
+
+	ret = unic_check_gl_coalesce_para(netdev, cmd);
+	if (ret) {
+		unic_err(unic_dev,
+			 "failed to check gl coalesce param, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = unic_check_ql_coalesce_para(netdev, cmd);
+	if (ret)
+		unic_err(unic_dev,
+			 "failed to check ql coalesce param, ret = %d.\n", ret);
+
+	return ret;
+}
+
+static int unic_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd,
+			     struct kernel_ethtool_coalesce *kernel_coal,
+			     struct netlink_ext_ack *extack)
+{
+	struct unic_dev *unic_dev = netdev_priv(netdev);
+	struct unic_coal_txrx *unic_coal = &unic_dev->channels.unic_coal;
+	struct unic_coalesce *tx_coal = &unic_coal->tx_coal;
+	struct unic_coalesce *rx_coal = &unic_coal->rx_coal;
+	struct unic_coalesce old_tx_coal, old_rx_coal;
+	int ret, ret1;
+
+	if (test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) {
+		unic_err(unic_dev,
+			 "failed to set coalesce, due to dev deactivated.\n");
+		return -EBUSY;
+	}
+
+	if (unic_resetting(netdev))
+		return -EBUSY;
+
+	ret = unic_check_coalesce_para(netdev, cmd, kernel_coal);
+	if (ret)
+		return ret;
+
+	memcpy(&old_tx_coal, tx_coal, sizeof(struct unic_coalesce));
+	memcpy(&old_rx_coal, rx_coal, sizeof(struct unic_coalesce));
+
+	tx_coal->int_gl = cmd->tx_coalesce_usecs;
+	rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+	tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+	rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
+	unic_net_stop_no_link_change(netdev);
+	unic_uninit_channels(unic_dev);
+
+	ret = unic_init_channels(unic_dev, unic_dev->channels.num);
+	if (ret) {
+		netdev_err(netdev, "failed to init channels, ret = %d.\n", ret);
+		memcpy(tx_coal, &old_tx_coal, sizeof(struct unic_coalesce));
+		memcpy(rx_coal, &old_rx_coal, sizeof(struct unic_coalesce));
+		ret1 = unic_init_channels(unic_dev, unic_dev->channels.num);
+		if (ret1) {
+			unic_err(unic_dev,
+				 "failed to recover old channels, ret = %d.\n",
+				 ret1);
+			return ret;
+		}
+	}
+
+	ret1 = unic_net_open_no_link_change(netdev);
+	if (ret1)
+		unic_err(unic_dev, "failed to set net open, ret = %d.\n", ret1);
+
+	return ret;
+}
+
 #define UNIC_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
 			   ETHTOOL_RING_USE_TX_PUSH)
 #define UNIC_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
@@ -131,6 +300,8 @@ static const struct ethtool_ops unic_ethtool_ops = {
 	.get_fecparam = unic_get_fecparam,
 	.set_fecparam = unic_set_fecparam,
 	.get_fec_stats = unic_get_fec_stats,
+	.get_coalesce = unic_get_coalesce,
+	.set_coalesce = unic_set_coalesce,
 };
 
 void unic_set_ethtool_ops(struct net_device *netdev)
-- 
Gitee

From f8939c55495d8dcbbfc76cc9ef62661bf18b1921 Mon Sep 17 00:00:00 2001
From: Xiongchuan Zhou
Date: Sun, 24 Aug 2025 10:16:25 +0800
Subject: [PATCH 055/243] net: unic: Support to query and clear historical NIC
 link status information

commit 4420dfeed31d751757b64677336fe906295a7d51 openEuler

Add a debugfs query interface for the historical NIC link information,
including the accumulated link up/down counts and the latest ten NIC
link status changes. Also provide an interface to clear this
information from memory when the specified file is read (no parameter
is required).

Signed-off-by: Xiongchuan Zhou
---
 drivers/net/ub/unic/debugfs/unic_debugfs.c | 71 ++++++++++++++++++++++
 drivers/net/ub/unic/unic_dev.c             | 16 ++++-
 drivers/net/ub/unic/unic_dev.h             | 12 ++++
 drivers/net/ub/unic/unic_netdev.c          | 23 +++++++
 4 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c
index 29fc8917eaa2..e61e782b044d 100644
--- a/drivers/net/ub/unic/debugfs/unic_debugfs.c
+++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c
@@ -191,6 +191,63 @@ static int unic_dbg_dump_promisc_cfg_hw(struct seq_file *s, void *data)
 	return 0;
 }
 
+static int unic_dbg_query_link_record(struct seq_file *s, void *data)
+{
+	struct unic_dev *unic_dev = dev_get_drvdata(s->private);
+	struct unic_link_stats *record = &unic_dev->stats.link_record;
+	u8 cnt = 1, stats_cnt;
+	u64 total, idx;
+
+	mutex_lock(&record->lock);
+
+	seq_puts(s, "current time : ");
+	ubase_dbg_format_time(ktime_get_real_seconds(), s);
+	seq_printf(s, "\nlink up count : %llu\n", record->link_up_cnt);
+	seq_printf(s, "link down count : %llu\n", record->link_down_cnt);
+
+	total = record->link_up_cnt + record->link_down_cnt;
+	if (!total) {
+		seq_puts(s, "link change records : NA\n");
+		mutex_unlock(&record->lock);
+
+		return 0;
+	}
+
+	seq_puts(s, "link change records :\n");
+	seq_puts(s, "\tNo.\tTIME\t\t\t\tSTATUS\n");
+
+	stats_cnt = min(total, LINK_STAT_MAX_IDX);
+	while (cnt <= stats_cnt) {
+		total--;
+		idx = total % LINK_STAT_MAX_IDX;
+		seq_printf(s, "\t%-2d\t", cnt);
+		ubase_dbg_format_time(record->stats[idx].link_tv_sec, s);
+		seq_printf(s, "\t%s\n",
+			   record->stats[idx].link_status ?
"LINK UP" : "LINK DOWN"); + cnt++; + } + + mutex_unlock(&record->lock); + + return 0; +} + +static int unic_dbg_clear_link_record(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_link_stats *record = &unic_dev->stats.link_record; + + mutex_lock(&record->lock); + record->link_up_cnt = 0; + record->link_down_cnt = 0; + memset(record->stats, 0, sizeof(record->stats)); + mutex_unlock(&record->lock); + + seq_puts(s, "Link status records have been cleared!\n"); + + return 0; +} + static bool unic_dbg_dentry_support(struct device *dev, u32 property) { struct unic_dev *unic_dev = dev_get_drvdata(dev); @@ -304,6 +361,20 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_promisc_cfg_hw, + }, { + .name = "link_status_record", + .dentry_index = UNIC_DBG_DENTRY_ROOT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_query_link_record, + }, { + .name = "clear_link_status_record", + .dentry_index = UNIC_DBG_DENTRY_ROOT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_clear_link_record, } }; diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index f039151844c2..7ed3525837e9 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -574,6 +574,7 @@ static int unic_dev_init_mtu(struct unic_dev *unic_dev) static int unic_init_mac(struct unic_dev *unic_dev) { + struct unic_link_stats *record = &unic_dev->stats.link_record; struct unic_mac *mac = &unic_dev->hw.mac; int ret; @@ -601,9 +602,17 @@ static int unic_init_mac(struct unic_dev *unic_dev) return ret; } + mutex_init(&record->lock); return 0; } +static void unic_uninit_mac(struct unic_dev *unic_dev) +{ + struct unic_link_stats *record = &unic_dev->stats.link_record; + + mutex_destroy(&record->lock); +} + int unic_set_mtu(struct unic_dev *unic_dev, int new_mtu) { u16 max_frame_size; @@ -822,11 +831,11 @@ static int unic_init_netdev_priv(struct net_device *netdev, ret = unic_init_dev_addr(priv); if (ret) - goto err_uninit_vport; + goto unic_unint_mac; ret = unic_init_channels_attr(priv); if (ret) - goto err_uninit_vport; + goto unic_unint_mac; ret = unic_init_channels(priv, priv->channels.num); if (ret) { @@ -840,6 +849,8 @@ static int unic_init_netdev_priv(struct net_device *netdev, err_uninit_channels_attr: unic_uninit_channels_attr(priv); +unic_unint_mac: + unic_uninit_mac(priv); err_uninit_vport: unic_uninit_vport(priv); destroy_lock: @@ -854,6 +865,7 @@ static void unic_uninit_netdev_priv(struct net_device *netdev) unic_uninit_channels(priv); unic_uninit_channels_attr(priv); + unic_uninit_mac(priv); unic_uninit_vport(priv); mutex_destroy(&priv->act_info.mutex); } diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index f014164c8e31..af20b72168ee 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -182,8 +182,20 @@ struct unic_fec_stats { struct unic_fec_stats_item lane[UNIC_FEC_STATS_MAX_LANE]; }; +#define LINK_STAT_MAX_IDX 10U +struct unic_link_stats { + u64 link_up_cnt; + u64 link_down_cnt; + struct { + bool link_status; + time64_t link_tv_sec; + } stats[LINK_STAT_MAX_IDX]; + struct mutex lock; /* protects link record */ +}; + struct unic_stats { struct unic_fec_stats fec_stats; + struct 
unic_link_stats link_record;
 };
 
 struct unic_addr_tbl {
diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c
index d8680ff9f894..d2d213c90d0b 100644
--- a/drivers/net/ub/unic/unic_netdev.c
+++ b/drivers/net/ub/unic/unic_netdev.c
@@ -164,6 +164,27 @@ static int unic_net_up(struct net_device *netdev)
 	return 0;
 }
 
+static void unic_link_status_record(struct net_device *netdev, bool linkup)
+{
+	struct unic_dev *unic_dev = netdev_priv(netdev);
+	struct unic_link_stats *record = &unic_dev->stats.link_record;
+	u64 idx, total;
+
+	mutex_lock(&record->lock);
+
+	if (linkup)
+		record->link_up_cnt++;
+	else
+		record->link_down_cnt++;
+
+	total = record->link_up_cnt + record->link_down_cnt;
+	idx = (total - 1) % LINK_STAT_MAX_IDX;
+	record->stats[idx].link_tv_sec = ktime_get_real_seconds();
+	record->stats[idx].link_status = linkup;
+
+	mutex_unlock(&record->lock);
+}
+
 static void unic_clear_fec_stats(struct unic_dev *unic_dev)
 {
 	struct unic_fec_stats *fec_stats = &unic_dev->stats.fec_stats;
@@ -194,6 +215,8 @@ void unic_link_status_change(struct net_device *netdev, bool linkup)
 out:
 	if (netif_msg_link(unic_dev))
 		unic_info(unic_dev, "%s.\n", linkup ? "link up" : "link down");
+
+	unic_link_status_record(netdev, linkup);
 }
 
 void unic_link_status_update(struct unic_dev *unic_dev)
-- 
Gitee

From 7e8fb9a0d9bb951d7a137df21e8c7fcdb8c9e461 Mon Sep 17 00:00:00 2001
From: Haibin Lu
Date: Tue, 16 Sep 2025 10:22:41 +0800
Subject: [PATCH 056/243] net: unic: Driver supports UB entity reset.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit b395a4f7347c243a1ed6fdd02d8aa87a94af3c58 openEuler

In this patch, add a unified interface to trigger an ELR reset via
ubus, and adapt ethtool/tx_timeout/ubus to use this reset interface.
Now we can use "ethtool --reset ublx dedicated" or
"echo 1 > /sys/class/net/ublx/device/reset" to trigger an ELR reset.
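The new ethtool hook maps the generic reset flags onto a ubase reset
type through a small table and clears the bits it consumed from *flags.
A self-contained sketch of that lookup-and-clear idiom, using stand-in
definitions because the real enums live in the ethtool and ubase
headers, is:

  #include <stddef.h>
  #include <stdint.h>

  /* Stand-ins for the real ethtool/ubase definitions. */
  #define ETH_RESET_DEDICATED 0x0000ffffU
  enum reset_type { NO_RESET = 0, UE_RESET };

  struct reset_type_map {
  	uint32_t flags;
  	enum reset_type type;
  };

  static const struct reset_type_map map[] = {
  	{ ETH_RESET_DEDICATED, UE_RESET },
  };

  /* Return the reset type matching *flags and clear the consumed bits;
   * NO_RESET means the request is unsupported, as in unic_reset().
   */
  static enum reset_type lookup_reset(uint32_t *flags)
  {
  	size_t i;

  	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
  		if (map[i].flags == *flags) {
  			*flags &= ~map[i].flags;
  			return map[i].type;
  		}
  	}
  	return NO_RESET;
  }

Clearing only the consumed bits lets the ethtool core report back to
the user which parts of the requested reset were actually performed.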
Signed-off-by: Yixi Shen Signed-off-by: Xiongchuan Zhou Signed-off-by: Haibin Lu --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/unic_ethtool.c | 37 ++++++++ drivers/net/ub/unic/unic_event.c | 25 ++++++ drivers/net/ub/unic/unic_reset.c | 130 +++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_reset.h | 14 ++++ 5 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ub/unic/unic_reset.c create mode 100644 drivers/net/ub/unic/unic_reset.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 67ecd0ad8c11..7098242033eb 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,5 +9,5 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o -unic-objs += debugfs/unic_ctx_debugfs.o +unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index eb90121ad6fb..c9593ba74fe4 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -278,6 +278,42 @@ static int unic_set_coalesce(struct net_device *netdev, return ret; } +static const struct unic_reset_type_map unic_ethtool_reset_map[] = { + {ETH_RESET_DEDICATED, UBASE_UE_RESET}, +}; + +static int unic_reset(struct net_device *ndev, u32 *flags) +{ + enum ubase_reset_type reset_type = UBASE_NO_RESET; + struct unic_dev *unic_dev = netdev_priv(ndev); + enum ethtool_reset_flags reset_flags; + u32 i; + + if (unic_resetting(ndev)) { + unic_err(unic_dev, "failed to reset, due to dev resetting.\n"); + return -EBUSY; + } + + for (i = 0; i < ARRAY_SIZE(unic_ethtool_reset_map); i++) { + if (unic_ethtool_reset_map[i].reset_flags == *flags) { + reset_type = unic_ethtool_reset_map[i].reset_type; + reset_flags = unic_ethtool_reset_map[i].reset_flags; + break; + } + } + + if (reset_type == UBASE_NO_RESET) + return -EOPNOTSUPP; + + unic_info(unic_dev, + "ethtool setting reset type, type = %u.\n", reset_type); + + ubase_reset_event(unic_dev->comdev.adev, reset_type); + *flags &= ~reset_flags; + + return 0; +} + #define UNIC_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ ETHTOOL_RING_USE_TX_PUSH) #define UNIC_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ @@ -302,6 +338,7 @@ static const struct ethtool_ops unic_ethtool_ops = { .get_fec_stats = unic_get_fec_stats, .get_coalesce = unic_get_coalesce, .set_coalesce = unic_set_coalesce, + .reset = unic_reset, }; void unic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index bcd1e210f447..2f24cd423e2b 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -23,6 +23,7 @@ #include "unic_netdev.h" #include "unic_qos_hw.h" #include "unic_rack_ip.h" +#include "unic_reset.h" #include "unic_event.h" int unic_comp_handler(struct notifier_block *nb, unsigned long jfcn, void *data) @@ -52,6 +53,25 @@ int unic_comp_handler(struct notifier_block *nb, unsigned long jfcn, void *data) return 0; } +static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) +{ + if (link_up) + unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_UP; + else + unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_DOWN; +} + +static 
void unic_port_handler(struct auxiliary_device *adev, bool link_up) +{ + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev = unic_dev->comdev.netdev; + + if (!netif_running(netdev)) + return; + + unic_rack_port_reset(unic_dev, link_up); +} + static struct ubase_ctrlq_event_nb unic_ctrlq_events[] = { { .service_type = UBASE_CTRLQ_SER_TYPE_IP_ACL, @@ -140,6 +160,9 @@ int unic_register_event(struct auxiliary_device *adev) if (ret) goto unregister_crq; + ubase_port_register(adev, unic_port_handler); + ubase_reset_register(adev, unic_reset_handler); + return 0; unregister_crq: @@ -149,6 +172,8 @@ int unic_register_event(struct auxiliary_device *adev) void unic_unregister_event(struct auxiliary_device *adev) { + ubase_reset_unregister(adev); + ubase_port_unregister(adev); unic_unregister_ctrlq_event(adev, ARRAY_SIZE(unic_ctrlq_events)); unic_unregister_crq_event(adev, ARRAY_SIZE(unic_crq_events)); } diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c new file mode 100644 index 000000000000..ff1239cc5131 --- /dev/null +++ b/drivers/net/ub/unic/unic_reset.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include + +#include "unic_cmd.h" +#include "unic_dev.h" +#include "unic_hw.h" +#include "unic_netdev.h" +#include "unic_rack_ip.h" +#include "unic_reset.h" + +static void unic_dev_suspend(struct unic_dev *unic_dev) +{ + unic_uninit_channels(unic_dev); +} + +static void unic_reset_down(struct auxiliary_device *adev) +{ + struct unic_dev *priv = (struct unic_dev *)dev_get_drvdata(&adev->dev); + struct net_device *netdev = priv->comdev.netdev; + bool if_running; + int ret; + + if (!test_bit(UNIC_STATE_INITED, &priv->state) || + test_and_set_bit(UNIC_STATE_DISABLED, &priv->state)) { + unic_warn(priv, "failed to reset unic, device is not ready.\n"); + return; + } + + set_bit(UNIC_STATE_RESETTING, &priv->state); + if_running = netif_running(netdev); + + unic_info(priv, "unic reset start.\n"); + + unic_remove_period_task(priv); + + /* due to lack of cmdq when resetting, need to close promisc first, + * to prevent that concurrent deactivate event ubable to close promisc + * when resetting + */ + ret = unic_activate_promisc_mode(priv, false); + if (ret) + unic_warn(priv, "failed to close promisc, ret = %d.\n", ret); + else + set_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &priv->vport.state); + + rtnl_lock(); + ret = if_running ? 
unic_net_stop(netdev) : 0;
+	rtnl_unlock();
+	if (ret)
+		unic_err(priv, "failed to stop unic net, ret = %d.\n", ret);
+}
+
+static void unic_reset_uninit(struct auxiliary_device *adev)
+{
+	struct unic_dev *priv = (struct unic_dev *)dev_get_drvdata(&adev->dev);
+
+	if (!test_bit(UNIC_STATE_RESETTING, &priv->state))
+		return;
+
+	unic_dev_suspend(priv);
+}
+
+static int unic_dev_resume(struct unic_dev *unic_dev)
+{
+	int ret;
+
+	ret = unic_init_channels(unic_dev, unic_dev->channels.num);
+	if (ret)
+		unic_err(unic_dev, "failed to init channels, ret = %d.\n", ret);
+
+	return ret;
+}
+
+static void unic_reset_init(struct auxiliary_device *adev)
+{
+	struct unic_dev *priv = (struct unic_dev *)dev_get_drvdata(&adev->dev);
+	struct net_device *netdev = priv->comdev.netdev;
+	bool if_running;
+	int ret;
+
+	if (!test_bit(UNIC_STATE_RESETTING, &priv->state))
+		return;
+
+	ret = unic_dev_resume(priv);
+	if (ret)
+		goto err_unic_resume;
+
+	unic_query_rack_ip(adev);
+	unic_start_period_task(netdev);
+
+	if_running = netif_running(netdev);
+	clear_bit(UNIC_STATE_RESETTING, &priv->state);
+	clear_bit(UNIC_STATE_DISABLED, &priv->state);
+	rtnl_lock();
+	ret = if_running ? unic_net_open(netdev) : 0;
+	rtnl_unlock();
+	if (ret)
+		unic_err(priv, "failed to up net, ret = %d.\n", ret);
+
+	unic_info(priv, "unic reset done.\n");
+
+	return;
+
+err_unic_resume:
+	clear_bit(UNIC_STATE_RESETTING, &priv->state);
+	clear_bit(UNIC_STATE_DISABLED, &priv->state);
+}
+
+void unic_reset_handler(struct auxiliary_device *adev,
+			enum ubase_reset_stage stage)
+{
+	switch (stage) {
+	case UBASE_RESET_STAGE_DOWN:
+		unic_reset_down(adev);
+		break;
+	case UBASE_RESET_STAGE_UNINIT:
+		unic_reset_uninit(adev);
+		break;
+	case UBASE_RESET_STAGE_INIT:
+		unic_reset_init(adev);
+		break;
+	default:
+		break;
+	}
+}
diff --git a/drivers/net/ub/unic/unic_reset.h b/drivers/net/ub/unic/unic_reset.h
new file mode 100644
index 000000000000..4ce313ac58aa
--- /dev/null
+++ b/drivers/net/ub/unic/unic_reset.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved.
+ *
+ */
+
+#ifndef __UNIC_RESET_H__
+#define __UNIC_RESET_H__
+
+#include
+
+void unic_reset_handler(struct auxiliary_device *adev, enum ubase_reset_stage stage);
+
+#endif
-- 
Gitee

From 99f7cd515eaaec70f364c8e91351952acb2c9d4b Mon Sep 17 00:00:00 2001
From: Haibin Lu
Date: Sat, 30 Aug 2025 10:03:33 +0800
Subject: [PATCH 057/243] net: unic: Support RAS

commit 339b55a00771bd1379427c7afcf0b417c0e5b781 openEuler

RAS is an important feature of the system, and UB also supports the
RAS function. The UB protocol defines multiple types of RAS, and the
different types are processed in different ways to ensure system
stability.

This patch provides the UNIC driver's support for RAS. As an auxiliary
device of the UBASE driver, the UNIC driver registers the corresponding
RAS processing functions with UBASE. After receiving the corresponding
RAS, UBASE routes it to UNIC for processing.
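The handlers follow the usual embedded-notifier idiom: the
notifier_block sits inside a ubase_event_nb, so a callback can recover
the registration record, and its back pointer to the auxiliary device,
with container_of() before escalating to a reset. A compact user-space
sketch of that idiom, with stand-in types in place of the real kernel
ones, is:

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
  	((type *)((char *)(ptr) - offsetof(type, member)))

  struct notifier_block { int (*call)(struct notifier_block *nb); };

  /* Stand-in for struct ubase_event_nb: the notifier_block is embedded,
   * so the callback can recover the outer record and its back pointer.
   */
  struct event_nb {
  	int event_type;
  	struct notifier_block nb;
  	void *back;	/* the registering auxiliary device */
  };

  static int on_event(struct notifier_block *nb)
  {
  	struct event_nb *ev = container_of(nb, struct event_nb, nb);

  	printf("event %d for dev %p\n", ev->event_type, ev->back);
  	return 0;
  }

  int main(void)
  {
  	struct event_nb ev = { .event_type = 7, .nb = { on_event },
  			       .back = NULL };

  	return ev.nb.call(&ev.nb);
  }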
Signed-off-by: Yaoyao Tu Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_event.c | 73 +++++++++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 2f24cd423e2b..faa91a99badc 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -148,14 +148,81 @@ static int unic_register_crq_event(struct auxiliary_device *adev) return 0; } +static void unic_unregister_ae_event(struct auxiliary_device *adev, + u8 asyn_event_num) +{ + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + u8 i; + + for (i = 0; i < asyn_event_num; i++) + ubase_event_unregister(adev, &unic_dev->ae_nbs[i]); +} + +static int unic_ae_jetty_level_error(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct ubase_event_nb *ev_nb = container_of(nb, + struct ubase_event_nb, nb); + struct auxiliary_device *adev = (struct auxiliary_device *)ev_nb->back; + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + struct ubase_aeq_notify_info *info = data; + u32 queue_num; + + /* Normally, UNIC does not report such abnormal events, + * but the reset handling is kept here so that the driver + * remains extensible. + */ + queue_num = info->aeqe->event.queue_event.num; + unic_err(unic_dev, + "recv jetty level error, event_type = 0x%x, sub_type = 0x%x, queue_num = %u.\n", + info->event_type, info->sub_type, queue_num); + + ubase_reset_event(adev, UBASE_UE_RESET); + + return 0; +} + +static int unic_register_ae_event(struct auxiliary_device *adev) +{ + struct ubase_event_nb unic_ae_nbs[UNIC_AE_LEVEL_NUM] = { + { + UBASE_DRV_UNIC, + UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR, + { unic_ae_jetty_level_error }, + adev + }, + }; + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + int ret; + u8 i; + + for (i = 0; i < ARRAY_SIZE(unic_ae_nbs); i++) { + unic_dev->ae_nbs[i] = unic_ae_nbs[i]; + ret = ubase_event_register(adev, &unic_dev->ae_nbs[i]); + if (ret) { + dev_err(adev->dev.parent, + "failed to register async event[%u], ret = %d.\n", + unic_dev->ae_nbs[i].event_type, ret); + unic_unregister_ae_event(adev, i); + return ret; + } + } + + return ret; +} + int unic_register_event(struct auxiliary_device *adev) { int ret; - ret = unic_register_crq_event(adev); + ret = unic_register_ae_event(adev); if (ret) return ret; + ret = unic_register_crq_event(adev); + if (ret) + goto unregister_ae; + ret = unic_register_ctrlq_event(adev); if (ret) goto unregister_crq; @@ -167,6 +234,9 @@ int unic_register_event(struct auxiliary_device *adev) unregister_crq: unic_unregister_crq_event(adev, ARRAY_SIZE(unic_crq_events)); +unregister_ae: + unic_unregister_ae_event(adev, UNIC_AE_LEVEL_NUM); + return ret; } @@ -176,4 +246,5 @@ void unic_unregister_event(struct auxiliary_device *adev) { ubase_reset_unregister(adev); ubase_port_unregister(adev); unic_unregister_ctrlq_event(adev, ARRAY_SIZE(unic_ctrlq_events)); unic_unregister_crq_event(adev, ARRAY_SIZE(unic_crq_events)); + unic_unregister_ae_event(adev, UNIC_AE_LEVEL_NUM); } -- Gitee From 32d20be61e78c27815ae3702ee477c94978d6c1d Mon Sep 17 00:00:00 2001 From: Haibin Lu Date: Tue, 16 Sep 2025 11:06:51 +0800 Subject: [PATCH 058/243] net: unic: support config/query the mapping between dscp and tc commit c20baa942215749ed839e4fc02b3d197e3e3d142 openEuler This patch introduces support for configuring and querying the mapping between DSCP (Differentiated Services Code Point) and TC (Traffic Class) in the UNIC driver. 
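As an illustration of the interface involved, each mapping travels through the dcbnl layer as a standard struct dcb_app; a request to map DSCP 24 to priority 3 would reach the driver's ieee_setapp hook roughly as shown below (the values are made up for the example; the driver additionally checks protocol against UBASE_MAX_DSCP and priority against UNIC_MAX_PRIO_NUM):

/* Illustrative only: the app-table tuple for "map DSCP 24 to priority 3" */
static const struct dcb_app example_app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,	/* DSCP app selector */
	.protocol = 24,				/* DSCP code point, 0..63 */
	.priority = 3,				/* target priority / TC input */
};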
The new functionality allows users to set and retrieve the DSCP-to-TC mappings, enhancing the driver's ability to manage network traffic prioritization. Signed-off-by: Haiqing Fang Signed-off-by: Xiaobo Zhang Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_dcbnl.c | 138 +++++++++++++++++++++++++++++++ 1 file changed, 138 insertions(+) diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 3110277da020..9e44d74f3b38 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -10,7 +10,145 @@ #include "unic_netdev.h" #include "unic_dcbnl.h" +static int unic_dscp_prio_check(struct net_device *netdev, struct dcb_app *app) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + + if (!unic_dev_ets_supported(unic_dev)) + return -EOPNOTSUPP; + + if (netif_running(netdev)) { + unic_err(unic_dev, + "failed to set dscp-prio, due to network interface is up, pls down it first and try again.\n"); + return -EBUSY; + } + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= UBASE_MAX_DSCP || + app->priority >= UNIC_MAX_PRIO_NUM) + return -EINVAL; + + if (unic_resetting(netdev)) + return -EBUSY; + + return 0; +} + +static int unic_set_app(struct net_device *netdev, struct dcb_app *app, + struct unic_dev *unic_dev, struct unic_vl *vl) +{ + struct dcb_app old_app; + int ret; + + unic_info(unic_dev, "setapp dscp = %u, priority = %u.\n", + app->protocol, app->priority); + + ret = dcb_ieee_setapp(netdev, app); + if (ret) { + unic_err(unic_dev, "failed to add app, ret = %d.\n", ret); + return ret; + } + + old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + old_app.protocol = app->protocol; + old_app.priority = vl->dscp_prio[app->protocol]; + + vl->dscp_prio[app->protocol] = app->priority; + ret = unic_set_vl_map(unic_dev, vl->dscp_prio, vl->prio_vl, + UNIC_DSCP_VL_MAP); + if (ret) { + vl->dscp_prio[app->protocol] = old_app.priority; + dcb_ieee_delapp(netdev, app); + return ret; + } + + if (old_app.priority == UNIC_INVALID_PRIORITY) { + vl->dscp_app_cnt++; + } else { + ret = dcb_ieee_delapp(netdev, &old_app); + if (ret) + unic_err(unic_dev, + "failed to delete old app, ret = %d.\n", ret); + } + + return ret; +} + +static int unic_dcbnl_ieee_setapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_vl *vl = &unic_dev->channels.vl; + int ret; + + ret = unic_dscp_prio_check(netdev, app); + if (ret) { + unic_err(unic_dev, "failed to set dscp-prio, ret = %d\n", ret); + return ret; + } + + /* dscp-prio already set */ + if (vl->dscp_prio[app->protocol] == app->priority) + return 0; + + return unic_set_app(netdev, app, unic_dev, vl); +} + +static int unic_del_app(struct net_device *netdev, struct dcb_app *app, + struct unic_dev *unic_dev, struct unic_vl *vl) +{ + u8 map_type = UNIC_DSCP_VL_MAP; + int ret; + + unic_info(unic_dev, "delapp dscp = %u, priority = %u\n", + app->protocol, app->priority); + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + return ret; + + if (vl->dscp_app_cnt <= 1) + map_type = UNIC_PRIO_VL_MAP; + + vl->dscp_prio[app->protocol] = UNIC_INVALID_PRIORITY; + ret = unic_set_vl_map(unic_dev, vl->dscp_prio, vl->prio_vl, + map_type); + if (ret) { + vl->dscp_prio[app->protocol] = app->priority; + dcb_ieee_setapp(netdev, app); + return ret; + } + + if (vl->dscp_app_cnt) + vl->dscp_app_cnt--; + + return 0; +} + +static int unic_dcbnl_ieee_delapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct unic_dev *unic_dev = 
netdev_priv(netdev); + struct unic_vl *vl = &unic_dev->channels.vl; + int ret; + + ret = unic_dscp_prio_check(netdev, app); + if (ret) { + unic_err(unic_dev, "failed to del dscp-prio, ret = %d.\n", ret); + return ret; + } + + if (app->priority != vl->dscp_prio[app->protocol]) { + unic_err(unic_dev, "failed to del dscp-prio, no matching entry.\n"); + return -EINVAL; + } + + return unic_del_app(netdev, app, unic_dev, vl); +} + static const struct dcbnl_rtnl_ops unic_dcbnl_ops = { + .ieee_setapp = unic_dcbnl_ieee_setapp, + .ieee_delapp = unic_dcbnl_ieee_delapp, }; void unic_set_dcbnl_ops(struct net_device *netdev) -- Gitee From 477c6541c6b940007a7baaa452e1af6a7e730c54 Mon Sep 17 00:00:00 2001 From: Haibin Lu Date: Tue, 23 Sep 2025 17:25:20 +0800 Subject: [PATCH 059/243] net: unic: Add debugfs support for QoS configuration and query. commit 3a3016996e8e5ffff95b290866fc0899409a4e2d openEuler This patch introduces debugfs support for querying QoS (Quality of Service) mappings in the UNIC driver. The new functionality allows users to inspect DSCP (Differentiated Services Code Point) to TC (Traffic Class) mappings, as well as the VL queue configuration. Signed-off-by: Haiqing Fang Signed-off-by: Xiaobo Zhang Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/debugfs/unic_debugfs.c | 35 ++++- drivers/net/ub/unic/debugfs/unic_debugfs.h | 1 + .../net/ub/unic/debugfs/unic_qos_debugfs.c | 148 ++++++++++++++++++ .../net/ub/unic/debugfs/unic_qos_debugfs.h | 15 ++ drivers/net/ub/unic/unic_hw.h | 1 + drivers/net/ub/unic/unic_qos_hw.c | 18 +++ drivers/net/ub/unic/unic_qos_hw.h | 2 + 8 files changed, 220 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ub/unic/debugfs/unic_qos_debugfs.c create mode 100644 drivers/net/ub/unic/debugfs/unic_qos_debugfs.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 7098242033eb..fa5de4255e68 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,5 +9,5 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o -unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o +unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index e61e782b044d..f9cc6e887ce0 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -10,9 +10,10 @@ #include #include -#include "unic_dev.h" #include "unic_ctx_debugfs.h" +#include "unic_dev.h" #include "unic_hw.h" +#include "unic_qos_debugfs.h" #include "unic_debugfs.h" static int unic_dbg_dump_dev_info(struct seq_file *s, void *data) @@ -260,6 +261,10 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { .name = "context", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, .support = unic_dbg_dentry_support, + }, { + .name = "qos", + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, }, /* keep unic at the bottom and add new directory above */ { @@ -347,6 +352,13 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = 
ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rq_jfc_context_hw, + }, { + .name = "vl_queue", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_vl_queue, }, { .name = "rss_cfg_hw", .dentry_index = UNIC_DBG_DENTRY_ROOT, @@ -361,6 +373,27 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_promisc_cfg_hw, + }, { + .name = "dscp_vl_map", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_dscp_vl_map, + }, { + .name = "prio_vl_map", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_prio_vl_map, + }, { + .name = "dscp_prio", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_dscp_prio, }, { .name = "link_status_record", .dentry_index = UNIC_DBG_DENTRY_ROOT, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 1afba40382d6..065630fbd35e 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -14,6 +14,7 @@ enum unic_dbg_dentry_type { UNIC_DBG_DENTRY_CONTEXT, + UNIC_DBG_DENTRY_QOS, /* must be the last entry. */ UNIC_DBG_DENTRY_ROOT }; diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c new file mode 100644 index 000000000000..85b3288f0bec --- /dev/null +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include "unic_dcbnl.h" +#include "unic_debugfs.h" +#include "unic_hw.h" +#include "unic_qos_debugfs.h" + +int unic_dbg_dump_vl_queue(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_vl *vl = &unic_dev->channels.vl; + u8 i; + + seq_puts(s, "VL_ID Q_OFFSET Q_COUNT\n"); + + for (i = 0; i < unic_dev->channels.rss_vl_num; i++) { + seq_printf(s, "%-7d", i); + seq_printf(s, "%-12u", vl->queue_offset[i]); + seq_printf(s, "%-11u\n", vl->queue_count[i]); + } + + return 0; +} + +static void unic_dump_dscp_vl_map(struct unic_dev *unic_dev, + struct seq_file *s, u8 dscp, u8 hw_vl) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_adev_qos *qos = ubase_get_adev_qos(adev); + struct unic_vl *vl = &unic_dev->channels.vl; + u8 prio, sw_vl = 0; + + prio = vl->dscp_prio[dscp]; + if (prio == UNIC_INVALID_PRIORITY || + vl->prio_vl[prio] >= unic_dev->channels.rss_vl_num) + sw_vl = qos->nic_vl[0]; + else + sw_vl = qos->nic_vl[vl->prio_vl[prio]]; + + seq_printf(s, "%-6u", dscp); + seq_printf(s, "%-7u", sw_vl); + + if (!unic_dev_ubl_supported(unic_dev) && + unic_dev_ets_supported(unic_dev)) + seq_printf(s, "%-7u", hw_vl); + else + seq_puts(s, "--"); + + seq_puts(s, "\n"); +} + +int unic_dbg_dump_dscp_vl_map(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_config_vl_map_cmd resp = {0}; + int ret; + u8 i; + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + if (!unic_dev_ubl_supported(unic_dev) && + unic_dev_ets_supported(unic_dev)) { + ret = unic_query_vl_map(unic_dev, &resp); + if (ret) + return ret; + } + + seq_puts(s, "DSCP SW_VL HW_VL\n"); + + for (i = 0; i < UBASE_MAX_DSCP; i++) + unic_dump_dscp_vl_map(unic_dev, s, i, resp.dscp_vl[i]); + + return 0; +} + +static void unic_dump_prio_vl_map(struct unic_dev *unic_dev, + struct seq_file *s, u8 prio, u8 hw_vl) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_adev_qos *qos = ubase_get_adev_qos(adev); + struct unic_vl *vl = &unic_dev->channels.vl; + u8 sw_vl = 0; + + if (vl->prio_vl[prio] >= unic_dev->channels.rss_vl_num) + sw_vl = qos->nic_vl[0]; + else + sw_vl = qos->nic_vl[vl->prio_vl[prio]]; + + seq_printf(s, "%-6u", prio); + seq_printf(s, "%-7u", sw_vl); + + if (!unic_dev_ubl_supported(unic_dev) && + unic_dev_ets_supported(unic_dev)) + seq_printf(s, "%-7u", hw_vl); + else + seq_puts(s, "--"); + + seq_puts(s, "\n"); +} + +int unic_dbg_dump_prio_vl_map(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_config_vl_map_cmd resp = {0}; + int ret; + u8 i; + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + if (!unic_dev_ubl_supported(unic_dev) && + unic_dev_ets_supported(unic_dev)) { + ret = unic_query_vl_map(unic_dev, &resp); + if (ret) + return ret; + } + + seq_puts(s, "PRIO SW_VL HW_VL\n"); + + for (i = 0; i < UNIC_MAX_PRIO_NUM; i++) + unic_dump_prio_vl_map(unic_dev, s, i, resp.prio_vl[i]); + + return 0; +} + +int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_vl *vl = &unic_dev->channels.vl; + u16 i; + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + seq_puts(s, "DSCP PRIO\n"); + + for (i = 0; i < UBASE_MAX_DSCP; i++) { + seq_printf(s, "%-6u", i); + seq_printf(s, "%-7u\n", vl->dscp_prio[i]); + } + + return 0; +} diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h new file mode 
100644 index 000000000000..f55616ab1617 --- /dev/null +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UNIC_QOS_DEBUGFS_H__ +#define __UNIC_QOS_DEBUGFS_H__ + +int unic_dbg_dump_vl_queue(struct seq_file *s, void *data); +int unic_dbg_dump_dscp_vl_map(struct seq_file *s, void *data); +int unic_dbg_dump_prio_vl_map(struct seq_file *s, void *data); +int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data); + +#endif diff --git a/drivers/net/ub/unic/unic_hw.h b/drivers/net/ub/unic/unic_hw.h index ebe0c714549d..cfd46b6eadf4 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -11,6 +11,7 @@ #include #include "unic_dev.h" +#include "unic_qos_hw.h" #define UNIC_DL_CONFIG_MODE_TX_EN_B 0 #define UNIC_DL_CONFIG_MODE_RX_EN_B 1 diff --git a/drivers/net/ub/unic/unic_qos_hw.c b/drivers/net/ub/unic/unic_qos_hw.c index 2f463be3d324..79fb7271c08e 100644 --- a/drivers/net/ub/unic/unic_qos_hw.c +++ b/drivers/net/ub/unic/unic_qos_hw.c @@ -29,6 +29,24 @@ int unic_set_hw_vl_map(struct unic_dev *unic_dev, u8 *dscp_vl, u8 *prio_vl, return ret; } +int unic_query_vl_map(struct unic_dev *unic_dev, + struct unic_config_vl_map_cmd *resp) +{ + struct unic_config_vl_map_cmd req = {0}; + struct ubase_cmd_buf in, out; + int ret; + + ubase_fill_inout_buf(&in, UBASE_OPC_CFG_VL_MAP, true, sizeof(req), + &req); + ubase_fill_inout_buf(&out, UBASE_OPC_CFG_VL_MAP, false, sizeof(*resp), + resp); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) + unic_err(unic_dev, "failed to query vl map, ret = %d.\n", ret); + + return ret; +} + /* vl_maxrate: byte per second */ int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, u16 vl_bitmap) diff --git a/drivers/net/ub/unic/unic_qos_hw.h b/drivers/net/ub/unic/unic_qos_hw.h index 9f6947463349..82822cc871b6 100644 --- a/drivers/net/ub/unic/unic_qos_hw.h +++ b/drivers/net/ub/unic/unic_qos_hw.h @@ -13,6 +13,8 @@ int unic_set_hw_vl_map(struct unic_dev *unic_dev, u8 *dscp_vl, u8 *prio_vl, u8 map_type); +int unic_query_vl_map(struct unic_dev *unic_dev, + struct unic_config_vl_map_cmd *resp); int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, u16 vl_bitmap); -- Gitee From fde2f28a626f10224e4433aec8590230e00af4ab Mon Sep 17 00:00:00 2001 From: Haibin Lu Date: Tue, 16 Sep 2025 11:55:53 +0800 Subject: [PATCH 060/243] net: unic: support config/query ets parameters commit d6b4c7258c23f655f7d0509c1cd80361abfc245d openEuler This patch enhances the UNIC driver's support for DCB (Data Center Bridging) by adding functionality to configure and query ETS (Enhanced Transmission Selection) parameters, as well as manage VL queues. The changes include: 1. New DCBnl Commands: - `ieee_getets`: Retrieves the current ETS configuration. - `ieee_setets`: Sets ETS parameters, including priority-to-TC mappings, TC bandwidth, and TSA (Transmission Selection Algorithm). 2. ETS Configuration Management: - Adds validation for ETS parameters to ensure compatibility with hardware capabilities. - Implements logic to update VL mappings and traffic scheduling based on ETS settings. 3. VL Queue Adjustments: - Introduces functions to detect changes in the VL queue number and adjust RSS (Receive Side Scaling) queue sizes accordingly, optimizing network performance. 4. 
Integration with Existing Framework: - Registers new DCBnl commands with the netlink interface for configuration and querying. - Updates internal structures and functions to support the new ETS and VL queue management features. This enhancement improves the UNIC driver's ability to manage network traffic prioritization and scheduling, providing greater flexibility and performance in data center environments. Signed-off-by: Haiqing Fang Signed-off-by: Xiaobo Zhang Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_dcbnl.c | 244 ++++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_dev.c | 22 +++ drivers/net/ub/unic/unic_dev.h | 12 ++ include/ub/ubase/ubase_comm_dev.h | 2 + 4 files changed, 280 insertions(+) diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 9e44d74f3b38..2b50901d8434 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -5,11 +5,253 @@ */ #include +#include #include "unic_hw.h" #include "unic_netdev.h" #include "unic_dcbnl.h" +#define UNIC_PRIO_VL_MAP_CHANGED BIT(0) +#define UNIC_TC_TSA_CHANGED BIT(1) +#define UNIC_TC_BW_CHANGED BIT(2) + +static int unic_ets_prio_tc_validate(struct unic_dev *unic_dev, + struct ieee_ets *ets, u8 *changed, + u8 *vl_num) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + u32 max_queue = unic_channels_max_num(adev); + u8 i, max_vl = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] != unic_dev->channels.vl.prio_vl[i]) + *changed |= UNIC_PRIO_VL_MAP_CHANGED; + + max_vl = max_t(u8, max_vl, ets->prio_tc[i] + 1); + } + + if (max_vl > caps->vl_num) { + unic_err(unic_dev, "tc num(%u) can't exceed max tc(%u).\n", + max_vl, caps->vl_num); + return -EINVAL; + } + + if (unic_get_rss_vl_num(unic_dev, max_vl) > max_queue) { + unic_err(unic_dev, + "tc num can't exceed queue num(%u).\n", max_queue); + return -EINVAL; + } + + *vl_num = max_vl; + + return 0; +} + +static int unic_ets_tc_bw_validate(struct unic_dev *unic_dev, + struct ieee_ets *ets, u8 *changed) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + struct unic_vl *vl = &unic_dev->channels.vl; + /* continuous bitmap configured by the dcb tool */ + u16 tc_bitmap = (1 << caps->vl_num) - 1; + int ret; + u8 i; + + ret = ubase_check_qos_sch_param(adev, tc_bitmap, ets->tc_tx_bw, + ets->tc_tsa, false); + if (ret) + return ret; + + for (i = 0; i < caps->vl_num; i++) { + if (vl->vl_tsa[i] != ets->tc_tsa[i]) + *changed |= UNIC_TC_TSA_CHANGED; + + if (vl->vl_bw[i] != ets->tc_tx_bw[i]) + *changed |= UNIC_TC_BW_CHANGED; + } + + return 0; +} + +static int unic_setets_params_validate(struct unic_dev *unic_dev, + struct ieee_ets *ets, u8 *changed, + u8 *vl_num) +{ + int ret; + + ret = unic_ets_prio_tc_validate(unic_dev, ets, changed, vl_num); + if (ret) + return ret; + + return unic_ets_tc_bw_validate(unic_dev, ets, changed); } + +static int unic_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + struct unic_vl *vl = &unic_dev->channels.vl; + + if (unic_resetting(ndev)) + return -EBUSY; + + if (!unic_dev_ets_supported(unic_dev)) + return -EOPNOTSUPP; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = caps->vl_num; + + memcpy(ets->prio_tc, vl->prio_vl, 
sizeof(ets->prio_tc)); + memcpy(ets->tc_tx_bw, vl->vl_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, vl->vl_tsa, sizeof(ets->tc_tsa)); + + return 0; +} + +static int unic_setets_preconditions(struct net_device *net_dev) +{ + struct unic_dev *unic_dev = netdev_priv(net_dev); + + if (!unic_dev_ets_supported(unic_dev)) + return -EOPNOTSUPP; + + if (netif_running(net_dev)) { + unic_err(unic_dev, + "failed to set ets, due to network interface is up, pls down it first and try again.\n"); + return -EBUSY; + } + + if (!(unic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (unic_resetting(net_dev)) + return -EBUSY; + + return 0; +} + +static int unic_handle_prio_vl_change(struct unic_dev *unic_dev, + struct ieee_ets *ets, u8 changed) +{ + struct unic_vl *vl = &unic_dev->channels.vl; + u8 map_type; + int ret; + + if (!(changed & UNIC_PRIO_VL_MAP_CHANGED)) + return 0; + + map_type = vl->dscp_app_cnt ? UNIC_DSCP_VL_MAP : UNIC_PRIO_VL_MAP; + ret = unic_set_vl_map(unic_dev, vl->dscp_prio, ets->prio_tc, + map_type); + if (ret) + return ret; + + memcpy(vl->prio_vl, ets->prio_tc, sizeof(ets->prio_tc)); + + return 0; +} + +static inline void unic_convert_vl_sch_bw(struct ubase_caps *caps, u8 *vl_bw, + struct ieee_ets *ets) +{ + u8 i; + + for (i = 0; i < caps->vl_num; i++) { + vl_bw[caps->req_vl[i]] = ets->tc_tx_bw[i]; + vl_bw[caps->resp_vl[i]] = ets->tc_tx_bw[i]; + } +} + +static int unic_handle_tm_vl_sch_change(struct unic_dev *unic_dev, + struct ubase_caps *caps, + struct ieee_ets *ets) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_vl *vl = &unic_dev->channels.vl; + u8 vl_tsa[UBASE_MAX_VL_NUM] = {0}; + u8 vl_bw[UBASE_MAX_VL_NUM] = {0}; + u32 i; + + unic_convert_vl_sch_bw(caps, vl_bw, ets); + + for (i = 0; i < caps->vl_num; i++) { + if (ets->tc_tsa[i]) { + vl_tsa[caps->req_vl[i]] = ets->tc_tsa[i]; + vl_tsa[caps->resp_vl[i]] = ets->tc_tsa[i]; + } + } + + return ubase_config_tm_vl_sch(adev, vl->vl_bitmap, vl_bw, vl_tsa); +} + +static int unic_handle_vl_tsa_bw_change(struct unic_dev *unic_dev, + struct ieee_ets *ets, u8 changed) +{ + u8 *vl_tsa = unic_dev->channels.vl.vl_tsa; + u8 *vl_bw = unic_dev->channels.vl.vl_bw; + int ret; + + struct ubase_caps *caps = ubase_get_dev_caps(unic_dev->comdev.adev); + + if (!(changed & UNIC_TC_TSA_CHANGED || changed & UNIC_TC_BW_CHANGED)) + return 0; + + ret = unic_handle_tm_vl_sch_change(unic_dev, caps, ets); + if (ret) + return ret; + + memcpy(vl_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(vl_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + + return 0; +} + +static int unic_setets_config(struct net_device *ndev, struct ieee_ets *ets, + u8 changed, u8 vl_num) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + ret = unic_handle_prio_vl_change(unic_dev, ets, changed); + if (ret) + return ret; + + ret = unic_handle_vl_tsa_bw_change(unic_dev, ets, changed); + if (ret) + return ret; + + unic_dev->channels.vl.vl_num = vl_num; + if (unic_rss_vl_num_changed(unic_dev, vl_num)) + return unic_update_channels(unic_dev, vl_num); + + return 0; +} + +static int unic_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + u8 changed = 0; + u8 vl_num = 0; + int ret; + + ret = unic_setets_preconditions(ndev); + if (ret) + return ret; + + ret = unic_setets_params_validate(unic_dev, ets, &changed, &vl_num); + if (ret) + return ret; + + if (!changed) + return 0; + + return unic_setets_config(ndev, ets, changed, vl_num); +} + static int unic_dscp_prio_check(struct 
net_device *netdev, struct dcb_app *app) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -147,6 +389,8 @@ static int unic_dcbnl_ieee_delapp(struct net_device *netdev, } static const struct dcbnl_rtnl_ops unic_dcbnl_ops = { + .ieee_getets = unic_dcbnl_ieee_getets, + .ieee_setets = unic_dcbnl_ieee_setets, .ieee_setapp = unic_dcbnl_ieee_setapp, .ieee_delapp = unic_dcbnl_ieee_delapp, }; diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 7ed3525837e9..ef79194c24bb 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -905,6 +905,13 @@ void unic_remove_period_task(struct unic_dev *unic_dev) cancel_delayed_work_sync(&unic_dev->service_task); } +bool unic_rss_vl_num_changed(struct unic_dev *unic_dev, u8 vl_num) +{ + struct unic_channels *channels = &unic_dev->channels; + + return channels->rss_vl_num != unic_get_rss_vl_num(unic_dev, vl_num); +} + int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, u32 org_rss_size) { @@ -932,6 +939,21 @@ int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, return ret; } +int unic_update_channels(struct unic_dev *unic_dev, u8 vl_num) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_channels *channels = &unic_dev->channels; + u32 new_rss_size, old_rss_size = channels->rss_size; + + channels->rss_vl_num = unic_get_rss_vl_num(unic_dev, vl_num); + if (old_rss_size * channels->rss_vl_num > unic_channels_max_num(adev)) + new_rss_size = unic_get_max_rss_size(unic_dev); + else + new_rss_size = old_rss_size; + + return unic_change_rss_size(unic_dev, new_rss_size, old_rss_size); +} + static struct net_device *unic_alloc_netdev(struct auxiliary_device *adev) { struct ubase_caps *caps = ubase_get_dev_caps(adev); diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index af20b72168ee..a20744b810e8 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -257,6 +257,7 @@ int unic_init_channels(struct unic_dev *unic_dev, u32 channels_num); void unic_uninit_channels(struct unic_dev *unic_dev); void unic_start_period_task(struct net_device *netdev); void unic_remove_period_task(struct unic_dev *unic_dev); +void unic_update_queue_info(struct unic_dev *unic_dev); int unic_init_wq(void); void unic_destroy_wq(void); u16 unic_cqe_period_round_down(u16 cqe_period); @@ -264,8 +265,10 @@ int unic_init_rx(struct unic_dev *unic_dev, u32 num); int unic_init_tx(struct unic_dev *unic_dev, u32 num); void unic_destroy_rx(struct unic_dev *unic_dev, u32 num); void unic_destroy_tx(struct unic_dev *unic_dev, u32 num); +bool unic_rss_vl_num_changed(struct unic_dev *unic_dev, u8 vl_num); int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, u32 org_rss_size); +int unic_update_channels(struct unic_dev *unic_dev, u8 vl_num); int unic_set_vl_map(struct unic_dev *unic_dev, u8 *dscp_prio, u8 *prio_vl, u8 map_type); int unic_dbg_log(void); @@ -357,6 +360,15 @@ static inline u32 unic_read_reg(struct unic_dev *unic_dev, u32 reg) return readl(reg_addr + reg); } +static inline u8 unic_get_rss_vl_num(struct unic_dev *unic_dev, u8 max_vl) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_adev_qos *qos = ubase_get_adev_qos(adev); + u8 vl_num = min(UNIC_RSS_MAX_VL_NUM, qos->nic_vl_num); + + return max_vl < vl_num ? 
max_vl : vl_num; +} + static inline u32 unic_get_sq_cqe_mask(struct unic_dev *unic_dev) { return unic_dev->channels.sq_cqe_depth - 1; diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 81b2c98af17f..ac85c20311dd 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -24,6 +24,8 @@ struct iova_slot; #error "UBASE_MAX_VL_NUM can't less than IEEE_8021QAZ_MAX_TCS" #endif +#define UBASE_NIC_MAX_VL_NUM (2) + #define UBASE_SUP_UBL BIT(0) #define UBASE_SUP_ETH BIT(1) #define UBASE_SUP_UNIC BIT(2) -- Gitee From 62fd812338a2ecb84b7b3e70a12ea8044d6578d0 Mon Sep 17 00:00:00 2001 From: Xiaobo Zhang Date: Mon, 1 Sep 2025 13:39:35 +0800 Subject: [PATCH 061/243] net: unic: support config/query traffic class parameters commit daea31ba9a6cd73a57dff9769b34b4d3fd673017 openEuler This patch enhances the UNIC driver's DCB (Data Center Bridging) support by adding the following features: 1. DCBX Mode Configuration: - Introduces `unic_dcbnl_getdcbx` and `unic_dcbnl_setdcbx` functions to retrieve and set the DCBX mode, enabling compatibility with different DCBX versions and management modes. 2. Max Rate Management: - Adds `unic_ieee_getmaxrate` and `unic_ieee_setmaxrate` functions to query and configure the maximum transmission rates for each Traffic Class (TC), ensuring optimal network performance. - Includes `unic_check_maxrate` to validate rate settings, ensuring they fall within supported hardware limits. 3. Integration with DCBnl Interface: - Registers the new functions with the DCBnl netlink interface, allowing users to configure and query DCBX and max rate settings via standard network management tools. These enhancements improve the UNIC driver's ability to manage advanced QoS configurations, providing greater flexibility and control over network traffic prioritization and rate limiting. 
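To summarize the DCBX mode policy described above, the acceptance check reduces to the predicate below (a standalone restatement for illustration, not additional code from this series): only host-managed IEEE DCBX is accepted, while LLD-managed and CEE modes are rejected.

/* Illustrative only: accept host-managed IEEE DCBX, nothing else. */
static bool example_dcbx_mode_ok(u8 mode)
{
	return !(mode & DCB_CAP_DCBX_LLD_MANAGED) &&
	       !(mode & DCB_CAP_DCBX_VER_CEE) &&
	       (mode & DCB_CAP_DCBX_HOST);
}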
Signed-off-by: Haiqing Fang Signed-off-by: Xiaobo Zhang Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_dcbnl.c | 93 ++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 2b50901d8434..5f14cebac540 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -388,11 +388,104 @@ static int unic_dcbnl_ieee_delapp(struct net_device *netdev, return unic_del_app(netdev, app, unic_dev, vl); } +static u8 unic_dcbnl_getdcbx(struct net_device *ndev) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + return unic_dev->dcbx_cap; +} + +static u8 unic_dcbnl_setdcbx(struct net_device *ndev, u8 mode) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + unic_info(unic_dev, "set dcbx mode = %u.\n", mode); + + unic_dev->dcbx_cap = mode; + + return 0; +} + +static int unic_ieee_getmaxrate(struct net_device *ndev, + struct ieee_maxrate *maxrate) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (!unic_dev_ets_supported(unic_dev) || + !unic_dev_tc_speed_limit_supported(unic_dev)) + return -EOPNOTSUPP; + + memcpy(maxrate->tc_maxrate, unic_dev->channels.vl.vl_maxrate, + sizeof(maxrate->tc_maxrate)); + return 0; +} + +static int unic_check_maxrate(struct unic_dev *unic_dev, + struct ieee_maxrate *maxrate) +{ + u32 max_speed = unic_dev->hw.mac.max_speed; + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (!maxrate->tc_maxrate[i]) + continue; + + if (maxrate->tc_maxrate[i] / UNIC_MBYTE_PER_SEND > max_speed || + maxrate->tc_maxrate[i] < UNIC_MBYTE_PER_SEND) { + unic_err(unic_dev, + "invalid max_rate(%llubit), the range is [1Mbit, %uMbit].\n", + maxrate->tc_maxrate[i] * BITS_PER_BYTE, + max_speed); + return -EINVAL; + } + } + + return 0; +} + +static int unic_ieee_setmaxrate(struct net_device *ndev, + struct ieee_maxrate *maxrate) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_vl *vl = &unic_dev->channels.vl; + int ret; + + if (!unic_dev_ets_supported(unic_dev) || + !unic_dev_tc_speed_limit_supported(unic_dev)) + return -EOPNOTSUPP; + + if (!(unic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + ret = unic_check_maxrate(unic_dev, maxrate); + if (ret) + return ret; + + ret = unic_config_vl_rate_limit(unic_dev, maxrate->tc_maxrate, + vl->vl_bitmap); + if (ret) + return ret; + + memcpy(vl->vl_maxrate, maxrate->tc_maxrate, + sizeof(maxrate->tc_maxrate)); + + return 0; +} + static const struct dcbnl_rtnl_ops unic_dcbnl_ops = { .ieee_getets = unic_dcbnl_ieee_getets, .ieee_setets = unic_dcbnl_ieee_setets, + .ieee_getmaxrate = unic_ieee_getmaxrate, + .ieee_setmaxrate = unic_ieee_setmaxrate, .ieee_setapp = unic_dcbnl_ieee_setapp, .ieee_delapp = unic_dcbnl_ieee_delapp, + .getdcbx = unic_dcbnl_getdcbx, + .setdcbx = unic_dcbnl_setdcbx, }; void unic_set_dcbnl_ops(struct net_device *netdev) -- Gitee From d7ee0f48247bfb8a1feae4d80d9a5f4e69d0e7e2 Mon Sep 17 00:00:00 2001 From: Haibin Lu Date: Tue, 16 Sep 2025 16:30:21 +0800 Subject: [PATCH 062/243] net: unic: support subscribing to the RX stream stop and recovery interface. commit 58daddb6f308b8a305b2fe3b3d35065b150aac4e openEuler The ub system allows the aux driver to deliver the deactivate/activate dev command to ensure system robustness. The deactivate/activate command requires close cooperation between components. 
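A minimal sketch of that cooperation from the auxiliary driver's side looks as follows (the example_ names are hypothetical; the register/unregister calls are the ones this patch uses):

/* Illustrative only: subscribing to activate/deactivate notifications.
 * activate == false asks the driver to quiesce its RX stream,
 * activate == true allows it to resume.
 */
static void example_activate_handler(struct auxiliary_device *adev,
				     bool activate)
{
	dev_info(&adev->dev, "%s event received\n",
		 activate ? "activate" : "deactivate");
}

static void example_subscribe(struct auxiliary_device *adev)
{
	ubase_activate_register(adev, example_activate_handler);
}

static void example_unsubscribe(struct auxiliary_device *adev)
{
	ubase_activate_unregister(adev);
}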
This patch allows the UNIC driver to complete its configuration when the device is deactivated or activated. Signed-off-by: Zhenyu Wan Signed-off-by: Xiaobo Zhang Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/unic_event.c | 101 +++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index faa91a99badc..1785e0aad7f1 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -53,6 +53,105 @@ int unic_comp_handler(struct notifier_block *nb, unsigned long jfcn, void *data) return 0; } +static void unic_activate_event_process(struct unic_dev *unic_dev) +{ + struct unic_act_info *act_info = &unic_dev->act_info; + struct net_device *netdev = unic_dev->comdev.netdev; + int ret; + + if (test_bit(UNIC_STATE_DISABLED, &unic_dev->state)) { + unic_err(unic_dev, + "failed to process activate event, device is not ready.\n"); + return; + } + + rtnl_lock(); + + if (!test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) + goto unlock; + + /* if the network interface has already been stopped, + * there is no need to open it on an activate event + */ + if (test_bit(UNIC_STATE_DOWN, &unic_dev->state)) + goto out; + + ret = unic_net_open_no_link_change(netdev); + if (ret) + unic_warn(unic_dev, "failed to open net, ret = %d.\n", ret); + + ret = unic_activate_promisc_mode(unic_dev, true); + if (ret) + unic_warn(unic_dev, "failed to open promisc, ret = %d.\n", ret); + else + clear_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &unic_dev->vport.state); + +out: + mutex_lock(&act_info->mutex); + act_info->deactivate = false; + mutex_unlock(&act_info->mutex); + clear_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state); +unlock: + rtnl_unlock(); +} + +static void unic_deactivate_event_process(struct unic_dev *unic_dev) +{ + struct unic_act_info *act_info = &unic_dev->act_info; + struct net_device *netdev = unic_dev->comdev.netdev; + int ret; + + if (test_bit(UNIC_STATE_DISABLED, &unic_dev->state)) { + unic_err(unic_dev, + "failed to process deactivate event, device is not ready.\n"); + return; + } + + rtnl_lock(); + + if (test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) + goto unlock; + + /* when a deactivate event occurs, set the flag to true to prevent + * periodic tasks from changing promisc + */ + mutex_lock(&act_info->mutex); + act_info->deactivate = true; + mutex_unlock(&act_info->mutex); + + ret = unic_activate_promisc_mode(unic_dev, false); + if (ret) + unic_warn(unic_dev, "failed to close promisc, ret = %d.\n", ret); + else + set_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &unic_dev->vport.state); + + /* if the network interface has already been stopped, + * there is no need to stop it again on a deactivate event + */ + if (test_bit(UNIC_STATE_DOWN, &unic_dev->state)) + goto out; + + unic_net_stop_no_link_change(netdev); + +out: + set_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state); +unlock: + rtnl_unlock(); +} + +static void unic_activate_handler(struct auxiliary_device *adev, bool activate) +{ + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + + unic_info(unic_dev, "receive %s event callback.\n", + activate ? 
"activate" : "deactivate"); + + if (activate) + unic_activate_event_process(unic_dev); + else + unic_deactivate_event_process(unic_dev); +} + static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) { if (link_up) @@ -229,6 +328,7 @@ int unic_register_event(struct auxiliary_device *adev) ubase_port_register(adev, unic_port_handler); ubase_reset_register(adev, unic_reset_handler); + ubase_activate_register(adev, unic_activate_handler); return 0; @@ -242,6 +342,7 @@ int unic_register_event(struct auxiliary_device *adev) void unic_unregister_event(struct auxiliary_device *adev) { + ubase_activate_unregister(adev); ubase_reset_unregister(adev); ubase_port_unregister(adev); unic_unregister_ctrlq_event(adev, ARRAY_SIZE(unic_ctrlq_events)); -- Gitee From d7c7bedd40c79775d273fc97d8f00a2ae31650ae Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Sat, 23 Aug 2025 15:53:06 +0800 Subject: [PATCH 063/243] net: unic: Debugfs supports querying IP specifications and IP entries. commit 48ffd57b4028d48472921c13d77630dc7f5b8777 openEuler This patch adds the ability to query IP table entries and their specifications. Users can use standard tools to query IP table entries and their specifications. Signed-off-by: Xiongchuan Zhou --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/debugfs/unic_debugfs.c | 19 ++++++ drivers/net/ub/unic/debugfs/unic_debugfs.h | 1 + .../net/ub/unic/debugfs/unic_entry_debugfs.c | 59 +++++++++++++++++++ .../net/ub/unic/debugfs/unic_entry_debugfs.h | 16 +++++ 5 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ub/unic/debugfs/unic_entry_debugfs.c create mode 100644 drivers/net/ub/unic/debugfs/unic_entry_debugfs.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index fa5de4255e68..041815219e0c 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,5 +9,5 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o -unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o +unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index f9cc6e887ce0..63703934613d 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -14,6 +14,7 @@ #include "unic_dev.h" #include "unic_hw.h" #include "unic_qos_debugfs.h" +#include "unic_entry_debugfs.h" #include "unic_debugfs.h" static int unic_dbg_dump_dev_info(struct seq_file *s, void *data) @@ -258,6 +259,10 @@ static bool unic_dbg_dentry_support(struct device *dev, u32 property) static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { { + .name = "ip_tbl", + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + }, { .name = "context", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, .support = unic_dbg_dentry_support, @@ -276,6 +281,20 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { { + .name = "ip_tbl_spec", + .dentry_index = UNIC_DBG_DENTRY_IP, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = 
unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_ip_tbl_spec, + }, { + .name = "ip_tbl_list", + .dentry_index = UNIC_DBG_DENTRY_IP, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_ip_tbl_list, + }, { .name = "jfs_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 065630fbd35e..73a75b091b4b 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -13,6 +13,7 @@ #define unic_get_ubase_root_dentry(adev) ubase_diag_debugfs_root(adev) enum unic_dbg_dentry_type { + UNIC_DBG_DENTRY_IP = 0, UNIC_DBG_DENTRY_CONTEXT, UNIC_DBG_DENTRY_QOS, /* must be the last entry. */ diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c new file mode 100644 index 000000000000..74b5fdb95aaa --- /dev/null +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include +#include + +#include "unic_comm_addr.h" +#include "unic_dev.h" +#include "unic_debugfs.h" +#include "unic_entry_debugfs.h" + +static const char * const unic_entry_state_str[] = { + "TO_ADD", "TO_DEL", "ACTIVE" +}; + +int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + u32 total_ip_tbl_size, total_ue_num; + struct ubase_caps *ubase_caps; + + ubase_caps = ubase_get_dev_caps(unic_dev->comdev.adev); + total_ue_num = ubase_caps->total_ue_num; + total_ip_tbl_size = unic_dev->caps.total_ip_tbl_size; + + seq_printf(s, "total_ue_num\t: %u\n", total_ue_num); + seq_printf(s, "total_ip_tbl_size\t: %u\n", total_ip_tbl_size); + + return 0; +} + +int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_comm_addr_node *ip_node; + struct unic_addr_tbl *ip_tbl; + struct list_head *list; + u16 i = 0; + + seq_printf(s, "No %-43sSTATE IP_MASK\n", "IP_ADDR"); + + ip_tbl = &unic_dev->vport.addr_tbl; + list = &ip_tbl->ip_list; + spin_lock_bh(&ip_tbl->ip_list_lock); + list_for_each_entry(ip_node, list, node) { + seq_printf(s, "%-4d", i++); + seq_printf(s, "%-43pI6c", &ip_node->ip_addr.s6_addr); + seq_printf(s, "%-9s", unic_entry_state_str[ip_node->state]); + seq_printf(s, "%-3u", ip_node->node_mask); + + seq_puts(s, "\n"); + } + spin_unlock_bh(&ip_tbl->ip_list_lock); + + return 0; +} diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h new file mode 100644 index 000000000000..73ab85f4d5f3 --- /dev/null +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef __UNIC_ENTRY_DEBUGFS_H__ +#define __UNIC_ENTRY_DEBUGFS_H__ + +#include +#include + +int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data); +int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data); + +#endif /* _UNIC_ENTRY_DEBUGFS_H */ -- Gitee From f60626c8929b281877bd526665e9b150860bd084 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Thu, 20 Nov 2025 11:25:40 +0800 Subject: [PATCH 064/243] ub: hisi-ubus: Add compatibility interfaces for UB memory commit 56bce93e4090d11862d320e1025418bad27d1f45 openEuler Add southbound and northbound compatibility interfaces for UB memory. Signed-off-by: Jianquan Lin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/memory.c | 4 +- drivers/ub/ubus/memory.h | 4 +- drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 4 + drivers/ub/ubus/vendor/hisilicon/memory.c | 90 +++++++++++++++----- 4 files changed, 79 insertions(+), 23 deletions(-) diff --git a/drivers/ub/ubus/memory.c b/drivers/ub/ubus/memory.c index e7b3144db4bd..e7fcdf76f28e 100644 --- a/drivers/ub/ubus/memory.c +++ b/drivers/ub/ubus/memory.c @@ -114,7 +114,7 @@ void ub_mem_drain_start(u32 scna) } if (mem_device->ops && mem_device->ops->mem_drain_start) - mem_device->ops->mem_drain_start(mem_device); + mem_device->ops->mem_drain_start(ubc); else dev_warn(mem_device->dev, "ub mem_device ops mem_drain_start is null.\n"); } @@ -138,7 +138,7 @@ int ub_mem_drain_state(u32 scna) } if (mem_device->ops && mem_device->ops->mem_drain_state) - return mem_device->ops->mem_drain_state(mem_device); + return mem_device->ops->mem_drain_state(ubc); dev_warn(mem_device->dev, "ub memory decoder ops mem_drain_state is null.\n"); return 0; diff --git a/drivers/ub/ubus/memory.h b/drivers/ub/ubus/memory.h index 7c841b466f3e..f96f7a290616 100644 --- a/drivers/ub/ubus/memory.h +++ b/drivers/ub/ubus/memory.h @@ -29,8 +29,8 @@ struct ub_mem_ras_ctx { }; struct ub_mem_device_ops { - void (*mem_drain_start)(struct ub_mem_device *mem_device); - int (*mem_drain_state)(struct ub_mem_device *mem_device); + void (*mem_drain_start)(struct ub_bus_controller *ubc); + int (*mem_drain_state)(struct ub_bus_controller *ubc); bool (*mem_validate_pa)(struct ub_bus_controller *ubc, u64 pa_start, u64 pa_end, bool cacheable); diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 9aa3ba5521c1..092695e9d43c 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -12,6 +12,10 @@ #define MB_SIZE_OFFSET 20 #define HI_UBC_PRIVATE_DATA_RESERVED 3 #define HI_UBC_PRIVATE_DATA_RESERVED2 111 +#define UB_MEM_VERSION_INVALID 0xffffffff +#define UB_MEM_VERSION_0 0 +#define UB_MEM_VERSION_1 1 +#define UB_MEM_VERSION_2 2 struct hi_mem_pa_info { u64 decode_addr; diff --git a/drivers/ub/ubus/vendor/hisilicon/memory.c b/drivers/ub/ubus/vendor/hisilicon/memory.c index 4d4f80f847fc..fa9747171eea 100644 --- a/drivers/ub/ubus/vendor/hisilicon/memory.c +++ b/drivers/ub/ubus/vendor/hisilicon/memory.c @@ -24,9 +24,13 @@ #define MEM_EVENT_MAX_NUM 16 #define MAR_ERR_ADDR_COUNT 10 #define MAR_ERR_ADDR_SIZE 2 +#define MEM_DECODER_NUMBER_V1 5 +#define MEM_DECODER_NUMBER_V2 2 #define hpa_gen(addr_h, addr_l) (((u64)(addr_h) << 32) | (addr_l)) +static u8 ub_mem_num; + struct ub_mem_decoder { struct device *dev; struct ub_entity *uent; @@ -58,25 +62,26 @@ struct hi_get_ubmem_event_pld { static bool hi_mem_validate_pa(struct ub_bus_controller *ubc, u64 pa_start, u64 pa_end, bool cacheable); -static void 
hi_mem_drain_start(struct ub_mem_device *mem_device) +static void hi_mem_drain_start(struct ub_bus_controller *ubc) { - struct ub_mem_decoder *decoder, *data = mem_device->priv_data; + struct ub_mem_decoder *decoder, *data = ubc->mem_device->priv_data; if (!data) { - dev_err(mem_device->dev, "ubc mem_decoder is null.\n"); + dev_err(&ubc->dev, "ubc mem_decoder is null.\n"); return; } - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { decoder = &data[i]; writel(0, decoder->base_reg + DRAIN_ENABLE_REG_OFFSET); writel(1, decoder->base_reg + DRAIN_ENABLE_REG_OFFSET); } } -static int hi_mem_drain_state(struct ub_mem_device *mem_device) +static int hi_mem_drain_state(struct ub_bus_controller *ubc) { - struct ub_mem_decoder *decoder, *data = mem_device->priv_data; + struct ub_mem_decoder *decoder, *data = ubc->mem_device->priv_data; + struct ub_mem_device *mem_device = ubc->mem_device; int val = 0; if (!data) { @@ -84,7 +89,7 @@ static int hi_mem_drain_state(struct ub_mem_device *mem_device) return 0; } - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { decoder = &data[i]; val = readb(decoder->base_reg + DRAIN_STATE_REG_OFFSET) & 0x1; dev_info_ratelimited(decoder->dev, "ub memory decoder[%d] drain state, val=%d\n", @@ -246,16 +251,25 @@ static irqreturn_t hi_mem_ras_irq(int irq, void *context) return IRQ_WAKE_THREAD; } +static bool is_ub_mem_version_valid(struct ub_bus_controller *ubc) +{ + struct hi_ubc_private_data *data = ubc->data; + + if (!data || data->ub_mem_version == UB_MEM_VERSION_INVALID) + return false; + return true; +} + +static int hi_mem_decoder_create_one(struct ub_bus_controller *ubc, int index) { - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; struct ub_mem_decoder *decoder, *priv_data = ubc->mem_device->priv_data; + struct hi_ubc_private_data *data = ubc->data; - decoder = &priv_data[mar_id]; + decoder = &priv_data[index]; decoder->dev = &ubc->dev; decoder->uent = ubc->uent; - decoder->base_reg = ioremap(data->mem_pa_info[mar_id].decode_addr, + decoder->base_reg = ioremap(data->mem_pa_info[index].decode_addr, SZ_64); if (!decoder->base_reg) { dev_err(decoder->dev, "ub mem decoder base reg ioremap failed.\n"); @@ -265,24 +279,47 @@ static int hi_mem_decoder_create_one(struct ub_bus_controller *ubc, int mar_id) return 0; } -static void hi_mem_decoder_remove_one(struct ub_bus_controller *ubc, int mar_id) +static void hi_mem_decoder_remove_one(struct ub_bus_controller *ubc, int index) { struct ub_mem_decoder *priv_data = ubc->mem_device->priv_data; - iounmap(priv_data[mar_id].base_reg); + iounmap(priv_data[index].base_reg); +} + +static u8 get_mem_decoder_number(struct hi_ubc_private_data *data) +{ + switch (data->ub_mem_version) { + case UB_MEM_VERSION_0: + case UB_MEM_VERSION_1: + return MEM_DECODER_NUMBER_V1; + case UB_MEM_VERSION_2: + return MEM_DECODER_NUMBER_V2; + default: + return 0; + } } int hi_mem_decoder_create(struct ub_bus_controller *ubc) { struct ub_mem_device *mem_device; + struct hi_ubc_private_data *data; void *priv_data; int ret; + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to create mem decoder\n"); + return 0; + } + + data = ubc->data; + ub_mem_num = get_mem_decoder_number(data); + if (!ub_mem_num) + return -EINVAL; + mem_device = kzalloc(sizeof(*mem_device), GFP_KERNEL); if (!mem_device) return -ENOMEM; - priv_data = kcalloc(MEM_INFO_NUM, sizeof(struct ub_mem_decoder), + 
priv_data = kcalloc(ub_mem_num, sizeof(struct ub_mem_decoder), GFP_KERNEL); if (!priv_data) { kfree(mem_device); @@ -296,7 +333,7 @@ int hi_mem_decoder_create(struct ub_bus_controller *ubc) mem_device->priv_data = priv_data; ubc->mem_device = mem_device; - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { ret = hi_mem_decoder_create_one(ubc, i); if (ret) { dev_err(&ubc->dev, "hi mem create decoder %d failed\n", i); @@ -318,7 +355,12 @@ void hi_mem_decoder_remove(struct ub_bus_controller *ubc) if (!ubc->mem_device) return; - for (int i = 0; i < MEM_INFO_NUM; i++) + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to remove mem decoder\n"); + return; + } + + for (int i = 0; i < ub_mem_num; i++) hi_mem_decoder_remove_one(ubc, i); kfree(ubc->mem_device->priv_data); @@ -333,7 +375,12 @@ void hi_register_ubmem_irq(struct ub_bus_controller *ubc) u32 usi_idx; if (!ubc->mem_device) { - pr_err("mem device is NULL!\n"); + pr_err("register ubmem irq failed, mem device is NULL!\n"); + return; + } + + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to register_ubmem_irq\n"); return; } @@ -371,6 +418,11 @@ void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc) return; } + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to unregister_ubmem_irq\n"); + return; + } + irq_num = ubc->mem_device->ubmem_irq_num; if (irq_num < 0) return; @@ -404,8 +456,8 @@ static bool hi_mem_validate_pa(struct ub_bus_controller *ubc, return false; } - data = (struct hi_ubc_private_data *)ubc->data; - for (u16 i = 0; i < MEM_INFO_NUM; i++) { + data = ubc->data; + for (u16 i = 0; i < ub_mem_num; i++) { if (ub_hpa_valid(pa_start, pa_end, data->mem_pa_info[i].cc_base_addr, data->mem_pa_info[i].cc_base_size) && -- Gitee From 838b18a08ac744b6145e187a4dbdf51ac8906ce5 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:11:22 +0800 Subject: [PATCH 065/243] ub: udma: Support query jfs and jetty context from hw. commit ba0d9456a61f3f559ea978da1639302deb0182c7 openEuler This patch adds the ability to query jfs and jetty context from hardware. 
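All of the context queries below share a single mailbox pattern; a condensed sketch under the same API (error handling trimmed, the example_ name is hypothetical):

static int example_read_jetty_state(struct udma_dev *udma_dev,
				    u32 jetty_id, u32 *state)
{
	struct ubase_mbx_attr mbox_attr = {};
	struct ubase_cmd_mailbox *mailbox;
	struct udma_jetty_ctx *ctx;

	/* tag selects the context index, op selects the context type */
	mbox_attr.tag = jetty_id;
	mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT;
	mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr);
	if (!mailbox)
		return -ENOMEM;

	/* the mailbox buffer holds the HW-written context snapshot */
	ctx = (struct udma_jetty_ctx *)mailbox->buf;
	*state = ctx->state;

	udma_free_cmd_mailbox(udma_dev, mailbox);

	return 0;
}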
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 281 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_dfx.h | 4 + drivers/ub/urma/hw/udma/udma_jetty.h | 143 ++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 50 +++++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 480 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index f6920f879eea..5580fc474b9d 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -13,6 +13,172 @@ bool dfx_switch; +static int to_udma_trans_mode(uint32_t type, struct udma_dev *dev, + enum ubcore_transport_mode *trans_mode) +{ + switch (type) { + case JETTY_UM: + *trans_mode = UBCORE_TP_UM; + break; + case JETTY_RC: + *trans_mode = UBCORE_TP_RC; + break; + case JETTY_RM: + *trans_mode = UBCORE_TP_RM; + break; + default: + dev_err(dev->dev, "transport mode error, type = %u.\n", type); + return -EINVAL; + } + + return 0; +} + +static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, + enum ubcore_jetty_state *jetty_state) +{ + switch (state) { + case JETTY_RESET: + *jetty_state = UBCORE_JETTY_STATE_RESET; + break; + case JETTY_READY: + *jetty_state = UBCORE_JETTY_STATE_READY; + break; + case JETTY_ERROR: + *jetty_state = UBCORE_JETTY_STATE_ERROR; + break; + case JETTY_SUSPEND: + *jetty_state = UBCORE_JETTY_STATE_SUSPENDED; + break; + default: + dev_err(dev->dev, "JFS context state error, state = %u.\n", state); + return -EINVAL; + } + + return 0; +} + +int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jfs_ctx; + uint32_t wqe_bb_depth; + int ret; + + mbox_attr.tag = jfs->jfs_id.id; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfs_ctx = (struct udma_jetty_ctx *)mailbox->buf; + + ret = to_udma_jetty_ctx_state(jfs_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jfs_ctx; + + cfg->priority = jfs_ctx->sl; + cfg->flag = jfs->jfs_cfg.flag; + cfg->max_sge = jfs->jfs_cfg.max_sge; + cfg->max_rsge = jfs->jfs_cfg.max_rsge; + cfg->err_timeout = jfs_ctx->ta_timeout; + wqe_bb_depth = 1 << jfs_ctx->sqe_bb_shift; + cfg->depth = wqe_bb_depth / udma_jfs->sq.sqe_bb_cnt; + cfg->rnr_retry = jfs_ctx->rnr_retry_num; + cfg->max_inline_data = jfs->jfs_cfg.max_inline_data; + + ret = to_udma_trans_mode(jfs_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jfs_ctx; + + if (udma_jfs->sq.buf.kva) { + cfg->jfc = jfs->jfs_cfg.jfc; + cfg->eid_index = jfs_ctx->seid_idx; + } + +err_jfs_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +int udma_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct ubase_mbx_attr jfr_mbox_attr = {}; + struct ubase_cmd_mailbox *jetty_mailbox; + struct ubase_cmd_mailbox *jfr_mailbox; + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jetty_ctx *jetty_ctx; + struct udma_jfr_ctx *jfr_ctx; + uint32_t wqe_bb_depth; + int ret; + + mbox_attr.tag = jetty->jetty_id.id; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + jetty_mailbox = 
udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!jetty_mailbox) + return -ENOMEM; + + jfr_mbox_attr.tag = udma_jetty->jfr->ubcore_jfr.jfr_id.id; + jfr_mbox_attr.op = UDMA_CMD_QUERY_JFR_CONTEXT; + jfr_mailbox = udma_mailbox_query_ctx(udma_dev, &jfr_mbox_attr); + if (!jfr_mailbox) { + udma_free_cmd_mailbox(udma_dev, jetty_mailbox); + return -ENOMEM; + } + + jetty_ctx = (struct udma_jetty_ctx *)jetty_mailbox->buf; + jfr_ctx = (struct udma_jfr_ctx *)jfr_mailbox->buf; + + wqe_bb_depth = 1 << jetty_ctx->sqe_bb_shift; + cfg->id = jetty->jetty_id.id; + cfg->jfs_depth = wqe_bb_depth / udma_jetty->sq.sqe_bb_cnt; + cfg->jfr_depth = 1 << jfr_ctx->rqe_shift; + cfg->flag = jetty->jetty_cfg.flag; + cfg->max_send_sge = jetty->jetty_cfg.max_send_sge; + cfg->max_send_rsge = jetty->jetty_cfg.max_send_rsge; + cfg->max_recv_sge = jetty->jetty_cfg.max_recv_sge; + cfg->max_inline_data = jetty->jetty_cfg.max_inline_data; + cfg->priority = jetty_ctx->sl; + cfg->rnr_retry = jetty_ctx->rnr_retry_num; + cfg->err_timeout = jetty_ctx->ta_timeout; + cfg->min_rnr_timer = jetty->jetty_cfg.min_rnr_timer; + + ret = to_udma_trans_mode(jetty_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jetty_ctx; + + cfg->token_value.token = 0; + + ret = to_udma_jetty_ctx_state(jetty_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jetty_ctx; + + attr->rx_threshold = to_udma_rx_threshold(jfr_ctx->limit_wl); + + if (udma_jetty->sq.buf.kva) { + cfg->eid_index = jetty_ctx->seid_idx; + cfg->send_jfc = jetty->jetty_cfg.send_jfc; + cfg->recv_jfc = jetty->jetty_cfg.recv_jfc; + cfg->jfr = jetty->jetty_cfg.jfr; + cfg->jetty_grp = jetty->jetty_cfg.jetty_grp; + } + +err_jetty_ctx: + jfr_ctx->token_value = 0; + udma_free_cmd_mailbox(udma_dev, jfr_mailbox); + udma_free_cmd_mailbox(udma_dev, jetty_mailbox); + + return ret; +} + static int udma_query_res_list(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, struct ubcore_res_val *val, @@ -139,6 +305,119 @@ static int udma_query_res_rc(struct udma_dev *udma_dev, return 0; } +static int udma_query_res_jetty(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_val *res_jetty = (struct ubcore_res_jetty_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + enum ubcore_jetty_state jetty_state; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jettyc; + struct udma_dfx_jetty *jetty; + int ret; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jetty, val, "jetty"); + + read_lock(&udma_dev->dfx_info->jetty.rwlock); + jetty = (struct udma_dfx_jetty *)xa_load(&udma_dev->dfx_info->jetty.table, key->key); + if (!jetty) { + read_unlock(&udma_dev->dfx_info->jetty.rwlock); + dev_err(udma_dev->dev, "failed to query jetty, jetty_id = %u.\n", + key->key); + return -EINVAL; + } + res_jetty->jfs_depth = jetty->jfs_depth; + read_unlock(&udma_dev->dfx_info->jetty.rwlock); + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jettyc = (struct udma_jetty_ctx *)mailbox->buf; + res_jetty->jetty_id = key->key; + + ret = to_udma_jetty_ctx_state(jettyc->state, udma_dev, &jetty_state); + if (ret) + goto err_res_jetty_ctx; + + res_jetty->state = jetty_state; + res_jetty->recv_jfc_id = jettyc->rx_jfcn; + res_jetty->send_jfc_id = jettyc->tx_jfcn; + res_jetty->priority = jettyc->sl; + res_jetty->jfr_id = jettyc->jfrn_l | + jettyc->jfrn_h << JETTY_CTX_JFRN_H_OFFSET; + 
jettyc->sqe_base_addr_l = 0; + jettyc->sqe_base_addr_h = 0; + jettyc->user_data_l = 0; + jettyc->user_data_h = 0; + + udma_dfx_ctx_print(udma_dev, "Jetty", key->key, sizeof(*jettyc) / sizeof(uint32_t), + (uint32_t *)jettyc); +err_res_jetty_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +static int udma_query_res_jfs(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfs_val *res_jfs = (struct ubcore_res_jfs_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + enum ubcore_jetty_state jfs_state; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jfsc; + struct udma_dfx_jfs *jfs; + int ret; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfs, val, "jfs"); + + read_lock(&udma_dev->dfx_info->jfs.rwlock); + jfs = (struct udma_dfx_jfs *)xa_load(&udma_dev->dfx_info->jfs.table, key->key); + if (!jfs) { + read_unlock(&udma_dev->dfx_info->jfs.rwlock); + dev_err(udma_dev->dev, "failed to query jfs, jfs_id = %u.\n", + key->key); + return -EINVAL; + } + res_jfs->depth = jfs->depth; + read_unlock(&udma_dev->dfx_info->jfs.rwlock); + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfsc = (struct udma_jetty_ctx *)mailbox->buf; + res_jfs->jfs_id = key->key; + + ret = to_udma_jetty_ctx_state(jfsc->state, udma_dev, &jfs_state); + if (ret) + goto err_res_jetty_ctx; + + res_jfs->state = jfs_state; + res_jfs->priority = jfsc->sl; + res_jfs->jfc_id = jfsc->tx_jfcn; + jfsc->sqe_base_addr_l = 0; + jfsc->sqe_base_addr_h = 0; + jfsc->user_data_l = 0; + jfsc->user_data_h = 0; + + udma_dfx_ctx_print(udma_dev, "JFS", key->key, sizeof(*jfsc) / sizeof(uint32_t), + (uint32_t *)jfsc); +err_res_jetty_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + static int udma_query_res_seg(struct udma_dev *udma_dev, struct ubcore_res_key *key, struct ubcore_res_val *val) { @@ -208,6 +487,8 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, static udma_query_res_handler g_udma_query_res_handlers[] = { [0] = NULL, + [UBCORE_RES_KEY_JFS] = udma_query_res_jfs, + [UBCORE_RES_KEY_JETTY] = udma_query_res_jetty, [UBCORE_RES_KEY_RC] = udma_query_res_rc, [UBCORE_RES_KEY_SEG] = udma_query_res_seg, [UBCORE_RES_KEY_DEV_TA] = udma_query_res_dev_ta, diff --git a/drivers/ub/urma/hw/udma/udma_dfx.h b/drivers/ub/urma/hw/udma/udma_dfx.h index 92c0db1aa744..febfde3b84ec 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.h +++ b/drivers/ub/urma/hw/udma/udma_dfx.h @@ -47,6 +47,10 @@ static inline uint32_t to_udma_rx_threshold(uint32_t limit_wl) } } +int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); +int udma_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val); int udma_dfx_init(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 00a3c41b39b6..5ee8f8f4403b 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -6,6 +6,16 @@ #include "udma_common.h" +#define JETTY_CTX_JFRN_H_OFFSET 12 + +enum jetty_state { + JETTY_RESET, + JETTY_READY, + JETTY_ERROR, + JETTY_SUSPEND, + STATE_NUM, +}; + struct udma_jetty { struct ubcore_jetty ubcore_jetty; struct udma_jfr *jfr; 
@@ -17,6 +27,139 @@ struct udma_jetty { bool ue_rx_closed; }; +enum jetty_type { + JETTY_RAW_OR_NIC, + JETTY_UM, + JETTY_RC, + JETTY_RM, + JETTY_TYPE_RESERVED, +}; + +struct udma_jetty_ctx { + /* DW0 */ + uint32_t ta_timeout : 2; + uint32_t rnr_retry_num : 3; + uint32_t type : 3; + uint32_t sqe_bb_shift : 4; + uint32_t sl : 4; + uint32_t state : 3; + uint32_t jfs_mode : 1; + uint32_t sqe_token_id_l : 12; + /* DW1 */ + uint32_t sqe_token_id_h : 8; + uint32_t err_mode : 1; + uint32_t rsv : 1; + uint32_t cmp_odr : 1; + uint32_t rsv1 : 1; + uint32_t sqe_base_addr_l : 20; + /* DW2 */ + uint32_t sqe_base_addr_h; + /* DW3 */ + uint32_t rsv2; + /* DW4 */ + uint32_t tx_jfcn : 20; + uint32_t jfrn_l : 12; + /* DW5 */ + uint32_t jfrn_h : 8; + uint32_t rsv3 : 4; + uint32_t rx_jfcn : 20; + /* DW6 */ + uint32_t seid_idx : 10; + uint32_t pi_type : 1; + uint32_t rsv4 : 21; + /* DW7 */ + uint32_t user_data_l; + /* DW8 */ + uint32_t user_data_h; + /* DW9 */ + uint32_t sqe_position : 1; + uint32_t sqe_pld_position : 1; + uint32_t sqe_pld_tokenid : 20; + uint32_t rsv5 : 10; + /* DW10 */ + uint32_t tpn : 24; + uint32_t rsv6 : 8; + /* DW11 */ + uint32_t rmt_eid : 20; + uint32_t rsv7 : 12; + /* DW12 */ + uint32_t rmt_tokenid : 20; + uint32_t rsv8 : 12; + /* DW13 - DW15 */ + uint32_t rsv8_1[3]; + /* DW16 */ + uint32_t next_send_ssn : 16; + uint32_t src_order_wqe : 16; + /* DW17 */ + uint32_t src_order_ssn : 16; + uint32_t src_order_sgme_cnt : 16; + /* DW18 */ + uint32_t src_order_sgme_send_cnt : 16; + uint32_t CI : 16; + /* DW19 */ + uint32_t wqe_sgmt_send_cnt : 20; + uint32_t src_order_wqebb_num : 4; + uint32_t src_order_wqe_vld : 1; + uint32_t no_wqe_send_cnt : 4; + uint32_t so_lp_vld : 1; + uint32_t fence_lp_vld : 1; + uint32_t strong_fence_lp_vld : 1; + /* DW20 */ + uint32_t PI : 16; + uint32_t sq_db_doing : 1; + uint32_t ost_rce_credit : 15; + /* DW21 */ + uint32_t sq_db_retrying : 1; + uint32_t wmtp_rsv0 : 31; + /* DW22 */ + uint32_t wait_ack_timeout : 1; + uint32_t wait_rnr_timeout : 1; + uint32_t cqe_ie : 1; + uint32_t cqe_sz : 1; + uint32_t wml_rsv0 : 28; + /* DW23 */ + uint32_t wml_rsv1 : 32; + /* DW24 */ + uint32_t next_rcv_ssn : 16; + uint32_t next_cpl_bb_idx : 16; + /* DW25 */ + uint32_t next_cpl_sgmt_num : 20; + uint32_t we_rsv0 : 12; + /* DW26 */ + uint32_t next_cpl_bb_num : 4; + uint32_t next_cpl_cqe_en : 1; + uint32_t next_cpl_info_vld : 1; + uint32_t rpting_cqe : 1; + uint32_t not_rpt_cqe : 1; + uint32_t flush_ssn : 16; + uint32_t flush_ssn_vld : 1; + uint32_t flush_vld : 1; + uint32_t flush_cqe_done : 1; + uint32_t we_rsv1 : 5; + /* DW27 */ + uint32_t rcved_cont_ssn_num : 20; + uint32_t we_rsv2 : 12; + /* DW28 */ + uint32_t sq_timer; + /* DW29 */ + uint32_t rnr_cnt : 3; + uint32_t abt_ssn : 16; + uint32_t abt_ssn_vld : 1; + uint32_t taack_timeout_flag : 1; + uint32_t we_rsv3 : 9; + uint32_t err_type_l : 2; + /* DW30 */ + uint32_t err_type_h : 7; + uint32_t sq_flush_ssn : 16; + uint32_t we_rsv4 : 9; + /* DW31 */ + uint32_t avail_sgmt_ost : 10; + uint32_t read_op_cnt : 10; + uint32_t we_rsv5 : 12; + /* DW32 - DW63 */ + uint32_t taack_nack_bm[32]; +}; + static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index cb1ecbaf3572..858e36d3a27a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -30,6 +30,56 @@ struct udma_jfr { struct completion ae_comp; }; +struct udma_jfr_ctx { + /* DW0 */ + 
uint32_t state : 2; + uint32_t limit_wl : 2; + uint32_t rqe_size_shift : 3; + uint32_t token_en : 1; + uint32_t rqe_shift : 4; + uint32_t rnr_timer : 5; + uint32_t record_db_en : 1; + uint32_t rqe_token_id_l : 14; + /* DW1 */ + uint32_t rqe_token_id_h : 6; + uint32_t type : 3; + uint32_t rsv : 3; + uint32_t rqe_base_addr_l : 20; + /* DW2 */ + uint32_t rqe_base_addr_h; + /* DW3 */ + uint32_t rqe_position : 1; + uint32_t pld_position : 1; + uint32_t pld_token_id : 20; + uint32_t rsv1 : 10; + /* DW4 */ + uint32_t token_value; + /* DW5 */ + uint32_t user_data_l; + /* DW6 */ + uint32_t user_data_h; + /* DW7 */ + uint32_t pi : 16; + uint32_t ci : 16; + /* DW8 */ + uint32_t idx_que_addr_l; + /* DW9 */ + uint32_t idx_que_addr_h : 20; + uint32_t jfcn_l : 12; + /* DW10 */ + uint32_t jfcn_h : 8; + uint32_t record_db_addr_l : 24; + /* DW11 */ + uint32_t record_db_addr_m; + /* DW12 */ + uint32_t record_db_addr_h : 2; + uint32_t cqeie : 1; + uint32_t cqesz : 1; + uint32_t rsv2 : 28; + /* padding */ + uint32_t reserved[3]; +}; + static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *jfr) { return container_of(jfr, struct udma_jfr, ubcore_jfr); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5bf8631260a5..ce6b9db0ea06 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -173,6 +173,8 @@ static struct ubcore_ops g_dev_ops = { .unregister_seg = udma_unregister_seg, .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, + .query_jfs = udma_query_jfs, + .query_jetty = udma_query_jetty, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 72973e44ce99e217044d6efdcfc2b283cde4a173 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:13:06 +0800 Subject: [PATCH 066/243] ub: udma: Support query jfr context from hw. commit b63f4fc5c01bd57625360a6580b1cfdcc0cf833c openEuler This patch adds the ability to query jfr context from hardware. 
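As a usage note, a minimal sketch of reading the JFR state back through
the new op; check_jfr_ready() is a hypothetical helper and not part of
this patch. Note that to_udma_jfr_ctx_state() deliberately rejects the
hardware SUSPEND state, since ubcore defines no suspended state for a
JFR:

    /* Hedged sketch: assumes a valid jfr already created through ubcore. */
    static bool check_jfr_ready(struct ubcore_jfr *jfr)
    {
            struct ubcore_jfr_cfg cfg = {};
            struct ubcore_jfr_attr attr = {};

            /* Reads the JFR context back from hardware via the mailbox. */
            if (udma_query_jfr(jfr, &cfg, &attr))
                    return false;

            return attr.state == UBCORE_JFR_STATE_READY;
    }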
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 130 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_dfx.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 135 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index 5580fc474b9d..96e6185c437f 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -34,6 +34,27 @@ static int to_udma_trans_mode(uint32_t type, struct udma_dev *dev, return 0; } +static int to_udma_jfr_ctx_state(uint32_t state, struct udma_dev *dev, + enum ubcore_jfr_state *jfr_state) +{ + switch (state) { + case JETTY_RESET: + *jfr_state = UBCORE_JFR_STATE_RESET; + break; + case JETTY_READY: + *jfr_state = UBCORE_JFR_STATE_READY; + break; + case JETTY_ERROR: + *jfr_state = UBCORE_JFR_STATE_ERROR; + break; + default: + dev_err(dev->dev, "JFR context state error, state = %u.\n", state); + return -EINVAL; + } + + return 0; +} + static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, enum ubcore_jetty_state *jetty_state) { @@ -58,6 +79,54 @@ static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, return 0; } +int udma_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfr_ctx *jfr_ctx; + int ret; + + mbox_attr.tag = jfr->jfr_id.id; + mbox_attr.op = UDMA_CMD_QUERY_JFR_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfr_ctx = (struct udma_jfr_ctx *)mailbox->buf; + + attr->rx_threshold = to_udma_rx_threshold(jfr_ctx->limit_wl); + + ret = to_udma_jfr_ctx_state(jfr_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jfr_ctx; + + cfg->id = jfr->jfr_id.id; + cfg->flag = jfr->jfr_cfg.flag; + cfg->max_sge = 1 << jfr_ctx->rqe_size_shift; + cfg->depth = 1 << jfr_ctx->rqe_shift; + cfg->token_value.token = 0; + cfg->flag.bs.token_policy = UBCORE_TOKEN_NONE; + cfg->min_rnr_timer = jfr_ctx->rnr_timer; + + ret = to_udma_trans_mode(jfr_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jfr_ctx; + + if (udma_jfr->rq.buf.kva) { + cfg->eid_index = jfr->jfr_cfg.eid_index; + cfg->jfc = jfr->jfr_cfg.jfc; + } + +err_jfr_ctx: + jfr_ctx->token_value = 0; + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, struct ubcore_jfs_attr *attr) { @@ -418,6 +487,66 @@ static int udma_query_res_jfs(struct udma_dev *udma_dev, return ret; } +static int udma_query_res_jfr(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfr_val *res_jfr = (struct ubcore_res_jfr_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + enum ubcore_jfr_state jfr_state; + struct udma_jfr_ctx *jfrc; + uint32_t *jfr_id; + int ret; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfr, val, "jfr"); + + jfr_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jfr.table, key->key); + if (!jfr_id) { + dev_err(udma_dev->dev, "failed to query jfr, jfr_id = %u.\n", + key->key); + return -EINVAL; + } + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFR_CONTEXT; + mailbox = 
udma_mailbox_query_ctx(udma_dev, &mbox_attr);
+	if (!mailbox)
+		return -ENOMEM;
+
+	jfrc = (struct udma_jfr_ctx *)mailbox->buf;
+	res_jfr->jfr_id = key->key;
+
+	ret = to_udma_jfr_ctx_state(jfrc->state, udma_dev, &jfr_state);
+	if (ret)
+		goto err_res_jfr_ctx;
+
+	res_jfr->state = jfr_state;
+	res_jfr->depth = 1 << jfrc->rqe_shift;
+	res_jfr->jfc_id = jfrc->jfcn_l |
+			  jfrc->jfcn_h << JFR_JFCN_H_OFFSET;
+	jfrc->rqe_base_addr_l = 0;
+	jfrc->rqe_base_addr_h = 0;
+	jfrc->token_en = 0;
+	jfrc->token_value = 0;
+	jfrc->user_data_l = 0;
+	jfrc->user_data_h = 0;
+	jfrc->idx_que_addr_l = 0;
+	jfrc->idx_que_addr_h = 0;
+	jfrc->record_db_addr_l = 0;
+	jfrc->record_db_addr_m = 0;
+	jfrc->record_db_addr_h = 0;
+
+	udma_dfx_ctx_print(udma_dev, "JFR", key->key, sizeof(*jfrc) / sizeof(uint32_t),
+			   (uint32_t *)jfrc);
+err_res_jfr_ctx:
+	jfrc->token_value = 0;
+	udma_free_cmd_mailbox(udma_dev, mailbox);
+
+	return ret;
+}
+
 static int udma_query_res_seg(struct udma_dev *udma_dev, struct ubcore_res_key *key,
			       struct ubcore_res_val *val)
 {
@@ -488,6 +617,7 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev,
 static udma_query_res_handler g_udma_query_res_handlers[] = {
 	[0] = NULL,
 	[UBCORE_RES_KEY_JFS] = udma_query_res_jfs,
+	[UBCORE_RES_KEY_JFR] = udma_query_res_jfr,
 	[UBCORE_RES_KEY_JETTY] = udma_query_res_jetty,
 	[UBCORE_RES_KEY_RC] = udma_query_res_rc,
 	[UBCORE_RES_KEY_SEG] = udma_query_res_seg,
diff --git a/drivers/ub/urma/hw/udma/udma_dfx.h b/drivers/ub/urma/hw/udma/udma_dfx.h
index febfde3b84ec..dcdf23646c1c 100644
--- a/drivers/ub/urma/hw/udma/udma_dfx.h
+++ b/drivers/ub/urma/hw/udma/udma_dfx.h
@@ -47,6 +47,8 @@ static inline uint32_t to_udma_rx_threshold(uint32_t limit_wl)
 	}
 }
 
+int udma_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg,
+		   struct ubcore_jfr_attr *attr);
 int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg,
		    struct ubcore_jfs_attr *attr);
 int udma_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg,
diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h
index 858e36d3a27a..bffb68b3cdbd 100644
--- a/drivers/ub/urma/hw/udma/udma_jfr.h
+++ b/drivers/ub/urma/hw/udma/udma_jfr.h
@@ -8,6 +8,8 @@
 #include "udma_ctx.h"
 #include "udma_common.h"
 
+#define JFR_JFCN_H_OFFSET 12U
+
 struct udma_jfr_idx_que {
 	struct udma_buf buf;
 	struct udma_table jfr_idx_table;
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index ce6b9db0ea06..dcf0ae79d583 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -174,6 +174,7 @@ static struct ubcore_ops g_dev_ops = {
 	.import_seg = udma_import_seg,
 	.unimport_seg = udma_unimport_seg,
 	.query_jfs = udma_query_jfs,
+	.query_jfr = udma_query_jfr,
 	.query_jetty = udma_query_jetty,
 };
 
-- 
Gitee

From ee8119871485bad47869f6ddcda51797f0d464aa Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Wed, 20 Aug 2025 10:15:06 +0800
Subject: [PATCH 067/243] ub: udma: Support query table item from hw.

commit a49e6f591920eda270ee67aa7c7a313131903714 openEuler

This patch adds the ability to query table items from hardware, such as
the jfc context and the jetty group context.
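For illustration, a minimal sketch of how the jetty group context
encodes membership, which is what udma_query_res_jetty_grp() walks;
count_group_members() is a hypothetical helper and not part of this
patch:

    /* Hedged sketch: bit i of 'valid' marks jetty (start_jetty_id + i). */
    static uint32_t count_group_members(const struct udma_jetty_grp_ctx *ctx)
    {
            uint32_t cnt = 0;
            int i;

            for (i = 0; i < NUM_JETTY_PER_GROUP; i++)
                    if (ctx->valid & BIT(i))
                            cnt++;

            return cnt;
    }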
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 111 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 7 ++ drivers/ub/urma/hw/udma/udma_jfc.h | 71 +++++++++++++++++ 3 files changed, 189 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index 96e6185c437f..7d186d85531d 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -432,6 +432,110 @@ static int udma_query_res_jetty(struct udma_dev *udma_dev, return ret; } +static int udma_query_res_jetty_grp(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_group_val *res_jetty_grp = + (struct ubcore_res_jetty_group_val *)val->addr; + struct udma_jetty_grp_ctx *jetty_grpc; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + uint32_t *jetty_grp_id; + int i; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jetty_grp, + val, "jetty_grp"); + + jetty_grp_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jetty_grp.table, key->key); + if (!jetty_grp_id) { + dev_err(udma_dev->dev, "failed to query jetty grp, jetty_grp_id = %u.\n", + key->key); + return -EINVAL; + } + + res_jetty_grp->jetty_cnt = 0; + res_jetty_grp->jetty_list = vmalloc(sizeof(*res_jetty_grp->jetty_list) * + NUM_JETTY_PER_GROUP); + if (!res_jetty_grp->jetty_list) + return -ENOMEM; + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JETTY_GROUP_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) { + vfree(res_jetty_grp->jetty_list); + res_jetty_grp->jetty_list = NULL; + return -ENOMEM; + } + + jetty_grpc = (struct udma_jetty_grp_ctx *)mailbox->buf; + for (i = 0; i < NUM_JETTY_PER_GROUP; ++i) { + if (jetty_grpc->valid & BIT(i)) { + res_jetty_grp->jetty_list[res_jetty_grp->jetty_cnt] = + jetty_grpc->start_jetty_id + i; + ++res_jetty_grp->jetty_cnt; + } + } + + if (res_jetty_grp->jetty_cnt == 0) { + vfree(res_jetty_grp->jetty_list); + res_jetty_grp->jetty_list = NULL; + } + + udma_dfx_ctx_print(udma_dev, "Jetty_grp", key->key, sizeof(*jetty_grpc) / sizeof(uint32_t), + (uint32_t *)jetty_grpc); + udma_free_cmd_mailbox(udma_dev, mailbox); + + return 0; +} + +static int udma_query_res_jfc(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfc_val *res_jfc = (struct ubcore_res_jfc_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *jfcc; + uint32_t *jfc_id; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfc, val, "jfc"); + + jfc_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jfc.table, key->key); + if (!jfc_id) { + dev_err(udma_dev->dev, "failed to query jfc, jfc_id = %u.\n", + key->key); + return -EINVAL; + } + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFC_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfcc = (struct udma_jfc_ctx *)mailbox->buf; + res_jfc->jfc_id = key->key; + res_jfc->state = jfcc->state; + res_jfc->depth = 1 << (jfcc->shift + UDMA_JFC_DEPTH_SHIFT_BASE); + jfcc->cqe_va_l = 0; + jfcc->cqe_va_h = 0; + jfcc->token_en = 0; + jfcc->cqe_token_value = 0; + jfcc->record_db_addr_l = 0; + jfcc->record_db_addr_h = 0; + jfcc->remote_token_value = 0; + + udma_dfx_ctx_print(udma_dev, "JFC", key->key, sizeof(*jfcc) / sizeof(uint32_t), 
+ (uint32_t *)jfcc); + udma_free_cmd_mailbox(udma_dev, mailbox); + + return 0; +} + static int udma_query_res_jfs(struct udma_dev *udma_dev, struct ubcore_res_key *key, struct ubcore_res_val *val) @@ -616,12 +720,19 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, static udma_query_res_handler g_udma_query_res_handlers[] = { [0] = NULL, + [UBCORE_RES_KEY_VTP] = NULL, + [UBCORE_RES_KEY_TP] = NULL, + [UBCORE_RES_KEY_TPG] = NULL, + [UBCORE_RES_KEY_UTP] = NULL, [UBCORE_RES_KEY_JFS] = udma_query_res_jfs, [UBCORE_RES_KEY_JFR] = udma_query_res_jfr, [UBCORE_RES_KEY_JETTY] = udma_query_res_jetty, + [UBCORE_RES_KEY_JETTY_GROUP] = udma_query_res_jetty_grp, + [UBCORE_RES_KEY_JFC] = udma_query_res_jfc, [UBCORE_RES_KEY_RC] = udma_query_res_rc, [UBCORE_RES_KEY_SEG] = udma_query_res_seg, [UBCORE_RES_KEY_DEV_TA] = udma_query_res_dev_ta, + [UBCORE_RES_KEY_DEV_TP] = NULL, }; int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5ee8f8f4403b..e1c578783a48 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -160,6 +160,13 @@ struct udma_jetty_ctx { uint32_t taack_nack_bm[32]; }; +struct udma_jetty_grp_ctx { + uint32_t start_jetty_id : 16; + uint32_t rsv : 11; + uint32_t jetty_number : 5; + uint32_t valid; +}; + static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index e225efdece4c..8cb7271739d7 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -7,6 +7,8 @@ #include "udma_dev.h" #include "udma_ctx.h" +#define UDMA_JFC_DEPTH_SHIFT_BASE 6 + struct udma_jfc { struct ubcore_jfc base; uint32_t jfcn; @@ -27,6 +29,75 @@ struct udma_jfc { uint32_t cq_shift; }; +struct udma_jfc_ctx { + /* DW0 */ + uint32_t state : 2; + uint32_t arm_st : 2; + uint32_t shift : 4; + uint32_t cqe_size : 1; + uint32_t record_db_en : 1; + uint32_t jfc_type : 1; + uint32_t inline_en : 1; + uint32_t cqe_va_l : 20; + /* DW1 */ + uint32_t cqe_va_h; + /* DW2 */ + uint32_t cqe_token_id : 20; + uint32_t cq_cnt_mode : 1; + uint32_t rsv0 : 3; + uint32_t ceqn : 8; + /* DW3 */ + uint32_t cqe_token_value : 24; + uint32_t rsv1 : 8; + /* DW4 */ + uint32_t pi : 22; + uint32_t cqe_coalesce_cnt : 10; + /* DW5 */ + uint32_t ci : 22; + uint32_t cqe_coalesce_period : 3; + uint32_t rsv2 : 7; + /* DW6 */ + uint32_t record_db_addr_l; + /* DW7 */ + uint32_t record_db_addr_h : 26; + uint32_t rsv3 : 6; + /* DW8 */ + uint32_t push_usi_en : 1; + uint32_t push_cqe_en : 1; + uint32_t token_en : 1; + uint32_t rsv4 : 9; + uint32_t tpn : 20; + /* DW9 ~ DW12 */ + uint32_t rmt_eid[4]; + /* DW13 */ + uint32_t seid_idx : 10; + uint32_t rmt_token_id : 20; + uint32_t rsv5 : 2; + /* DW14 */ + uint32_t remote_token_value; + /* DW15 */ + uint32_t int_vector : 16; + uint32_t stars_en : 1; + uint32_t rsv6 : 15; + /* DW16 */ + uint32_t poll : 1; + uint32_t cqe_report_timer : 24; + uint32_t se : 1; + uint32_t arm_sn : 2; + uint32_t rsv7 : 4; + /* DW17 */ + uint32_t se_cqe_idx : 24; + uint32_t rsv8 : 8; + /* DW18 */ + uint32_t wr_cqe_idx : 22; + uint32_t rsv9 : 10; + /* DW19 */ + uint32_t cqe_cnt : 24; + uint32_t rsv10 : 8; + /* DW20 ~ DW31 */ + uint32_t rsv11[12]; +}; + static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) { return container_of(jfc, struct udma_jfc, base); -- Gitee From 
20e8738493c8aeb21f8de8ed66422e492c950e9b Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Wed, 20 Aug 2025 10:16:51 +0800
Subject: [PATCH 068/243] ub: udma: Support create jfs.

commit 30b22bb34e1059055f57a01f01520afe1b104545 openEuler

This patch adds the ability to create jfs. During the creation process,
the driver builds the jfs context and sends it to the hardware.

Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/Makefile      |   2 +-
 drivers/ub/urma/hw/udma/udma_common.h |  16 ++
 drivers/ub/urma/hw/udma/udma_dev.h    |   4 +
 drivers/ub/urma/hw/udma/udma_jetty.c  | 263 +++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_jetty.h  |  40 +++
 drivers/ub/urma/hw/udma/udma_jfs.c    | 357 ++++++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_jfs.h    |  26 ++
 drivers/ub/urma/hw/udma/udma_main.c   |   2 +
 8 files changed, 709 insertions(+), 1 deletion(-)
 create mode 100644 drivers/ub/urma/hw/udma/udma_jetty.c
 create mode 100644 drivers/ub/urma/hw/udma/udma_jfs.c

diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile
index a087da421b2e..7b1c82ab51dd 100644
--- a/drivers/ub/urma/hw/udma/Makefile
+++ b/drivers/ub/urma/hw/udma/Makefile
@@ -3,6 +3,6 @@
 udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \
			   udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \
			   udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o \
-			   udma_dfx.o
+			   udma_dfx.o udma_jfs.o udma_jetty.o
 
 obj-m := udma.o
diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h
index aba7b4afddb3..b1b129ee4449 100644
--- a/drivers/ub/urma/hw/udma/udma_common.h
+++ b/drivers/ub/urma/hw/udma/udma_common.h
@@ -6,6 +6,7 @@
 #include
 #include
+#include "udma_ctx.h"
 #include "udma_dev.h"
 
 struct udma_jetty_grp {
@@ -81,6 +82,21 @@ void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_
 void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr);
 void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot,
		     dma_addr_t addr);
+
+static inline void udma_alloc_kernel_db(struct udma_dev *dev,
+					struct udma_jetty_queue *queue)
+{
+	queue->dwqe_addr = dev->k_db_base + JETTY_DSQE_OFFSET +
+			   UDMA_HW_PAGE_SIZE * queue->id;
+	queue->db_addr = queue->dwqe_addr + UDMA_DOORBELL_OFFSET;
+}
+
+static inline uint8_t to_ta_timeout(uint32_t err_timeout)
+{
+#define TA_TIMEOUT_DIVISOR 8
+	return err_timeout / TA_TIMEOUT_DIVISOR;
+}
+
 static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len)
 {
	return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE;
diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h
index 67e8847d66a6..469e55e93d7a 100644
--- a/drivers/ub/urma/hw/udma/udma_dev.h
+++ b/drivers/ub/urma/hw/udma/udma_dev.h
@@ -17,8 +17,12 @@ extern bool dump_aux_info;
 
 #define UBCORE_MAX_DEV_NAME 64
 
+#define WQE_BB_SIZE_SHIFT 6
+
 #define MAX_JETTY_IN_JETTY_GRP 32
 
+#define UDMA_USER_DATA_H_OFFSET 32U
+
 #define MAX_WQEBB_IN_SQE 4
 
 #define JETTY_DSQE_OFFSET 0x1000
diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c
new file mode 100644
index 000000000000..a92fbc7d5d11
--- /dev/null
+++ b/drivers/ub/urma/hw/udma/udma_jetty.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved.
*/ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include "udma_dev.h" +#include +#include "udma_cmd.h" +#include "udma_jfr.h" +#include "udma_jfs.h" +#include "udma_jfc.h" +#include "udma_jetty.h" + +bool well_known_jetty_pgsz_check = true; + +static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) +{ + struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table; + int id; + + id = ida_alloc_range(&ida_table->ida, cfg_id, cfg_id, GFP_KERNEL); + if (id < 0) { + dev_err(udma_dev->dev, "user specify id %u has been used, ret = %d.\n", cfg_id, id); + return id; + } + + return 0; +} + +static int udma_user_specify_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) +{ + if (cfg_id < udma_dev->caps.jetty.start_idx) + return udma_specify_rsvd_jetty_id(udma_dev, cfg_id); + + return udma_specify_adv_id(udma_dev, &udma_dev->jetty_table.bitmap_table, + cfg_id); +} + +int udma_alloc_jetty_id(struct udma_dev *udma_dev, uint32_t *idx, + struct udma_res *jetty_res) +{ + struct udma_group_bitmap *bitmap = &udma_dev->jetty_table.bitmap_table; + struct ida *ida = &udma_dev->rsvd_jetty_ida_table.ida; + uint32_t min = jetty_res->start_idx; + uint32_t next = jetty_res->next_idx; + uint32_t max; + int ret; + + if (jetty_res->max_cnt == 0) { + dev_err(udma_dev->dev, "ida alloc failed max_cnt is 0.\n"); + return -EINVAL; + } + + max = jetty_res->start_idx + jetty_res->max_cnt - 1; + + if (jetty_res != &udma_dev->caps.jetty) { + ret = ida_alloc_range(ida, next, max, GFP_KERNEL); + if (ret < 0) { + ret = ida_alloc_range(ida, min, max, GFP_KERNEL); + if (ret < 0) { + dev_err(udma_dev->dev, + "ida alloc failed %d.\n", ret); + return ret; + } + } + + *idx = (uint32_t)ret; + } else { + ret = udma_adv_id_alloc(udma_dev, bitmap, idx, false, next); + if (ret) { + ret = udma_adv_id_alloc(udma_dev, bitmap, idx, false, min); + if (ret) { + dev_err(udma_dev->dev, + "bitmap alloc failed %d.\n", ret); + return ret; + } + } + } + + jetty_res->next_idx = (*idx + 1) > max ? 
min : (*idx + 1); + + return 0; +} + +static int udma_alloc_normal_jetty_id(struct udma_dev *udma_dev, uint32_t *idx) +{ + int ret; + + ret = udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.jetty); + if (ret == 0) + return 0; + + ret = udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.user_ctrl_normal_jetty); + if (ret == 0) + return 0; + + return udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.public_jetty); +} + +#define CFGID_CHECK(a, b) ((a) >= (b).start_idx && (a) < (b).start_idx + (b).max_cnt) + +static int udma_verify_jetty_type_dwqe(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.stars_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, cache lock st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.stars_jetty.start_idx, + udma_dev->caps.stars_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_ccu(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.ccu_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, ccu st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.ccu_jetty.start_idx, + udma_dev->caps.ccu_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_normal(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.user_ctrl_normal_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, user ctrl normal st idx %u cnt %u.\n", + cfg_id, + udma_dev->caps.user_ctrl_normal_jetty.start_idx, + udma_dev->caps.user_ctrl_normal_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_urma_normal(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!(CFGID_CHECK(cfg_id, udma_dev->caps.public_jetty) || + CFGID_CHECK(cfg_id, udma_dev->caps.hdc_jetty) || + CFGID_CHECK(cfg_id, udma_dev->caps.jetty))) { + dev_err(udma_dev->dev, + "user id %u error, ccu st idx %u cnt %u, stars st idx %u, normal st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.ccu_jetty.start_idx, + udma_dev->caps.ccu_jetty.max_cnt, + udma_dev->caps.stars_jetty.start_idx, + udma_dev->caps.jetty.start_idx, + udma_dev->caps.jetty.max_cnt); + return -EINVAL; + } + + if (well_known_jetty_pgsz_check && PAGE_SIZE != UDMA_HW_PAGE_SIZE) { + dev_err(udma_dev->dev, "Does not support specifying Jetty ID on non-4KB page systems.\n"); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type(struct udma_dev *udma_dev, + enum udma_jetty_type jetty_type, uint32_t cfg_id) +{ + int (*udma_cfg_id_check[UDMA_JETTY_TYPE_MAX])(struct udma_dev *udma_dev, + uint32_t cfg_id) = { + udma_verify_jetty_type_dwqe, + udma_verify_jetty_type_ccu, + udma_verify_jetty_type_normal, + udma_verify_jetty_type_urma_normal + }; + + if (jetty_type < UDMA_JETTY_TYPE_MAX) { + if (!cfg_id) + return 0; + + return udma_cfg_id_check[jetty_type](udma_dev, cfg_id); + } + + dev_err(udma_dev->dev, "invalid jetty type 0x%x.\n", jetty_type); + return -EINVAL; +} + +static int udma_alloc_jetty_id_own(struct udma_dev *udma_dev, uint32_t *id, + enum udma_jetty_type jetty_type) +{ + int ret; + + switch (jetty_type) { + case UDMA_CACHE_LOCK_DWQE_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, + &udma_dev->caps.stars_jetty); + break; + case UDMA_NORMAL_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, + &udma_dev->caps.user_ctrl_normal_jetty); + break; + case UDMA_CCU_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, &udma_dev->caps.ccu_jetty); + break; + default: + ret = udma_alloc_normal_jetty_id(udma_dev, id); + break; + } + 
+ if (ret) + dev_err(udma_dev->dev, + "udma alloc jetty id own failed, type = %d, ret = %d.\n", + jetty_type, ret); + + return ret; +} + +int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp) +{ + int ret; + + if (udma_verify_jetty_type(udma_dev, sq->jetty_type, cfg_id)) + return -EINVAL; + + if (cfg_id > 0 && !jetty_grp) { + ret = udma_user_specify_jetty_id(udma_dev, cfg_id); + if (ret) + return ret; + + sq->id = cfg_id; + } else { + ret = udma_alloc_jetty_id_own(udma_dev, &sq->id, sq->jetty_type); + } + + return ret; +} + +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) +{ +#define UDMA_TA_TIMEOUT_MAX_INDEX 3 + uint32_t time[] = { + UDMA_TA_TIMEOUT_128MS, + UDMA_TA_TIMEOUT_1000MS, + UDMA_TA_TIMEOUT_8000MS, + UDMA_TA_TIMEOUT_64000MS, + }; + uint8_t index; + + index = to_ta_timeout(err_timeout); + if (index > UDMA_TA_TIMEOUT_MAX_INDEX) + index = UDMA_TA_TIMEOUT_MAX_INDEX; + + sq->ta_timeout = time[index]; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index e1c578783a48..fb70231278b7 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -6,7 +6,24 @@ #include "udma_common.h" +#define SQE_TOKEN_ID_L_MASK GENMASK(11, 0) +#define SQE_TOKEN_ID_H_OFFSET 12U +#define SQE_TOKEN_ID_H_MASK GENMASK(7, 0) +#define SQE_VA_L_OFFSET 12U +#define SQE_VA_L_VALID_BIT GENMASK(19, 0) +#define SQE_VA_H_OFFSET 32U +#define SQE_VA_H_VALID_BIT GENMASK(31, 0) #define JETTY_CTX_JFRN_H_OFFSET 12 +#define AVAIL_SGMT_OST_INIT 512 + +#define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) + +#define UDMA_TA_TIMEOUT_128MS 128 +#define UDMA_TA_TIMEOUT_1000MS 1000 +#define UDMA_TA_TIMEOUT_8000MS 8000 +#define UDMA_TA_TIMEOUT_64000MS 64000 + +#define UDMA_MAX_PRIORITY 16 enum jetty_state { JETTY_RESET, @@ -27,6 +44,11 @@ struct udma_jetty { bool ue_rx_closed; }; +enum jfsc_mode { + JFS, + JETTY, +}; + enum jetty_type { JETTY_RAW_OR_NIC, JETTY_UM, @@ -167,6 +189,20 @@ struct udma_jetty_grp_ctx { uint32_t valid; }; +static inline uint32_t to_udma_type(uint32_t trans_mode) +{ + switch (trans_mode) { + case UBCORE_TP_RM: + return JETTY_RM; + case UBCORE_TP_RC: + return JETTY_RC; + case UBCORE_TP_UM: + return JETTY_UM; + default: + return JETTY_TYPE_RESERVED; + } +} + static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); @@ -182,4 +218,8 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu return container_of(queue, struct udma_jetty, sq); } +int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); + #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c new file mode 100644 index 000000000000..002636a03c21 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include +#include "udma_common.h" +#include "udma_dev.h" +#include +#include "udma_cmd.h" +#include "udma_jetty.h" +#include "udma_segment.h" +#include "udma_jfs.h" + +int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct udma_create_jetty_ucmd *ucmd) +{ + int ret; + + if (ucmd->sqe_bb_cnt == 0 || ucmd->buf_len == 0) { + dev_err(dev->dev, "invalid param, sqe_bb_cnt=%u, buf_len=%u.\n", + ucmd->sqe_bb_cnt, ucmd->buf_len); + return -EINVAL; + } + + sq->sqe_bb_cnt = ucmd->sqe_bb_cnt; + sq->buf.entry_cnt = ucmd->buf_len >> WQE_BB_SIZE_SHIFT; + if (sq->non_pin) { + sq->buf.addr = ucmd->buf_addr; + } else { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &sq->buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jetty/jfs queue addr, ret = %d.\n", + ret); + return ret; + } + } + + return 0; +} + +int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_cfg *jfs_cfg) +{ + uint32_t wqe_bb_depth; + uint32_t sqe_bb_cnt; + uint32_t size; + int ret; + + if (!jfs_cfg->flag.bs.lock_free) + spin_lock_init(&sq->lock); + + sq->max_inline_size = jfs_cfg->max_inline_data; + sq->max_sge_num = jfs_cfg->max_sge; + sq->tid = dev->tid; + sq->lock_free = jfs_cfg->flag.bs.lock_free; + + sqe_bb_cnt = sq_cal_wqebb_num(SQE_WRITE_NOTIFY_CTL_LEN, jfs_cfg->max_sge); + sq->sqe_bb_cnt = sqe_bb_cnt > (uint32_t)MAX_WQEBB_NUM ? (uint32_t)MAX_WQEBB_NUM : + sqe_bb_cnt; + + wqe_bb_depth = roundup_pow_of_two(sq->sqe_bb_cnt * jfs_cfg->depth); + sq->buf.entry_size = UDMA_JFS_WQEBB_SIZE; + size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); + sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; + + ret = udma_k_alloc_buf(dev, size, &sq->buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size); + return ret; + } + + sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!sq->wrid) { + udma_k_free_buf(dev, size, &sq->buf); + dev_err(dev->dev, + "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", + sq->id, sq->buf.entry_cnt); + return -ENOMEM; + } + + udma_alloc_kernel_db(dev, sq); + sq->kva_curr = sq->buf.kva; + + return 0; +} + +void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + uint32_t size; + + if (sq->buf.kva) { + size = sq->buf.entry_cnt * sq->buf.entry_size; + udma_k_free_buf(dev, size, &sq->buf); + kfree(sq->wrid); + return; + } + if (sq->non_pin) + return; + + unpin_queue_addr(sq->buf.umem); +} + +void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, void *mb_buf) +{ + struct udma_jetty_ctx *ctx = (struct udma_jetty_ctx *)mb_buf; + uint8_t i; + + ctx->state = JETTY_READY; + ctx->jfs_mode = JFS; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->sl = dev->udma_sl[UDMA_DEFAULT_SL_NUM]; + if (ctx->type == JETTY_RM || ctx->type == JETTY_RC) { + for (i = 0; i < dev->udma_total_sl_num; i++) + if (cfg->priority == dev->udma_sl[i]) + ctx->sl = cfg->priority; + } else if (ctx->type == JETTY_UM) { + ctx->sl = dev->unic_sl[UDMA_DEFAULT_SL_NUM]; + for (i = 0; i < dev->unic_sl_num; i++) + if (cfg->priority == dev->unic_sl[i]) + ctx->sl = cfg->priority; + } + ctx->sqe_base_addr_l = (jfs->sq.buf.addr >> SQE_VA_L_OFFSET) & + (uint32_t)SQE_VA_L_VALID_BIT; + ctx->sqe_base_addr_h = (jfs->sq.buf.addr >> SQE_VA_H_OFFSET) & + (uint32_t)SQE_VA_H_VALID_BIT; + ctx->sqe_token_id_l = 
jfs->sq.tid & (uint32_t)SQE_TOKEN_ID_L_MASK; + ctx->sqe_token_id_h = (jfs->sq.tid >> SQE_TOKEN_ID_H_OFFSET) & + (uint32_t)SQE_TOKEN_ID_H_MASK; + ctx->sqe_bb_shift = ilog2(roundup_pow_of_two(jfs->sq.buf.entry_cnt)); + ctx->tx_jfcn = cfg->jfc->id; + ctx->ta_timeout = to_ta_timeout(cfg->err_timeout); + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_RNR_RETRY)) + ctx->rnr_retry_num = cfg->rnr_retry; + + ctx->user_data_l = jfs->jfs_addr; + ctx->user_data_h = jfs->jfs_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->seid_idx = cfg->eid_index; + ctx->err_mode = cfg->flag.bs.error_suspend; + ctx->cmp_odr = cfg->flag.bs.outorder_comp; + ctx->avail_sgmt_ost = AVAIL_SGMT_OST_INIT; + ctx->pi_type = jfs->pi_type; + ctx->sqe_pld_tokenid = jfs->sq.tid & (uint32_t)SQE_PLD_TOKEN_ID_MASK; + ctx->next_send_ssn = get_random_u16(); + ctx->next_rcv_ssn = ctx->next_send_ssn; +} + +void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs) +{ + struct udma_dfx_jfs *jfs; + int ret; + + jfs = (struct udma_dfx_jfs *)xa_load(&udma_dev->dfx_info->jfs.table, + udma_jfs->sq.id); + if (jfs) { + dev_warn(udma_dev->dev, "jfs_id(%u) already exists in DFX.\n", + udma_jfs->sq.id); + return; + } + + jfs = kzalloc(sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return; + + jfs->id = udma_jfs->sq.id; + jfs->depth = udma_jfs->sq.buf.entry_cnt / udma_jfs->sq.sqe_bb_cnt; + + write_lock(&udma_dev->dfx_info->jfs.rwlock); + ret = xa_err(xa_store(&udma_dev->dfx_info->jfs.table, udma_jfs->sq.id, + jfs, GFP_KERNEL)); + if (ret) { + write_unlock(&udma_dev->dfx_info->jfs.rwlock); + dev_err(udma_dev->dev, "store jfs_id(%u) to table failed in DFX.\n", + udma_jfs->sq.id); + kfree(jfs); + return; + } + + ++udma_dev->dfx_info->jfs.cnt; + write_unlock(&udma_dev->dfx_info->jfs.rwlock); +} + +static int udma_create_hw_jfs_ctx(struct udma_dev *dev, struct udma_jfs *jfs, + struct ubcore_jfs_cfg *cfg) +{ + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + int ret; + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jfs priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; + } + + udma_init_jfsc(dev, cfg, jfs, &ctx); + attr.tag = jfs->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr); + if (ret) { + dev_err(dev->dev, "failed to upgrade JFSC, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + struct udma_context *uctx; + unsigned long byte; + + if (udata) { + if (!udata->udrv_data) { + dev_err(dev->dev, "udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + dev_err(dev->dev, "jfs in_len %u or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jfs udata, ret = %lu.\n", byte); + return -EFAULT; + } + + uctx = to_udma_context(udata->uctx); + jfs->sq.tid = uctx->tid; + jfs->jfs_addr = ucmd->jetty_addr; + jfs->pi_type = ucmd->pi_type; + jfs->sq.non_pin = ucmd->non_pin; + jfs->sq.jetty_type = (enum udma_jetty_type)ucmd->jetty_type; + jfs->sq.id = ucmd->jfs_id; + } else { + jfs->jfs_addr = (uintptr_t)&jfs->sq; + jfs->sq.jetty_type = (enum udma_jetty_type)UDMA_URMA_NORMAL_JETTY_TYPE; + } + + return 0; +} + +static int 
udma_alloc_jfs_sq(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + int ret; + + ret = udma_get_user_jfs_cmd(dev, jfs, udata, &ucmd); + if (ret) + goto err_get_user_cmd; + + ret = alloc_jetty_id(dev, &jfs->sq, jfs->sq.id, NULL); + if (ret) { + dev_err(dev->dev, "failed to alloc_id.\n"); + goto err_alloc_id; + } + jfs->ubcore_jfs.jfs_id.id = jfs->sq.id; + jfs->ubcore_jfs.jfs_cfg = *cfg; + udma_set_query_flush_time(&jfs->sq, cfg->err_timeout); + + ret = xa_err(xa_store(&dev->jetty_table.xa, jfs->sq.id, &jfs->sq, GFP_KERNEL)); + if (ret) { + dev_err(dev->dev, "failed to store_sq(%u), ret=%d.", jfs->sq.id, ret); + goto err_store_sq; + } + + ret = udata ? udma_alloc_u_sq_buf(dev, &jfs->sq, &ucmd) : + udma_alloc_k_sq_buf(dev, &jfs->sq, cfg); + if (ret) + goto err_alloc_sq_buf; + + jfs->sq.trans_mode = cfg->trans_mode; + + return ret; + +err_alloc_sq_buf: + xa_erase(&dev->jetty_table.xa, jfs->sq.id); +err_store_sq: + if (jfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, jfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + jfs->sq.id, false); +err_alloc_id: +err_get_user_cmd: + return ret; +} + +struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *dev = to_udma_dev(ub_dev); + struct udma_jfs *jfs; + int ret; + + if (cfg->trans_mode == UBCORE_TP_RC) { + dev_err(dev->dev, "jfs not support RC transmode.\n"); + return NULL; + } + + jfs = kcalloc(1, sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return NULL; + + ret = udma_alloc_jfs_sq(dev, cfg, jfs, udata); + if (ret) { + dev_err(dev->dev, "failed to alloc_jfs_sq, ret = %d.\n", ret); + goto err_alloc_sq; + } + + ret = udma_create_hw_jfs_ctx(dev, jfs, cfg); + if (ret) { + dev_err(dev->dev, + "post mailbox create jfs ctx failed, ret = %d.\n", ret); + goto err_create_hw_jfs; + } + + jfs->mode = UDMA_NORMAL_JFS_TYPE; + jfs->sq.state = UBCORE_JETTY_STATE_READY; + refcount_set(&jfs->ae_refcount, 1); + init_completion(&jfs->ae_comp); + if (dfx_switch) + udma_dfx_store_jfs_id(dev, jfs); + + return &jfs->ubcore_jfs; + +err_create_hw_jfs: + udma_free_sq_buf(dev, &jfs->sq); + xa_erase(&dev->jetty_table.xa, jfs->sq.id); + if (jfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, jfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + jfs->sq.id, false); +err_alloc_sq: + kfree(jfs); + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 39a7b5d1bfc4..425c87400fec 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -6,6 +6,17 @@ #include "udma_common.h" +#define MAX_WQEBB_NUM 4 +#define UDMA_JFS_WQEBB_SIZE 64 +#define UDMA_JFS_SGE_SIZE 16 + +#define SQE_WRITE_NOTIFY_CTL_LEN 80 + +enum udma_jfs_type { + UDMA_NORMAL_JFS_TYPE, + UDMA_KERNEL_STARS_JFS_TYPE, +}; + struct udma_jfs { struct ubcore_jfs ubcore_jfs; struct udma_jetty_queue sq; @@ -27,4 +38,19 @@ static inline struct udma_jfs *to_udma_jfs_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfs, sq); } +static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) +{ + return (sqe_ctl_len + (sge_num - 1) * UDMA_JFS_SGE_SIZE) / + UDMA_JFS_WQEBB_SIZE + 1; +} + +struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata); +int 
udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq,
+			struct udma_create_jetty_ucmd *ucmd);
+int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq,
+			struct ubcore_jfs_cfg *jfs_cfg);
+void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq);
+
 #endif /* __UDMA_JFS_H__ */
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index dcf0ae79d583..bf1ffe10367f 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -19,6 +19,7 @@
 #include "udma_dev.h"
 #include "udma_eq.h"
 #include "udma_segment.h"
+#include "udma_jfs.h"
 #include "udma_cmd.h"
 #include "udma_ctx.h"
 #include "udma_rct.h"
@@ -173,6 +174,7 @@ static struct ubcore_ops g_dev_ops = {
 	.unregister_seg = udma_unregister_seg,
 	.import_seg = udma_import_seg,
 	.unimport_seg = udma_unimport_seg,
+	.create_jfs = udma_create_jfs,
 	.query_jfs = udma_query_jfs,
 	.query_jfr = udma_query_jfr,
 	.query_jetty = udma_query_jetty,
 };
 
-- 
Gitee

From 8e04bdcb04811aa854cf64c1fc2d46a16c55859e Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Wed, 20 Aug 2025 10:18:46 +0800
Subject: [PATCH 069/243] ub: udma: Support destroy jfs.

commit 717d2a2e8d2b4539b400f0f4f04833cab2fdc4e3 openEuler

This patch adds the ability to destroy jfs. During the destruction
process, the driver tears down the jfs context and sends the destroy
command to the hardware.

Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/udma_dev.h   |   2 +
 drivers/ub/urma/hw/udma/udma_jetty.c | 200 +++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_jetty.h |   4 +
 drivers/ub/urma/hw/udma/udma_jfs.c   |  54 ++++++
 drivers/ub/urma/hw/udma/udma_jfs.h   |   1 +
 drivers/ub/urma/hw/udma/udma_main.c  |   1 +
 6 files changed, 262 insertions(+)

diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h
index 469e55e93d7a..bc6ad5c509fd 100644
--- a/drivers/ub/urma/hw/udma/udma_dev.h
+++ b/drivers/ub/urma/hw/udma/udma_dev.h
@@ -37,6 +37,8 @@ extern bool dump_aux_info;
 #define UDMA_MAX_SL_NUM 16
 #define UDMA_DEFAULT_SL_NUM 0
 
+#define UDMA_RCV_SEND_MAX_DIFF 512U
+
 #define UDMA_CQE_SIZE 64
 
 #define UDMA_MAX_GRANT_SIZE 0xFFFFFFFFF000
diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c
index a92fbc7d5d11..b012010c0e74 100644
--- a/drivers/ub/urma/hw/udma/udma_jetty.c
+++ b/drivers/ub/urma/hw/udma/udma_jetty.c
@@ -261,3 +261,203 @@ void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout)
 
 	sq->ta_timeout = time[index];
 }
+
+int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id)
+{
+	struct ubase_mbx_attr attr = {};
+	int ret;
+
+	attr.tag = jetty_id;
+	attr.op = UDMA_CMD_DESTROY_JFS_CONTEXT;
+	ret = post_mailbox_update_ctx(dev, NULL, 0, &attr);
+	if (ret)
+		dev_err(dev->dev,
+			"post mailbox destroy jetty ctx failed, ret = %d.\n", ret);
+
+	return ret;
+}
+
+int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id,
+			 enum jetty_state state)
+{
+	struct udma_jetty_ctx *ctx, *ctx_mask;
+	struct ubase_mbx_attr mbox_attr = {};
+	struct ubase_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = udma_alloc_cmd_mailbox(dev);
+	if (!mailbox) {
+		dev_err(dev->dev, "failed to alloc mailbox for jettyc.\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct udma_jetty_ctx *)mailbox->buf;
+
+	/* Optimize chip access performance.
*/ + ctx_mask = (struct udma_jetty_ctx *)((char *)ctx + UDMA_JFS_MASK_OFFSET); + memset(ctx_mask, 0xff, sizeof(struct udma_jetty_ctx)); + ctx->state = state; + ctx_mask->state = 0; + + mbox_attr.tag = jetty_id; + mbox_attr.op = UDMA_CMD_MODIFY_JFS_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to upgrade jettyc, ret = %d.\n", ret); + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_query_jetty_ctx(struct udma_dev *dev, + struct udma_jetty_ctx *jfs_ctx, + uint32_t jetty_id) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + + mbox_attr.tag = jetty_id; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + memcpy((void *)jfs_ctx, mailbox->buf, sizeof(*jfs_ctx)); + + udma_free_cmd_mailbox(dev, mailbox); + + return 0; +} + +static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_timeout) +{ + uint32_t wait_time; + + if (*sum_times > ta_timeout) + return true; + + wait_time = 1 << times; + msleep(wait_time); + *sum_times += wait_time; + + return false; +} + +static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + struct udma_jetty_ctx ctx = {}; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t times = 0; + + while (true) { + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) + return false; + + if (ctx.flush_cqe_done) + return true; + + if (udma_wait_timeout(&sum_times, times, UDMA_TA_TIMEOUT_64000MS)) + break; + + times++; + } + + /* In the flip scenario, ctx.next_rcv_ssn - ctx.next_send_ssn value is less than 512. */ + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.flush_ssn_vld && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF) + return true; + + udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), + (uint32_t *)&ctx); + + return false; +} + +int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + struct udma_jetty_ctx ctx = {}; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t times = 0; + int ret; + + while (true) { + ret = udma_query_jetty_ctx(dev, &ctx, sq->id); + if (ret) { + dev_err(dev->dev, "query jetty ctx failed, id = %u, ret = %d.\n", + sq->id, ret); + return ret; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.PI == ctx.CI && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_READY) + break; + + if (rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_ERROR) + break; + + if (udma_wait_timeout(&sum_times, times, sq->ta_timeout)) { + dev_warn(dev->dev, "TA timeout, id = %u. 
PI = %d, CI = %d, nxt_send_ssn = %d nxt_rcv_ssn = %d state = %d.\n", + sq->id, ctx.PI, ctx.CI, ctx.next_send_ssn, + ctx.next_rcv_ssn, ctx.state); + break; + } + times++; + } + + return 0; +} + +static bool udma_destroy_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ +#define UDMA_DESTROY_JETTY_DELAY_TIME 100U + + if (sq->state != UBCORE_JETTY_STATE_READY && sq->state != UBCORE_JETTY_STATE_SUSPENDED) + goto query_jetty_fd; + + if (dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE) + goto modify_to_err; + + if (udma_modify_jetty_precondition(dev, sq)) + return false; + +modify_to_err: + if (udma_set_jetty_state(dev, sq->id, JETTY_ERROR)) { + dev_err(dev->dev, "modify jetty to error failed, id: %u.\n", + sq->id); + return false; + } + + sq->state = UBCORE_JETTY_STATE_ERROR; + +query_jetty_fd: + if (!udma_query_jetty_fd(dev, sq)) + return false; + + udelay(UDMA_DESTROY_JETTY_DELAY_TIME); + + return true; +} + +int udma_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue *sq) +{ + int ret; + + if (!udma_destroy_jetty_precondition(dev, sq)) + return -EFAULT; + + if (sq->state != UBCORE_JETTY_STATE_RESET) { + ret = udma_destroy_hw_jetty_ctx(dev, sq->id); + if (ret) { + dev_err(dev->dev, "jetty destroyed failed, id: %u.\n", + sq->id); + return ret; + } + } + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index fb70231278b7..63d7073b8631 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -15,6 +15,7 @@ #define SQE_VA_H_VALID_BIT GENMASK(31, 0) #define JETTY_CTX_JFRN_H_OFFSET 12 #define AVAIL_SGMT_OST_INIT 512 +#define UDMA_JFS_MASK_OFFSET 128 #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) @@ -220,6 +221,9 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue *sq); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 002636a03c21..cb00cec5ccfd 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -355,3 +355,57 @@ struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, kfree(jfs); return NULL; } + +static void udma_free_jfs(struct ubcore_jfs *jfs) +{ + struct udma_dev *dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *ujfs = to_udma_jfs(jfs); + + xa_erase(&dev->jetty_table.xa, ujfs->sq.id); + + if (refcount_dec_and_test(&ujfs->ae_refcount)) + complete(&ujfs->ae_comp); + wait_for_completion(&ujfs->ae_comp); + + if (dfx_switch) + udma_dfx_delete_id(dev, &dev->dfx_info->jfs, jfs->jfs_id.id); + + if (ujfs->mode == UDMA_NORMAL_JFS_TYPE) + udma_free_sq_buf(dev, &ujfs->sq); + else + kfree(ujfs->sq.wrid); + + if (ujfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, ujfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + ujfs->sq.id, false); + + kfree(ujfs); +} + +int udma_destroy_jfs(struct ubcore_jfs *jfs) +{ + struct udma_dev *dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *ujfs = to_udma_jfs(jfs); + int ret; + + if (!ujfs->ue_rx_closed && udma_close_ue_rx(dev, true, true, false, 0)) { + dev_err(dev->dev, 
"close ue rx failed when destroying jfs.\n"); + return -EINVAL; + } + + ret = udma_modify_and_destroy_jetty(dev, &ujfs->sq); + if (ret) { + dev_info(dev->dev, "udma modify error and destroy jfs failed, id: %u.\n", + jfs->jfs_id.id); + if (!ujfs->ue_rx_closed) + udma_open_ue_rx(dev, true, true, false, 0); + return ret; + } + + udma_free_jfs(jfs); + udma_open_ue_rx(dev, true, true, false, 0); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 425c87400fec..ed1ff16e4573 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -47,6 +47,7 @@ static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfs(struct ubcore_jfs *jfs); int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd); int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index bf1ffe10367f..2d0b1b0f7332 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -176,6 +176,7 @@ static struct ubcore_ops g_dev_ops = { .unimport_seg = udma_unimport_seg, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, + .destroy_jfs = udma_destroy_jfs, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; -- Gitee From d06a4bcf95bb26d20ad18e286bd5ba42c63d3fbb Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 14:49:06 +0800 Subject: [PATCH 070/243] ub: udma: Support create jfr. commit 4d99b1a2909140212152b956ae8969405ba3bb30 openEuler This patch adds the ability to create jfr, During the creation process, driver will create jfr context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 2 +- drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.c | 447 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 63 ++++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 515 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_jfr.c diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index 7b1c82ab51dd..8eddc0984ac7 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -3,6 +3,6 @@ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o \ - udma_dfx.o udma_jfs.o udma_jetty.o + udma_dfx.o udma_jfs.o udma_jetty.o udma_jfr.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index bc6ad5c509fd..7656124f875a 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -19,6 +19,8 @@ extern bool dump_aux_info; #define WQE_BB_SIZE_SHIFT 6 +#define UDMA_CTX_NUM 2 + #define MAX_JETTY_IN_JETTY_GRP 32 #define UDMA_USER_DATA_H_OFFSET 32U diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c new file mode 100644 index 000000000000..9783907f4602 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include "udma_cmd.h" +#include +#include "udma_jetty.h" +#include "udma_common.h" +#include "udma_db.h" +#include "udma_jfc.h" +#include "udma_jfr.h" + +const char *state_str[] = { + "RESET", + "READY", + "ERROR", + "INVALID" +}; + +static int udma_verify_jfr_param(struct udma_dev *dev, + struct ubcore_jfr_cfg *cfg) +{ + if (!cfg->max_sge || !cfg->depth || cfg->depth > dev->caps.jfr.depth || + cfg->max_sge > dev->caps.jfr_sge) { + dev_err(dev->dev, "Invalid jfr param, depth = %u, max_sge = %u.\n", + cfg->depth, cfg->max_sge); + return -EINVAL; + } + + if (cfg->flag.bs.token_policy > UBCORE_TOKEN_PLAIN_TEXT) { + dev_err(dev->dev, "jfr key policy = %d is not supported now.\n", + cfg->flag.bs.token_policy); + return -EINVAL; + } + + return 0; +} + +static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) +{ + uint32_t rqe_buf_size; + uint32_t idx_buf_size; + uint32_t sge_per_wqe; + int ret; + + sge_per_wqe = min(jfr->max_sge, dev->caps.jfr_sge); + jfr->rq.buf.entry_size = UDMA_SGE_SIZE * sge_per_wqe; + jfr->rq.buf.entry_cnt = jfr->wqe_cnt; + rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, rqe_buf_size, &jfr->rq.buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc rq buffer for jfr when buffer size = %u.\n", + rqe_buf_size); + return ret; + } + + jfr->idx_que.buf.entry_size = UDMA_IDX_QUE_ENTRY_SZ; + jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; + idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, idx_buf_size, &jfr->idx_que.buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc idx que buffer for jfr when buffer size = %u.\n", + idx_buf_size); + goto err_idx_que; + } + + jfr->rq.wrid = kcalloc(1, jfr->rq.buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!jfr->rq.wrid) + goto err_wrid; + + jfr->jetty_addr = (uintptr_t)&jfr->rq; + + if (udma_alloc_sw_db(dev, &jfr->sw_db, UDMA_JFR_TYPE_DB)) { + dev_err(dev->dev, "failed to alloc sw db for jfr(%u).\n", jfr->rq.id); + goto err_alloc_db; + } + + udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0); + + jfr->rq.tid = dev->tid; + + return 0; + +err_alloc_db: + kfree(jfr->rq.wrid); +err_wrid: + udma_k_free_buf(dev, idx_buf_size, &jfr->idx_que.buf); +err_idx_que: + udma_k_free_buf(dev, rqe_buf_size, &jfr->rq.buf); + + return -ENOMEM; +} + +static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + unsigned long byte; + int ret; + + if (!udata->udrv_data) { + dev_err(dev->dev, "jfr udata udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + dev_err(dev->dev, "jfr in_len %u or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jfr udata, byte = %lu.\n", byte); + return -EFAULT; + } + + if (!ucmd->non_pin) { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfr->rq.buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr rqe buf addr, ret = %d.\n", ret); + return ret; + } + + ret = pin_queue_addr(dev, ucmd->idx_addr, ucmd->idx_len, + &jfr->idx_que.buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr idx que addr, ret = %d.\n", ret); + goto err_pin_idx_buf; + } + } else { + jfr->rq.buf.addr = 
ucmd->buf_addr; + jfr->idx_que.buf.addr = ucmd->idx_addr; + } + + jfr->udma_ctx = to_udma_context(udata->uctx); + jfr->sw_db.db_addr = ucmd->db_addr; + jfr->jfr_sleep_buf.db_addr = ucmd->jfr_sleep_buf; + + if (!ucmd->non_pin) { + ret = udma_pin_sw_db(jfr->udma_ctx, &jfr->sw_db); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr sw db addr, ret = %d.\n", ret); + goto err_pin_sw_db; + } + + ret = udma_pin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr sleep time buf, ret = %d.\n", ret); + goto err_pin_jfr_sleep_buf; + } + } + + jfr->jetty_addr = ucmd->jetty_addr; + jfr->rq.tid = jfr->udma_ctx->tid; + + return ret; + +err_pin_jfr_sleep_buf: + udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); +err_pin_sw_db: + unpin_queue_addr(jfr->idx_que.buf.umem); +err_pin_idx_buf: + unpin_queue_addr(jfr->rq.buf.umem); + return ret; +} + +static int udma_get_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + + if (udata == NULL) + return udma_get_k_jfr_buf(dev, jfr); + else + return udma_get_u_jfr_buf(dev, jfr, udata, &ucmd); +} + +static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) +{ + uint32_t size; + + if (!jfr->rq.buf.kva && !jfr->idx_que.buf.kva && + jfr->sw_db.page && jfr->jfr_sleep_buf.page) { + udma_unpin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); + udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); + unpin_queue_addr(jfr->idx_que.buf.umem); + unpin_queue_addr(jfr->rq.buf.umem); + return; + } + + if (jfr->rq.buf.kva) { + size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size; + udma_k_free_buf(dev, size, &jfr->rq.buf); + udma_free_sw_db(dev, &jfr->sw_db); + } + + if (jfr->idx_que.buf.kva) { + size = jfr->idx_que.buf.entry_cnt * jfr->idx_que.buf.entry_size; + udma_k_free_buf(dev, size, &jfr->idx_que.buf); + udma_destroy_udma_table(dev, &jfr->idx_que.jfr_idx_table, "JFR_IDX"); + } + + kfree(jfr->rq.wrid); +} + +static enum udma_rx_limit_wl to_udma_limit_wl(uint32_t rx_threshold) +{ + if (rx_threshold >= LIMIT_WL_4096_V) + return UDMA_RX_LIMIT_WL_4096; + if (rx_threshold >= LIMIT_WL_512_V) + return UDMA_RX_LIMIT_WL_512; + if (rx_threshold >= LIMIT_WL_64_V) + return UDMA_RX_LIMIT_WL_64; + + return UDMA_RX_LIMIT_WL_0; +} + +static void udma_init_jfrc(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg, + struct udma_jfr *jfr, void *mb_buf, + uint32_t rx_threshold) +{ + struct udma_jfr_ctx *ctx = (struct udma_jfr_ctx *)mb_buf; + struct udma_jfc *jfc = to_udma_jfc(cfg->jfc); + uint32_t tid = jfr->rq.tid; + uint64_t db_addr; + + db_addr = jfr->sw_db.db_addr; + + memset(ctx, 0, sizeof(struct udma_jfr_ctx) * UDMA_CTX_NUM); + ctx->state = UDMA_JFR_STATE_READY; + ctx->record_db_en = 1; + ctx->rqe_base_addr_l = (jfr->rq.buf.addr >> RQE_VA_L_PAGE_4K_OFFSET) & + (uint32_t)RQE_VA_L_VALID_BIT; + ctx->rqe_base_addr_h = (jfr->rq.buf.addr >> (uint32_t)RQE_VA_H_PAGE_4K_OFFSET) & + (uint32_t)RQE_VA_H_VALID_BIT; + ctx->idx_que_addr_l = (jfr->idx_que.buf.addr >> JFR_IDX_VA_L_PAGE_4K_OFFSET) & + (uint32_t)JFR_IDX_VA_L_VALID_BIT; + ctx->idx_que_addr_h = (jfr->idx_que.buf.addr >> (uint32_t)JFR_IDX_VA_H_PAGE_4K_OFFSET) & + (uint32_t)JFR_IDX_VA_H_VALID_BIT; + ctx->record_db_addr_l = (db_addr >> JFR_DB_VA_L_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_L_VALID_BIT; + ctx->record_db_addr_m = (db_addr >> (uint32_t)JFR_DB_VA_M_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_M_VALID_BIT; + ctx->record_db_addr_h = (db_addr >> (uint32_t)JFR_DB_VA_H_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_H_VALID_BIT; + 
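+ /* Wide values are split across narrow context fields: bits [63:6] of the 64-byte-aligned record-doorbell VA above land in a 24-bit low, a 32-bit middle and a 2-bit high field, and the 20-bit token ID below is carried as a 14-bit low field plus a 6-bit high field. */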
ctx->rqe_token_id_l = tid & (uint32_t)RQE_TOKEN_ID_L_MASK; + ctx->rqe_token_id_h = (tid >> RQE_TOKEN_ID_H_OFFSET) & (uint32_t)RQE_TOKEN_ID_H_MASK; + ctx->jfcn_l = cfg->jfc->id & (uint32_t)JFR_JFCN_L_VALID_BIT; + ctx->jfcn_h = (cfg->jfc->id >> JFR_JFCN_H_OFFSET) & (uint32_t)JFR_JFCN_H_VALID_BIT; + if (cfg->min_rnr_timer > UDMA_RNR_MAX) { + dev_warn(dev->dev, + "min_rnr_timer is out of range, max_value(%d) is applied.\n", + UDMA_RNR_MAX); + ctx->rnr_timer = UDMA_RNR_MAX; + } else { + ctx->rnr_timer = cfg->min_rnr_timer; + } + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) + ctx->token_en = 1; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->token_value = cfg->token_value.token; + ctx->user_data_l = jfr->jetty_addr; + ctx->user_data_h = jfr->jetty_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->rqe_size_shift = ilog2(jfr->max_sge); + ctx->rqe_shift = ilog2(jfr->wqe_cnt); + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->cqeie = jfc->inline_en; + + ctx->limit_wl = (uint32_t)to_udma_limit_wl(rx_threshold); + ctx->pld_token_id = tid & (uint32_t)JFR_PLD_TOKEN_ID_MASK; +} + +static void udma_reset_sw_k_jfr_queue(struct udma_jfr *jfr) +{ + ida_destroy(&jfr->idx_que.jfr_idx_table.ida_table.ida); + ida_init(&jfr->idx_que.jfr_idx_table.ida_table.ida); + jfr->rq.pi = 0; + jfr->rq.ci = 0; + *jfr->sw_db.db_record = 0; +} + +static int udma_hw_init_jfrc(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg, + struct udma_jfr *jfr, uint32_t rx_threshold) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfr_ctx *ctx = NULL; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFRC.\n"); + return -ENOMEM; + } + + udma_init_jfrc(dev, cfg, jfr, mailbox->buf, rx_threshold); + + mbox_attr.tag = jfr->rq.id; + mbox_attr.op = UDMA_CMD_CREATE_JFR_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post mbox cmd of create JFRC, ret = %d.\n", + ret); + + if (jfr->rq.buf.kva) + udma_reset_sw_k_jfr_queue(jfr); + + ctx = (struct udma_jfr_ctx *)mailbox->buf; + ctx->token_value = 0; + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static void set_jfr_param(struct udma_jfr *jfr, struct ubcore_jfr_cfg *cfg) +{ + if (cfg->depth < UDMA_MIN_JFR_DEPTH) + jfr->wqe_cnt = UDMA_MIN_JFR_DEPTH; + else + jfr->wqe_cnt = roundup_pow_of_two(cfg->depth); + + jfr->ubcore_jfr.jfr_id.id = jfr->rq.id; + jfr->ubcore_jfr.jfr_cfg = *cfg; + jfr->max_sge = roundup_pow_of_two(cfg->max_sge); + jfr->ubcore_jfr.jfr_cfg.max_sge = jfr->max_sge; + jfr->ubcore_jfr.jfr_cfg.depth = jfr->wqe_cnt; + jfr->state = UBCORE_JFR_STATE_READY; + + if (!cfg->flag.bs.lock_free) + spin_lock_init(&jfr->lock); +} + +static int udma_alloc_jfr_id(struct udma_dev *udma_dev, uint32_t cfg_id, uint32_t *idx) +{ + struct udma_ida *ida_table = &udma_dev->jfr_table.ida_table; + uint32_t min; + uint32_t max; + int id; + + if (cfg_id && (cfg_id < ida_table->min || cfg_id > ida_table->max)) { + dev_err(udma_dev->dev, + "user specify id %u error, min %u max %u.\n", + cfg_id, ida_table->min, ida_table->max); + return -EINVAL; + } + + spin_lock(&ida_table->lock); + min = cfg_id ? cfg_id : ida_table->next; + max = cfg_id ? 
cfg_id : ida_table->max; + id = ida_alloc_range(&ida_table->ida, min, max, GFP_ATOMIC); + if (id < 0) { + if (!cfg_id) + id = ida_alloc_range(&ida_table->ida, min = ida_table->min, + max, GFP_ATOMIC); + if (id < 0) { + dev_err(udma_dev->dev, + "alloc jfr id range (%u - %u) failed, ret = %d.\n", + min, max, id); + spin_unlock(&ida_table->lock); + + return id; + } + } + + *idx = (uint32_t)id; + + if (!cfg_id) + ida_table->next = (uint32_t)id + 1 > ida_table->max ? + ida_table->min : (uint32_t)id + 1; + spin_unlock(&ida_table->lock); + + return 0; +} + +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, + struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct udma_jfr *udma_jfr; + int ret; + + ret = udma_verify_jfr_param(udma_dev, cfg); + if (ret) { + dev_err(udma_dev->dev, "verify jfr param failed.\n"); + return NULL; + } + + udma_jfr = kzalloc(sizeof(*udma_jfr), GFP_KERNEL); + if (!udma_jfr) + return NULL; + + ret = udma_alloc_jfr_id(udma_dev, cfg->id, &udma_jfr->rq.id); + if (ret) + goto err_alloc_jfr_id; + + set_jfr_param(udma_jfr, cfg); + + ret = udma_get_jfr_buf(udma_dev, udma_jfr, udata); + if (ret) + goto err_get_jfr_buf; + + ret = xa_err(xa_store(&udma_dev->jfr_table.xa, udma_jfr->rq.id, + udma_jfr, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, "store jfr to jfr_table failed.\n"); + goto err_xa_store; + } + + ret = udma_hw_init_jfrc(udma_dev, cfg, udma_jfr, 0); + if (ret) { + dev_err(udma_dev->dev, "failed to init JFRC, ret = %d.\n", ret); + goto err_hw_init_jfrc; + } + + refcount_set(&udma_jfr->ae_refcount, 1); + init_completion(&udma_jfr->ae_comp); + + if (dfx_switch) + udma_dfx_store_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id, "jfr"); + + return &udma_jfr->ubcore_jfr; + +err_hw_init_jfrc: + xa_erase(&udma_dev->jfr_table.xa, udma_jfr->rq.id); +err_xa_store: + udma_put_jfr_buf(udma_dev, udma_jfr); +err_get_jfr_buf: + udma_id_free(&udma_dev->jfr_table.ida_table, udma_jfr->rq.id); +err_alloc_jfr_id: + kfree(udma_jfr); + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index bffb68b3cdbd..bd29f9c4a526 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -8,7 +8,67 @@ #include "udma_ctx.h" #include "udma_common.h" +#define RQE_VA_L_PAGE_4K_OFFSET 12U +#define RQE_VA_L_VALID_BIT GENMASK(19, 0) +#define RQE_VA_H_OFFSET 20 +#define RQE_VA_H_PAGE_4K_OFFSET (RQE_VA_H_OFFSET + RQE_VA_L_PAGE_4K_OFFSET) +#define RQE_VA_H_VALID_BIT GENMASK(31, 0) + +#define RQE_TOKEN_ID_L_MASK GENMASK(13, 0) +#define RQE_TOKEN_ID_H_OFFSET 14U +#define RQE_TOKEN_ID_H_MASK GENMASK(5, 0) + +#define JFR_IDX_VA_L_PAGE_4K_OFFSET 12U +#define JFR_IDX_VA_L_VALID_BIT GENMASK(31, 0) +#define JFR_IDX_VA_H_OFFSET 32 +#define JFR_IDX_VA_H_PAGE_4K_OFFSET \ + (JFR_IDX_VA_H_OFFSET + JFR_IDX_VA_L_PAGE_4K_OFFSET) +#define JFR_IDX_VA_H_VALID_BIT GENMASK(19, 0) + +#define JFR_DB_VA_L_PAGE_64_OFFSET 6U +#define JFR_DB_VA_L_VALID_BIT GENMASK(23, 0) +#define JFR_DB_VA_M_OFFSET 24 +#define JFR_DB_VA_M_PAGE_64_OFFSET \ + (JFR_DB_VA_M_OFFSET + JFR_DB_VA_L_PAGE_64_OFFSET) +#define JFR_DB_VA_M_VALID_BIT GENMASK(31, 0) +#define JFR_DB_VA_H_OFFSET 32 +#define JFR_DB_VA_H_PAGE_64_OFFSET \ + (JFR_DB_VA_H_OFFSET + JFR_DB_VA_M_PAGE_64_OFFSET) +#define JFR_DB_VA_H_VALID_BIT GENMASK(1, 0) + +#define JFR_JFCN_L_VALID_BIT GENMASK(11, 0) #define JFR_JFCN_H_OFFSET 12U +#define JFR_JFCN_H_VALID_BIT GENMASK(7, 0) + +#define UDMA_JFR_DB_PI_M GENMASK(15, 0) + +#define 
JFR_PLD_TOKEN_ID_MASK GENMASK(19, 0) + +#define UDMA_MIN_JFR_DEPTH 64 +#define UDMA_SGE_SIZE 16U +#define UDMA_IDX_QUE_ENTRY_SZ 4 +#define UDMA_RNR_MAX 19 + +enum jfr_state { + UDMA_JFR_STATE_RESET = 0, + UDMA_JFR_STATE_READY, + UDMA_JFR_STATE_ERROR, + JFR_STATE_NUM, +}; + +enum udma_rx_limit_wl { + UDMA_RX_LIMIT_WL_0 = 0, + UDMA_RX_LIMIT_WL_64, + UDMA_RX_LIMIT_WL_512, + UDMA_RX_LIMIT_WL_4096 +}; + +enum { + LIMIT_WL_0_V = 0, + LIMIT_WL_64_V = 64, + LIMIT_WL_512_V = 512, + LIMIT_WL_4096_V = 4096 +}; struct udma_jfr_idx_que { struct udma_buf buf; @@ -92,4 +152,7 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfr, rq); } +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata); + #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2d0b1b0f7332..2b6cbcb0bd20 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -20,6 +20,7 @@ #include "udma_eq.h" #include "udma_segment.h" #include "udma_jfs.h" +#include "udma_jfr.h" #include "udma_cmd.h" #include "udma_ctx.h" #include "udma_rct.h" @@ -177,6 +178,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, + .create_jfr = udma_create_jfr, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; -- Gitee From d57de0ac9721a69f6540f75400e407aeb7fe03c1 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 15:22:55 +0800 Subject: [PATCH 071/243] ub: udma: Support destroy jfr. commit c9ab1d26bebe7b020b7b0f938cd76b8157133f92 openEuler This patch adds the ability to destroy jfr. During the destruction process, the driver will destroy the jfr context and send the destroy command to the hardware.
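The teardown implemented here is strictly ordered: the jfr is first moved to the ERROR state so the hardware stops accepting new receives, the driver then sleeps long enough for in-flight traffic to drain, and only then is the context destroyed and the software resources freed. A minimal user-space sketch of that ordering, assuming hypothetical stand-ins for the mailbox commands (modify_to_error, modify_to_reset and the jfr struct below are illustrative, not driver APIs):

#include <stdio.h>
#include <unistd.h>

enum jfr_state { JFR_RESET, JFR_READY, JFR_ERROR };

struct jfr { unsigned int id; enum jfr_state state; };

/* Stand-in for the mailbox command that moves the context to ERROR. */
static int modify_to_error(struct jfr *j)
{
	if (j->state == JFR_READY)
		j->state = JFR_ERROR;
	return 0;
}

/* Stand-in for the mailbox command that destroys the context. */
static int modify_to_reset(struct jfr *j)
{
	j->state = JFR_RESET;
	return 0;
}

static int destroy_jfr(struct jfr *j, unsigned int drain_us)
{
	int ret = modify_to_error(j);

	if (ret)
		return ret;
	usleep(drain_us); /* let in-flight receives complete */
	return modify_to_reset(j);
}

int main(void)
{
	struct jfr j = { .id = 1, .state = JFR_READY };
	int ret = destroy_jfr(&j, 1000);

	printf("jfr %u -> state %d, ret %d\n", j.id, j.state, ret);
	return ret;
}

The batch variant added by this patch follows the same ordering but pays the drain sleep once for the whole jfr array instead of once per jfr.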
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 1 + drivers/ub/urma/hw/udma/udma_jfr.c | 205 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 5 + drivers/ub/urma/hw/udma/udma_main.c | 6 + 4 files changed, 217 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 7656124f875a..9f2a20eea557 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -12,6 +12,7 @@ #include extern bool dfx_switch; +extern uint32_t jfr_sleep_time; extern uint32_t jfc_arm_mode; extern bool dump_aux_info; diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 9783907f4602..f15ca6b26d42 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -445,3 +445,208 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, kfree(udma_jfr); return NULL; } + +static int modify_jfr_context(struct udma_dev *dev, uint32_t jfrn, + bool state_flag, bool rx_threshold_flag, + struct ubcore_jfr_attr *attr) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jfr_ctx *ctx, *ctx_mask; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFRC.\n"); + return -EINVAL; + } + + ctx = (struct udma_jfr_ctx *)mailbox->buf; + ctx_mask = ctx + 1; + memset(ctx_mask, 0xff, sizeof(struct udma_jfr_ctx)); + if (state_flag) { + ctx->state = attr->state; + ctx_mask->state = 0; + } + + if (rx_threshold_flag) { + ctx->limit_wl = (uint32_t)to_udma_limit_wl(attr->rx_threshold); + ctx_mask->limit_wl = 0; + } + + mbox_attr.tag = jfrn; + mbox_attr.op = UDMA_CMD_MODIFY_JFR_CONTEXT; + + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post mbox cmd of modify JFRC, ret = %d.\n", ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_modify_jfr_to_error(struct ubcore_jfr *jfr, bool *need_sleep) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubcore_jfr_attr attr; + int ret = 0; + + if (udma_jfr->state == UBCORE_JFR_STATE_READY) { + attr.state = UBCORE_JFR_STATE_ERROR; + attr.mask = UBCORE_JFR_STATE; + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, true, false, &attr); + if (ret) { + dev_err(udma_dev->dev, "failed to modify jfr state to error, id: %u.\n", + udma_jfr->rq.id); + return ret; + } + + udma_jfr->state = UBCORE_JFR_STATE_ERROR; + + *need_sleep = true; + } + + return ret; +} + +static int udma_modify_jfr_to_reset(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubase_mbx_attr mbox_attr = {}; + int ret = 0; + + if (udma_jfr->state != UBCORE_JFR_STATE_RESET) { + mbox_attr.tag = udma_jfr->rq.id; + mbox_attr.op = UDMA_CMD_DESTROY_JFR_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, NULL, 0, &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, "failed to post jfr destroy cmd, id: %u.\n", + udma_jfr->rq.id); + return ret; + } + + udma_jfr->state = UBCORE_JFR_STATE_RESET; + } + + return ret; +} + +static int udma_modify_and_del_jfr(struct udma_dev *udma_dev, struct udma_jfr *udma_jfr) +{ + bool large_payload = false; + bool need_sleep = false; + uint32_t sleep_time = 0; + int ret = 0; + + ret = udma_modify_jfr_to_error(&udma_jfr->ubcore_jfr, &need_sleep); + if (ret) + return ret; + if 
(!udma_jfr->rq.buf.kva && udma_jfr->jfr_sleep_buf.page) + large_payload = !!(*(bool *)udma_jfr->jfr_sleep_buf.virt_addr); + if (need_sleep) { + sleep_time = large_payload ? jfr_sleep_time : UDMA_DEF_JFR_SLEEP_TIME; + dev_info_ratelimited(udma_dev->dev, "jfr sleep time = %u us.\n", sleep_time); + usleep_range(sleep_time, sleep_time + UDMA_SLEEP_DELAY_TIME); + } + + return udma_modify_jfr_to_reset(&udma_jfr->ubcore_jfr); +} + +static void udma_free_jfr(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id); + + xa_erase(&udma_dev->jfr_table.xa, udma_jfr->rq.id); + + if (refcount_dec_and_test(&udma_jfr->ae_refcount)) + complete(&udma_jfr->ae_comp); + wait_for_completion(&udma_jfr->ae_comp); + + udma_put_jfr_buf(udma_dev, udma_jfr); + udma_id_free(&udma_dev->jfr_table.ida_table, udma_jfr->rq.id); + jfr->jfr_cfg.token_value.token = 0; + kfree(udma_jfr); +} + +int udma_destroy_jfr(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + int ret; + + ret = udma_modify_and_del_jfr(udma_dev, udma_jfr); + if (ret) { + dev_err(udma_dev->dev, + "failed to modify and delete jfr, id: %u, ret = %d.\n", + udma_jfr->rq.id, ret); + return ret; + } + + udma_free_jfr(jfr); + + return 0; +} + +int udma_destroy_jfr_batch(struct ubcore_jfr **jfr, int jfr_cnt, int *bad_jfr_index) +{ + bool large_payload = false; + struct udma_dev *udma_dev; + struct udma_jfr *udma_jfr; + bool need_sleep = false; + uint32_t sleep_time = 0; + uint32_t i; + int ret; + + if (!jfr) { + pr_info("jfr array is null.\n"); + return -EINVAL; + } + + if (!jfr_cnt) { + pr_info("jfr cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jfr[0]->ub_dev); + + for (i = 0; i < jfr_cnt; i++) { + ret = udma_modify_jfr_to_error(jfr[i], &need_sleep); + if (ret) { + *bad_jfr_index = 0; + return ret; + } + + if (unlikely(large_payload)) + continue; + udma_jfr = to_udma_jfr(jfr[i]); + if (!udma_jfr->rq.buf.kva && udma_jfr->jfr_sleep_buf.page) + large_payload = !!(*(bool *)udma_jfr->jfr_sleep_buf.virt_addr); + } + + if (need_sleep) { + sleep_time = large_payload ? 
jfr_sleep_time : UDMA_DEF_JFR_SLEEP_TIME; + dev_info(udma_dev->dev, "jfr sleep time = %u us.\n", sleep_time); + usleep_range(sleep_time, sleep_time + UDMA_SLEEP_DELAY_TIME); + } + + for (i = 0; i < jfr_cnt; i++) { + ret = udma_modify_jfr_to_reset(jfr[i]); + if (ret) { + *bad_jfr_index = 0; + return ret; + } + } + + for (i = 0; i < jfr_cnt; i++) + udma_free_jfr(jfr[i]); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index bd29f9c4a526..43ee96cea746 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -49,6 +49,9 @@ #define UDMA_IDX_QUE_ENTRY_SZ 4 #define UDMA_RNR_MAX 19 +#define UDMA_DEF_JFR_SLEEP_TIME 1000 +#define UDMA_SLEEP_DELAY_TIME 10 + enum jfr_state { UDMA_JFR_STATE_RESET = 0, UDMA_JFR_STATE_READY, @@ -154,5 +157,7 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfr(struct ubcore_jfr *jfr); +int udma_destroy_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, int *bad_jfr_index); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2b6cbcb0bd20..c5911b027ba6 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -33,6 +33,7 @@ bool is_rmmod; static DEFINE_MUTEX(udma_reset_mutex); +uint32_t jfr_sleep_time = 1000; uint32_t jfc_arm_mode; bool dump_aux_info; @@ -179,6 +180,8 @@ static struct ubcore_ops g_dev_ops = { .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, + .destroy_jfr = udma_destroy_jfr, + .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; @@ -1092,6 +1095,9 @@ module_init(udma_init); module_exit(udma_exit); MODULE_LICENSE("GPL"); +module_param(jfr_sleep_time, uint, 0444); +MODULE_PARM_DESC(jfr_sleep_time, "Set the destroy jfr sleep time, default: 1000 us.\n"); + module_param(jfc_arm_mode, uint, 0444); MODULE_PARM_DESC(jfc_arm_mode, "Set the ARM mode of the JFC, default: 0(0:Always ARM, other: NO ARM."); -- Gitee From 8ea6b97064206a1bbc64376946405f76e54f6723 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 15:51:47 +0800 Subject: [PATCH 072/243] ub: udma: Support create jfc. commit 14ca6ade755ce2333f8f9c1524bdd24ff5577c8b openEuler This patch adds the ability to create jfc. During the creation process, driver will create jfc context and send it to the hardware. 
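Before the jfc context is built, the requested completion-queue depth is normalized: zero or over-cap depths are rejected, and anything below UDMA_JFC_DEPTH_MIN (64) is raised to that floor; on the kernel-allocated path the depth is first rounded up to a power of two. A stand-alone sketch of that normalization (next_pow2 and normalize_jfc_depth are local helpers, not driver functions; 4096 is an assumed device cap):

#include <stdio.h>

#define JFC_DEPTH_MIN 64u /* UDMA_JFC_DEPTH_MIN in the patch */

/* Round up to the next power of two (local helper). */
static unsigned int next_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Returns the normalized depth, or 0 if the request is invalid. */
static unsigned int normalize_jfc_depth(unsigned int requested, unsigned int cap)
{
	unsigned int cnt = requested ? next_pow2(requested) : 0;

	if (!cnt || cnt > cap)
		return 0; /* rejected, as udma_check_jfc_cfg rejects it */
	if (cnt < JFC_DEPTH_MIN)
		cnt = JFC_DEPTH_MIN; /* raised to the minimum depth */
	return cnt;
}

int main(void)
{
	printf("100 -> %u\n", normalize_jfc_depth(100, 4096));   /* 128 */
	printf("10 -> %u\n", normalize_jfc_depth(10, 4096));     /* 64 */
	printf("5000 -> %u\n", normalize_jfc_depth(5000, 4096)); /* 0, over cap */
	return 0;
}

The jfr create path in the preceding patches uses the same round-up-then-clamp pattern with UDMA_MIN_JFR_DEPTH as the floor.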
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 7 + drivers/ub/urma/hw/udma/udma_jfc.c | 381 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 36 +++ drivers/ub/urma/hw/udma/udma_main.c | 6 + 4 files changed, 430 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 9f2a20eea557..d9b10ab28028 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -12,6 +12,7 @@ #include extern bool dfx_switch; +extern bool cqe_mode; extern uint32_t jfr_sleep_time; extern uint32_t jfc_arm_mode; extern bool dump_aux_info; @@ -85,6 +86,11 @@ struct udma_mailbox_cmd { struct rw_semaphore udma_mb_rwsem; }; +struct udma_ex_jfc_addr { + uint64_t cq_addr; + uint32_t cq_len; +}; + struct udma_dev { struct ubase_adev_com comdev; struct ubcore_device ub_dev; @@ -124,6 +130,7 @@ struct udma_dev { uint32_t status; struct udma_dev_debugfs *dbgfs; uint32_t ue_num; + struct udma_ex_jfc_addr cq_addr_array[UDMA_JFC_TYPE_NUM]; uint32_t ue_id; struct page *db_page; u8 udma_tp_sl_num; diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index ee223bb923f6..9d86d9003593 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -18,6 +18,387 @@ #include #include "udma_jfc.h" +static void udma_construct_jfc_ctx(struct udma_dev *dev, + struct udma_jfc *jfc, + struct udma_jfc_ctx *ctx) +{ + memset(ctx, 0, sizeof(struct udma_jfc_ctx)); + + ctx->state = UDMA_JFC_STATE_VALID; + if (jfc_arm_mode) + ctx->arm_st = UDMA_CTX_NO_ARMED; + else + ctx->arm_st = UDMA_CTX_ALWAYS_ARMED; + ctx->shift = jfc->cq_shift - UDMA_JFC_DEPTH_SHIFT_BASE; + ctx->jfc_type = UDMA_NORMAL_JFC_TYPE; + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->inline_en = jfc->inline_en; + ctx->cqe_va_l = jfc->buf.addr >> CQE_VA_L_OFFSET; + ctx->cqe_va_h = jfc->buf.addr >> CQE_VA_H_OFFSET; + ctx->cqe_token_id = jfc->tid; + + if (cqe_mode) + ctx->cq_cnt_mode = UDMA_CQE_CNT_MODE_BY_CI_PI_GAP; + else + ctx->cq_cnt_mode = UDMA_CQE_CNT_MODE_BY_COUNT; + + ctx->ceqn = jfc->ceqn; + if (jfc->stars_en) { + ctx->stars_en = UDMA_STARS_SWITCH; + ctx->record_db_en = UDMA_NO_RECORD_EN; + } else { + ctx->record_db_en = UDMA_RECORD_EN; + ctx->record_db_addr_l = jfc->db.db_addr >> UDMA_DB_L_OFFSET; + ctx->record_db_addr_h = jfc->db.db_addr >> UDMA_DB_H_OFFSET; + } +} + +void udma_init_jfc_param(struct ubcore_jfc_cfg *cfg, + struct udma_jfc *jfc) +{ + jfc->base.id = jfc->jfcn; + jfc->base.jfc_cfg = *cfg; + jfc->ceqn = cfg->ceqn; + jfc->lock_free = cfg->flag.bs.lock_free; + jfc->inline_en = cfg->flag.bs.jfc_inline; + jfc->cq_shift = ilog2(jfc->buf.entry_cnt); +} + +int udma_check_jfc_cfg(struct udma_dev *dev, struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg) +{ + if (!jfc->buf.entry_cnt || jfc->buf.entry_cnt > dev->caps.jfc.depth) { + dev_err(dev->dev, "invalid jfc depth = %u, cap depth = %u.\n", + jfc->buf.entry_cnt, dev->caps.jfc.depth); + return -EINVAL; + } + + if (jfc->buf.entry_cnt < UDMA_JFC_DEPTH_MIN) + jfc->buf.entry_cnt = UDMA_JFC_DEPTH_MIN; + + if (cfg->ceqn >= dev->caps.comp_vector_cnt) { + dev_err(dev->dev, "invalid ceqn = %u, cap ceq cnt = %u.\n", + cfg->ceqn, dev->caps.comp_vector_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_get_cmd_from_user(struct udma_create_jfc_ucmd *ucmd, + struct udma_dev *dev, + struct ubcore_udata *udata, + struct udma_jfc *jfc) +{ +#define UDMA_JFC_CQE_SHIFT 6 + unsigned long byte; + + if 
(!udata->udrv_data || !udata->udrv_data->in_addr) { + dev_err(dev->dev, "jfc udrv_data or in_addr is null.\n"); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + min(udata->udrv_data->in_len, + (uint32_t)sizeof(*ucmd))); + if (byte) { + dev_err(dev->dev, + "failed to copy udata from user, byte = %lu.\n", byte); + return -EFAULT; + } + + jfc->mode = ucmd->mode; + jfc->ctx = to_udma_context(udata->uctx); + if (jfc->mode > UDMA_NORMAL_JFC_TYPE && jfc->mode < UDMA_KERNEL_STARS_JFC_TYPE) { + jfc->buf.entry_cnt = ucmd->buf_len; + return 0; + } + + jfc->db.db_addr = ucmd->db_addr; + jfc->buf.entry_cnt = ucmd->buf_len >> UDMA_JFC_CQE_SHIFT; + + return 0; +} + +static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, + struct ubcore_udata *udata, struct udma_jfc *jfc) +{ + struct udma_context *uctx; + uint32_t size; + int ret = 0; + + if (udata) { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfc->buf); + if (ret) { + dev_err(dev->dev, "failed to pin queue for jfc, ret = %d.\n", ret); + return ret; + } + uctx = to_udma_context(udata->uctx); + jfc->tid = uctx->tid; + ret = udma_pin_sw_db(uctx, &jfc->db); + if (ret) { + dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); + unpin_queue_addr(jfc->buf.umem); + } + + return ret; + } + + if (!jfc->lock_free) + spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; + jfc->tid = dev->tid; + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, size, &jfc->buf); + if (ret) { + dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); + return ret; + } + + ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + udma_k_free_buf(dev, size, &jfc->buf); + return -ENOMEM; + } + + return ret; +} + +static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct udma_context *uctx; + uint32_t size; + + if (jfc->buf.kva) { + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + udma_k_free_buf(dev, size, &jfc->buf); + } else if (jfc->buf.umem) { + uctx = to_udma_context(jfc->base.uctx); + unpin_queue_addr(jfc->buf.umem); + } + + if (jfc->db.page) { + uctx = to_udma_context(jfc->base.uctx); + udma_unpin_sw_db(uctx, &jfc->db); + } else if (jfc->db.kpage) { + udma_free_sw_db(dev, &jfc->db); + } +} + +int udma_post_create_jfc_mbox(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFCC.\n"); + return -ENOMEM; + } + + if (jfc->mode == UDMA_STARS_JFC_TYPE || jfc->mode == UDMA_CCU_JFC_TYPE || + jfc->mode == UDMA_KERNEL_STARS_JFC_TYPE) + jfc->stars_en = true; + udma_construct_jfc_ctx(dev, jfc, (struct udma_jfc_ctx *)mailbox->buf); + + mbox_attr.tag = jfc->jfcn; + mbox_attr.op = UDMA_CMD_CREATE_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post create JFC mailbox, ret = %d.\n", ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_verify_stars_jfc_param(struct udma_dev *dev, + struct udma_ex_jfc_addr *jfc_addr, + struct udma_jfc *jfc) +{ + uint32_t size; + + if (!jfc_addr->cq_addr) { + dev_err(dev->dev, "CQE addr is wrong.\n"); + return -ENOMEM; + } + if (!jfc_addr->cq_len) { + dev_err(dev->dev, "CQE len is wrong.\n"); + return -EINVAL; + } 
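+ /* Presence checks passed; the preregistered CQ buffer must additionally match the requested depth exactly: entry_cnt * cqe_size == cq_len. */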
+ + size = jfc->buf.entry_cnt * dev->caps.cqe_size; + + if (size != jfc_addr->cq_len) { + dev_err(dev->dev, "cqe buff size is wrong, buf size = %u.\n", size); + return -EINVAL; + } + + return 0; +} + +static int udma_get_stars_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct udma_ex_jfc_addr *jfc_addr = &dev->cq_addr_array[jfc->mode]; + int ret; + + jfc->tid = dev->tid; + + ret = udma_verify_stars_jfc_param(dev, jfc_addr, jfc); + if (ret) + return ret; + + jfc->buf.addr = (dma_addr_t)(uintptr_t)jfc_addr->cq_addr; + + ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + return -ENOMEM; + } + + return ret; +} + +static int udma_create_stars_jfc(struct udma_dev *dev, + struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata, + struct udma_create_jfc_ucmd *ucmd) +{ + unsigned long flags_store; + unsigned long flags_erase; + int ret; + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, &jfc->jfcn); + if (ret) { + dev_err(dev->dev, "failed to alloc id for stars JFC.\n"); + return -ENOMEM; + } + + udma_init_jfc_param(cfg, jfc); + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored stars jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_stars_jfc_buf(dev, jfc); + if (ret) + goto err_alloc_cqc; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_get_jfc_buf; + + refcount_set(&jfc->event_refcount, 1); + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return 0; + +err_get_jfc_buf: + udma_free_sw_db(dev, &jfc->db); +err_alloc_cqc: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); + + return -ENOMEM; +} + +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *dev = to_udma_dev(ubcore_dev); + struct udma_create_jfc_ucmd ucmd = {}; + unsigned long flags_store; + unsigned long flags_erase; + struct udma_jfc *jfc; + int ret; + + jfc = kzalloc(sizeof(struct udma_jfc), GFP_KERNEL); + if (!jfc) + return NULL; + + if (udata) { + ret = udma_get_cmd_from_user(&ucmd, dev, udata, jfc); + if (ret) + goto err_get_cmd; + } else { + jfc->arm_sn = 1; + jfc->buf.entry_cnt = cfg->depth ? 
roundup_pow_of_two(cfg->depth) : cfg->depth; + } + + ret = udma_check_jfc_cfg(dev, jfc, cfg); + if (ret) + goto err_get_cmd; + + if (jfc->mode == UDMA_STARS_JFC_TYPE || jfc->mode == UDMA_CCU_JFC_TYPE) { + if (udma_create_stars_jfc(dev, jfc, cfg, udata, &ucmd)) + goto err_get_cmd; + return &jfc->base; + } + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, + &jfc->jfcn); + if (ret) + goto err_get_cmd; + + udma_init_jfc_param(cfg, jfc); + + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_jfc_buf(dev, &ucmd, udata, jfc); + if (ret) + goto err_get_jfc_buf; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_alloc_cqc; + + refcount_set(&jfc->event_refcount, 1); + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return &jfc->base; + +err_alloc_cqc: + jfc->base.uctx = (udata == NULL ? NULL : udata->uctx); + udma_free_jfc_buf(dev, jfc); +err_get_jfc_buf: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); +err_get_cmd: + kfree(jfc); + return NULL; +} + int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data) { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 8cb7271739d7..eba31242050c 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -7,10 +7,43 @@ #include "udma_dev.h" #include "udma_ctx.h" +#define UDMA_JFC_DEPTH_MIN 64 #define UDMA_JFC_DEPTH_SHIFT_BASE 6 +#define CQE_VA_L_OFFSET 12 +#define CQE_VA_H_OFFSET 32 + +#define UDMA_DB_L_OFFSET 6 +#define UDMA_DB_H_OFFSET 38 + +#define UDMA_STARS_SWITCH 1 + +enum udma_jfc_state { + UDMA_JFC_STATE_INVALID, + UDMA_JFC_STATE_VALID, + UDMA_JFC_STATE_ERROR, +}; + +enum udma_armed_jfc { + UDMA_CTX_NO_ARMED, + UDMA_CTX_ALWAYS_ARMED, + UDMA_CTX_REG_NEXT_CEQE, + UDMA_CTX_REG_NEXT_SOLICITED_CEQE, +}; + +enum udma_record_db { + UDMA_NO_RECORD_EN, + UDMA_RECORD_EN, +}; + +enum udma_cq_cnt_mode { + UDMA_CQE_CNT_MODE_BY_COUNT, + UDMA_CQE_CNT_MODE_BY_CI_PI_GAP, +}; + struct udma_jfc { struct ubcore_jfc base; + struct udma_context *ctx; uint32_t jfcn; uint32_t ceqn; uint32_t tid; @@ -103,6 +136,9 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) return container_of(jfc, struct udma_jfc, base); } +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata); int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index c5911b027ba6..72457ae13878 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -20,6 +20,7 @@ #include "udma_eq.h" #include "udma_segment.h" #include "udma_jfs.h" +#include "udma_jfc.h" #include "udma_jfr.h" #include "udma_cmd.h" #include "udma_ctx.h" @@ -31,6 +32,7 @@ #include "udma_common.h" #include "udma_ctrlq_tp.h" +bool cqe_mode = true; bool is_rmmod; static DEFINE_MUTEX(udma_reset_mutex); uint32_t jfr_sleep_time = 1000; @@ -176,6 +178,7 @@ static 
struct ubcore_ops g_dev_ops = { .unregister_seg = udma_unregister_seg, .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, + .create_jfc = udma_create_jfc, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, @@ -1095,6 +1098,9 @@ module_init(udma_init); module_exit(udma_exit); MODULE_LICENSE("GPL"); +module_param(cqe_mode, bool, 0444); +MODULE_PARM_DESC(cqe_mode, "Set cqe reporting mode, default: 1 (0:BY_COUNT, 1:BY_CI_PI_GAP)"); + module_param(jfr_sleep_time, uint, 0444); MODULE_PARM_DESC(jfr_sleep_time, "Set the destroy jfr sleep time, default: 1000 us.\n"); -- Gitee From 91945efc8531def97a3f9f6fa511841b6e6f10b4 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:06:10 +0800 Subject: [PATCH 073/243] ub: udma: Support destroy jfc. commit 10408c91c73de7803de5f335804d8c85c6b6ddcf openEuler This patch adds the ability to destroy jfc, During the destruction process, driver will destroy jfc context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 105 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 107 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 9d86d9003593..bfbf479ec06f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -399,6 +399,111 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, return NULL; } +static int udma_post_destroy_jfc_mbox(struct udma_dev *dev, uint32_t jfcn) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *ctx; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFCC.\n"); + return -ENOMEM; + } + + ctx = (struct udma_jfc_ctx *)mailbox->buf; + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_DESTROY_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post destroy JFC mailbox, ret = %d.\n", + ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_query_jfc_destroy_done(struct udma_dev *dev, uint32_t jfcn) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *jfc_ctx; + int ret; + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_QUERY_JFC_CONTEXT; + mailbox = udma_mailbox_query_ctx(dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfc_ctx = (struct udma_jfc_ctx *)mailbox->buf; + ret = jfc_ctx->pi == jfc_ctx->wr_cqe_idx ? 
0 : -EAGAIN; + + jfc_ctx->cqe_token_value = 0; + jfc_ctx->remote_token_value = 0; + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_destroy_and_flush_jfc(struct udma_dev *dev, uint32_t jfcn) +{ +#define QUERY_MAX_TIMES 5 + uint32_t wait_times = 0; + int ret; + + ret = udma_post_destroy_jfc_mbox(dev, jfcn); + if (ret) { + dev_err(dev->dev, "failed to post mbox to destroy jfc, id: %u.\n", jfcn); + return ret; + } + + while (true) { + if (udma_query_jfc_destroy_done(dev, jfcn) == 0) + return 0; + if (wait_times > QUERY_MAX_TIMES) + break; + msleep(1 << wait_times); + wait_times++; + } + dev_err(dev->dev, "jfc flush timed out, id: %u.\n", jfcn); + + return -EFAULT; +} + +int udma_destroy_jfc(struct ubcore_jfc *jfc) +{ + struct udma_dev *dev = to_udma_dev(jfc->ub_dev); + struct udma_jfc *ujfc = to_udma_jfc(jfc); + unsigned long flags; + int ret; + + ret = udma_destroy_and_flush_jfc(dev, ujfc->jfcn); + if (ret) + return ret; + + xa_lock_irqsave(&dev->jfc_table.xa, flags); + __xa_erase(&dev->jfc_table.xa, ujfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags); + + if (refcount_dec_and_test(&ujfc->event_refcount)) + complete(&ujfc->event_comp); + wait_for_completion(&ujfc->event_comp); + + if (dfx_switch) + udma_dfx_delete_id(dev, &dev->dfx_info->jfc, jfc->id); + + udma_free_jfc_buf(dev, ujfc); + udma_id_free(&dev->jfc_table.ida_table, ujfc->jfcn); + kfree(ujfc); + + return 0; +} + int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data) { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index eba31242050c..21f4016a42cd 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -139,6 +139,7 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, struct ubcore_jfc_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfc(struct ubcore_jfc *jfc); int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 72457ae13878..d1d45200b585 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -179,6 +179,7 @@ static struct ubcore_ops g_dev_ops = { .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, .create_jfc = udma_create_jfc, + .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, -- Gitee From 64f89f3883ec6c7bb34e21ce8f2ac897d380f188 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:35:29 +0800 Subject: [PATCH 074/243] ub: udma: Support create jetty. commit 4d0a86f5836c68cb1303313cd3b7eed1876864b2 openEuler This patch adds the ability to create jetty. During the creation process, driver will create jetty context and send it to the hardware. 
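The flush path above polls the hardware with a capped exponential backoff: query the context, and if it is not yet drained, sleep 1, 2, 4, ... milliseconds between retries until the retry budget runs out. A stand-alone sketch of the loop used by udma_destroy_and_flush_jfc (query_done is a stand-in for the mailbox query, not a driver API):

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define QUERY_MAX_TIMES 5 /* same retry budget as the patch */

/* Stand-in for the mailbox query; reports done on the fourth poll. */
static bool query_done(void)
{
	static int polls;

	return ++polls >= 4;
}

/* Poll with doubling waits; 0 on success, -1 on timeout. */
static int poll_with_backoff(void)
{
	unsigned int wait_times = 0;

	while (true) {
		if (query_done())
			return 0;
		if (wait_times > QUERY_MAX_TIMES)
			return -1;
		/* mirrors msleep(1 << wait_times) in the driver */
		usleep((1u << wait_times) * 1000u);
		wait_times++;
	}
}

int main(void)
{
	printf("flush %s\n", poll_with_backoff() ? "timed out" : "done");
	return 0;
}

The jetty-side udma_wait_timeout uses the same doubling waits but bounds them by accumulated time against ta_timeout rather than by a retry count.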
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 300 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 304 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index b012010c0e74..63e5eae5f7b7 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -18,6 +18,144 @@ bool well_known_jetty_pgsz_check = true; +static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jetty, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + struct udma_context *uctx; + unsigned long byte; + + if (!udata) { + jetty->sq.jetty_type = (enum udma_jetty_type)UDMA_URMA_NORMAL_JETTY_TYPE; + return 0; + } + + if (!udata->udrv_data) { + dev_err(dev->dev, "jetty udata udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + dev_err(dev->dev, "jetty in_len (%u) or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jetty udata, byte = %lu.\n", byte); + return -EFAULT; + } + + uctx = to_udma_context(udata->uctx); + jetty->sq.tid = uctx->tid; + jetty->jetty_addr = ucmd->jetty_addr; + jetty->pi_type = ucmd->pi_type; + jetty->sq.jetty_type = (enum udma_jetty_type)ucmd->jetty_type; + jetty->sq.non_pin = ucmd->non_pin; + + return 0; +} + +static int udma_get_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, + struct ubcore_udata *udata, + struct ubcore_jetty_cfg *cfg, + struct udma_create_jetty_ucmd *ucmd) +{ + struct ubcore_jfs_cfg jfs_cfg = { + .depth = cfg->jfs_depth, + .trans_mode = cfg->trans_mode, + .priority = cfg->priority, + .max_sge = cfg->max_send_sge, + .max_rsge = cfg->max_send_rsge, + .max_inline_data = cfg->max_inline_data, + .rnr_retry = cfg->rnr_retry, + .err_timeout = cfg->err_timeout, + .jfs_context = cfg->jetty_context, + .jfc = cfg->send_jfc, + }; + int ret; + + jfs_cfg.flag.bs.lock_free = cfg->flag.bs.lock_free; + if (!udata) + jetty->jetty_addr = (uintptr_t)&jetty->sq; + + jetty->jfr = to_udma_jfr(cfg->jfr); + + ret = udata ? 
udma_alloc_u_sq_buf(dev, &jetty->sq, ucmd) : + udma_alloc_k_sq_buf(dev, &jetty->sq, &jfs_cfg); + if (ret) { + dev_err(dev->dev, "failed to get sq buf, ret = %d.\n", ret); + return ret; + } + jetty->sq.trans_mode = jfs_cfg.trans_mode; + jetty->sq.is_jetty = true; + + return ret; +} + +static void udma_init_jettyc(struct udma_dev *dev, struct ubcore_jetty_cfg *cfg, + struct udma_jetty *jetty, void *mb_buf) +{ + struct udma_jetty_ctx *ctx = (struct udma_jetty_ctx *)mb_buf; + struct udma_jfc *receive_jfc = to_udma_jfc(cfg->recv_jfc); + uint8_t i; + + ctx->state = JETTY_READY; + ctx->jfs_mode = JETTY; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->sl = dev->udma_sl[UDMA_DEFAULT_SL_NUM]; + if (ctx->type == JETTY_RM || ctx->type == JETTY_RC) { + for (i = 0; i < dev->udma_total_sl_num; i++) { + if (cfg->priority == dev->udma_sl[i]) { + ctx->sl = cfg->priority; + break; + } + } + } else if (ctx->type == JETTY_UM) { + ctx->sl = dev->unic_sl[UDMA_DEFAULT_SL_NUM]; + for (i = 0; i < dev->unic_sl_num; i++) { + if (cfg->priority == dev->unic_sl[i]) { + ctx->sl = cfg->priority; + break; + } + } + } + ctx->sqe_base_addr_l = (jetty->sq.buf.addr >> SQE_VA_L_OFFSET) & + (uint32_t)SQE_VA_L_VALID_BIT; + ctx->sqe_base_addr_h = (jetty->sq.buf.addr >> SQE_VA_H_OFFSET) & + (uint32_t)SQE_VA_H_VALID_BIT; + ctx->sqe_token_id_l = jetty->sq.tid & (uint32_t)SQE_TOKEN_ID_L_MASK; + ctx->sqe_token_id_h = (jetty->sq.tid >> SQE_TOKEN_ID_H_OFFSET) & + (uint32_t)SQE_TOKEN_ID_H_MASK; + ctx->sqe_bb_shift = ilog2(roundup_pow_of_two(jetty->sq.buf.entry_cnt)); + ctx->tx_jfcn = cfg->send_jfc->id; + ctx->ta_timeout = to_ta_timeout(cfg->err_timeout); + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_RNR_RETRY)) + ctx->rnr_retry_num = cfg->rnr_retry; + + ctx->jfrn_l = jetty->jfr->rq.id; + ctx->jfrn_h = jetty->jfr->rq.id >> JETTY_CTX_JFRN_H_OFFSET; + ctx->rx_jfcn = cfg->recv_jfc->id; + ctx->user_data_l = jetty->jetty_addr; + ctx->user_data_h = jetty->jetty_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->seid_idx = cfg->eid_index; + ctx->pi_type = jetty->pi_type ? 
1 : 0; + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->cqe_ie = receive_jfc->inline_en; + + ctx->err_mode = cfg->flag.bs.error_suspend; + ctx->cmp_odr = cfg->flag.bs.outorder_comp; + ctx->avail_sgmt_ost = AVAIL_SGMT_OST_INIT; + ctx->sqe_pld_tokenid = jetty->sq.tid & (uint32_t)SQE_PLD_TOKEN_ID_MASK; + ctx->next_send_ssn = get_random_u16(); + ctx->next_rcv_ssn = ctx->next_send_ssn; +} + static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) { struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table; @@ -244,6 +382,114 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, return ret; } +static void free_jetty_id(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty, bool is_grp) +{ + if (udma_jetty->sq.id < udma_dev->caps.jetty.start_idx) + udma_id_free(&udma_dev->rsvd_jetty_ida_table, udma_jetty->sq.id); + else + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + udma_jetty->sq.id, false); +} + +static void udma_dfx_store_jetty_id(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty) +{ + struct udma_dfx_jetty *jetty; + int ret; + + jetty = (struct udma_dfx_jetty *)xa_load(&udma_dev->dfx_info->jetty.table, + udma_jetty->sq.id); + if (jetty) { + dev_warn(udma_dev->dev, "jetty_id(%u) already exists in dfx.\n", + udma_jetty->sq.id); + return; + } + + jetty = kzalloc(sizeof(*jetty), GFP_KERNEL); + if (!jetty) + return; + + jetty->id = udma_jetty->sq.id; + jetty->jfs_depth = udma_jetty->sq.buf.entry_cnt / udma_jetty->sq.sqe_bb_cnt; + + write_lock(&udma_dev->dfx_info->jetty.rwlock); + ret = xa_err(xa_store(&udma_dev->dfx_info->jetty.table, udma_jetty->sq.id, + jetty, GFP_KERNEL)); + if (ret) { + write_unlock(&udma_dev->dfx_info->jetty.rwlock); + dev_err(udma_dev->dev, "store jetty_id(%u) to jetty_table failed in dfx.\n", + udma_jetty->sq.id); + kfree(jetty); + return; + } + + ++udma_dev->dfx_info->jetty.cnt; + write_unlock(&udma_dev->dfx_info->jetty.rwlock); +} + +static int +udma_alloc_jetty_sq(struct udma_dev *udma_dev, struct udma_jetty *jetty, + struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + int ret; + + ret = udma_get_user_jetty_cmd(udma_dev, jetty, udata, &ucmd); + if (ret) { + dev_err(udma_dev->dev, + "udma get user jetty ucmd failed, ret = %d.\n", ret); + return ret; + } + + ret = alloc_jetty_id(udma_dev, &jetty->sq, cfg->id, cfg->jetty_grp); + if (ret) { + dev_err(udma_dev->dev, "alloc jetty id failed, ret = %d.\n", ret); + return ret; + } + jetty->ubcore_jetty.jetty_id.id = jetty->sq.id; + jetty->ubcore_jetty.jetty_cfg = *cfg; + + ret = udma_get_jetty_buf(udma_dev, jetty, udata, cfg, &ucmd); + if (ret) + free_jetty_id(udma_dev, jetty, !!cfg->jetty_grp); + + return ret; +} + +static void udma_free_jetty_id_buf(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty, + struct ubcore_jetty_cfg *cfg) +{ + udma_free_sq_buf(udma_dev, &udma_jetty->sq); + free_jetty_id(udma_dev, udma_jetty, !!cfg->jetty_grp); +} + +static int udma_create_hw_jetty_ctx(struct udma_dev *dev, struct udma_jetty *udma_jetty, + struct ubcore_jetty_cfg *cfg) +{ + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + int ret; + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jetty priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; + } + + udma_init_jettyc(dev, cfg, udma_jetty, &ctx); + + attr.tag = udma_jetty->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, 
&ctx, sizeof(ctx), &attr); + if (ret) + dev_err(dev->dev, + "post mailbox create jetty ctx failed, ret = %d.\n", ret); + + return ret; +} + void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) { #define UDMA_TA_TIMEOUT_MAX_INDEX 3 @@ -262,6 +508,60 @@ void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) sq->ta_timeout = time[index]; } +struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_jetty *udma_jetty; + int ret; + + udma_jetty = kzalloc(sizeof(*udma_jetty), GFP_KERNEL); + if (!udma_jetty) + return NULL; + + ret = udma_alloc_jetty_sq(udma_dev, udma_jetty, cfg, udata); + if (ret) { + dev_err(udma_dev->dev, + "udma alloc jetty id buf failed, ret = %d.\n", ret); + goto err_alloc_jetty; + } + + ret = xa_err(xa_store(&udma_dev->jetty_table.xa, udma_jetty->sq.id, + &udma_jetty->sq, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, + "store jetty sq(%u) to sq table failed, ret = %d.\n", + udma_jetty->sq.id, ret); + goto err_store_jetty_sq; + } + + ret = udma_create_hw_jetty_ctx(udma_dev, udma_jetty, cfg); + if (ret) { + dev_err(udma_dev->dev, + "post mailbox create jetty ctx failed, ret = %d.\n", ret); + goto err_create_hw_jetty; + } + + udma_set_query_flush_time(&udma_jetty->sq, cfg->err_timeout); + udma_jetty->sq.state = UBCORE_JETTY_STATE_READY; + refcount_set(&udma_jetty->ae_refcount, 1); + init_completion(&udma_jetty->ae_comp); + + if (dfx_switch) + udma_dfx_store_jetty_id(udma_dev, udma_jetty); + + return &udma_jetty->ubcore_jetty; +err_create_hw_jetty: + xa_erase(&udma_dev->jetty_table.xa, udma_jetty->sq.id); +err_store_jetty_sq: + udma_free_jetty_id_buf(udma_dev, udma_jetty, cfg); +err_alloc_jetty: + kfree(udma_jetty); + + return NULL; +} + int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id) { struct ubase_mbx_attr attr = {}; diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 63d7073b8631..0c6d409520eb 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -221,6 +221,9 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index d1d45200b585..6d2832081630 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -187,6 +187,7 @@ static struct ubcore_ops g_dev_ops = { .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, + .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, }; -- Gitee From 02643e0417634cbf5a490e62783216ac53ea12a7 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:50:18 +0800 Subject: [PATCH 075/243] ub: udma: Support destroy jetty. 
commit 70df47775eb5fec27c8b0fccfadeb97d9f0e210a openEuler This patch adds the ability to destroy a jetty. During the destruction process, the driver destroys the jetty context and notifies the hardware via a mailbox command. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 46 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 48 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 63e5eae5f7b7..bc35512c57b6 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -761,3 +761,49 @@ int udma_modify_and_destroy_jetty(struct udma_dev *dev, return 0; } + +static void udma_free_jetty(struct ubcore_jetty *jetty) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty, + udma_jetty->sq.id); + + xa_erase(&udma_dev->jetty_table.xa, udma_jetty->sq.id); + + if (refcount_dec_and_test(&udma_jetty->ae_refcount)) + complete(&udma_jetty->ae_comp); + wait_for_completion(&udma_jetty->ae_comp); + + udma_free_sq_buf(udma_dev, &udma_jetty->sq); + free_jetty_id(udma_dev, udma_jetty, !!udma_jetty->sq.jetty_grp); + kfree(udma_jetty); +} + +int udma_destroy_jetty(struct ubcore_jetty *jetty) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + if (!udma_jetty->ue_rx_closed && udma_close_ue_rx(udma_dev, true, true, false, 0)) { + dev_err(udma_dev->dev, "close ue rx failed when destroying jetty.\n"); + return -EINVAL; + } + + ret = udma_modify_and_destroy_jetty(udma_dev, &udma_jetty->sq); + if (ret) { + dev_err(udma_dev->dev, "udma modify error and destroy jetty failed, id: %u.\n", + jetty->jetty_id.id); + if (!udma_jetty->ue_rx_closed) + udma_open_ue_rx(udma_dev, true, true, false, 0); + return ret; + } + + udma_free_jetty(jetty); + udma_open_ue_rx(udma_dev, true, true, false, 0); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 0c6d409520eb..a37c9a9ff54f 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -224,6 +224,7 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jetty(struct ubcore_jetty *jetty); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 6d2832081630..a1f3bcb5dde3 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -189,6 +189,7 @@ static struct ubcore_ops g_dev_ops = { .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, + .destroy_jetty = udma_destroy_jetty, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 215b0c5601c20de9b1914b9410b8a945ff61a3fb Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 17:05:48 +0800 Subject: [PATCH 076/243] ub: udma: Support create jetty group. 
commit fdebd311c4b97415cdc307c322944dbc691f32d0 openEuler This patch adds the ability to create a jetty group. During the creation process, the driver creates the jetty group context and sends it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 112 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 116 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index bc35512c57b6..7cbffa81bf1d 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -807,3 +807,115 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } + +static int udma_alloc_group_start_id(struct udma_dev *udma_dev, + struct udma_group_bitmap *bitmap_table, + uint32_t *start_jetty_id) +{ + int ret; + + ret = udma_adv_id_alloc(udma_dev, bitmap_table, start_jetty_id, true, + bitmap_table->grp_next); + if (ret) { + ret = udma_adv_id_alloc(udma_dev, bitmap_table, start_jetty_id, + true, bitmap_table->min); + if (ret) + return ret; + } + + bitmap_table->grp_next = (*start_jetty_id + NUM_JETTY_PER_GROUP) > + bitmap_table->max ? bitmap_table->min : + (*start_jetty_id + NUM_JETTY_PER_GROUP); + + return 0; +} + +static int udma_alloc_jetty_grp_id(struct udma_dev *udma_dev, + struct udma_jetty_grp *jetty_grp) +{ + int ret; + + ret = udma_alloc_group_start_id(udma_dev, &udma_dev->jetty_table.bitmap_table, + &jetty_grp->start_jetty_id); + if (ret) { + dev_err(udma_dev->dev, + "alloc jetty id for grp failed, ret = %d.\n", ret); + return ret; + } + + ret = udma_id_alloc_auto_grow(udma_dev, &udma_dev->jetty_grp_table.ida_table, + &jetty_grp->jetty_grp_id); + if (ret) { + dev_err(udma_dev->dev, + "alloc jetty grp id failed, ret = %d.\n", ret); + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + jetty_grp->start_jetty_id, true); + return ret; + } + + jetty_grp->ubcore_jetty_grp.jetty_grp_id.id = jetty_grp->jetty_grp_id; + + return 0; +} + +struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jetty_grp_ctx ctx = {}; + struct udma_jetty_grp *jetty_grp; + int ret; + + if (cfg->policy != UBCORE_JETTY_GRP_POLICY_HASH_HINT) { + dev_err(udma_dev->dev, "policy %u not support.\n", cfg->policy); + return NULL; + } + + jetty_grp = kzalloc(sizeof(*jetty_grp), GFP_KERNEL); + if (!jetty_grp) + return NULL; + + ret = udma_alloc_jetty_grp_id(udma_dev, jetty_grp); + if (ret) + goto err_alloc_jetty_grp_id; + + ctx.start_jetty_id = jetty_grp->start_jetty_id; + + ret = xa_err(xa_store(&udma_dev->jetty_grp_table.xa, jetty_grp->jetty_grp_id, + jetty_grp, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, "store jetty group(%u) failed, ret = %d.\n", + jetty_grp->jetty_grp_id, ret); + goto err_store_jetty_grp; + } + + mbox_attr.tag = jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_CREATE_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, &ctx, sizeof(ctx), &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, + "post mailbox update jetty ctx failed, ret = %d.\n", ret); + goto err_post_mailbox; + } + + mutex_init(&jetty_grp->valid_lock); + refcount_set(&jetty_grp->ae_refcount, 1); + init_completion(&jetty_grp->ae_comp); + + if (dfx_switch) + udma_dfx_store_id(udma_dev, &udma_dev->dfx_info->jetty_grp, + 
jetty_grp->jetty_grp_id, "jetty_grp"); + + return &jetty_grp->ubcore_jetty_grp; +err_post_mailbox: + xa_erase(&udma_dev->jetty_grp_table.xa, jetty_grp->jetty_grp_id); +err_store_jetty_grp: + udma_id_free(&udma_dev->jetty_grp_table.ida_table, + jetty_grp->jetty_grp_id); +err_alloc_jetty_grp_id: + kfree(jetty_grp); + + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index a37c9a9ff54f..b2e65a8c8f86 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -225,6 +225,9 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index a1f3bcb5dde3..baf9a970fed8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -190,6 +190,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .create_jetty_grp = udma_create_jetty_grp, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 4d71e837bee787b60da0631be33c7ee86349d881 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 17:24:30 +0800 Subject: [PATCH 077/243] ub: udma: Support destroy jetty group. commit a9fe853ef4163a534f78f2b380bdf51f845b0242 openEuler This patch adds the ability to destroy a jetty group. During the destruction process, the driver destroys the jetty group context and notifies the hardware via a mailbox command. 
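Note on the teardown ordering used here and in udma_destroy_jetty (patch 75): both paths quiesce asynchronous event handlers with the same refcount-plus-completion idiom before freeing memory. The sketch below is a minimal model of that idiom, not driver code; the names and the handler side are illustrative assumptions, since these hunks do not show where ae_refcount is taken.

#include <linux/refcount.h>
#include <linux/completion.h>

/* Hypothetical stand-in for udma_jetty / udma_jetty_grp. */
struct quiesce_obj {
	refcount_t ae_refcount;		/* initialized to 1 at create time */
	struct completion ae_comp;
};

/* Assumed async-event path: pin the object around the handler body. */
static void obj_event_handler(struct quiesce_obj *o)
{
	if (!refcount_inc_not_zero(&o->ae_refcount))
		return;				/* teardown already started */
	/* ... handle the event ... */
	if (refcount_dec_and_test(&o->ae_refcount))
		complete(&o->ae_comp);		/* we were the last user */
}

/* Destroy path, mirroring the hunks above: drop the creation
 * reference, then block until no handler remains in flight.
 */
static void obj_quiesce(struct quiesce_obj *o)
{
	if (refcount_dec_and_test(&o->ae_refcount))
		complete(&o->ae_comp);
	wait_for_completion(&o->ae_comp);
}

Both hunks erase the object from its xarray before waiting, so no new handler can look it up, and only free the memory after wait_for_completion() returns.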
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 41 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 +- drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 7cbffa81bf1d..67534599f23d 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -919,3 +919,44 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, return NULL; } + +int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) +{ + struct udma_jetty_grp *udma_jetty_grp = to_udma_jetty_grp(jetty_grp); + struct udma_dev *udma_dev = to_udma_dev(jetty_grp->ub_dev); + struct ubase_mbx_attr mbox_attr = {}; + int ret; + + mbox_attr.tag = udma_jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_DESTROY_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, NULL, 0, &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, + "post mailbox destroy jetty group failed, ret = %d.\n", ret); + return ret; + } + + xa_erase(&udma_dev->jetty_grp_table.xa, udma_jetty_grp->jetty_grp_id); + + if (refcount_dec_and_test(&udma_jetty_grp->ae_refcount)) + complete(&udma_jetty_grp->ae_comp); + wait_for_completion(&udma_jetty_grp->ae_comp); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty_grp, + udma_jetty_grp->jetty_grp_id); + + if (udma_jetty_grp->valid != 0) + dev_err(udma_dev->dev, + "jetty group been used, jetty valid is 0x%x.\n", + udma_jetty_grp->valid); + + mutex_destroy(&udma_jetty_grp->valid_lock); + udma_id_free(&udma_dev->jetty_grp_table.ida_table, + udma_jetty_grp->jetty_grp_id); + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + udma_jetty_grp->start_jetty_id, true); + kfree(udma_jetty_grp); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index b2e65a8c8f86..8f23621a58f6 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -228,8 +228,9 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); -int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); +int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index baf9a970fed8..2491aea7f9e2 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -191,6 +191,7 @@ static struct ubcore_ops g_dev_ops = { .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, .create_jetty_grp = udma_create_jetty_grp, + .delete_jetty_grp = udma_delete_jetty_grp, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From bff0bca39679b03da12dc710ee71799051ff242c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 19:52:57 +0800 Subject: [PATCH 078/243] ub: udma: Support modify jfs. commit 0ea69661e7ac19a85dcad8c250275d7f9582731b openEuler This patch adds the ability to modify jfs. 
During the modify jfs process, the driver will post mailbox to notify the hardware to modify. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 58 +++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 11 +++- drivers/ub/urma/hw/udma/udma_jfs.c | 77 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 5 files changed, 148 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 67534599f23d..74f9a30e2be3 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -18,6 +18,22 @@ bool well_known_jetty_pgsz_check = true; +const char *state_name[] = { + "RESET", + "READY", + "SUSPENDED", + "ERROR", + "INVALID" +}; + +const char *to_state_name(enum ubcore_jetty_state state) +{ + if ((int)state >= (int)STATE_NUM) + return state_name[STATE_NUM]; + + return state_name[state]; +} + static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jetty, struct ubcore_udata *udata, struct udma_create_jetty_ucmd *ucmd) @@ -465,6 +481,14 @@ static void udma_free_jetty_id_buf(struct udma_dev *udma_dev, free_jetty_id(udma_dev, udma_jetty, !!cfg->jetty_grp); } +void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq) +{ + sq->kva_curr = sq->buf.kva; + sq->pi = 0; + sq->ci = 0; + sq->flush_flag = false; +} + static int udma_create_hw_jetty_ctx(struct udma_dev *dev, struct udma_jetty *udma_jetty, struct ubcore_jetty_cfg *cfg) { @@ -808,6 +832,40 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, + enum ubcore_jetty_state attr_state) +{ + switch (jetty_state) { + case UBCORE_JETTY_STATE_RESET: + return attr_state == UBCORE_JETTY_STATE_READY; + case UBCORE_JETTY_STATE_READY: + return attr_state == UBCORE_JETTY_STATE_ERROR || + attr_state == UBCORE_JETTY_STATE_SUSPENDED; + case UBCORE_JETTY_STATE_SUSPENDED: + return attr_state == UBCORE_JETTY_STATE_ERROR; + case UBCORE_JETTY_STATE_ERROR: + return attr_state == UBCORE_JETTY_STATE_RESET; + default: + break; + } + + return false; +} + +enum jetty_state to_jetty_state(enum ubcore_jetty_state state) +{ + switch (state) { + case UBCORE_JETTY_STATE_ERROR: + return JETTY_ERROR; + case UBCORE_JETTY_STATE_SUSPENDED: + return JETTY_SUSPEND; + default: + break; + } + + return STATE_NUM; +} + static int udma_alloc_group_start_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, uint32_t *start_jetty_id) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 8f23621a58f6..013fb8ddd17f 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -219,6 +219,10 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu return container_of(queue, struct udma_jetty, sq); } +enum jetty_state to_jetty_state(enum ubcore_jetty_state state); +const char *to_state_name(enum ubcore_jetty_state state); +bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, + enum ubcore_jetty_state attr_state); int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, @@ -229,9 +233,14 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata 
*udata); int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); -void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, + enum jetty_state state); + +void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); +int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index cb00cec5ccfd..e770bc5f6a2f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -409,3 +409,80 @@ int udma_destroy_jfs(struct ubcore_jfs *jfs) return 0; } + +static int udma_modify_jfs_state(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs, + struct ubcore_jfs_attr *attr) +{ + int ret; + + switch (attr->state) { + case UBCORE_JETTY_STATE_RESET: + ret = udma_destroy_hw_jetty_ctx(udma_dev, udma_jfs->sq.id); + break; + case UBCORE_JETTY_STATE_READY: + ret = udma_create_hw_jfs_ctx(udma_dev, udma_jfs, &udma_jfs->ubcore_jfs.jfs_cfg); + if (ret) + break; + + udma_reset_sw_k_jetty_queue(&udma_jfs->sq); + break; + default: + ret = udma_close_ue_rx(udma_dev, true, true, false, 0); + if (ret) + break; + + if (!(udma_dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) { + if (udma_modify_jetty_precondition(udma_dev, &udma_jfs->sq)) { + ret = -ENOMEM; + udma_open_ue_rx(udma_dev, true, true, false, 0); + break; + } + } + + ret = udma_set_jetty_state(udma_dev, udma_jfs->sq.id, to_jetty_state(attr->state)); + if (ret) + udma_open_ue_rx(udma_dev, true, true, false, 0); + else + udma_jfs->ue_rx_closed = true; + break; + } + + return ret; +} + +int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + int ret = 0; + + if (!(attr->mask & UBCORE_JFS_STATE)) { + dev_err(udma_dev->dev, "modify jfs mask is error or not set, jfs_id = %u.\n", + udma_jfs->sq.id); + return -EINVAL; + } + + if (udma_jfs->sq.state == attr->state) { + dev_info(udma_dev->dev, "jfs state has been %s.\n", + to_state_name(attr->state)); + return 0; + } + + if (!verify_modify_jetty(udma_jfs->sq.state, attr->state)) { + dev_err(udma_dev->dev, "not support modify jfs state from %s to %s.\n", + to_state_name(udma_jfs->sq.state), to_state_name(attr->state)); + return -EINVAL; + } + + ret = udma_modify_jfs_state(udma_dev, udma_jfs, attr); + if (ret) { + dev_err(udma_dev->dev, "modify jfs %u state to %u failed.\n", + udma_jfs->sq.id, attr->state); + return ret; + } + + udma_jfs->sq.state = attr->state; + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index ed1ff16e4573..6cdc281e53c3 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -53,5 +53,7 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct ubcore_jfs_cfg *jfs_cfg); void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); +int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); #endif 
/* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2491aea7f9e2..c726a231e4f8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -181,6 +181,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfc = udma_create_jfc, .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, + .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, -- Gitee From dd8a4cf6316551892d6c37ba2e8966f4d5f6ea95 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 20:09:44 +0800 Subject: [PATCH 079/243] ub: udma: Support modify jetty. commit f9d5d20b756c9484513f0c0a0493b560f98d9546 openEuler This patch adds the ability to modify jetty. During the modify jetty process, the driver will post mailbox to notify the hardware to modify. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 77 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 80 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 74f9a30e2be3..914ef33b81d9 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -866,6 +866,83 @@ enum jetty_state to_jetty_state(enum ubcore_jetty_state state) return STATE_NUM; } +static int udma_modify_jetty_state(struct udma_dev *udma_dev, struct udma_jetty *udma_jetty, + struct ubcore_jetty_attr *attr) +{ + int ret; + + switch (attr->state) { + case UBCORE_JETTY_STATE_RESET: + ret = udma_destroy_hw_jetty_ctx(udma_dev, udma_jetty->sq.id); + break; + case UBCORE_JETTY_STATE_READY: + ret = udma_create_hw_jetty_ctx(udma_dev, udma_jetty, + &udma_jetty->ubcore_jetty.jetty_cfg); + if (ret) + break; + + udma_reset_sw_k_jetty_queue(&udma_jetty->sq); + break; + default: + ret = udma_close_ue_rx(udma_dev, true, true, false, 0); + if (ret) + break; + + if (!(udma_dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) { + if (udma_modify_jetty_precondition(udma_dev, &udma_jetty->sq)) { + ret = -ENOMEM; + udma_open_ue_rx(udma_dev, true, true, false, 0); + break; + } + } + + ret = udma_set_jetty_state(udma_dev, udma_jetty->sq.id, + to_jetty_state(attr->state)); + if (ret) + udma_open_ue_rx(udma_dev, true, true, false, 0); + else + udma_jetty->ue_rx_closed = true; + break; + } + + return ret; +} + +int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + if (!(attr->mask & UBCORE_JETTY_STATE)) { + dev_err(udma_dev->dev, "modify jetty mask is error or not set, jetty_id = %u.\n", + udma_jetty->sq.id); + return -EINVAL; + } + + if (udma_jetty->sq.state == attr->state) { + dev_info(udma_dev->dev, "jetty state has been %s.\n", to_state_name(attr->state)); + return 0; + } + + if (!verify_modify_jetty(udma_jetty->sq.state, attr->state)) { + dev_err(udma_dev->dev, "not support modify jetty state from %s to %s.\n", + to_state_name(udma_jetty->sq.state), to_state_name(attr->state)); + return -EINVAL; + } + + ret = udma_modify_jetty_state(udma_dev, udma_jetty, attr); + if (ret) { + dev_err(udma_dev->dev, "modify jetty %u state to %s failed.\n", + udma_jetty->sq.id, to_state_name(attr->state)); + return ret; + } + udma_jetty->sq.state = attr->state; + + 
return 0; +} + static int udma_alloc_group_start_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, uint32_t *start_jetty_id) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 013fb8ddd17f..5b428e999ff1 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -229,6 +229,8 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index c726a231e4f8..93ca98ef248e 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -189,6 +189,7 @@ static struct ubcore_ops g_dev_ops = { .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, + .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, .create_jetty_grp = udma_create_jetty_grp, -- Gitee From 1d456dda7261c54edfb7540b53c910e4960ed9d8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 09:26:22 +0800 Subject: [PATCH 080/243] ub: udma: Support modify jfr. commit d7b858224867b71d55b7ad700bb86da388ec2d91 openEuler This patch adds the ability to modify jfr. During the modify jfr process, the driver will post mailbox to notify the hardware to modify. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfr.c | 140 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 3 +- 3 files changed, 144 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index f15ca6b26d42..953fcffc5001 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -19,6 +19,14 @@ const char *state_str[] = { "INVALID" }; +static const char *to_state_str(enum ubcore_jfr_state state) +{ + if ((int)state >= (int)JFR_STATE_NUM) + return state_str[JFR_STATE_NUM]; + + return state_str[state]; +} + static int udma_verify_jfr_param(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg) { @@ -650,3 +658,135 @@ int udma_destroy_jfr_batch(struct ubcore_jfr **jfr, int jfr_cnt, int *bad_jfr_in return 0; } + +static bool verify_modify_jfr_state(enum ubcore_jfr_state jfr_state, + enum ubcore_jfr_state attr_state) +{ + switch (jfr_state) { + case UBCORE_JFR_STATE_RESET: + return attr_state == UBCORE_JFR_STATE_READY; + case UBCORE_JFR_STATE_READY: + return attr_state == UBCORE_JFR_STATE_ERROR; + case UBCORE_JFR_STATE_ERROR: + return attr_state == UBCORE_JFR_STATE_RESET; + default: + break; + } + + return false; +} + +static int verify_modify_jfr(struct udma_dev *udma_dev, struct udma_jfr *udma_jfr, + struct ubcore_jfr_attr *attr, bool *state_flag, + bool *rx_threshold_flag) +{ + *rx_threshold_flag = false; + *state_flag = false; + + if (!(attr->mask & (UBCORE_JFR_RX_THRESHOLD | UBCORE_JFR_STATE))) { + dev_err(udma_dev->dev, + "modify jfr mask is error or not set, jfrn = %u.\n", + udma_jfr->rq.id); + return -EINVAL; + } + + if (attr->mask & UBCORE_JFR_RX_THRESHOLD) { + if (attr->rx_threshold >= 
udma_jfr->wqe_cnt) { + dev_err(udma_dev->dev, + "JFR rx_threshold(%u) must less than wqe num(%u).\n", + attr->rx_threshold, udma_jfr->wqe_cnt); + return -EINVAL; + } + *rx_threshold_flag = true; + } + + if (attr->mask & UBCORE_JFR_STATE) { + if (udma_jfr->state == attr->state) { + dev_info(udma_dev->dev, + "jfr(%u) state has been %s, keep it unchanged.\n", + udma_jfr->rq.id, to_state_str(attr->state)); + return 0; + } else if (!verify_modify_jfr_state(udma_jfr->state, + attr->state)) { + dev_err(udma_dev->dev, + "jfr(%u) not support modify jfr state from %s to %s.\n", + udma_jfr->rq.id, to_state_str(udma_jfr->state), + to_state_str(attr->state)); + return -EINVAL; + } else if ((attr->state == UBCORE_JFR_STATE_RESET || + attr->state == UBCORE_JFR_STATE_ERROR) && + *rx_threshold_flag) { + dev_err(udma_dev->dev, + "jfr(%u) not support set rx threshold when change state to %s.\n", + udma_jfr->rq.id, to_state_str(attr->state)); + return -EINVAL; + } + *state_flag = true; + } + + return 0; +} + +static int udma_destroy_hw_jfr_ctx(struct udma_dev *dev, uint32_t jfr_id) +{ + struct ubase_mbx_attr attr = {}; + int ret; + + attr.tag = jfr_id; + attr.op = UDMA_CMD_DESTROY_JFR_CONTEXT; + ret = post_mailbox_update_ctx(dev, NULL, 0, &attr); + if (ret) + dev_err(dev->dev, + "post mailbox destroy jfr ctx failed, ret = %d.\n", ret); + + return ret; +} + +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + bool rx_threshold_flag = false; + bool state_flag = false; + int ret = 0; + + ret = verify_modify_jfr(udma_dev, udma_jfr, attr, &state_flag, + &rx_threshold_flag); + if (ret) + return ret; + + if (!(rx_threshold_flag || state_flag)) + return 0; + + if (rx_threshold_flag && !state_flag) { + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, state_flag, + rx_threshold_flag, attr); + } else { + switch (attr->state) { + case UBCORE_JFR_STATE_RESET: + ret = udma_destroy_hw_jfr_ctx(udma_dev, udma_jfr->rq.id); + break; + case UBCORE_JFR_STATE_READY: + ret = udma_hw_init_jfrc(udma_dev, &jfr->jfr_cfg, udma_jfr, + rx_threshold_flag ? 
+ attr->rx_threshold : udma_jfr->rx_threshold); + break; + default: + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, state_flag, + rx_threshold_flag, attr); + break; + } + } + + if (ret) + return ret; + + if (state_flag) + udma_jfr->state = attr->state; + + if (rx_threshold_flag) + udma_jfr->rx_threshold = attr->rx_threshold; + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index 43ee96cea746..ae6d0d97f460 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -155,6 +155,8 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfr, rq); } +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jfr(struct ubcore_jfr *jfr); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 93ca98ef248e..3a478a468c6f 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -185,9 +185,10 @@ static struct ubcore_ops g_dev_ops = { .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, + .modify_jfr = udma_modify_jfr, + .query_jfr = udma_query_jfr, .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, - .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, -- Gitee From ee67651bc9724e5829b9d8090e0a15f10652b842 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 09:37:37 +0800 Subject: [PATCH 081/243] ub: udma: Support modify jfc. commit ab014f21ec7d348ce728443be3d8af41ab6d8d40 openEuler This patch adds the ability to modify jfc. During the modify jfc process, the driver will post mailbox to notify the hardware to modify. 
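As the udma_modify_jfc_attr() hunk in this patch shows, the modify is expressed as a value-plus-mask mailbox payload: the mailbox buffer holds two copies of the JFC context, and clearing a field in the second (mask) copy marks it as one the hardware should apply. The following is a minimal userspace model of that convention; struct jfc_ctx is an illustrative two-field stand-in, not the real udma_jfc_ctx layout.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in; the real udma_jfc_ctx layout differs. */
struct jfc_ctx {
	uint32_t cqe_coalesce_cnt;
	uint32_t cqe_coalesce_period;
};

/*
 * mbox_buf holds the context at index 0 and its mask at index 1.
 * An all-ones mask means "change nothing"; clearing a field opts it in.
 */
static void build_modify_payload(void *mbox_buf, uint32_t count,
				 uint32_t period_idx, bool set_count,
				 bool set_period)
{
	struct jfc_ctx *ctx = &((struct jfc_ctx *)mbox_buf)[0];
	struct jfc_ctx *mask = &((struct jfc_ctx *)mbox_buf)[1];

	memset(ctx, 0, sizeof(*ctx));
	memset(mask, 0xff, sizeof(*mask));	/* default: touch no field */

	if (set_count) {
		ctx->cqe_coalesce_cnt = count;
		mask->cqe_coalesce_cnt = 0;	/* apply this field */
	}
	if (set_period) {
		ctx->cqe_coalesce_period = period_idx;
		mask->cqe_coalesce_period = 0;
	}
}

Note that the period is not sent raw: udma_get_cqe_period() first maps the discrete values 0/4/16/64/.../16384 to a table index and rejects anything in between.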
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 109 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 112 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index bfbf479ec06f..5067b3c52104 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -536,3 +536,112 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, return 0; } + +static int udma_get_cqe_period(uint16_t cqe_period) +{ + uint16_t period[] = { + UDMA_CQE_PERIOD_0, + UDMA_CQE_PERIOD_4, + UDMA_CQE_PERIOD_16, + UDMA_CQE_PERIOD_64, + UDMA_CQE_PERIOD_256, + UDMA_CQE_PERIOD_1024, + UDMA_CQE_PERIOD_4096, + UDMA_CQE_PERIOD_16384 + }; + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(period); ++i) { + if (cqe_period == period[i]) + return i; + } + + return -EINVAL; +} + +static int udma_check_jfc_attr(struct udma_dev *udma_dev, struct ubcore_jfc_attr *attr) +{ + if (!(attr->mask & (UBCORE_JFC_MODERATE_COUNT | UBCORE_JFC_MODERATE_PERIOD))) { + dev_err(udma_dev->dev, + "udma modify jfc mask is not set or invalid.\n"); + return -EINVAL; + } + + if ((attr->mask & UBCORE_JFC_MODERATE_COUNT) && + (attr->moderate_count >= UDMA_CQE_COALESCE_CNT_MAX)) { + dev_err(udma_dev->dev, "udma cqe coalesce cnt %u is invalid.\n", + attr->moderate_count); + return -EINVAL; + } + + if ((attr->mask & UBCORE_JFC_MODERATE_PERIOD) && + (udma_get_cqe_period(attr->moderate_period) == -EINVAL)) { + dev_err(udma_dev->dev, "udma cqe coalesce period %u is invalid.\n", + attr->moderate_period); + return -EINVAL; + } + + return 0; +} + +static int udma_modify_jfc_attr(struct udma_dev *dev, uint32_t jfcn, + struct ubcore_jfc_attr *attr) +{ + struct udma_jfc_ctx *jfc_context, *ctx_mask; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for modify jfc.\n"); + return -ENOMEM; + } + + jfc_context = &((struct udma_jfc_ctx *)mailbox->buf)[0]; + ctx_mask = &((struct udma_jfc_ctx *)mailbox->buf)[1]; + memset(ctx_mask, 0xff, sizeof(struct udma_jfc_ctx)); + + if (attr->mask & UBCORE_JFC_MODERATE_COUNT) { + jfc_context->cqe_coalesce_cnt = attr->moderate_count; + ctx_mask->cqe_coalesce_cnt = 0; + } + + if (attr->mask & UBCORE_JFC_MODERATE_PERIOD) { + jfc_context->cqe_coalesce_period = + udma_get_cqe_period(attr->moderate_period); + ctx_mask->cqe_coalesce_period = 0; + } + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_MODIFY_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to send post mbox in modify JFCC, ret = %d.\n", + ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_device = to_udma_dev(ubcore_jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(ubcore_jfc); + int ret; + + ret = udma_check_jfc_attr(udma_device, attr); + if (ret) + return ret; + + ret = udma_modify_jfc_attr(udma_device, udma_jfc->jfcn, attr); + if (ret) + dev_err(udma_device->dev, + "failed to modify JFC, jfcn = %u, ret = %d.\n", + udma_jfc->jfcn, ret); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 21f4016a42cd..29db1243623e 100644 --- 
a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -142,5 +142,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, int udma_destroy_jfc(struct ubcore_jfc *jfc); int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 3a478a468c6f..b1fad9e31f38 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -179,6 +179,7 @@ static struct ubcore_ops g_dev_ops = { .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, .create_jfc = udma_create_jfc, + .modify_jfc = udma_modify_jfc, .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, -- Gitee From 5ef1dc11cbf02a46cec3617773c0aa3a08f6345c Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:43:50 +0800 Subject: [PATCH 082/243] ub: cdma: support kernel resource reclamation commit 82ffc9d85146f7dbf53de48a04c7d56ee9fce95a openEuler This patch implements kernel resource reclamation functionality within the CDMA driver. The implementation includes reclaiming the corresponding context and the resources under that context, such as queues, jfs, ctp, jfc, and segments, after the user-space process has exited. Signed-off-by: Zhipeng Lu Signed-off-by: Bangwei Zhang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_chardev.c | 60 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_chardev.h | 2 ++ drivers/ub/cdma/cdma_context.c | 46 ++++++++++++++++++++++++++ drivers/ub/cdma/cdma_context.h | 1 + drivers/ub/cdma/cdma_event.c | 2 ++ drivers/ub/cdma/cdma_types.h | 6 ++++ 6 files changed, 117 insertions(+) diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 124b5701b253..a1a289eb0e91 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -176,11 +176,61 @@ static int cdma_mmap(struct file *file, struct vm_area_struct *vma) return 0; } +static void cdma_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct cdma_mn *mn_notifier = container_of(mn, struct cdma_mn, mn); + struct cdma_file *cfile = container_of(mn_notifier, struct cdma_file, mn_notifier); + + if (mn_notifier->mm != mm || mn_notifier->mm == NULL) { + pr_info("mm already released.\n"); + return; + } + mn_notifier->mm = NULL; + + mutex_lock(&cfile->ctx_mutex); + cdma_cleanup_context_uobj(cfile); + if (cfile->uctx) + cdma_cleanup_context_res(cfile->uctx); + cfile->uctx = NULL; + mutex_unlock(&cfile->ctx_mutex); +} + +static const struct mmu_notifier_ops cdma_mm_notifier_ops = { + .release = cdma_mmu_release +}; + +static int cdma_register_mmu(struct cdma_file *file) +{ + struct cdma_mn *mn_notifier = &file->mn_notifier; + int ret; + + mn_notifier->mm = current->mm; + mn_notifier->mn.ops = &cdma_mm_notifier_ops; + ret = mmu_notifier_register(&mn_notifier->mn, current->mm); + if (ret) + mn_notifier->mm = NULL; + + return ret; +} + +static void cdma_unregister_mmu(struct cdma_file *cfile) +{ + struct cdma_mn *mn_notifier = &cfile->mn_notifier; + struct mm_struct *mm = mn_notifier->mm; + + if (!mm) + return; + + cfile->mn_notifier.mm = NULL; + mmu_notifier_unregister(&cfile->mn_notifier.mn, mm); +} + static int cdma_open(struct inode *inode, struct file *file) { struct cdma_chardev *chardev; struct 
cdma_file *cfile; struct cdma_dev *cdev; + int ret; chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); @@ -189,6 +239,13 @@ static int cdma_open(struct inode *inode, struct file *file) if (!cfile) return -ENOMEM; + ret = cdma_register_mmu(cfile); + if (ret) { + dev_err(cdev->dev, "register mmu failed, ret = %d.\n", ret); + kfree(cfile); + return ret; + } + cdma_init_uobj_idr(cfile); mutex_lock(&cdev->file_mutex); cfile->cdev = cdev; @@ -216,6 +273,8 @@ static int cdma_close(struct inode *inode, struct file *file) mutex_lock(&cfile->ctx_mutex); cdma_cleanup_context_uobj(cfile); + if (cfile->uctx) + cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; mutex_unlock(&cfile->ctx_mutex); @@ -302,6 +361,7 @@ void cdma_release_file(struct kref *ref) { struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + cdma_unregister_mmu(cfile); mutex_destroy(&cfile->ctx_mutex); idr_destroy(&cfile->idr); kfree(cfile); diff --git a/drivers/ub/cdma/cdma_chardev.h b/drivers/ub/cdma/cdma_chardev.h index 5366dd77ea54..0bd4fcc654ff 100644 --- a/drivers/ub/cdma/cdma_chardev.h +++ b/drivers/ub/cdma/cdma_chardev.h @@ -4,6 +4,8 @@ #ifndef __CDMA_CHARDEV_H__ #define __CDMA_CHARDEV_H__ +#include + #define CDMA_TEST_NAME "cdma_dev" #define CDMA_MAX_DEVICES 1 #define CDMA_JETTY_DSQE_OFFSET 0x1000 diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c index e3b3e13d8a4e..c95ccb0c28b4 100644 --- a/drivers/ub/cdma/cdma_context.c +++ b/drivers/ub/cdma/cdma_context.c @@ -6,6 +6,11 @@ #include #include #include "cdma.h" +#include "cdma_queue.h" +#include "cdma_jfc.h" +#include "cdma_jfs.h" +#include "cdma_tp.h" +#include "cdma_segment.h" #include "cdma_context.h" static void cdma_ctx_handle_free(struct cdma_dev *cdev, @@ -133,3 +138,44 @@ void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx) mutex_destroy(&ctx->pgdir_mutex); kfree(ctx); } + +static void cdma_cleanup_queue_res(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + struct cdma_table *queue_tbl = &cdev->queue_table; + struct cdma_queue *queue, *next_queue; + + list_for_each_entry_safe(queue, next_queue, &ctx->queue_list, list) { + list_del(&queue->list); + + if (queue->jfs) + cdma_delete_jfs(cdev, queue->jfs->id); + + if (queue->tp) + cdma_delete_ctp(cdev, queue->tp->tp_id); + + if (queue->jfc) + cdma_delete_jfc(cdev, queue->jfc->id, NULL); + + spin_lock(&queue_tbl->lock); + idr_remove(&queue_tbl->idr_tbl.idr, queue->id); + spin_unlock(&queue_tbl->lock); + kfree(queue); + } +} + +static void cdma_cleanup_segment_res(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + struct cdma_segment *segment, *next_segment; + + list_for_each_entry_safe(segment, next_segment, &ctx->seg_list, list) { + list_del(&segment->list); + cdma_unregister_seg(cdev, segment); + } +} + +void cdma_cleanup_context_res(struct cdma_context *ctx) +{ + cdma_cleanup_queue_res(ctx->cdev, ctx); + cdma_cleanup_segment_res(ctx->cdev, ctx); + cdma_free_context(ctx->cdev, ctx); +} diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 590bffb14cce..47736a281257 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -35,5 +35,6 @@ struct cdma_ctx_res { struct cdma_context *cdma_find_ctx_by_handle(struct cdma_dev *cdev, int handle); struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel); void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx); +void cdma_cleanup_context_res(struct 
cdma_context *ctx); #endif /* CDMA_CONTEXT_H */ diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index f887c52a0479..f2c51d4833ee 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -409,6 +409,7 @@ struct cdma_jfce *cdma_alloc_jfce(struct cdma_file *cfile) jfce->fd = new_fd; jfce->file = file; jfce->cfile = cfile; + kref_get(&cfile->ref); fd_install(new_fd, file); return jfce; @@ -655,6 +656,7 @@ struct cdma_jfae *cdma_alloc_jfae(struct cdma_file *cfile) jfae->fd = fd; jfae->file = file; jfae->cfile = cfile; + kref_get(&cfile->ref); fd_install(fd, file); return jfae; diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index e4c2f3fd7b52..afd59c2c4731 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -121,6 +121,11 @@ struct cdma_base_jfc { struct cdma_jfc_event jfc_event; }; +struct cdma_mn { + struct mmu_notifier mn; + struct mm_struct *mm; +}; + struct cdma_file { struct cdma_dev *cdev; struct list_head list; @@ -128,6 +133,7 @@ struct cdma_file { struct cdma_context *uctx; struct idr idr; spinlock_t idr_lock; + struct cdma_mn mn_notifier; struct kref ref; }; -- Gitee From 26cd5ae3aa9450f151828a3b305e9fb750f486f3 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:46:33 +0800 Subject: [PATCH 083/243] ub: cdma: support dma write semantic configuration commit 81b92c3f5cf6162d1fda6853750e6b0031390d80 openEuler This patch implements functionality related to DMA write semantic configuration within the CDMA driver. The implementation includes support for the dma_write interface. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 3 +- drivers/ub/cdma/cdma_api.c | 62 ++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 3 ++ drivers/ub/cdma/cdma_handle.c | 72 +++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 14 +++++++ drivers/ub/cdma/cdma_jfs.h | 70 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_types.h | 9 +++++ include/ub/cdma/cdma_api.h | 8 ++++ 8 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/cdma/cdma_handle.c create mode 100644 drivers/ub/cdma/cdma_handle.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 714e0542f387..cb3ea219f9e2 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,7 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o + cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \ + cdma_handle.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 89d01159f797..c3656793abb2 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -11,6 +11,7 @@ #include "cdma_queue.h" #include "cdma_jfc.h" #include "cdma.h" +#include "cdma_handle.h" #include struct dma_device *dma_get_device_list(u32 *num_devices) @@ -437,6 +438,67 @@ void dma_unimport_seg(struct dma_seg *dma_seg) } EXPORT_SYMBOL_GPL(dma_unimport_seg); +static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id, + struct cdma_dev **cdev, + struct cdma_queue **cdma_queue) +{ + struct cdma_queue *tmp_q; + struct cdma_dev *tmp_dev; + u32 eid; + + eid = dma_dev->attr.eid.dw0; + tmp_dev = get_cdma_dev_by_eid(eid); + 
if (!tmp_dev) { + pr_err("get cdma dev failed, eid = 0x%x.\n", eid); + return -EINVAL; + } + + if (tmp_dev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); + return -EINVAL; + } + + tmp_q = cdma_find_queue(tmp_dev, queue_id); + if (!tmp_q) { + dev_err(tmp_dev->dev, "get resource failed.\n"); + return -EINVAL; + } + + if (!tmp_q->tp || !tmp_q->jfs || !tmp_q->jfc) { + dev_err(tmp_dev->dev, "get jetty parameters failed.\n"); + return -EFAULT; + } + + *cdev = tmp_dev; + *cdma_queue = tmp_q; + + return 0; +} + +enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("write input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_write); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index f0370bea2861..3d45f64f5926 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -23,6 +23,9 @@ #define CDMA_RANGE_INDEX_ENTRY_CNT 0x100000 #define CDMA_SEGMENT_ENTRY_CNT 0x10000 +#define CDMA_ENABLE_FLAG 1 +#define CDMA_DISABLE_FLAG 0 + #define CDMA_DB_SIZE 64 #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c new file mode 100644 index 000000000000..aa4274aac27b --- /dev/null +++ b/drivers/ub/cdma/cdma_handle.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include "cdma_jfs.h" +#include "cdma_common.h" +#include "cdma_handle.h" + +static int cdma_rw_check(struct cdma_dev *cdev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg) +{ + if (!rmt_seg->len || !local_seg->len) { + dev_err(cdev->dev, "invalid len.\n"); + return -EINVAL; + } + + if (!rmt_seg->sva || !local_seg->sva) { + dev_err(cdev->dev, "invalid address.\n"); + return -EINVAL; + } + + return 0; +} + +static inline void cdma_fill_comm_wr(struct cdma_jfs_wr *wr, + struct cdma_queue *queue) +{ + wr->flag.bs.complete_enable = CDMA_ENABLE_FLAG; + wr->flag.bs.inline_flag = CDMA_DISABLE_FLAG; + wr->flag.bs.fence = CDMA_ENABLE_FLAG; + wr->tpn = queue->tp->tpn; + wr->rmt_eid = queue->cfg.rmt_eid.dw0; + wr->next = NULL; +} + +static inline void cdma_fill_sge(struct cdma_sge_info *rmt_sge, + struct cdma_sge_info *local_sge, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg) +{ + local_sge->addr = local_seg->sva; + local_sge->len = local_seg->len; + local_sge->seg = local_seg; + + rmt_sge->addr = rmt_seg->sva; + rmt_sge->len = rmt_seg->len; + rmt_sge->seg = rmt_seg; +} + +int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; + struct cdma_sge_info rmt_sge, local_sge; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "write param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.rw.src.num_sge = 1; + wr.rw.src.sge = &local_sge; + wr.rw.dst.num_sge = 1; + wr.rw.dst.sge = &rmt_sge; + + return 0; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h new file mode 100644 index 000000000000..27a3f9495d18 --- /dev/null +++ b/drivers/ub/cdma/cdma_handle.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_HANDLE_H__ +#define __CDMA_HANDLE_H__ + +#include "cdma_segment.h" +#include "cdma_queue.h" +#include "cdma.h" + +int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg); + +#endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index e4dcaa765a89..b34821701511 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -20,6 +20,74 @@ #define CDMA_RCV_SEND_MAX_DIFF 512U +union cdma_jfs_wr_flag { + struct { + /* 0: There is no order with other WR. + * 1: relax order. + * 2: strong order. + * 3: reserve. + */ + u32 place_order : 2; + /* 0: There is no completion order with other WR + * 1: Completion order with previous WR. + */ + u32 comp_order : 1; + /* 0: There is no fence. + * 1: Fence with previous read and atomic WR + */ + u32 fence : 1; + /* 0: not solicited. + * 1: solicited. It will trigger an event + * on remote side + */ + u32 solicited_enable : 1; + /* 0: Do not notify local process + * after the task is complete. + * 1: Notify local process + * after the task is completed. + */ + u32 complete_enable : 1; + /* 0: No inline. + * 1: Inline data. 
+ */ + u32 inline_flag : 1; + u32 reserved : 25; + } bs; + u32 value; +}; + +struct cdma_sge_info { + u64 addr; + u32 len; + struct dma_seg *seg; +}; + +struct cdma_sg { + struct cdma_sge_info *sge; + u32 num_sge; +}; + +struct cdma_rw_wr { + struct cdma_sg src; + struct cdma_sg dst; + u8 target_hint; /* hint of jetty in a target jetty group */ + u64 notify_data; /* notify data or immeditate data in host byte order */ + u64 notify_addr; + u32 notify_tokenid; + u32 notify_tokenvalue; +}; + +struct cdma_jfs_wr { + enum cdma_wr_opcode opcode; + union cdma_jfs_wr_flag flag; + u32 tpn; + u32 rmt_eid; + union { + struct cdma_rw_wr rw; + }; + struct cdma_jfs_wr *next; +}; + struct cdma_jfs { struct cdma_base_jfs base_jfs; struct cdma_dev *dev; @@ -155,5 +223,7 @@ struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg, struct cdma_udata *udata); int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); +int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, + struct cdma_jfs_wr **bad_wr); #endif diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index afd59c2c4731..0b861c891558 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -121,6 +121,15 @@ struct cdma_base_jfc { struct cdma_jfc_event jfc_event; }; +enum cdma_wr_opcode { + CDMA_WR_OPC_WRITE = 0x00, + CDMA_WR_OPC_WRITE_NOTIFY = 0x02, + CDMA_WR_OPC_READ = 0x10, + CDMA_WR_OPC_CAS = 0x20, + CDMA_WR_OPC_FADD = 0x22, + CDMA_WR_OPC_LAST +}; + struct cdma_mn { struct mmu_notifier mn; struct mm_struct *mm; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index ff69c268b569..bc30586a5c4f 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -72,6 +72,11 @@ struct dma_context { u32 tid; /* data valid only in bit 0-19 */ }; +enum dma_status { + DMA_STATUS_OK, + DMA_STATUS_INVAL, +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); @@ -96,6 +101,9 @@ struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg); void dma_unimport_seg(struct dma_seg *dma_seg); +enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From ee5d7b631b5ba539d8983254e82000d79a5ea11e Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:53:44 +0800 Subject: [PATCH 084/243] ub: cdma: support dma write semantic delivery commit 43aea2fc121a93eb5c3f95992018763b7860b0c9 openEuler This patch implements the functionality of issuing dma write semantics in the CDMA driver. The implementation includes executing the issuance of semantics after the configuration of semantics in the dma_write interface is completed. 
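The core of the submit path added here is cdma_copy_to_sq(): WQEBBs are copied into a power-of-two ring indexed by free-running producer/consumer counters, and a post may split into a tail chunk at the end of the buffer plus a head chunk at the start. Below is a self-contained model of that arithmetic, with illustrative types; the real code additionally maintains a kva_curr write pointer and checks fullness via to_check_sq_overflow().

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct ring {
	void *kva;		/* ring buffer base address */
	uint32_t entry_cnt;	/* number of WQEBBs, power of two */
	uint32_t entry_size;	/* bytes per WQEBB */
	uint32_t pi, ci;	/* free-running producer/consumer counters */
};

static int ring_copy(struct ring *sq, const void *src, uint32_t n)
{
	uint32_t idx = sq->pi & (sq->entry_cnt - 1);
	uint32_t remain = sq->entry_cnt - idx;	/* slots before wrap */
	uint32_t tail = remain > n ? n : remain;

	/* unwrapped counters: pi - ci is the in-flight entry count */
	if (sq->pi - sq->ci + n > sq->entry_cnt)
		return -ENOMEM;			/* ring would overflow */

	memcpy((char *)sq->kva + (size_t)idx * sq->entry_size,
	       src, (size_t)tail * sq->entry_size);
	if (n > tail)				/* wrapped remainder */
		memcpy(sq->kva,
		       (const char *)src + (size_t)tail * sq->entry_size,
		       (size_t)(n - tail) * sq->entry_size);

	sq->pi += n;
	return 0;
}

Because the counters are only masked at indexing time, the fullness test stays correct across 32-bit wraparound; afterwards the driver either rings the doorbell with sq->pi or, for a single WQEBB with direct-WQE support, writes the entry itself, in both cases only after a write barrier makes the copies visible.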
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_common.h | 6 + drivers/ub/cdma/cdma_handle.c | 8 +- drivers/ub/cdma/cdma_jfs.c | 301 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 49 ++++++ 4 files changed, 363 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 3d45f64f5926..a0bb3758ae3a 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -30,6 +30,12 @@ #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) +/* thanks to include/rdma/ib_verbs.h */ +enum cdma_sq_opcode { + CDMA_OPC_WRITE = 0x3, + CDMA_OPC_INVALID = 0x12, +}; + enum cdma_jfsc_mode { CDMA_JFS_MODE, CDMA_JETTY_MODE, diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index aa4274aac27b..11446f2a8307 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -53,6 +53,8 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, { struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; if (cdma_rw_check(cdev, rmt_seg, local_seg)) { dev_err(cdev->dev, "write param check failed.\n"); @@ -68,5 +70,9 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, wr.rw.dst.num_sge = 1; wr.rw.dst.sge = &rmt_sge; - return 0; + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for write failed, ret = %d.\n", ret); + + return ret; } diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index abc05c44432b..86bc71851731 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -558,3 +558,304 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return 0; } + +static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) +{ + switch (opcode) { + case CDMA_WR_OPC_WRITE: + return CDMA_OPC_WRITE; + default: + return CDMA_OPC_INVALID; + } +} + +static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq) +{ + return tmp_sq->sge_num; +} + +static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, + struct cdma_jfs_wr *wr) +{ + return wr->rw.src.num_sge > sq->max_sge_num; +} + +static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr, + struct cdma_normal_sge *sge) +{ + struct cdma_sge_info *sge_info; + u32 sge_num = 0; + u32 num_sge; + u32 i; + + switch (wr->opcode) { + case CDMA_WR_OPC_WRITE: + sge_info = wr->rw.src.sge; + num_sge = wr->rw.src.num_sge; + break; + default: + return -EINVAL; + } + + for (i = 0; i < num_sge; i++) { + if (sge_info[i].len == 0) + continue; + sge->va = sge_info[i].addr; + sge->length = sge_info[i].len; + sge->token_id = sge_info[i].seg->tid; + sge++; + sge_num++; + } + sqe_ctl->sge_num = sge_num; + + return 0; +} + +static inline u32 cdma_get_ctl_len(u8 opcode) +{ + return SQE_NORMAL_CTL_LEN; +} + +static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + u32 ctrl_len; + + ctrl_len = cdma_get_ctl_len(sqe_ctl->opcode); + sge = (struct cdma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + if (cdma_fill_sw_sge(sqe_ctl, wr, sge)) + return -EINVAL; + + sge_info = wr->rw.dst.sge; + + sqe_ctl->toid = sge_info[0].seg->tid; + sqe_ctl->token_en = sge_info[0].seg->token_value_valid; + sqe_ctl->rmt_token_value = 
sge_info[0].seg->token_value; + sqe_ctl->target_hint = wr->rw.target_hint; + sqe_ctl->rmt_addr_l_or_token_id = + sge_info[0].addr & (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + +static int cdma_fill_normal_sge(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + switch (wr->opcode) { + case CDMA_WR_OPC_WRITE: + return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); + default: + dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", + (u8)wr->opcode); + return -EINVAL; + } +} + +static int cdma_set_sqe(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr, struct cdma_jetty_queue *sq, + u8 opcode) +{ + int ret; + + sqe_ctl->cqe = wr->flag.bs.complete_enable; + sqe_ctl->owner = (sq->pi & sq->buf.entry_cnt) == 0 ? 1 : 0; + sqe_ctl->opcode = opcode; + sqe_ctl->tpn = wr->tpn; + sqe_ctl->place_odr = wr->flag.bs.place_order; + sqe_ctl->fence = wr->flag.bs.fence; + sqe_ctl->comp_order = wr->flag.bs.comp_order; + sqe_ctl->se = wr->flag.bs.solicited_enable; + sqe_ctl->inline_en = 0; + memcpy(sqe_ctl->rmt_eid, &wr->rmt_eid, sizeof(wr->rmt_eid)); + + ret = cdma_fill_normal_sge(cdev, sqe_ctl, wr); + if (ret) + dev_err(cdev->dev, + "cdma fill normal sge failed, wr opcode = %u.\n", + (u8)wr->opcode); + + return ret; +} + +static u32 cdma_cal_wqebb_num(struct cdma_jfs_wr *wr, u8 opcode, + struct cdma_sqe_ctl *tmp_sq) +{ + u32 normal_sge_num; + u32 sqe_ctl_len; + u32 wqebb_cnt; + + sqe_ctl_len = cdma_get_ctl_len(opcode); + + normal_sge_num = cdma_get_normal_sge_num(opcode, tmp_sq); + wqebb_cnt = cdma_sq_cal_wqebb_num(sqe_ctl_len, normal_sge_num); + + return wqebb_cnt; +} + +static inline bool to_check_sq_overflow(struct cdma_jetty_queue *sq, + u32 wqebb_cnt) +{ + return (sq->pi - sq->ci + wqebb_cnt) > sq->buf.entry_cnt; +} + +static int cdma_copy_to_sq(struct cdma_jetty_queue *sq, u32 wqebb_cnt, + struct cdma_jfs_wqebb *tmp_sq) +{ + u32 remain = sq->buf.entry_cnt - (sq->pi & (sq->buf.entry_cnt - 1)); + u32 tail_cnt; + u32 head_cnt; + + if (to_check_sq_overflow(sq, wqebb_cnt)) + return -ENOMEM; + + tail_cnt = remain > wqebb_cnt ? wqebb_cnt : remain; + head_cnt = wqebb_cnt - tail_cnt; + + memcpy(sq->kva_curr, tmp_sq, tail_cnt * sizeof(*tmp_sq)); + if (head_cnt) + memcpy(sq->buf.kva, tmp_sq + tail_cnt, + head_cnt * sizeof(*tmp_sq)); + + return 0; +} + +static void *cdma_k_update_ptr(u32 total_size, u32 wqebb_size, u8 *base_addr, + u8 *curr_addr) +{ + u8 *end_addr; + + end_addr = base_addr + total_size; + curr_addr = ((curr_addr + wqebb_size) < end_addr) ? 
+ (curr_addr + wqebb_size) : + base_addr + (curr_addr + wqebb_size - end_addr); + + return curr_addr; +} + +static int cdma_post_one_wr(struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr, + struct cdma_dev *cdev, + struct cdma_sqe_ctl **dwqe_addr, u8 *dwqe_enable) +{ + struct cdma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = { 0 }; + u32 wqebb_cnt; + u8 opcode; + int ret; + + opcode = cdma_get_jfs_opcode(wr->opcode); + if (opcode == CDMA_OPC_INVALID) { + dev_err(cdev->dev, "cdma invalid opcode = %u.\n", wr->opcode); + return -EINVAL; + } + + if (cdma_k_check_sge_num(opcode, sq, wr)) { + dev_err(cdev->dev, "cdma sge num invalid, opcode = %u.\n", + opcode); + return -EINVAL; + } + + ret = cdma_set_sqe(cdev, (struct cdma_sqe_ctl *)tmp_sq, wr, sq, opcode); + if (ret) + return ret; + + wqebb_cnt = + cdma_cal_wqebb_num(wr, opcode, (struct cdma_sqe_ctl *)tmp_sq); + if (wqebb_cnt == 1 && + !!(cdev->caps.feature & CDMA_CAP_FEATURE_DIRECT_WQE)) + *dwqe_enable = 1; + + ret = cdma_copy_to_sq(sq, wqebb_cnt, tmp_sq); + if (ret) { + dev_err(cdev->dev, "cdma jfs overflow, wqebb_cnt = %u.\n", + wqebb_cnt); + return ret; + } + + *dwqe_addr = sq->kva_curr; + + sq->kva_curr = cdma_k_update_ptr(sq->buf.entry_cnt * sq->buf.entry_size, + wqebb_cnt * sq->buf.entry_size, + (u8 *)sq->buf.kva, (u8 *)sq->kva_curr); + + sq->pi += wqebb_cnt; + + return 0; +} + +static void cdma_write_dsqe(struct cdma_jetty_queue *sq, + struct cdma_sqe_ctl *ctrl) +{ +#define DWQE_SIZE 8 + int i; + + ctrl->sqe_bb_idx = sq->pi; + for (i = 0; i < DWQE_SIZE; i++) + writeq_relaxed(*((u64 *)ctrl + i), (u64 *)sq->dwqe_addr + i); +} + +static inline void cdma_k_update_sq_db(struct cdma_jetty_queue *sq) +{ + u32 *db_addr = (u32 *)sq->db_addr; + *db_addr = sq->pi; +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +static int cdma_post_sq_wr(struct cdma_dev *cdev, struct cdma_jetty_queue *sq, + struct cdma_jfs_wr *wr, struct cdma_jfs_wr **bad_wr) +{ + struct cdma_sqe_ctl *dwqe_addr; + struct cdma_jfs_wr *it; + u8 dwqe_enable = 0; + int wr_cnt = 0; + int ret = 0; + + spin_lock(&sq->lock); + + for (it = wr; it != NULL; it = it->next) { + ret = cdma_post_one_wr(sq, it, cdev, &dwqe_addr, &dwqe_enable); + if (ret) { + dev_err(cdev->dev, "cdma post one wr failed.\n"); + *bad_wr = it; + goto post_wr; + } + wr_cnt++; + } + +post_wr: + if (wr_cnt) { + if (cdev->status != CDMA_SUSPEND) { + /* Ensure the order of write memory operations */ + wmb(); + if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) + cdma_write_dsqe(sq, dwqe_addr); + else + cdma_k_update_sq_db(sq); + } + } + + spin_unlock(&sq->lock); + + return ret; +} + +int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, + struct cdma_jfs_wr **bad_wr) +{ + struct cdma_dev *cdev = jfs->dev; + int ret; + + ret = cdma_post_sq_wr(cdev, &jfs->sq, wr, bad_wr); + if (ret) + dev_err(cdev->dev, + "cdma post jfs wr failed, sq_id = %u.\n", jfs->sq.id); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index b34821701511..98374b278737 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -9,9 +9,13 @@ #include "cdma_segment.h" #define MAX_WQEBB_NUM 4 +#define CDMA_SQE_RMT_EID_SIZE 4 #define CDMA_JFS_WQEBB_SIZE 64 +#define SQE_NORMAL_CTL_LEN 48 #define CDMA_JFS_SGE_SIZE 16 #define SQE_WRITE_NOTIFY_CTL_LEN 80 +#define SQE_CTL_RMA_ADDR_OFFSET 32 +#define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) #define CDMA_TA_TIMEOUT_128MS 128 #define CDMA_TA_TIMEOUT_1000MS 1000 @@ -20,6 +24,45 @@ #define CDMA_RCV_SEND_MAX_DIFF 512U +struct cdma_jfs_wqebb 
{ + u32 value[16]; +}; + +struct cdma_sqe_ctl { + /* DW0 */ + u32 sqe_bb_idx : 16; + u32 place_odr : 2; + u32 comp_order : 1; + u32 fence : 1; + u32 se : 1; + u32 cqe : 1; + u32 inline_en : 1; + u32 rsv : 5; + u32 token_en : 1; + u32 rmt_jetty_type : 2; + u32 owner : 1; + /* DW1 */ + u32 target_hint : 8; + u32 opcode : 8; + u32 rsv1 : 6; + u32 inline_msg_len : 10; + /* DW2 */ + u32 tpn : 24; + u32 sge_num : 8; + /* DW3 */ + u32 toid : 20; + u32 rsv2 : 12; + /* DW4~7 */ + u32 rmt_eid[CDMA_SQE_RMT_EID_SIZE]; + /* DW8 */ + u32 rmt_token_value; + /* DW9~11 */ + u32 rsv3; + u32 rmt_addr_l_or_token_id; + u32 rmt_addr_h_or_token_value; +}; + + union cdma_jfs_wr_flag { struct { /* 0: There is no order with other WR. @@ -62,6 +105,12 @@ struct cdma_sge_info { struct dma_seg *seg; }; +struct cdma_normal_sge { + u32 length; + u32 token_id; + u64 va; +}; + struct cdma_sg { struct cdma_sge_info *sge; u32 num_sge; -- Gitee From bbd0d000f480ee0d9327abac0a95a60884e57004 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:56:41 +0800 Subject: [PATCH 085/243] ub: cdma: support dma write with notify semantic commit 26598fa61f1a005665e4dba668fbe5c0844072fa openEuler This patch implements functionality related to the configuration and issuance of DMA write with notify semantics in the CDMA driver. The implementation includes support for the dma_write_with_notify interface and the process of configuring and issuing semantics within this interface. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 28 +++++++++++++++++++++++++++- drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 11 ++++++++++- drivers/ub/cdma/cdma_handle.h | 3 ++- drivers/ub/cdma/cdma_jfs.c | 28 +++++++++++++++++++++++++++- drivers/ub/cdma/cdma_jfs.h | 11 ++++++++++- include/ub/cdma/cdma_api.h | 10 ++++++++++ 7 files changed, 87 insertions(+), 5 deletions(-) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index c3656793abb2..33f9ce38bb98 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -491,7 +491,7 @@ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, if (ret) return DMA_STATUS_INVAL; - ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg); + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg, NULL); if (ret) return DMA_STATUS_INVAL; @@ -499,6 +499,32 @@ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_write); +enum dma_status dma_write_with_notify(struct dma_device *dma_dev, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_notify_data *data) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg || !data || !data->notify_seg) { + pr_err("write with notify input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg, data); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_write_with_notify); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index a0bb3758ae3a..cd4f48c2dce4 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -33,6 +33,7 @@ /* 
thanks to include/rdma/ib_verbs.h */ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, + CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index 11446f2a8307..ca6daeb03f4c 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -49,7 +49,8 @@ static inline void cdma_fill_sge(struct cdma_sge_info *rmt_sge, } int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, - struct dma_seg *local_seg, struct dma_seg *rmt_seg) + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_notify_data *data) { struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; struct cdma_sge_info rmt_sge, local_sge; @@ -61,6 +62,14 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, return -EINVAL; } + if (data) { + wr.opcode = CDMA_WR_OPC_WRITE_NOTIFY; + wr.rw.notify_addr = data->notify_seg->sva; + wr.rw.notify_data = data->notify_data; + wr.rw.notify_tokenid = data->notify_seg->tid; + wr.rw.notify_tokenvalue = data->notify_seg->token_value; + } + cdma_fill_comm_wr(&wr, queue); cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 27a3f9495d18..e9394e2c321a 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -9,6 +9,7 @@ #include "cdma.h" int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, - struct dma_seg *local_seg, struct dma_seg *rmt_seg); + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_notify_data *data); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 86bc71851731..e8a1677a1f2e 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -564,6 +564,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) switch (opcode) { case CDMA_WR_OPC_WRITE: return CDMA_OPC_WRITE; + case CDMA_WR_OPC_WRITE_NOTIFY: + return CDMA_OPC_WRITE_WITH_NOTIFY; default: return CDMA_OPC_INVALID; } @@ -577,7 +579,13 @@ static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { - return wr->rw.src.num_sge > sq->max_sge_num; + switch (opcode) { + case CDMA_OPC_WRITE_WITH_NOTIFY: + return wr->rw.src.num_sge > CDMA_JFS_MAX_SGE_NOTIFY || + wr->rw.src.num_sge > sq->max_sge_num; + default: + return wr->rw.src.num_sge > sq->max_sge_num; + } } static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, @@ -591,6 +599,7 @@ static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, switch (wr->opcode) { case CDMA_WR_OPC_WRITE: + case CDMA_WR_OPC_WRITE_NOTIFY: sge_info = wr->rw.src.sge; num_sge = wr->rw.src.num_sge; break; @@ -614,6 +623,9 @@ static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, static inline u32 cdma_get_ctl_len(u8 opcode) { + if (opcode == CDMA_OPC_WRITE_WITH_NOTIFY) + return SQE_WRITE_NOTIFY_CTL_LEN; + return SQE_NORMAL_CTL_LEN; } @@ -621,6 +633,7 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) { + struct cdma_token_info *token_info; struct cdma_sge_info *sge_info; struct cdma_normal_sge *sge; u32 ctrl_len; @@ -643,6 +656,18 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & (u32)SQE_CTL_RMA_ADDR_BIT; + if (sqe_ctl->opcode == CDMA_OPC_WRITE_WITH_NOTIFY) { + token_info = (struct cdma_token_info *) + ((void *)sqe_ctl + 
SQE_NOTIFY_TOKEN_ID_FIELD); + token_info->token_id = wr->rw.notify_tokenid; + token_info->token_value = wr->rw.notify_tokenvalue; + + memcpy((void *)sqe_ctl + SQE_NOTIFY_ADDR_FIELD, + &wr->rw.notify_addr, sizeof(u64)); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->rw.notify_data, sizeof(u64)); + } + return 0; } @@ -652,6 +677,7 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, { switch (wr->opcode) { case CDMA_WR_OPC_WRITE: + case CDMA_WR_OPC_WRITE_NOTIFY: return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 98374b278737..b94f8aca2d99 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -12,10 +12,14 @@ #define CDMA_SQE_RMT_EID_SIZE 4 #define CDMA_JFS_WQEBB_SIZE 64 #define SQE_NORMAL_CTL_LEN 48 +#define CDMA_JFS_MAX_SGE_NOTIFY 11 #define CDMA_JFS_SGE_SIZE 16 #define SQE_WRITE_NOTIFY_CTL_LEN 80 #define SQE_CTL_RMA_ADDR_OFFSET 32 #define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) +#define SQE_NOTIFY_TOKEN_ID_FIELD 48 +#define SQE_NOTIFY_ADDR_FIELD 56 +#define SQE_ATOMIC_DATA_FIELD 64 #define CDMA_TA_TIMEOUT_128MS 128 #define CDMA_TA_TIMEOUT_1000MS 1000 @@ -28,6 +32,12 @@ struct cdma_jfs_wqebb { u32 value[16]; }; +struct cdma_token_info { + u32 token_id : 20; + u32 rsv : 12; + u32 token_value; +}; + struct cdma_sqe_ctl { /* DW0 */ u32 sqe_bb_idx : 16; @@ -62,7 +72,6 @@ struct cdma_sqe_ctl { u32 rmt_addr_h_or_token_value; }; - union cdma_jfs_wr_flag { struct { /* 0: There is no order with other WR. diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index bc30586a5c4f..39dff8f6378f 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -77,6 +77,11 @@ enum dma_status { DMA_STATUS_INVAL, }; +struct dma_notify_data { + struct dma_seg *notify_seg; + u64 notify_data; +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); @@ -104,6 +109,11 @@ void dma_unimport_seg(struct dma_seg *dma_seg); enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id); +enum dma_status dma_write_with_notify(struct dma_device *dma_dev, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_notify_data *data); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From 5b913496059560390bf10e561272408a3f67a568 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:59:42 +0800 Subject: [PATCH 086/243] ub: cdma: support dma read semantic commit 75367599a7c1a46e8705a01b176762bb8715d233 openEuler This patch implements functionality related to the configuration and issuance of DMA read semantics in the CDMA driver. The implementation includes support for the dma_read interface and the process of configuring and issuing semantics within this interface. 
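
For reference, a synchronous caller might pair dma_read() with
dma_poll_queue() roughly as follows. This is an illustrative sketch
only: it assumes dma_dev, rmt_seg and local_seg were obtained through
the existing device-list and segment-import interfaces, and that
dma_poll_queue() returns the number of completion records retrieved
(negative errno on failure):

	static int example_read_sync(struct dma_device *dma_dev,
				     struct dma_seg *rmt_seg,
				     struct dma_seg *local_seg, int queue_id)
	{
		struct dma_cr cr = { 0 };
		int polled;

		/* post a read: fetch rmt_seg contents into local_seg */
		if (dma_read(dma_dev, rmt_seg, local_seg, queue_id) !=
		    DMA_STATUS_OK)
			return -EIO;

		/* busy-poll the queue for the single completion record */
		do {
			polled = dma_poll_queue(dma_dev, queue_id, 1, &cr);
		} while (polled == 0);

		return polled < 0 ? polled : 0;
	}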
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 24 ++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 29 ++++++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 2 ++ drivers/ub/cdma/cdma_jfs.c | 42 +++++++++++++++++++++++++++++++++++ include/ub/cdma/cdma_api.h | 3 +++ 6 files changed, 101 insertions(+) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 33f9ce38bb98..c4038ad740c5 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -525,6 +525,30 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, } EXPORT_SYMBOL_GPL(dma_write_with_notify); +enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("read input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_read(cdev, cdma_queue, local_seg, rmt_seg); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_read); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index cd4f48c2dce4..3858756a9e5b 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -34,6 +34,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, + CDMA_OPC_READ = 0x6, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index ca6daeb03f4c..8646e2b08519 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -85,3 +85,32 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, return ret; } + +int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_READ }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "read param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.rw.src.num_sge = 1; + wr.rw.src.sge = &rmt_sge; + wr.rw.dst.num_sge = 1; + wr.rw.dst.sge = &local_sge; + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for read failed, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index e9394e2c321a..aaf7ad61044f 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -11,5 +11,7 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, struct dma_notify_data *data); +int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index e8a1677a1f2e..a505a00361cb 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -566,6 +566,8 @@ static u8 cdma_get_jfs_opcode(enum 
cdma_wr_opcode opcode) return CDMA_OPC_WRITE; case CDMA_WR_OPC_WRITE_NOTIFY: return CDMA_OPC_WRITE_WITH_NOTIFY; + case CDMA_WR_OPC_READ: + return CDMA_OPC_READ; default: return CDMA_OPC_INVALID; } @@ -580,6 +582,8 @@ static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { switch (opcode) { + case CDMA_OPC_READ: + return wr->rw.dst.num_sge > sq->max_sge_num; case CDMA_OPC_WRITE_WITH_NOTIFY: return wr->rw.src.num_sge > CDMA_JFS_MAX_SGE_NOTIFY || wr->rw.src.num_sge > sq->max_sge_num; @@ -671,6 +675,42 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, return 0; } +static int cdma_k_fill_read_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + u32 sge_num = 0; + u32 num; + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge_info = wr->rw.dst.sge; + + for (num = 0; num < wr->rw.dst.num_sge; num++) { + if (!sge_info[num].len) + continue; + sge->va = sge_info[num].addr; + sge->length = sge_info[num].len; + sge->token_id = sge_info[num].seg->tid; + sge++; + sge_num++; + } + + sge_info = wr->rw.src.sge; + sqe_ctl->sge_num = sge_num; + sqe_ctl->toid = sge_info[0].seg->tid; + sqe_ctl->token_en = sge_info[0].seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info[0].seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = + sge_info[0].addr & (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -679,6 +719,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, case CDMA_WR_OPC_WRITE: case CDMA_WR_OPC_WRITE_NOTIFY: return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_READ: + return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", (u8)wr->opcode); diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 39dff8f6378f..eb425553d6ac 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -114,6 +114,9 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, struct dma_seg *local_seg, int queue_id, struct dma_notify_data *data); +enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From 6bf84d44a6d32accc106935968ed55fd4a1cd12b Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 21:43:46 +0800 Subject: [PATCH 087/243] ub: cdma: support dma cas semantic commit 89f3c2b942a6cc9812e123be53644044d201037a openEuler This patch implements functionality related to the configuration and issuance of DMA cas semantics in the CDMA driver. The implementation includes support for the dma_cas interface and the process of configuring and issuing semantics within this interface. 
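
As an illustration, an 8-byte compare-and-swap could be issued as
below. This is a hypothetical caller: per the driver's CAS handling,
operation lengths up to 8 bytes pass the compare and swap values
inline in struct dma_cas_data, a 16-byte operation instead passes the
addresses of the operands, and the original remote value is written
back through local_seg:

	struct dma_cas_data cas = {
		.compare_data = 0x0123456789abcdefULL, /* expected value */
		.swap_data = 0xfedcba9876543210ULL,    /* replacement */
	};

	/* if the remote value equals compare_data it becomes swap_data;
	 * the prior remote value lands in the local segment.
	 */
	if (dma_cas(dma_dev, rmt_seg, local_seg, queue_id, &cas) !=
	    DMA_STATUS_OK)
		return -EIO;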
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 25 ++++++++++++ drivers/ub/cdma/cdma_common.h | 5 +++ drivers/ub/cdma/cdma_handle.c | 36 +++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 3 ++ drivers/ub/cdma/cdma_jfs.c | 73 ++++++++++++++++++++++++++++++++++- drivers/ub/cdma/cdma_jfs.h | 23 +++++++++++ include/ub/cdma/cdma_api.h | 9 +++++ 7 files changed, 173 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index c4038ad740c5..e9fec2437cd5 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -549,6 +549,31 @@ enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_read); +enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_cas_data *data) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg || !data) { + pr_err("cas input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_cas(cdev, cdma_queue, local_seg, rmt_seg, data); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_cas); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 3858756a9e5b..54d0e3e43af4 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -28,6 +28,10 @@ #define CDMA_DB_SIZE 64 +#define CDMA_ATOMIC_LEN_4 4 +#define CDMA_ATOMIC_LEN_8 8 +#define CDMA_ATOMIC_LEN_16 16 + #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) /* thanks to include/rdma/ib_verbs.h */ @@ -35,6 +39,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_READ = 0x6, + CDMA_OPC_CAS, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index 8646e2b08519..b172383e10e6 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -114,3 +114,39 @@ int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, return ret; } + +int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_cas_data *data) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_CAS }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "cas param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.cas.src = &local_sge; + wr.cas.dst = &rmt_sge; + + if (local_sge.len <= CDMA_ATOMIC_LEN_8) { + wr.cas.cmp_data = data->compare_data; + wr.cas.swap_data = data->swap_data; + } else { + wr.cas.cmp_addr = data->compare_data; + wr.cas.swap_addr = data->swap_data; + } + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for cas failed, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index aaf7ad61044f..8c99a0a0cb32 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -13,5 +13,8 @@ int cdma_write(struct 
cdma_dev *cdev, struct cdma_queue *queue, struct dma_notify_data *data); int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg); +int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_cas_data *data); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index a505a00361cb..79d5074ff4ef 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -568,6 +568,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) return CDMA_OPC_WRITE_WITH_NOTIFY; case CDMA_WR_OPC_READ: return CDMA_OPC_READ; + case CDMA_WR_OPC_CAS: + return CDMA_OPC_CAS; default: return CDMA_OPC_INVALID; } @@ -575,13 +577,20 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq) { - return tmp_sq->sge_num; + switch (opcode) { + case CDMA_OPC_CAS: + return CDMA_ATOMIC_SGE_NUM_ATOMIC; + default: + return tmp_sq->sge_num; + } } static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { switch (opcode) { + case CDMA_OPC_CAS: + return sq->max_sge_num == 0; case CDMA_OPC_READ: return wr->rw.dst.num_sge > sq->max_sge_num; case CDMA_OPC_WRITE_WITH_NOTIFY: @@ -711,6 +720,66 @@ static int cdma_k_fill_read_sqe(struct cdma_dev *cdev, return 0; } +static bool cdma_check_atomic_len(u32 len, u8 opcode) +{ + switch (len) { + case CDMA_ATOMIC_LEN_4: + case CDMA_ATOMIC_LEN_8: + return true; + case CDMA_ATOMIC_LEN_16: + if (opcode == CDMA_WR_OPC_CAS) + return true; + return false; + default: + return false; + } +} + +static int cdma_k_fill_cas_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + + sge_info = wr->cas.src; + if (!cdma_check_atomic_len(sge_info->len, wr->opcode)) { + dev_err(cdev->dev, "cdma cas sge len invalid, len = %u.\n", + sge_info->len); + return -EINVAL; + } + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge->va = sge_info->addr; + sge->length = sge_info->len; + sge->token_id = sge_info->seg->tid; + + sge_info = wr->cas.dst; + sqe_ctl->sge_num = CDMA_ATOMIC_SGE_NUM; + sqe_ctl->toid = sge_info->seg->tid; + sqe_ctl->token_en = sge_info->seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info->seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & + (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info->addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= CDMA_ATOMIC_LEN_8) { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->cas.swap_data, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + &wr->cas.cmp_data, sge->length); + } else { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (char *)wr->cas.swap_addr, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + (char *)wr->cas.cmp_addr, sge->length); + } + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -721,6 +790,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); case CDMA_WR_OPC_READ: return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_CAS: + return cdma_k_fill_cas_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode 
= %u.\n",
 			(u8)wr->opcode);
diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h
index b94f8aca2d99..8637c2a80074 100644
--- a/drivers/ub/cdma/cdma_jfs.h
+++ b/drivers/ub/cdma/cdma_jfs.h
@@ -15,6 +15,8 @@
 #define CDMA_JFS_MAX_SGE_NOTIFY 11
 #define CDMA_JFS_SGE_SIZE 16
 #define SQE_WRITE_NOTIFY_CTL_LEN 80
+#define CDMA_ATOMIC_SGE_NUM 1
+#define CDMA_ATOMIC_SGE_NUM_ATOMIC 2
 #define SQE_CTL_RMA_ADDR_OFFSET 32
 #define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0)
 #define SQE_NOTIFY_TOKEN_ID_FIELD 48
@@ -135,6 +137,26 @@ struct cdma_rw_wr {
 	u32 notify_tokenvalue;
 };
 
+struct cdma_cas_wr {
+	struct cdma_sge_info *dst; /* len in the sge is the length of the CAS
+				    * operation; only 4/8/16B are supported.
+				    */
+	struct cdma_sge_info *src; /* local address to which the original
+				    * destination value is written back
+				    */
+	union {
+		u64 cmp_data; /* when the len is 4/8B, it holds the compare value. */
+		u64 cmp_addr; /* when the len is 16B, it holds the compare data address. */
+	};
+	union {
+		/* if the destination value is the same as cmp_data,
+		 * the destination value will be changed to swap_data.
+		 */
+		u64 swap_data;
+		u64 swap_addr;
+	};
+};
+
 struct cdma_jfs_wr {
 	enum cdma_wr_opcode opcode;
 	union cdma_jfs_wr_flag flag;
@@ -142,6 +164,7 @@ struct cdma_jfs_wr {
 	u32 rmt_eid;
 	union {
 		struct cdma_rw_wr rw;
+		struct cdma_cas_wr cas;
 	};
 	struct cdma_jfs_wr *next;
 };
diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h
index eb425553d6ac..8a70bbb4d49c 100644
--- a/include/ub/cdma/cdma_api.h
+++ b/include/ub/cdma/cdma_api.h
@@ -77,6 +77,11 @@ enum dma_status {
 	DMA_STATUS_INVAL,
 };
 
+struct dma_cas_data {
+	u64 compare_data;
+	u64 swap_data;
+};
+
 struct dma_notify_data {
 	struct dma_seg *notify_seg;
 	u64 notify_data;
@@ -117,6 +122,10 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev,
 				      struct dma_seg *local_seg, int queue_id,
 				      struct dma_notify_data *data);
 
+enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
+			struct dma_seg *local_seg, int queue_id,
+			struct dma_cas_data *data);
+
 int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt,
 		   struct dma_cr *cr);
-- 
Gitee

From d7fbd5f220d92b0b197083b379bfe77357f7e119 Mon Sep 17 00:00:00 2001
From: Zhipeng Lu
Date: Mon, 25 Aug 2025 21:48:38 +0800
Subject: [PATCH 088/243] ub: cdma: support dma faa semantic

commit 6022413537d64ed9c15cb308617ddb27f6e128ce openEuler

This patch implements the DMA faa (fetch-and-add) semantic in the CDMA
driver. It adds the dma_faa interface and the logic that configures and
posts the corresponding fetch-and-add work request.
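
For example, atomically incrementing a remote 8-byte counter might
look like this (hypothetical caller; for 4/8-byte operations the
operand is passed inline via the add argument, and the value of the
remote location before the addition is written back through
local_seg):

	/* remote counter += 1; the pre-add value lands in local_seg */
	if (dma_faa(dma_dev, rmt_seg, local_seg, queue_id, 1) !=
	    DMA_STATUS_OK)
		return -EIO;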
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 24 ++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 28 +++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 2 ++ drivers/ub/cdma/cdma_jfs.c | 46 +++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 14 +++++++++++ include/ub/cdma/cdma_api.h | 3 +++ 7 files changed, 118 insertions(+) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index e9fec2437cd5..8a30d20a1a09 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -574,6 +574,30 @@ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_cas); +enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, u64 add) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("faa input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_faa(cdev, cdma_queue, local_seg, rmt_seg, add); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_faa); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 54d0e3e43af4..58855991647d 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -40,6 +40,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_READ = 0x6, CDMA_OPC_CAS, + CDMA_OPC_FAA = 0xb, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index b172383e10e6..183f802cfdbf 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -150,3 +150,31 @@ int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, return ret; } + +int cdma_faa(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_FADD }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "faa param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.faa.src = &local_sge; + wr.faa.dst = &rmt_sge; + wr.faa.operand = add; + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for faa failed, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 8c99a0a0cb32..00cb8049778e 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -16,5 +16,7 @@ int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, struct dma_cas_data *data); +int cdma_faa(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 79d5074ff4ef..cbb47a7f56db 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ 
b/drivers/ub/cdma/cdma_jfs.c @@ -570,6 +570,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) return CDMA_OPC_READ; case CDMA_WR_OPC_CAS: return CDMA_OPC_CAS; + case CDMA_WR_OPC_FADD: + return CDMA_OPC_FAA; default: return CDMA_OPC_INVALID; } @@ -579,6 +581,7 @@ static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq { switch (opcode) { case CDMA_OPC_CAS: + case CDMA_OPC_FAA: return CDMA_ATOMIC_SGE_NUM_ATOMIC; default: return tmp_sq->sge_num; @@ -590,6 +593,7 @@ static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, { switch (opcode) { case CDMA_OPC_CAS: + case CDMA_OPC_FAA: return sq->max_sge_num == 0; case CDMA_OPC_READ: return wr->rw.dst.num_sge > sq->max_sge_num; @@ -780,6 +784,46 @@ static int cdma_k_fill_cas_sqe(struct cdma_dev *cdev, return 0; } +static int cdma_k_fill_faa_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + + sge_info = wr->faa.src; + if (!cdma_check_atomic_len(sge_info->len, wr->opcode)) { + dev_err(cdev->dev, "cdma faa sge len invalid, len = %u.\n", + sge_info->len); + return -EINVAL; + } + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge->va = sge_info->addr; + sge->length = sge_info->len; + sge->token_id = sge_info->seg->tid; + + sge_info = wr->faa.dst; + sqe_ctl->sge_num = CDMA_ATOMIC_SGE_NUM; + sqe_ctl->toid = sge_info->seg->tid; + sqe_ctl->token_en = sge_info->seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info->seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & + (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info->addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= CDMA_ATOMIC_LEN_8) + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->faa.operand, sge->length); + else + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (void *)wr->faa.operand_addr, sge->length); + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -792,6 +836,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); case CDMA_WR_OPC_CAS: return cdma_k_fill_cas_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_FADD: + return cdma_k_fill_faa_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", (u8)wr->opcode); diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 8637c2a80074..fe46955c925b 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -157,6 +157,19 @@ struct cdma_cas_wr { }; }; +struct cdma_faa_wr { + struct cdma_sge_info *dst; /* len in the sge is the length of FAA + * operation, only support 4/8B + */ + struct cdma_sge_info *src; /* local address for destination original + * value written back + */ + union { + u64 operand; /* Addend */ + u64 operand_addr; + }; +}; + struct cdma_jfs_wr { enum cdma_wr_opcode opcode; union cdma_jfs_wr_flag flag; @@ -165,6 +178,7 @@ struct cdma_jfs_wr { union { struct cdma_rw_wr rw; struct cdma_cas_wr cas; + struct cdma_faa_wr faa; }; struct cdma_jfs_wr *next; }; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 8a70bbb4d49c..6809ba074c05 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -126,6 +126,9 @@ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id, struct dma_cas_data 
*data);
+enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
+			struct dma_seg *local_seg, int queue_id, u64 add);
+
 int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt,
 		   struct dma_cr *cr);
-- 
Gitee

From 57cd2eb629adf6d2c33eef55c79f5ae7e484dc98 Mon Sep 17 00:00:00 2001
From: Zhipeng Lu
Date: Tue, 26 Aug 2025 09:32:48 +0800
Subject: [PATCH 089/243] ub: cdma: support debugfs interface

commit 45ae057d001f39e6e83556d75f18da5aea2b0186 openEuler

This patch adds debugfs support to the CDMA driver. It registers the
debugfs directory hierarchy and its files, allowing users to view DFX
information about devices and resources by reading the corresponding
files.

Signed-off-by: Zhipeng Lu
Signed-off-by: Sunyi Nan
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/cdma/Makefile       |   2 +-
 drivers/ub/cdma/cdma.h         |   2 +
 drivers/ub/cdma/cdma_debugfs.c | 783 +++++++++++++++++++++++++++++++++
 drivers/ub/cdma/cdma_debugfs.h |  58 +++
 drivers/ub/cdma/cdma_jfs.h     |   5 +
 drivers/ub/cdma/cdma_main.c    |   7 +
 drivers/ub/cdma/cdma_queue.h   |   3 +-
 7 files changed, 858 insertions(+), 2 deletions(-)
 create mode 100644 drivers/ub/cdma/cdma_debugfs.c
 create mode 100644 drivers/ub/cdma/cdma_debugfs.h

diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile
index cb3ea219f9e2..2ce4eefa2d84 100644
--- a/drivers/ub/cdma/Makefile
+++ b/drivers/ub/cdma/Makefile
@@ -3,6 +3,6 @@
 cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \
 	cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \
 	cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \
-	cdma_handle.o
+	cdma_handle.o cdma_debugfs.o
 obj-m += cdma.o
diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h
index 8ed8fdb4d6fa..e782b0229943 100644
--- a/drivers/ub/cdma/cdma.h
+++ b/drivers/ub/cdma/cdma.h
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include "cdma_debugfs.h"
 #include

 extern u32 jfc_arm_mode;
@@ -163,6 +164,7 @@ struct cdma_dev {
 	struct auxiliary_device *adev;
 	struct cdma_chardev chardev;
 	struct cdma_caps caps;
+	struct cdma_dbgfs cdbgfs;

 	u32 eid;
 	u32 upi;
diff --git a/drivers/ub/cdma/cdma_debugfs.c b/drivers/ub/cdma/cdma_debugfs.c
new file mode 100644
index 000000000000..d0de451e92ca
--- /dev/null
+++ b/drivers/ub/cdma/cdma_debugfs.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include +#include +#include +#include "cdma_queue.h" +#include "cdma.h" +#include "cdma_jfc.h" +#include "cdma_jfs.h" +#include "cdma_mbox.h" +#include "cdma_cmd.h" +#include "cdma_debugfs.h" + +#define CDMA_DBG_READ_LEN 65536 +#define BUF_10_BASE 10 +#define BUF_SIZE 8 + +/* ctx debugfs start */ +static void cdma_get_ctx_info(struct cdma_dev *cdev, + struct cdma_queue *queue, + enum cdma_dbg_ctx_type ctx_type, + struct cdma_ctx_info *ctx_info) +{ + struct auxiliary_device *adev = cdev->adev; + +#define CDMA_DBG_CTX_SIZE_256 256 +#define UBASE_CTX_SIZE_128 128 + switch (ctx_type) { + case CDMA_DBG_JFS_CTX: + ctx_info->start_idx = queue->jfs_id; + ctx_info->ctx_size = CDMA_DBG_CTX_SIZE_256; + ctx_info->op = UBASE_MB_QUERY_JFS_CONTEXT; + ctx_info->ctx_name = "jfs"; + break; + case CDMA_DBG_SQ_JFC_CTX: + ctx_info->start_idx = queue->jfc_id; + ctx_info->ctx_size = UBASE_CTX_SIZE_128; + ctx_info->op = UBASE_MB_QUERY_JFC_CONTEXT; + ctx_info->ctx_name = "sq_jfc"; + break; + default: + dev_err(&adev->dev, "get ctx info failed, ctx_type = %d.\n", + ctx_type); + break; + } +} + +static void cdma_print_ctx_hw_bytype(struct seq_file *s, + enum cdma_dbg_ctx_type ctx_type, + struct cdma_ctx_info *ctx_info, + struct ubase_cmd_mailbox *mailbox) +{ + struct cdma_jfs_ctx *jfs_ctx; + struct cdma_jfc_ctx *jfc_ctx; + + seq_printf(s, "offset\t%s%u\n", ctx_info->ctx_name, ctx_info->start_idx); + + if (ctx_type == CDMA_DBG_JFS_CTX) { + jfs_ctx = (struct cdma_jfs_ctx *)mailbox->buf; + jfs_ctx->sqe_base_addr_l = 0; + jfs_ctx->sqe_base_addr_h = 0; + jfs_ctx->user_data_l = 0; + jfs_ctx->user_data_h = 0; + ubase_print_context_hw(s, jfs_ctx, ctx_info->ctx_size); + } else if (ctx_type == CDMA_DBG_SQ_JFC_CTX) { + jfc_ctx = (struct cdma_jfc_ctx *)mailbox->buf; + jfc_ctx->cqe_va_l = 0; + jfc_ctx->cqe_va_h = 0; + jfc_ctx->cqe_token_value = 0; + jfc_ctx->record_db_addr_l = 0; + jfc_ctx->record_db_addr_h = 0; + jfc_ctx->remote_token_value = 0; + ubase_print_context_hw(s, jfc_ctx, ctx_info->ctx_size); + } + + seq_puts(s, "\n"); +} + +static int cdma_dbg_dump_ctx_hw(struct seq_file *s, enum cdma_dbg_ctx_type ctx_type) +{ + struct cdma_dev *cdev = dev_get_drvdata(s->private); + struct auxiliary_device *adev = cdev->adev; + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_ctx_info ctx_info = { 0 }; + struct ubase_cmd_mailbox *mailbox; + struct ubase_mbx_attr attr; + struct cdma_queue *queue; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&adev->dev, "find queue[%u] for dump context hw failed.\n", queue_id); + return -EINVAL; + } + + if (!queue->jfs_id) { + spin_unlock(&cdev->queue_table.lock); + dev_warn(&adev->dev, "queue resource is not initialized.\n"); + return -EINVAL; + } + + cdma_get_ctx_info(cdev, queue, ctx_type, &ctx_info); + spin_unlock(&cdev->queue_table.lock); + + cdma_fill_mbx_attr(&attr, ctx_info.start_idx, ctx_info.op, 0); + mailbox = cdma_mailbox_query_ctx(cdev, &attr); + if (!mailbox) { + dev_err(&adev->dev, "cdma dbg post query %s ctx mbx failed.\n", + ctx_info.ctx_name); + return -ENOMEM; + } + + cdma_print_ctx_hw_bytype(s, ctx_type, &ctx_info, mailbox); + + cdma_free_cmd_mailbox(cdev, mailbox); + + return 0; +} + +static int cdma_dbg_dump_jfs_ctx_hw(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx_hw(s, CDMA_DBG_JFS_CTX); +} + +static int 
cdma_dbg_dump_sq_jfc_ctx_hw(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx_hw(s, CDMA_DBG_SQ_JFC_CTX); +} + +static void cdma_get_jfs_cfg(struct cdma_queue *queue, struct seq_file *s) +{ + struct cdma_jfs_cfg *cfg; + + if (!queue->jfs) + return; + + cfg = &queue->jfs->cfg; + seq_printf(s, "%-13u", cfg->depth); + seq_printf(s, "%-12u", cfg->flag.value); + seq_printf(s, "%-17u", cfg->eid_index); + seq_printf(s, "%-10u", cfg->priority); + seq_printf(s, "%-9u", cfg->max_sge); + seq_printf(s, "%-10u", cfg->max_rsge); + seq_printf(s, "%-11u", cfg->rnr_retry); + seq_printf(s, "%-13u", cfg->err_timeout); + seq_printf(s, "%-14u", cfg->jfc_id); + seq_printf(s, "%-15u", cfg->sqe_pos); + seq_printf(s, "%-11u", cfg->tpn); + seq_printf(s, "%-15u", cfg->pld_pos); + seq_printf(s, "%-16u", cfg->queue_id); +} + +static void cdma_get_jfc_cfg(struct cdma_queue *queue, struct seq_file *s) +{ + struct cdma_jfc_cfg *cfg; + + if (!queue->jfc) + return; + + cfg = &queue->jfc->jfc_cfg; + seq_printf(s, "%-13u", cfg->depth); + seq_printf(s, "%-12u", cfg->ceqn); + seq_printf(s, "%-16u", cfg->queue_id); +} + +static void cdma_get_jfs_title(struct seq_file *s) +{ + seq_puts(s, "depth "); + seq_puts(s, "flag "); + seq_puts(s, "eid_index "); + seq_puts(s, "priority "); + seq_puts(s, "max_sge "); + seq_puts(s, "max_rsge "); + seq_puts(s, "rnr_retry "); + seq_puts(s, "err_timeout "); + seq_puts(s, "jfc_id "); + seq_puts(s, "sqe_pos "); + seq_puts(s, "tpn "); + seq_puts(s, "pld_pos "); + seq_puts(s, "queue_id "); + seq_puts(s, "\n"); +} + +static void cdma_get_jfc_title(struct seq_file *s) +{ + seq_puts(s, "depth "); + seq_puts(s, "flag "); + seq_puts(s, "ceqn "); + seq_puts(s, "queue_id "); + seq_puts(s, "\n"); +} + +static int cdma_dbg_dump_ctx(struct seq_file *s, enum cdma_dbg_ctx_type ctx_type) +{ + struct cdma_dbg_context { + void (*get_title)(struct seq_file *s); + void (*get_cfg)(struct cdma_queue *queue, struct seq_file *s); + } dbg_ctx[] = { + {cdma_get_jfs_title, cdma_get_jfs_cfg}, + {cdma_get_jfc_title, cdma_get_jfc_cfg}, + }; + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_queue *queue; + + dbg_ctx[ctx_type].get_title(s); + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump context failed.\n", queue_id); + return -EINVAL; + } + + dbg_ctx[ctx_type].get_cfg(queue, s); + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +int cdma_dbg_dump_jfs_ctx(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx(s, CDMA_DBG_JFS_CTX); +} + +int cdma_dbg_dump_sq_jfc_ctx(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx(s, CDMA_DBG_SQ_JFC_CTX); +} +/* ctx debugfs end */ + +/* resource debugfs start */ +static int cdma_dbg_dump_dev_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u8 eu_num = cdev->base.attr.eu_num; + u32 seid_idx, seid, upi, i; + + seq_printf(s, "EU_ENTRY_NUM: %u\n", eu_num); + for (i = 0; i < eu_num; i++) { + seid_idx = cdev->base.attr.eus[i].eid_idx; + seid = cdev->base.attr.eus[i].eid.dw0; + upi = cdev->base.attr.eus[i].upi; + seq_printf(s, "SEID_IDX: %u, SEID: %u, UPI: %u\n", seid_idx, seid, upi); + } + + return 0; +} + 
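+/* "cap_info" debugfs file: dump the capability limits (queue counts and
+ * depths, SGE limits, max message size) reported for this device.
+ */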
+static int cdma_dbg_dump_cap_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + struct cdma_caps *caps = &cdev->caps; + + seq_printf(s, "MAX_JFC: %u\n", caps->jfc.max_cnt); + seq_printf(s, "MAX_JFS: %u\n", caps->jfs.max_cnt); + seq_printf(s, "MAX_JFC_DEPTH: %u\n", caps->jfc.depth); + seq_printf(s, "MAX_JFS_DEPTH: %u\n", caps->jfs.depth); + seq_printf(s, "MAX_JFS_SGE: %u\n", caps->jfs_sge); + seq_printf(s, "MAX_JFS_RSGE: %u\n", caps->jfs_rsge); + seq_printf(s, "MAX_MSG_SIZE: %u\n", caps->max_msg_len); + seq_printf(s, "TRANS_MODE: %u\n", caps->trans_mode); + seq_printf(s, "CEQ_CNT: %u\n", caps->comp_vector_cnt); + + return 0; +} + +static int cdma_dbg_dump_queue_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_queue *queue; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump queue info failed.\n", queue_id); + return -EINVAL; + } + + seq_printf(s, "QUEUE_DEPTH: %u\n", queue->cfg.queue_depth); + seq_printf(s, "DST CNA: 0x%x\n", queue->cfg.dcna); + seq_printf(s, "RMT EID: 0x%x\n", queue->cfg.rmt_eid.dw0); + seq_printf(s, "PRIORITY: %u\n", queue->cfg.priority); + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} +/* resource debugfs end */ + +/* entry info start */ +static void cdma_dbg_dump_sqe_info(struct cdma_sqe_ctl *sqe_ctl, struct seq_file *s) +{ + seq_printf(s, "sqe bb idx: %u\n", sqe_ctl->sqe_bb_idx); + seq_printf(s, "place odr: %u\n", sqe_ctl->place_odr); + seq_printf(s, "comp order: %u\n", sqe_ctl->comp_order); + seq_printf(s, "fence: %u\n", sqe_ctl->fence); + seq_printf(s, "se: %u\n", sqe_ctl->se); + seq_printf(s, "cqe: %u\n", sqe_ctl->cqe); + seq_printf(s, "owner: %u\n", sqe_ctl->owner); + seq_printf(s, "opcode: %u\n", sqe_ctl->opcode); + seq_printf(s, "tpn: %u\n", sqe_ctl->tpn); + seq_printf(s, "sge num: %u\n", sqe_ctl->sge_num); + seq_printf(s, "rmt eid: %u\n", sqe_ctl->rmt_eid[0]); +} + +static void cdma_dbg_dump_cqe_info(struct cdma_jfc_cqe *cqe, struct seq_file *s) +{ + seq_printf(s, "sr: %u\n", cqe->s_r); + seq_printf(s, "owner: %u\n", cqe->owner); + seq_printf(s, "opcode: %u\n", cqe->opcode); + seq_printf(s, "fd: %u\n", cqe->fd); + seq_printf(s, "substatus: %u\n", cqe->substatus); + seq_printf(s, "status: %u\n", cqe->status); + seq_printf(s, "entry idx: %u\n", cqe->entry_idx); + seq_printf(s, "tpn: %u\n", cqe->tpn); + seq_printf(s, "rmt eid: %u\n", cqe->rmt_eid[0]); + seq_printf(s, "byte cnt: %u\n", cqe->byte_cnt); +} + +static void cdma_dbg_dum_eu(struct cdma_dev *cdev, int i, struct seq_file *s) +{ + struct eu_info *eu = &cdev->base.attr.eus[i]; + + seq_printf(s, "%d: ", i); + seq_printf(s, "idx[0x%x] ", eu->eid_idx); + seq_printf(s, "eid[0x%x] ", eu->eid.dw0); + seq_printf(s, "upi[0x%x]\n", eu->upi); +} + +static int cdma_dbg_dump_sqe(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + u32 entry_pi = cdev->cdbgfs.cfg.entry_pi; + struct cdma_sqe_ctl *sqe_ctl; + struct cdma_queue *queue; + struct cdma_jfs *jfs; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + 
spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump sqe failed.\n", queue_id); + return -EINVAL; + } + + if (queue->jfs && queue->is_kernel) { + jfs = to_cdma_jfs(queue->jfs); + if (entry_pi >= jfs->base_jfs.cfg.depth) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "pi [%u] overflow for dump sqe.\n", entry_pi); + return -EINVAL; + } + + spin_lock(&jfs->sq.lock); + sqe_ctl = (struct cdma_sqe_ctl *)(jfs->sq.buf.kva + + (entry_pi & (jfs->sq.buf.entry_cnt - 1)) * + jfs->sq.buf.entry_size); + cdma_dbg_dump_sqe_info(sqe_ctl, s); + spin_unlock(&jfs->sq.lock); + } else { + dev_warn(&cdev->adev->dev, "not support queue[%u] for dump sqe.\n", queue_id); + } + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +static int cdma_dbg_dump_cqe(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + u32 entry_ci = cdev->cdbgfs.cfg.entry_ci; + struct cdma_queue *queue; + struct cdma_jfc_cqe *cqe; + struct cdma_jfc *jfc; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump cqe failed.\n", queue_id); + return -EINVAL; + } + + if (queue->jfc && queue->is_kernel) { + jfc = to_cdma_jfc(queue->jfc); + if (entry_ci >= jfc->base.jfc_cfg.depth) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "ci [%u] overflow for dump cqe.\n", entry_ci); + return -EINVAL; + } + + spin_lock(&jfc->lock); + cqe = (struct cdma_jfc_cqe *)(jfc->buf.kva + + (entry_ci & (jfc->buf.entry_cnt - 1)) * + jfc->buf.entry_size); + cdma_dbg_dump_cqe_info(cqe, s); + spin_unlock(&jfc->lock); + } else { + dev_warn(&cdev->adev->dev, "not support queue[%u] for dump cqe.\n", queue_id); + } + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +/* Dump eu info */ +static int cdma_dbg_dump_eu(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + int ret, i; + + ret = cdma_ctrlq_query_eu(cdev); + if (ret) + return ret; + + for (i = 0; i < cdev->base.attr.eu_num; i++) + cdma_dbg_dum_eu(cdev, i, s); + + return 0; +} +/* entry info end */ + +static bool cdma_dbg_dentry_support(struct device *dev, u32 property) +{ + struct cdma_dev *cdev = dev_get_drvdata(dev); + + return ubase_dbg_dentry_support(cdev->adev, property); +} + +static struct ubase_dbg_dentry_info cdma_dbg_dentry[] = { + { + .name = "context", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, { + .name = "resource_info", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, { + .name = "entry_info", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, + /* keep "cdma" at the bottom and add new directory above */ + { + .name = "cdma", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, +}; + +static struct ubase_dbg_cmd_info cdma_dbg_cmd[] = { + { + .name = "jfs_context", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_jfs_ctx, + }, { + .name = "sq_jfc_context", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support 
= cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sq_jfc_ctx, + }, { + .name = "jfs_context_hw", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_jfs_ctx_hw, + }, { + .name = "sq_jfc_context_hw", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sq_jfc_ctx_hw, + }, { + .name = "dev_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_dev_info, + }, { + .name = "cap_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_cap_info, + }, { + .name = "queue_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_queue_info, + }, { + .name = "sqe", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sqe, + }, { + .name = "cqe", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_cqe, + }, { + .name = "eu", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_eu, + }, +}; + +static ssize_t cdma_dbgfs_cfg_write_val(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, + enum cdma_dbgfs_cfg_type type) +{ + struct cdma_dbgfs_cfg *cfg = (struct cdma_dbgfs_cfg *)filp->private_data; + char buf[BUF_SIZE] = { 0 }; + ssize_t len, ret; + u32 value; + + len = simple_write_to_buffer(buf, BUF_SIZE - 1, ppos, buffer, count); + if (len < 0) + return len; + + ret = kstrtouint(buf, BUF_10_BASE, &value); + if (ret) + return ret; + + switch (type) { + case CDMA_QUEUE_ID: + cfg->queue_id = value; + break; + case CDMA_ENTRY_PI: + cfg->entry_pi = value; + break; + case CDMA_ENTRY_CI: + cfg->entry_ci = value; + break; + default: + return -EINVAL; + } + + return len; +} + +static ssize_t cdma_dbgfs_cfg_read_val(struct file *filp, + char *buffer, size_t count, loff_t *ppos, + enum cdma_dbgfs_cfg_type type) +{ + struct cdma_dbgfs_cfg *cfg = (struct cdma_dbgfs_cfg *)filp->private_data; + char buf[BUF_SIZE] = { 0 }; + u32 value = 0; + size_t len; + + switch (type) { + case CDMA_QUEUE_ID: + value = cfg->queue_id; + break; + case CDMA_ENTRY_PI: + value = cfg->entry_pi; + break; + case CDMA_ENTRY_CI: + value = cfg->entry_ci; + break; + default: + break; + } + + len = scnprintf(buf, sizeof(buf), "%u\n", value); + + return simple_read_from_buffer(buffer, count, ppos, buf, len); +} + +static ssize_t cdma_dbgfs_cfg_write_queue_id(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_QUEUE_ID); +} + +static ssize_t cdma_dbgfs_cfg_read_queue_id(struct file 
*filp, + char *buffer, size_t count, + loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_QUEUE_ID); +} + +static ssize_t cdma_dbgfs_cfg_write_entry_pi(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_ENTRY_PI); +} + +static ssize_t cdma_dbgfs_cfg_read_entry_pi(struct file *filp, + char *buffer, size_t count, + loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_ENTRY_PI); +} + +static ssize_t cdma_dbgfs_cfg_write_entry_ci(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_ENTRY_CI); +} + +static ssize_t cdma_dbgfs_cfg_read_entry_ci(struct file *filp, + char *buffer, size_t count, + loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_ENTRY_CI); +} + +static struct cdma_dbgfs_cfg_info cdma_dbg_cfg[] = { + { + .name = "queue_id", + {true, true, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_queue_id, + .write = cdma_dbgfs_cfg_write_queue_id, + .open = simple_open, }, + }, { + .name = "entry_pi", + {false, false, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_entry_pi, + .write = cdma_dbgfs_cfg_write_entry_pi, + .open = simple_open, }, + }, { + .name = "entry_ci", + {false, false, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_entry_ci, + .write = cdma_dbgfs_cfg_write_entry_ci, + .open = simple_open, }, + }, +}; + +static int cdma_dbg_create_cfg_file(struct cdma_dev *cdev, + struct ubase_dbg_dentry_info *dentry_info, + u8 array_size) +{ + struct dentry *debugfs_file; + struct dentry *cur_dir; + size_t i, j; + + for (i = 0; i < array_size - 1; i++) { + cur_dir = dentry_info[i].dentry; + for (j = 0; j < ARRAY_SIZE(cdma_dbg_cfg); j++) { + if (!cdma_dbg_cfg[j].dentry_valid[i]) + continue; + debugfs_file = debugfs_create_file(cdma_dbg_cfg[j].name, + 0400, cur_dir, &cdev->cdbgfs.cfg, + &cdma_dbg_cfg[j].file_ops); + if (!debugfs_file) + return -ENOMEM; + } + } + + return 0; +} + +int cdma_dbg_init(struct auxiliary_device *adev) +{ + struct ubase_dbg_dentry_info dbg_dentry[CDMA_DBG_DENTRY_ROOT + 1] = {0}; + struct dentry *ubase_root_dentry = ubase_diag_debugfs_root(adev); + struct device *dev = &adev->dev; + struct cdma_dev *cdev; + int ret; + + cdev = dev_get_drvdata(dev); + + if (!ubase_root_dentry) { + dev_err(dev, "dbgfs root dentry does not exist.\n"); + return -ENOENT; + } + + memcpy(dbg_dentry, cdma_dbg_dentry, sizeof(cdma_dbg_dentry)); + cdev->cdbgfs.dbgfs.dentry = debugfs_create_dir( + dbg_dentry[ARRAY_SIZE(dbg_dentry) - 1].name, ubase_root_dentry); + if (IS_ERR(cdev->cdbgfs.dbgfs.dentry)) { + dev_err(dev, "create cdma debugfs root dir failed.\n"); + return PTR_ERR(cdev->cdbgfs.dbgfs.dentry); + } + + dbg_dentry[CDMA_DBG_DENTRY_ROOT].dentry = cdev->cdbgfs.dbgfs.dentry; + cdev->cdbgfs.dbgfs.cmd_info = cdma_dbg_cmd; + cdev->cdbgfs.dbgfs.cmd_info_size = ARRAY_SIZE(cdma_dbg_cmd); + + ret = ubase_dbg_create_dentry(dev, &cdev->cdbgfs.dbgfs, dbg_dentry, + ARRAY_SIZE(dbg_dentry) - 1); + if (ret) { + dev_err(dev, "create cdma debugfs dentry failed, ret = %d.\n", ret); + goto create_dentry_err; + } + + ret = cdma_dbg_create_cfg_file(cdev, dbg_dentry, ARRAY_SIZE(dbg_dentry)); + if (ret) { + dev_err(dev, "create cdma debugfs cfg file failed, ret = %d.\n", ret); + goto create_dentry_err; + } + + return 0; + +create_dentry_err: + debugfs_remove_recursive(cdev->cdbgfs.dbgfs.dentry); 
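+	/* mark debugfs as absent so cdma_dbg_uninit() becomes a no-op */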
+ cdev->cdbgfs.dbgfs.dentry = NULL; + + return ret; +} + +void cdma_dbg_uninit(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev = dev_get_drvdata(&adev->dev); + + if (!cdev->cdbgfs.dbgfs.dentry) + return; + + debugfs_remove_recursive(cdev->cdbgfs.dbgfs.dentry); + cdev->cdbgfs.dbgfs.dentry = NULL; +} diff --git a/drivers/ub/cdma/cdma_debugfs.h b/drivers/ub/cdma/cdma_debugfs.h new file mode 100644 index 000000000000..1cd0f2ada9dc --- /dev/null +++ b/drivers/ub/cdma/cdma_debugfs.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_DEBUGFS_H__ +#define __CDMA_DEBUGFS_H__ + +#include +#include + +enum cdma_dbg_dentry_type { + CDMA_DBG_DENTRY_CONTEXT, + CDMA_DBG_DENTRY_RES_INFO, + CDMA_DBG_DENTRY_ENTRY_INFO, + /* must be the last entry. */ + CDMA_DBG_DENTRY_ROOT, +}; + +/* ctx debugfs start */ +struct cdma_ctx_info { + u32 start_idx; + u32 ctx_size; + u8 op; + const char *ctx_name; +}; + +enum cdma_dbg_ctx_type { + CDMA_DBG_JFS_CTX = 0, + CDMA_DBG_SQ_JFC_CTX = 1, +}; +/* ctx debugfs end */ + +struct cdma_dbgfs_cfg_info { + const char *name; + bool dentry_valid[CDMA_DBG_DENTRY_ROOT]; + const struct file_operations file_ops; +}; + +struct cdma_dbgfs_cfg { + u32 queue_id; + u32 entry_pi; + u32 entry_ci; +}; + +enum cdma_dbgfs_cfg_type { + CDMA_QUEUE_ID = 0, + CDMA_ENTRY_PI, + CDMA_ENTRY_CI +}; + +struct cdma_dbgfs { + struct ubase_dbgfs dbgfs; + struct cdma_dbgfs_cfg cfg; +}; + +int cdma_dbg_init(struct auxiliary_device *adev); +void cdma_dbg_uninit(struct auxiliary_device *adev); + +#endif /* CDMA_DEBUGFS_H */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index fe46955c925b..3d0391b03d97 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -314,6 +314,11 @@ struct cdma_jfs_ctx { u32 taack_nack_bm[32]; }; +static inline struct cdma_jfs *to_cdma_jfs(struct cdma_base_jfs *jfs) +{ + return container_of(jfs, struct cdma_jfs, base_jfs); +} + struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg, struct cdma_udata *udata); diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 82dc5ab40cf8..cfdb1869e176 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -11,6 +11,7 @@ #include "cdma_chardev.h" #include #include "cdma_eq.h" +#include "cdma_debugfs.h" #include "cdma_cmd.h" /* Enabling jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. 
*/ @@ -64,6 +65,11 @@ static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev * if (ret) dev_warn(&auxdev->dev, "query eu failed, ret = %d.\n", ret); + ret = cdma_dbg_init(auxdev); + if (ret) + dev_warn(&auxdev->dev, "init cdma debugfs failed, ret = %d.\n", + ret); + return 0; } @@ -108,6 +114,7 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) return; } + cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); cdma_destroy_dev(cdev); diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 5b434ae66bb9..08b24cb0b3fc 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -4,9 +4,10 @@ #ifndef __CDMA_QUEUE_H__ #define __CDMA_QUEUE_H__ +#include + struct cdma_dev; struct cdma_context; -struct queue_cfg; enum cdma_queue_res_type { QUEUE_RES_TP, -- Gitee From 3c338e47bd32fabaa926c4f2f44156a2eaee3115 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 29 Sep 2025 18:44:01 +0800 Subject: [PATCH 090/243] ub: cdma: support RX stop flow function commit 9184f7dd7e6ad431f6d076c1b95e0ab7fad255cb openEuler This patch implements the functionality of stopping RX flow during the UE deregistration process in the CDMA driver and intercepts user-space interfaces during the reset process. Signed-off-by: Zhipeng Lu Signed-off-by: Xinchi Ma Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 14 ++++ drivers/ub/cdma/cdma_api.c | 82 ++++++++++++++++++ drivers/ub/cdma/cdma_chardev.c | 55 +++++++++++- drivers/ub/cdma/cdma_cmd.c | 18 ++++ drivers/ub/cdma/cdma_cmd.h | 3 + drivers/ub/cdma/cdma_context.h | 1 + drivers/ub/cdma/cdma_dev.c | 30 +++++-- drivers/ub/cdma/cdma_dev.h | 2 +- drivers/ub/cdma/cdma_event.c | 47 +++++++---- drivers/ub/cdma/cdma_jfc.c | 8 +- drivers/ub/cdma/cdma_jfs.c | 8 +- drivers/ub/cdma/cdma_main.c | 100 +++++++++++++++++++++- drivers/ub/cdma/cdma_mmap.c | 149 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_mmap.h | 14 ++++ drivers/ub/cdma/cdma_types.h | 16 ++++ drivers/ub/cdma/cdma_uobj.c | 6 +- drivers/ub/cdma/cdma_uobj.h | 2 +- include/ub/cdma/cdma_api.h | 12 +++ 19 files changed, 529 insertions(+), 40 deletions(-) create mode 100644 drivers/ub/cdma/cdma_mmap.c create mode 100644 drivers/ub/cdma/cdma_mmap.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 2ce4eefa2d84..88dc9946a092 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -3,6 +3,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \ - cdma_handle.o cdma_debugfs.o + cdma_handle.o cdma_debugfs.o cdma_mmap.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index e782b0229943..b7d00bcf39ac 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -14,6 +14,10 @@ extern u32 jfc_arm_mode; extern bool cqe_mode; +extern struct list_head g_client_list; +extern struct rw_semaphore g_clients_rwsem; +extern struct rw_semaphore g_device_rwsem; +extern struct mutex g_cdma_reset_mutex; #define CDMA_HW_PAGE_SHIFT 12 #define CDMA_HW_PAGE_SIZE (1 << CDMA_HW_PAGE_SHIFT) @@ -24,6 +28,8 @@ extern bool cqe_mode; #define CDMA_UPI_MASK 0x7FFF +#define DMA_MAX_DEV_NAME 64 + enum cdma_cqe_size { CDMA_64_CQE_SIZE, CDMA_128_CQE_SIZE, @@ -34,6 +40,12 @@ enum cdma_status { CDMA_SUSPEND, }; +enum 
cdma_client_ops { + CDMA_CLIENT_STOP, + CDMA_CLIENT_REMOVE, + CDMA_CLIENT_ADD, +}; + enum { CDMA_CAP_FEATURE_AR = BIT(0), CDMA_CAP_FEATURE_JFC_INLINE = BIT(4), @@ -195,6 +207,8 @@ struct cdma_dev { struct mutex file_mutex; struct list_head file_list; struct page *arm_db_page; + atomic_t cmdcnt; + struct completion cmddone; }; struct cdma_jfs_event { diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 8a30d20a1a09..cc3aa6ce4921 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -4,6 +4,8 @@ #define pr_fmt(fmt) "CDMA: " fmt #define dev_fmt pr_fmt +#include +#include #include "cdma_segment.h" #include "cdma_dev.h" #include "cdma_cmd.h" @@ -14,6 +16,10 @@ #include "cdma_handle.h" #include +LIST_HEAD(g_client_list); +DECLARE_RWSEM(g_clients_rwsem); +DECLARE_RWSEM(g_device_rwsem); + struct dma_device *dma_get_device_list(u32 *num_devices) { struct cdma_device_attr *attr; @@ -632,3 +638,79 @@ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, return cdma_poll_jfc(cdma_queue->jfc, cr_cnt, cr); } EXPORT_SYMBOL_GPL(dma_poll_queue); + +int dma_register_client(struct dma_client *client) +{ + struct cdma_dev *cdev = NULL; + struct xarray *cdma_devs_tbl; + unsigned long index = 0; + u32 devs_num; + + if (client == NULL || client->client_name == NULL || + client->add == NULL || client->remove == NULL || + client->stop == NULL) { + pr_err("invalid parameter.\n"); + return -EINVAL; + } + + if (strnlen(client->client_name, DMA_MAX_DEV_NAME) >= DMA_MAX_DEV_NAME) { + pr_err("invalid parameter, client name.\n"); + return -EINVAL; + } + + down_write(&g_device_rwsem); + + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + + xa_for_each(cdma_devs_tbl, index, cdev) { + if (client->add && client->add(cdev->eid)) + pr_info("dma client: %s add failed.\n", + client->client_name); + } + down_write(&g_clients_rwsem); + list_add_tail(&client->list_node, &g_client_list); + up_write(&g_clients_rwsem); + up_write(&g_device_rwsem); + + pr_info("dma client: %s register success.\n", client->client_name); + return 0; +} +EXPORT_SYMBOL_GPL(dma_register_client); + +void dma_unregister_client(struct dma_client *client) +{ + struct cdma_dev *cdev = NULL; + struct xarray *cdma_devs_tbl; + unsigned long index = 0; + u32 devs_num; + + if (client == NULL || client->client_name == NULL || + client->add == NULL || client->remove == NULL || + client->stop == NULL) { + pr_err("Invalid parameter.\n"); + return; + } + + if (strnlen(client->client_name, DMA_MAX_DEV_NAME) >= DMA_MAX_DEV_NAME) { + pr_err("invalid parameter, client name.\n"); + return; + } + + down_write(&g_device_rwsem); + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + + xa_for_each(cdma_devs_tbl, index, cdev) { + if (client->stop && client->remove) { + client->stop(cdev->eid); + client->remove(cdev->eid); + } + } + + down_write(&g_clients_rwsem); + list_del(&client->list_node); + up_write(&g_clients_rwsem); + up_write(&g_device_rwsem); + + pr_info("dma client: %s unregister success.\n", client->client_name); +} +EXPORT_SYMBOL_GPL(dma_unregister_client); diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index a1a289eb0e91..3614609d683e 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -6,6 +6,7 @@ #include #include +#include "cdma_cmd.h" #include "cdma_ioctl.h" #include "cdma_context.h" #include "cdma_chardev.h" @@ -13,6 +14,7 @@ #include "cdma_types.h" #include "cdma_uobj.h" #include "cdma.h" +#include "cdma_mmap.h" #define CDMA_DEVICE_NAME "cdma/dev" 
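(A minimal sketch of a kernel-space consumer of the dma_client API added
above; the example_* names here are hypothetical, not part of the patch:

	static int example_add(u32 eid) { return 0; }
	static void example_stop(u32 eid) { }
	static void example_remove(u32 eid) { }

	static struct dma_client example_client = {
		.client_name	= "example",
		.add		= example_add,
		.stop		= example_stop,
		.remove		= example_remove,
	};

	ret = dma_register_client(&example_client);
	...
	dma_unregister_client(&example_client);

All three callbacks must be non-NULL and client_name must be shorter than
DMA_MAX_DEV_NAME, otherwise registration fails with -EINVAL.)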
@@ -65,18 +67,27 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct cdma_ioctl_hdr hdr = { 0 }; int ret; + if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + pr_info("ioctl cdev is invalid.\n"); + return -ENODEV; + } + cdma_cmd_inc(cfile->cdev); + if (cmd == CDMA_SYNC) { ret = copy_from_user(&hdr, (void *)arg, sizeof(hdr)); if (ret || hdr.args_len > CDMA_MAX_CMD_SIZE) { pr_err("copy user ret = %d, input parameter len = %u.\n", ret, hdr.args_len); + cdma_cmd_dec(cfile->cdev); return -EINVAL; } ret = cdma_cmd_parse(cfile, &hdr); + cdma_cmd_dec(cfile->cdev); return ret; } pr_err("invalid ioctl command, command = %u.\n", cmd); + cdma_cmd_dec(cfile->cdev); return -ENOIOCTLCMD; } @@ -115,6 +126,11 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * u32 jfs_id; u32 cmd; + if (cdev->status == CDMA_SUSPEND) { + dev_warn(cdev->dev, "cdev is resetting.\n"); + return -EBUSY; + } + db_addr = cdev->db_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -158,21 +174,37 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * static int cdma_mmap(struct file *file, struct vm_area_struct *vma) { struct cdma_file *cfile = (struct cdma_file *)file->private_data; + struct cdma_umap_priv *priv; int ret; + if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + pr_info("mmap cdev is invalid.\n"); + return -ENODEV; + } + if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { pr_err("mmap failed, expect vm area size to be an integer multiple of page size.\n"); return -EINVAL; } + priv = kzalloc(sizeof(struct cdma_umap_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + vma->vm_ops = cdma_get_umap_ops(); + vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK | VM_IO); + mutex_lock(&cfile->ctx_mutex); ret = cdma_remap_pfn_range(cfile, vma); if (ret) { mutex_unlock(&cfile->ctx_mutex); + kfree(priv); return ret; } mutex_unlock(&cfile->ctx_mutex); + cdma_umap_priv_init(priv, vma); + return 0; } @@ -188,7 +220,7 @@ static void cdma_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm) mn_notifier->mm = NULL; mutex_lock(&cfile->ctx_mutex); - cdma_cleanup_context_uobj(cfile); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_CLOSE); if (cfile->uctx) cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; @@ -235,6 +267,11 @@ static int cdma_open(struct inode *inode, struct file *file) chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); + if (cdev->status == CDMA_SUSPEND) { + dev_warn(cdev->dev, "cdev is resetting.\n"); + return -EBUSY; + } + cfile = kzalloc(sizeof(struct cdma_file), GFP_KERNEL); if (!cfile) return -ENOMEM; @@ -254,6 +291,8 @@ static int cdma_open(struct inode *inode, struct file *file) file->private_data = cfile; mutex_init(&cfile->ctx_mutex); list_add_tail(&cfile->list, &cdev->file_list); + mutex_init(&cfile->umap_mutex); + INIT_LIST_HEAD(&cfile->umaps_list); nonseekable_open(inode, file); mutex_unlock(&cdev->file_mutex); @@ -265,19 +304,28 @@ static int cdma_close(struct inode *inode, struct file *file) struct cdma_file *cfile = (struct cdma_file *)file->private_data; struct cdma_dev *cdev; + mutex_lock(&g_cdma_reset_mutex); + cdev = cfile->cdev; + if (!cdev) { + mutex_unlock(&g_cdma_reset_mutex); + kref_put(&cfile->ref, cdma_release_file); + inode->i_cdev = NULL; + return 0; + } mutex_lock(&cdev->file_mutex); list_del(&cfile->list); mutex_unlock(&cdev->file_mutex); 
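+	/*
+	 * cfile->cdev can be cleared by a concurrent driver remove
+	 * (cdma_free_cfile_uobj()); g_cdma_reset_mutex keeps the
+	 * context teardown below from racing with it.
+	 */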
mutex_lock(&cfile->ctx_mutex); - cdma_cleanup_context_uobj(cfile); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_CLOSE); if (cfile->uctx) cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; mutex_unlock(&cfile->ctx_mutex); + mutex_unlock(&g_cdma_reset_mutex); kref_put(&cfile->ref, cdma_release_file); pr_debug("cdma close success.\n"); @@ -361,7 +409,10 @@ void cdma_release_file(struct kref *ref) { struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + if (cfile->fault_page) + __free_pages(cfile->fault_page, 0); cdma_unregister_mmu(cfile); + mutex_destroy(&cfile->umap_mutex); mutex_destroy(&cfile->ctx_mutex); idr_destroy(&cfile->idr); kfree(cfile); diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c index 74e6b32a58c7..c8bf01d930ad 100644 --- a/drivers/ub/cdma/cdma_cmd.c +++ b/drivers/ub/cdma/cdma_cmd.c @@ -214,3 +214,21 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev) return 0; } + +void cdma_cmd_inc(struct cdma_dev *cdev) +{ + atomic_inc(&cdev->cmdcnt); +} + +void cdma_cmd_dec(struct cdma_dev *cdev) +{ + if (atomic_dec_and_test(&cdev->cmdcnt)) + complete(&cdev->cmddone); +} + +void cdma_cmd_flush(struct cdma_dev *cdev) +{ + cdma_cmd_dec(cdev); + pr_info("cmd flush cmdcnt is %d\n", atomic_read(&cdev->cmdcnt)); + wait_for_completion(&cdev->cmddone); +} diff --git a/drivers/ub/cdma/cdma_cmd.h b/drivers/ub/cdma/cdma_cmd.h index 550f60640b36..f85331c8c51b 100644 --- a/drivers/ub/cdma/cdma_cmd.h +++ b/drivers/ub/cdma/cdma_cmd.h @@ -76,4 +76,7 @@ struct eu_query_out { int cdma_init_dev_caps(struct cdma_dev *cdev); int cdma_ctrlq_query_eu(struct cdma_dev *cdev); +void cdma_cmd_inc(struct cdma_dev *cdev); +void cdma_cmd_dec(struct cdma_dev *cdev); +void cdma_cmd_flush(struct cdma_dev *cdev); #endif diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 47736a281257..0eb40763c29d 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -24,6 +24,7 @@ struct cdma_context { atomic_t ref_cnt; struct list_head queue_list; struct list_head seg_list; + bool invalid; }; struct cdma_ctx_res { diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index f08e60716edc..2b69a44b346e 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -55,15 +55,18 @@ static int cdma_add_device_to_list(struct cdma_dev *cdev) return -EINVAL; } + down_write(&g_device_rwsem); ret = xa_err(xa_store(&cdma_devs_tbl, adev->id, cdev, GFP_KERNEL)); if (ret) { dev_err(cdev->dev, "store cdma device to table failed, adev id = %u.\n", adev->id); + up_write(&g_device_rwsem); return ret; } atomic_inc(&cdma_devs_num); + up_write(&g_device_rwsem); return 0; } @@ -77,8 +80,10 @@ static void cdma_del_device_from_list(struct cdma_dev *cdev) return; } + down_write(&g_device_rwsem); atomic_dec(&cdma_devs_num); xa_erase(&cdma_devs_tbl, adev->id); + up_write(&g_device_rwsem); } static void cdma_tbl_init(struct cdma_table *table, u32 max, u32 min) @@ -393,6 +398,8 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) idr_init(&cdev->ctx_idr); spin_lock_init(&cdev->ctx_lock); + atomic_set(&cdev->cmdcnt, 1); + init_completion(&cdev->cmddone); dev_dbg(&adev->dev, "cdma.%u init succeeded.\n", adev->id); @@ -411,7 +418,7 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) return NULL; } -void cdma_destroy_dev(struct cdma_dev *cdev) +void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove) { struct cdma_context *tmp; int id; @@ -421,21 +428,26 @@ void cdma_destroy_dev(struct cdma_dev *cdev) 
ubase_virt_unregister(cdev->adev); - cdma_release_table_res(cdev); + if (is_remove) { + cdma_release_table_res(cdev); - idr_for_each_entry(&cdev->ctx_idr, tmp, id) - cdma_free_context(cdev, tmp); - idr_destroy(&cdev->ctx_idr); + idr_for_each_entry(&cdev->ctx_idr, tmp, id) + cdma_free_context(cdev, tmp); + idr_destroy(&cdev->ctx_idr); + } cdma_destroy_arm_db_page(cdev); ubase_ctrlq_unregister_crq_event(cdev->adev, UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, CDMA_CTRLQ_EU_UPDATE); - cdma_free_dev_tid(cdev); - cdma_del_device_from_list(cdev); - cdma_uninit_dev_param(cdev); - kfree(cdev); + if (is_remove) { + cdma_free_dev_tid(cdev); + + cdma_del_device_from_list(cdev); + cdma_uninit_dev_param(cdev); + kfree(cdev); + } } bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 85d41cbe0773..d433218934f1 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -24,7 +24,7 @@ enum cdma_ctrlq_eu_op { }; struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev); -void cdma_destroy_dev(struct cdma_dev *cdev); +void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove); struct cdma_dev *get_cdma_dev_by_eid(u32 eid); struct xarray *get_cdma_dev_tbl(u32 *devices_num); bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index f2c51d4833ee..057bf2daefc3 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -520,28 +520,40 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, return -EINVAL; } - INIT_LIST_HEAD(&event_list); - ret = cdma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, - &event_cnt, &event_list); - if (ret < 0) { - pr_err("wait event failed, ret = %d.\n", ret); - return ret; - } - event = list_first_entry(&event_list, struct cdma_jfe_event, node); - if (event == NULL) - return -EIO; - - cdma_set_async_event(&async_event, event); - list_del(&event->node); - kfree(event); - - if (event_cnt > 0) { + if (!jfae->cfile->cdev || jfae->cfile->cdev->status == CDMA_SUSPEND) { + pr_info("wait dev invalid event success.\n"); + async_event.event_data = 0; + async_event.event_type = CDMA_EVENT_DEV_INVALID; ret = (int)copy_to_user((void *)arg, &async_event, sizeof(async_event)); if (ret) { pr_err("dev copy to user failed, ret = %d\n", ret); return -EFAULT; } + } else { + INIT_LIST_HEAD(&event_list); + ret = cdma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, + &event_cnt, &event_list); + if (ret < 0) { + pr_err("wait event failed, ret = %d.\n", ret); + return ret; + } + event = list_first_entry(&event_list, struct cdma_jfe_event, node); + if (event == NULL) + return -EIO; + + cdma_set_async_event(&async_event, event); + list_del(&event->node); + kfree(event); + + if (event_cnt > 0) { + ret = (int)copy_to_user((void *)arg, &async_event, + sizeof(async_event)); + if (ret) { + pr_err("dev copy to user failed, ret = %d\n", ret); + return -EFAULT; + } + } } return 0; @@ -554,6 +566,9 @@ static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait if (!jfae || !jfae->cfile || !jfae->cfile->cdev) return POLLERR; + if (jfae->cfile->cdev->status == CDMA_SUSPEND) + return POLLIN | POLLRDNORM; + return cdma_jfe_poll(&jfae->jfe, filp, wait); } diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index cd92f90461ff..0b3611c3d27d 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ 
-555,9 +555,11 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, return -EINVAL; } - ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); - if (ret) - dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + if (!(jfc->base.ctx && jfc->base.ctx->invalid)) { + ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); + if (ret) + dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + } if (refcount_dec_and_test(&jfc->event_refcount)) complete(&jfc->event_comp); diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index cbb47a7f56db..8a62e2a2fd6b 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -538,9 +538,11 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return -EINVAL; } - ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); - if (ret) - dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + if (!(jfs->base_jfs.ctx && jfs->base_jfs.ctx->invalid)) { + ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); + if (ret) + dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + } if (refcount_dec_and_test(&jfs->ae_ref_cnt)) complete(&jfs->ae_comp); diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index cfdb1869e176..817bcd6232e3 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -13,6 +13,14 @@ #include "cdma_eq.h" #include "cdma_debugfs.h" #include "cdma_cmd.h" +#include "cdma_types.h" +#include "cdma_mmap.h" +#include "cdma_context.h" +#include "cdma_uobj.h" +#include "cdma_event.h" + +static bool is_rmmod; +DEFINE_MUTEX(g_cdma_reset_mutex); /* Enabling jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. */ uint jfc_arm_mode; @@ -52,6 +60,47 @@ static inline void cdma_unregister_event(struct auxiliary_device *adev) cdma_unreg_ae_event(adev); } +static void cdma_reset_unmap_vma_pages(struct cdma_dev *cdev, bool is_reset) +{ + struct cdma_file *cfile; + + mutex_lock(&cdev->file_mutex); + list_for_each_entry(cfile, &cdev->file_list, list) { + mutex_lock(&cfile->ctx_mutex); + cdma_unmap_vma_pages(cfile); + if (is_reset && cfile->uctx != NULL) + cfile->uctx->invalid = true; + mutex_unlock(&cfile->ctx_mutex); + } + mutex_unlock(&cdev->file_mutex); +} + +static void cdma_client_handler(struct cdma_dev *cdev, + enum cdma_client_ops client_ops) +{ + struct dma_client *client; + + down_write(&g_clients_rwsem); + list_for_each_entry(client, &g_client_list, list_node) { + switch (client_ops) { + case CDMA_CLIENT_STOP: + if (client->stop) + client->stop(cdev->eid); + break; + case CDMA_CLIENT_REMOVE: + if (client->remove) + client->remove(cdev->eid); + break; + case CDMA_CLIENT_ADD: + if (client->add && client->add(cdev->eid)) + dev_warn(&cdev->adev->dev, "add eid:0x%x, cdev for client:%s failed.\n", + cdev->eid, client->client_name); + break; + } + } + up_write(&g_clients_rwsem); +} + static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) { int ret; @@ -73,9 +122,33 @@ static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev * return 0; } +static void cdma_free_cfile_uobj(struct cdma_dev *cdev) +{ + struct cdma_file *cfile, *next_cfile; + struct cdma_jfae *jfae; + + mutex_lock(&cdev->file_mutex); + list_for_each_entry_safe(cfile, next_cfile, &cdev->file_list, list) { + list_del(&cfile->list); + mutex_lock(&cfile->ctx_mutex); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_DRIVER_REMOVE); + cfile->cdev = NULL; + if (cfile->uctx) { + jfae = (struct cdma_jfae *)cfile->uctx->jfae; + if (jfae) + 
wake_up_interruptible(&jfae->jfe.poll_wait); + cdma_cleanup_context_res(cfile->uctx); + } + cfile->uctx = NULL; + mutex_unlock(&cfile->ctx_mutex); + } + mutex_unlock(&cdev->file_mutex); +} + static int cdma_init_dev(struct auxiliary_device *auxdev) { struct cdma_dev *cdev; + bool is_remove = true; int ret; dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", @@ -87,37 +160,56 @@ static int cdma_init_dev(struct auxiliary_device *auxdev) ret = cdma_create_chardev(cdev); if (ret) { - cdma_destroy_dev(cdev); + cdma_destroy_dev(cdev, is_remove); return ret; } ret = cdma_init_dev_info(auxdev, cdev); if (ret) { cdma_destroy_chardev(cdev); - cdma_destroy_dev(cdev); + cdma_destroy_dev(cdev, is_remove); return ret; } + cdma_client_handler(cdev, CDMA_CLIENT_ADD); return ret; } static void cdma_uninit_dev(struct auxiliary_device *auxdev) { struct cdma_dev *cdev; + int ret; dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", __func__, auxdev->name, auxdev->id); + mutex_lock(&g_cdma_reset_mutex); cdev = dev_get_drvdata(&auxdev->dev); if (!cdev) { dev_err(&auxdev->dev, "get drvdata from ubase failed.\n"); + ubase_reset_unregister(auxdev); + mutex_unlock(&g_cdma_reset_mutex); return; } + cdev->status = CDMA_SUSPEND; + cdma_cmd_flush(cdev); + cdma_client_handler(cdev, CDMA_CLIENT_STOP); + cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); + cdma_reset_unmap_vma_pages(cdev, false); + + if (!is_rmmod) { + ret = ubase_deactivate_dev(auxdev); + dev_info(&auxdev->dev, "ubase deactivate dev ret = %d.\n", ret); + } + + ubase_reset_unregister(auxdev); cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); - cdma_destroy_dev(cdev); + cdma_free_cfile_uobj(cdev); + cdma_destroy_dev(cdev, true); + mutex_unlock(&g_cdma_reset_mutex); } static int cdma_probe(struct auxiliary_device *auxdev, @@ -135,6 +227,7 @@ static int cdma_probe(struct auxiliary_device *auxdev, static void cdma_remove(struct auxiliary_device *auxdev) { cdma_uninit_dev(auxdev); + pr_info("cdma device remove success.\n"); } static const struct auxiliary_device_id cdma_id_table[] = { @@ -178,6 +271,7 @@ static int __init cdma_init(void) static void __exit cdma_exit(void) { + is_rmmod = true; auxiliary_driver_unregister(&cdma_driver); class_destroy(cdma_cdev_class); } diff --git a/drivers/ub/cdma/cdma_mmap.c b/drivers/ub/cdma/cdma_mmap.c new file mode 100644 index 000000000000..eaef6a9a4152 --- /dev/null +++ b/drivers/ub/cdma/cdma_mmap.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define pr_fmt(fmt) "CDMA: " fmt + +#include +#include +#include "cdma_mmap.h" + +void cdma_umap_priv_init(struct cdma_umap_priv *priv, + struct vm_area_struct *vma) +{ + struct cdma_file *cfile = (struct cdma_file *)vma->vm_file->private_data; + + priv->vma = vma; + vma->vm_private_data = priv; + + mutex_lock(&cfile->umap_mutex); + list_add(&priv->node, &cfile->umaps_list); + mutex_unlock(&cfile->umap_mutex); +} + +/* thanks to drivers/infiniband/core/ib_core_uverbs.c */ +void cdma_unmap_vma_pages(struct cdma_file *cfile) +{ + struct cdma_umap_priv *priv, *next_priv; + struct mm_struct *mm = NULL; + struct vm_area_struct *vma; + int ret; + + while (1) { + mm = NULL; + mutex_lock(&cfile->umap_mutex); + list_for_each_entry_safe(priv, next_priv, &cfile->umaps_list, node) { + mm = priv->vma->vm_mm; + ret = mmget_not_zero(mm); + if (!ret) { + list_del_init(&priv->node); + mm = NULL; + continue; + } + break; + } + mutex_unlock(&cfile->umap_mutex); + if (!mm) + return; + + mutex_lock(&cfile->umap_mutex); + list_for_each_entry_safe(priv, next_priv, &cfile->umaps_list, node) { + vma = priv->vma; + if (vma->vm_mm != mm) + continue; + list_del_init(&priv->node); + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + } + mutex_unlock(&cfile->umap_mutex); + + mmput(mm); + } +} + +static void cdma_umap_open(struct vm_area_struct *vma) +{ + struct cdma_umap_priv *priv; + + priv = kzalloc(sizeof(struct cdma_umap_priv), GFP_KERNEL); + if (!priv) + goto out_zap; + + cdma_umap_priv_init(priv, vma); + + return; + +out_zap: + vma->vm_private_data = NULL; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void cdma_umap_close(struct vm_area_struct *vma) +{ + struct cdma_umap_priv *priv = (struct cdma_umap_priv *)vma->vm_private_data; + struct cdma_file *cfile = (struct cdma_file *)vma->vm_file->private_data; + + if (!priv) + return; + + mutex_lock(&cfile->umap_mutex); + list_del(&priv->node); + mutex_unlock(&cfile->umap_mutex); + kfree(priv); + vma->vm_private_data = NULL; + + pr_info("cdma umap close success.\n"); +} + +static vm_fault_t cdma_umap_fault(struct vm_fault *vmf) +{ + struct cdma_umap_priv *priv = (struct cdma_umap_priv *)vmf->vma->vm_private_data; + struct cdma_file *cfile = (struct cdma_file *)vmf->vma->vm_file->private_data; + vm_fault_t ret = 0; + + if (!priv) + return VM_FAULT_SIGBUS; + + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { + vmf->page = ZERO_PAGE(0); + get_page(vmf->page); + return 0; + } + + mutex_lock(&cfile->umap_mutex); + if (!cfile->fault_page) + cfile->fault_page = alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); + + if (cfile->fault_page) { + vmf->page = cfile->fault_page; + get_page(vmf->page); + } else { + ret = VM_FAULT_SIGBUS; + } + mutex_unlock(&cfile->umap_mutex); + + return ret; +} + +static int cdma_umap_remap(struct vm_area_struct *vma) +{ + pr_err("cdma umap remap is not permitted.\n"); + return -EINVAL; +} + +static int cdma_umap_can_split(struct vm_area_struct *vma, unsigned long addr) +{ + pr_err("cdma umap split is not permitted.\n"); + return -EINVAL; +} + +static const struct vm_operations_struct g_cdma_umap_ops = { + .open = cdma_umap_open, + .close = cdma_umap_close, + .fault = cdma_umap_fault, + .mremap = cdma_umap_remap, + .may_split = cdma_umap_can_split, +}; + +const struct vm_operations_struct *cdma_get_umap_ops(void) +{ + return (const struct vm_operations_struct *)&g_cdma_umap_ops; +} diff --git a/drivers/ub/cdma/cdma_mmap.h b/drivers/ub/cdma/cdma_mmap.h new file mode 100644 index 
000000000000..0dd6c609a85e --- /dev/null +++ b/drivers/ub/cdma/cdma_mmap.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_MMAP_H__ +#define __CDMA_MMAP_H__ + +#include +#include "cdma_types.h" + +void cdma_unmap_vma_pages(struct cdma_file *cfile); +const struct vm_operations_struct *cdma_get_umap_ops(void); +void cdma_umap_priv_init(struct cdma_umap_priv *priv, struct vm_area_struct *vma); + +#endif /* CDMA_MMAP_H */ diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 0b861c891558..947c360ba2ef 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -12,6 +12,14 @@ enum cdma_event_type { CDMA_EVENT_JFC_ERR, CDMA_EVENT_JFS_ERR, + CDMA_EVENT_DEV_INVALID, +}; + +enum cdma_remove_reason { + /* Context deletion. This call should delete the actual object itself */ + CDMA_REMOVE_CLOSE, + /* Driver is being hot-unplugged. This call should delete the actual object itself */ + CDMA_REMOVE_DRIVER_REMOVE, }; struct cdma_ucontext { @@ -142,8 +150,16 @@ struct cdma_file { struct cdma_context *uctx; struct idr idr; spinlock_t idr_lock; + struct mutex umap_mutex; + struct list_head umaps_list; + struct page *fault_page; struct cdma_mn mn_notifier; struct kref ref; }; +struct cdma_umap_priv { + struct vm_area_struct *vma; + struct list_head node; +}; + #endif diff --git a/drivers/ub/cdma/cdma_uobj.c b/drivers/ub/cdma/cdma_uobj.c index 3e6e1f9ad1b6..92fe4da441ea 100644 --- a/drivers/ub/cdma/cdma_uobj.c +++ b/drivers/ub/cdma/cdma_uobj.c @@ -2,6 +2,7 @@ /* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ #include +#include "cdma_mmap.h" #include "cdma_uobj.h" #include "cdma_chardev.h" @@ -104,11 +105,14 @@ struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, return uobj; } -void cdma_cleanup_context_uobj(struct cdma_file *cfile) +void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why) { struct cdma_uobj *uobj; int id; + if (why == CDMA_REMOVE_DRIVER_REMOVE) + cdma_unmap_vma_pages(cfile); + spin_lock(&cfile->idr_lock); idr_for_each_entry(&cfile->idr, uobj, id) cdma_uobj_remove(uobj); diff --git a/drivers/ub/cdma/cdma_uobj.h b/drivers/ub/cdma/cdma_uobj.h index 505a66911960..f343559a33ce 100644 --- a/drivers/ub/cdma/cdma_uobj.h +++ b/drivers/ub/cdma/cdma_uobj.h @@ -28,7 +28,7 @@ struct cdma_uobj *cdma_uobj_create(struct cdma_file *cfile, void cdma_uobj_delete(struct cdma_uobj *uobj); struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, enum UOBJ_TYPE type); -void cdma_cleanup_context_uobj(struct cdma_file *cfile); +void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why); void cdma_close_uobj_fd(struct cdma_file *cfile); #endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 6809ba074c05..b90a64f128b9 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -87,6 +87,14 @@ struct dma_notify_data { u64 notify_data; }; +struct dma_client { + struct list_head list_node; + char *client_name; + int (*add)(u32 eid); + void (*remove)(u32 eid); + void (*stop)(u32 eid); +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); @@ -132,4 +140,8 @@ enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); +int 
dma_register_client(struct dma_client *client); + +void dma_unregister_client(struct dma_client *client); + #endif -- Gitee From c784897855c32cc8b540be9eaf341d3bf46128f3 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 29 Sep 2025 18:47:20 +0800 Subject: [PATCH 091/243] ub: cdma: support reset function commit 710a287ef643833af2d9ac6a4892bb8829a77983 openEuler This patch implements the RX stop flow function during the driver unload or UE reset process in the CDMA driver, the RX resume flow function during the UE reset process, and the process of notifying the control plane to delete the corresponding UE connection information during the UE reset process. Signed-off-by: Zhipeng Lu Signed-off-by: Xinchi Ma Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_main.c | 104 ++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 817bcd6232e3..8ec5849ade39 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -101,6 +101,48 @@ static void cdma_client_handler(struct cdma_dev *cdev, up_write(&g_clients_rwsem); } +static void cdma_reset_down(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev || cdev->status == CDMA_SUSPEND) { + dev_warn(&adev->dev, "cdma device is not ready.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + cdev->status = CDMA_SUSPEND; + cdma_cmd_flush(cdev); + cdma_reset_unmap_vma_pages(cdev, true); + cdma_client_handler(cdev, CDMA_CLIENT_STOP); + cdma_unregister_event(adev); + cdma_dbg_uninit(adev); + mutex_unlock(&g_cdma_reset_mutex); +} + +static void cdma_reset_uninit(struct auxiliary_device *adev) +{ + enum ubase_reset_stage stage; + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev) { + dev_info(&adev->dev, "cdma device is not exist.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + stage = ubase_get_reset_stage(adev); + if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == CDMA_SUSPEND) { + cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); + cdma_destroy_dev(cdev, is_rmmod); + } + mutex_unlock(&g_cdma_reset_mutex); +} + static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) { int ret; @@ -212,6 +254,66 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) mutex_unlock(&g_cdma_reset_mutex); } +static void cdma_reset_init(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev) { + dev_err(&adev->dev, "cdma device is not exist.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + if (cdma_register_crq_event(adev)) { + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + if (cdma_create_arm_db_page(cdev)) + goto unregister_crq; + + if (cdma_init_dev_info(adev, cdev)) + goto destory_arm_db_page; + + idr_init(&cdev->ctx_idr); + spin_lock_init(&cdev->ctx_lock); + atomic_set(&cdev->cmdcnt, 1); + cdev->status = CDMA_NORMAL; + cdma_client_handler(cdev, CDMA_CLIENT_ADD); + mutex_unlock(&g_cdma_reset_mutex); + return; + +destory_arm_db_page: + cdma_destroy_arm_db_page(cdev); +unregister_crq: + cdma_unregister_crq_event(adev); + mutex_unlock(&g_cdma_reset_mutex); +} + +static void cdma_reset_handler(struct auxiliary_device *adev, + enum ubase_reset_stage stage) +{ + if (!adev) + return; + + switch (stage) { + case UBASE_RESET_STAGE_DOWN: + cdma_reset_down(adev); + break; 
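+	/*
+	 * UNINIT runs after the hardware has been reset: the device is
+	 * still marked CDMA_SUSPEND by the DOWN stage, so only notify
+	 * clients to remove their state and release software resources.
+	 */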
+ case UBASE_RESET_STAGE_UNINIT: + cdma_reset_uninit(adev); + break; + case UBASE_RESET_STAGE_INIT: + if (!is_rmmod) + cdma_reset_init(adev); + break; + default: + break; + } +} + static int cdma_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { @@ -221,6 +323,8 @@ static int cdma_probe(struct auxiliary_device *auxdev, if (ret) return ret; + ubase_reset_register(auxdev, cdma_reset_handler); + return 0; } -- Gitee From 5cd589774f817cc21cb0c6bb4d4956bca309bce4 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Tue, 11 Nov 2025 15:00:50 +0800 Subject: [PATCH 092/243] ub: cdma: support for cdma kernelspace north-south compatibility requirements commit 34c67ed8f4c1070bd35c18026a36845b26f89b55 openEuler This patch adds north-south compatibility for CDMA. Signed-off-by: Zhipeng Lu Signed-off-by: Lin Yuan Signed-off-by: zhaolichang <943677312@qq.com> --- include/uapi/ub/cdma/cdma_abi.h | 62 +++++++++++++++++++++++++++++++++ include/ub/cdma/cdma_api.h | 16 +++++++++ 2 files changed, 78 insertions(+) diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index b32954f28636..681854ed9765 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -142,6 +142,8 @@ struct cdma_cmd_create_jfs_args { __u32 tpn; __u64 dma_jfs; /* dma jfs pointer */ __u32 trans_mode; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 id; @@ -149,6 +151,8 @@ struct cdma_cmd_create_jfs_args { __u8 max_sge; __u8 max_rsge; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; struct cdma_cmd_udrv_priv udata; }; @@ -163,8 +167,12 @@ struct cdma_cmd_delete_jfs_args { __u32 jfs_id; __u64 handle; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -178,10 +186,14 @@ struct cdma_cmd_create_ctp_args { __u32 seid; __u32 deid; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 tpn; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -190,15 +202,25 @@ struct cdma_cmd_delete_ctp_args { __u32 tpn; __u64 handle; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; struct cdma_cmd_create_jfce_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { int fd; int id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -209,11 +231,15 @@ struct cdma_cmd_create_jfc_args { int jfce_id; __u32 ceqn; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 id; __u32 depth; __u64 handle; /* handle of the allocated jfc obj in kernel */ + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; struct cdma_cmd_udrv_priv udata; }; @@ -223,10 +249,14 @@ struct cdma_cmd_delete_jfc_args { __u32 jfcn; __u64 handle; /* handle of jfc */ __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 comp_events_reported; __u32 async_events_reported; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -234,16 +264,26 @@ struct cdma_cmd_register_seg_args { struct { __u64 addr; __u64 len; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; struct cdma_cmd_unregister_seg_args { struct { __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } out; }; struct dev_eid { @@ -285,16 +325,28 @@ struct cdma_device_attr { }; struct cdma_cmd_query_device_attr_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { struct cdma_device_attr attr; + __u32 rsv_bitmap; + 
__u32 rsvd[4]; } out; }; struct cdma_create_context_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { __u8 cqe_size; __u8 dwqe_enable; int async_fd; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -324,10 +376,14 @@ struct cdma_cmd_create_queue_args { __u8 priority; __u64 user_ctx; __u32 trans_mode; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { int queue_id; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -335,7 +391,13 @@ struct cdma_cmd_delete_queue_args { struct { __u32 queue_id; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } out; }; struct cdma_cmd_jfce_wait_args { diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index b90a64f128b9..61449ab9ee26 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -11,6 +11,8 @@ struct dma_device { struct cdma_device_attr attr; atomic_t ref_cnt; void *private_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; enum dma_cr_opcode { @@ -40,6 +42,8 @@ struct dma_cr { u32 local_id; u32 remote_id; u32 tpn; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct queue_cfg { @@ -49,6 +53,8 @@ struct queue_cfg { u32 dcna; struct dev_eid rmt_eid; u32 trans_mode; + u32 rsv_bitmap; + u32 rsvd[6]; }; struct dma_seg { @@ -58,6 +64,8 @@ struct dma_seg { u32 tid; /* data valid only in bit 0-19 */ u32 token_value; bool token_value_valid; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_seg_cfg { @@ -65,6 +73,8 @@ struct dma_seg_cfg { u64 len; u32 token_value; bool token_value_valid; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_context { @@ -80,11 +90,15 @@ enum dma_status { struct dma_cas_data { u64 compare_data; u64 swap_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_client { @@ -93,6 +107,8 @@ struct dma_client { int (*add)(u32 eid); void (*remove)(u32 eid); void (*stop)(u32 eid); + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_device *dma_get_device_list(u32 *num_devices); -- Gitee From ab89ece0400a9ac97b83310efb82c8241cea5a8c Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 20 Nov 2025 10:20:15 +0800 Subject: [PATCH 093/243] ub: ubase: Fix priqos infomaton interface functions to query and check sl_bitmap commit a2e12d759c3b3669b6318ab54457559d2cd1679e openEuler Previously, in the function 'ubase_get_priqos_info', the input parameter 'sl_priqos->sl_bitmap' was assumed to be provided by the caller. However, the caller doesn't know which SLs can be used. Therefore, this patch sets 'sl_priqos->sl_bitmap' to the union of SLs supported by UNIC and UDMA. And, in the function 'ubase_set_priqos_info', a check process for the input parameter 'sl_priqos->sl_bitmap' is added to ensure that the corresponding SLs in sl_bitmap are those supported by UNIC or UDMA. 
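With this patch the caller-side contract becomes (illustrative sketch
only; 'unsupported_sl' stands for any SL outside the reported capability
set, and the real implementation is in the hunks below):

	struct ubase_sl_priqos priqos = { 0 };

	/* query: the driver fills sl_bitmap with the union of the SLs
	 * supported by UNIC and UDMA, so callers need not guess.
	 */
	ret = ubase_get_priqos_info(dev, &priqos);

	/* set: any bit outside that union is now rejected. */
	priqos.sl_bitmap = BIT(unsupported_sl);
	ret = ubase_set_priqos_info(dev, &priqos);	/* -EINVAL */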
Fixes: b21a0a4a5d45 ("ub: ubase: Added QoS and traffic management debugging features") Signed-off-by: Zhang Lei Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_dev.h | 1 - drivers/ub/ubase/ubase_qos_hw.c | 36 ++++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index d32d9fb98377..c8ccd5bd107a 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -429,7 +429,6 @@ static inline u32 ubase_ta_timer_align_size(struct ubase_dev *udev) static inline bool ubase_mbx_ue_id_is_valid(u16 mbx_ue_id, struct ubase_dev *udev) { - if (!mbx_ue_id || (mbx_ue_id > udev->caps.dev_caps.ue_num - 1)) return false; diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index 5a5881f79547..bfd5c4e0f5c1 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -409,6 +409,36 @@ int ubase_query_fst_fvt_rqmt(struct ubase_dev *udev, return ret; } +static unsigned long ubase_get_sl_bitmap(struct ubase_dev *udev) +{ + struct ubase_adev_qos *qos = &udev->qos; + unsigned long sl_bitmap = 0; + u8 i; + + for (i = 0; i < qos->nic_sl_num; i++) + sl_bitmap |= 1 << qos->nic_sl[i]; + for (i = 0; i < qos->sl_num; i++) + sl_bitmap |= 1 << qos->sl[i]; + + return sl_bitmap; +} + +static int ubase_check_sl_bitmap(struct ubase_dev *udev, unsigned long sl_bitmap) +{ + unsigned long sl_bitmap_cap; + u8 i; + + sl_bitmap_cap = ubase_get_sl_bitmap(udev); + for (i = 0; i < UBASE_MAX_SL_NUM; i++) { + if (!test_bit(i, &sl_bitmap)) + continue; + if (!test_bit(i, &sl_bitmap_cap)) + return -EINVAL; + } + + return 0; +} + int ubase_check_qos_sch_param(struct auxiliary_device *adev, u16 vl_bitmap, u8 *vl_bw, u8 *vl_tsa, bool is_ets) { @@ -447,6 +477,9 @@ int ubase_set_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) udev = dev_get_drvdata(dev); + if (ubase_check_sl_bitmap(udev, sl_priqos->sl_bitmap)) + return -EINVAL; + if (sl_priqos->port_bitmap) return ubase_set_ets_priqos(udev, sl_priqos); @@ -458,11 +491,12 @@ int ubase_get_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) { struct ubase_dev *udev; - if (!dev || !sl_priqos || !sl_priqos->sl_bitmap) + if (!dev || !sl_priqos) return -EINVAL; udev = dev_get_drvdata(dev); + sl_priqos->sl_bitmap = ubase_get_sl_bitmap(udev); if (sl_priqos->port_bitmap) return ubase_get_ets_priqos(udev, sl_priqos); -- Gitee From a4ec6413f379cd8a6c5fc7d2b0350355e5b32b03 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 20 Nov 2025 10:34:28 +0800 Subject: [PATCH 094/243] ub: ubase: add CMDQ&CTRLQ compatibility code commit 313470c59d7f1158d832074319e35b823e4ccd29 openEuler The ubase driver and firmware communicate through the CMDQ interface, which needs to be forward and backward compatible. When pairing new drivers with old firmware or old drivers with new firmware, it is essential to ensure that the program can still work. Therefore, some compatibility design needs to be done in advance, and the relevant interfaces should be implemented. In subsequent versions, the predefined compatibility plan should be followed to handle these scenarios. Specifically, during the ubase dev probe, the driver first queries the firmware for the chip version using a message with opcode 0x0001. It then informs the firmware of its supported feature capability set through a message with opcode 0x0007. 
Subsequently, it queries the firmware for the feature capability sets supported by both the chip and the firmware using a message with opcode 0x0030. These commands could ensure backward and forward compatibility. In the CTRLQ interface, the version number check is addedd. If the version is not supported or the command is not supported, an error response of EOPNOTSUPP is returned. In the CMDQ interface, we removed some obsolete capability bit definitions and interface parameters. And optimized the code for handling scenarios where firmware returns an unauthorized error. In the CTRLQ interface, we add an version number check for requests coming from the control plane. By the way, we have also optimized the parameters of some commands. Fixes: 8d68017f37fa ("ub: ubase: support for command process") Signed-off-by: Chuan Wu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 1 - drivers/net/ub/unic/unic.h | 5 --- drivers/net/ub/unic/unic_cmd.h | 2 +- drivers/net/ub/unic/unic_dev.c | 21 ++++------- drivers/net/ub/unic/unic_dev.h | 5 --- drivers/net/ub/unic/unic_ethtool.c | 3 -- drivers/net/ub/unic/unic_hw.c | 14 +++---- drivers/net/ub/unic/unic_qos_hw.c | 3 +- drivers/net/ub/unic/unic_rack_ip.c | 7 +++- drivers/net/ub/unic/unic_stats.c | 29 ++++++-------- drivers/ub/ubase/debugfs/ubase_debugfs.c | 10 +---- drivers/ub/ubase/ubase_cmd.c | 16 ++++---- drivers/ub/ubase/ubase_cmd.h | 18 ++++++--- drivers/ub/ubase/ubase_ctrlq.c | 41 +++++++++++++++++--- drivers/ub/ubase/ubase_dev.c | 22 ++++++++--- drivers/ub/ubase/ubase_hw.c | 10 ++--- drivers/ub/ubase/ubase_hw.h | 44 +++++++++------------- drivers/ub/ubase/ubase_qos_hw.c | 9 ++--- include/ub/ubase/ubase_comm_cmd.h | 1 + include/ub/ubase/ubase_comm_dev.h | 5 --- 20 files changed, 132 insertions(+), 134 deletions(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 63703934613d..0a6dbdaffedc 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -46,7 +46,6 @@ static const struct unic_dbg_cap_bit_info { {"\tsupport_ubl: %u\n", &unic_dev_ubl_supported}, {"\tsupport_ets: %u\n", &unic_dev_ets_supported}, {"\tsupport_fec: %u\n", &unic_dev_fec_supported}, - {"\tsupport_rss: %u\n", &unic_dev_rss_supported}, {"\tsupport_tc_speed_limit: %u\n", &unic_dev_tc_speed_limit_supported}, {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index 7f6572c50a0c..e63ee6e900ff 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -20,16 +20,11 @@ enum { UNIC_SUPPORT_ETS_B = 1, UNIC_SUPPORT_FEC_B = 2, UNIC_SUPPORT_PAUSE_B = 3, - UNIC_SUPPORT_GRO_B = 5, UNIC_SUPPORT_ETH_B = 7, - UNIC_SUPPORT_TSO_B = 8, - UNIC_SUPPORT_RSS_B = 9, UNIC_SUPPORT_SERIAL_SERDES_LB_B = 10, UNIC_SUPPORT_TC_SPEED_LIMIT_B = 12, UNIC_SUPPORT_TX_CSUM_OFFLOAD_B = 13, - UNIC_SUPPORT_TUNNEL_CSUM_OFFLOAD_B = 14, - UNIC_SUPPORT_PTP_B = 15, UNIC_SUPPORT_RX_CSUM_OFFLOAD_B = 16, UNIC_SUPPORT_APP_LB_B = 17, diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index bf3e11e343cd..125802234e6b 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -150,7 +150,7 @@ struct unic_config_vl_map_cmd { }; struct unic_config_vl_speed_cmd { - __le16 bus_ue_id; + u8 resv0[2]; __le16 vl_bitmap; __le32 
max_speed[UBASE_MAX_VL_NUM]; u8 resv1[20]; diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index ef79194c24bb..f8d5676bfc1f 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -248,10 +248,12 @@ static int unic_init_vl_info(struct unic_dev *unic_dev) return ret; ret = unic_init_vl_maxrate(unic_dev); - if (ret) + if (ret && ret != -EPERM) return ret; - return unic_init_vl_sch(unic_dev); + ret = unic_init_vl_sch(unic_dev); + + return ret == -EPERM ? 0 : ret; } static int unic_init_channels_attr(struct unic_dev *unic_dev) @@ -559,17 +561,12 @@ static int unic_dev_init_mtu(struct unic_dev *unic_dev) { struct net_device *netdev = unic_dev->comdev.netdev; struct unic_caps *caps = &unic_dev->caps; - int ret; netdev->mtu = UB_DATA_LEN; netdev->max_mtu = caps->max_trans_unit; netdev->min_mtu = caps->min_trans_unit; - ret = unic_config_mtu(unic_dev, netdev->mtu); - if (ret == -EPERM) - return 0; - - return ret; + return unic_config_mtu(unic_dev, netdev->mtu); } static int unic_init_mac(struct unic_dev *unic_dev) @@ -583,11 +580,11 @@ static int unic_init_mac(struct unic_dev *unic_dev) ret = unic_set_mac_speed_duplex(unic_dev, mac->speed, mac->duplex, mac->lanes); - if (ret && ret != -EPERM) + if (ret) return ret; ret = unic_set_mac_autoneg(unic_dev, mac->autoneg); - if (ret && ret != -EPERM) + if (ret) return ret; ret = unic_dev_fec_supported(unic_dev) && mac->user_fec_mode ? @@ -621,9 +618,7 @@ int unic_set_mtu(struct unic_dev *unic_dev, int new_mtu) new_mtu = max(new_mtu, UB_DATA_LEN); ret = unic_check_validate_dump_mtu(unic_dev, new_mtu, &max_frame_size); - if (ret == -EPERM) { - return 0; - } else if (ret < 0) { + if (ret) { unic_err(unic_dev, "invalid MTU(%d), please check, ret = %d.\n", new_mtu, ret); return -EINVAL; diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index a20744b810e8..51708850e38d 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -293,11 +293,6 @@ static inline bool unic_dev_fec_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_B); } -static inline bool unic_dev_rss_supported(struct unic_dev *unic_dev) -{ - return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_RSS_B); -} - static inline bool unic_dev_tc_speed_limit_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_TC_SPEED_LIMIT_B); diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index c9593ba74fe4..22f530c45107 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -67,9 +67,6 @@ static int unic_get_fecparam(struct net_device *ndev, struct unic_dev *unic_dev = netdev_priv(ndev); struct unic_mac *mac = &unic_dev->hw.mac; - if (!unic_dev_fec_supported(unic_dev)) - return -EOPNOTSUPP; - fec->fec = mac->fec_ability; fec->active_fec = mac->fec_mode; diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index d2b7026514c4..be606bfb6495 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -76,7 +76,7 @@ int unic_set_mac_autoneg(struct unic_dev *unic_dev, u8 autoneg) sizeof(req), &req); ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); - if (ret && ret != -EPERM) + if (ret) dev_err(unic_dev->comdev.adev->dev.parent, "failed to send cmd in config autoneg(%u), ret = %d.\n", autoneg, ret); @@ -105,7 +105,7 @@ int unic_set_mac_speed_duplex(struct unic_dev *unic_dev, u32 speed, u8 duplex, sizeof(req), &req); ret = 
ubase_cmd_send_in(unic_dev->comdev.adev, &in); - if (ret && ret != -EPERM) + if (ret) dev_err(unic_dev->comdev.adev->dev.parent, "failed to send cmd in config speed(%u), ret = %d.\n", speed, ret); @@ -334,9 +334,7 @@ int unic_set_promisc_mode(struct unic_dev *unic_dev, time_out = unic_cmd_timeout(unic_dev); ret = ubase_cmd_send_in_ex(adev, &in, time_out); - if (ret == -EPERM) - return 0; - else if (ret) + if (ret) unic_err(unic_dev, "failed to set promisc mode, ret = %d.\n", ret); @@ -561,15 +559,15 @@ static int unic_query_flush_status(struct unic_dev *unic_dev, u8 *status) struct ubase_cmd_buf in; int ret; + if (unic_dev_ubl_supported(unic_dev)) + return -EOPNOTSUPP; + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_FLUSH_STATUS, true, 0, NULL); ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_FLUSH_STATUS, false, sizeof(resp), &resp); ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); if (ret) { - if (ret == -EPERM) - return -EOPNOTSUPP; - unic_err(unic_dev, "failed to send cmd when query flush status, ret = %d.\n", ret); diff --git a/drivers/net/ub/unic/unic_qos_hw.c b/drivers/net/ub/unic/unic_qos_hw.c index 79fb7271c08e..bba05964156b 100644 --- a/drivers/net/ub/unic/unic_qos_hw.c +++ b/drivers/net/ub/unic/unic_qos_hw.c @@ -59,7 +59,6 @@ int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, u32 vl_rate; int i, ret; - req.bus_ue_id = cpu_to_le16(USHRT_MAX); req.vl_bitmap = cpu_to_le16(vl_bitmap); for (i = 0; i < caps->vl_num; i++) { vl_rate = vl_maxrate[i] / UNIC_MBYTE_PER_SEND; @@ -72,7 +71,7 @@ int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, sizeof(req), &req); ret = ubase_cmd_send_in(adev, &in); - if (ret) + if (ret && ret != -EPERM) dev_err(adev->dev.parent, "failed to config vl rate limit, ret = %d.\n", ret); diff --git a/drivers/net/ub/unic/unic_rack_ip.c b/drivers/net/ub/unic/unic_rack_ip.c index f2ff97304e78..529856dcff73 100644 --- a/drivers/net/ub/unic/unic_rack_ip.c +++ b/drivers/net/ub/unic/unic_rack_ip.c @@ -41,7 +41,7 @@ static void unic_update_rack_addr_state(struct unic_vport *vport, addr_node->state = UNIC_COMM_ADDR_TO_ADD; unic_format_masked_ip_addr(format_masked_ip_addr, addr); unic_info(unic_dev, - "deleted an existing ip %s by accident and need to add it.\n", + "stack deleted an planned ip %s, need to re-add it.\n", format_masked_ip_addr); } break; @@ -90,7 +90,7 @@ static int unic_update_stack_ip_addr(struct unic_vport *vport, list_add_tail(&addr_node->node, list); unic_format_masked_ip_addr(format_masked_ip_addr, addr); unic_info(unic_dev, - "added a new ip %s by accident and need to delete it.\n", + "stack added a non-planned ip %s, need to delete it.\n", format_masked_ip_addr); set_bit(UNIC_VPORT_STATE_IP_TBL_CHANGE, &vport->state); goto unlock_and_exit; @@ -469,6 +469,9 @@ int unic_handle_notify_ip_event(struct auxiliary_device *adev, u8 service_ver, struct unic_stack_ip_info st_ip; int ret; + if (service_ver != UBASE_CTRLQ_SER_VER_01) + return -EOPNOTSUPP; + if (len < sizeof(*req)) { unic_err(priv, "failed to verify ip info size, len = %u.\n", len); return -EINVAL; diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index dcefeab1bb2d..d8ccd1876500 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -88,7 +88,7 @@ static int unic_get_dfx_reg_num(struct unic_dev *unic_dev, u32 *reg_num, ubase_fill_inout_buf(&out, UBASE_OPC_DFX_REG_NUM, true, reg_arr_size * sizeof(u32), reg_num); ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); - if (ret && 
ret != -EPERM) + if (ret) unic_err(unic_dev, "failed to query dfx reg num, ret = %d.\n", ret); @@ -149,17 +149,13 @@ int unic_get_regs_len(struct net_device *netdev) return -ENOMEM; ret = unic_get_dfx_reg_num(unic_dev, reg_num, reg_arr_size); - if (!ret) { - count += unic_get_dfx_regs_len(unic_dev, unic_dfx_reg_arr, - reg_arr_size, reg_num); - } else if (ret != -EPERM) { - unic_err(unic_dev, - "failed to get dfx regs length, ret = %d.\n", ret); + if (ret) { kfree(reg_num); - return -EBUSY; } + count += unic_get_dfx_regs_len(unic_dev, unic_dfx_reg_arr, + reg_arr_size, reg_num); kfree(reg_num); return count; @@ -286,17 +282,16 @@ void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, pdata += unic_get_res_regs(unic_dev, pdata); ret = unic_get_dfx_reg_num(unic_dev, reg_num, reg_arr_size); - if (!ret) { - ret = unic_get_dfx_regs(unic_dev, pdata, unic_dfx_reg_arr, - reg_arr_size, reg_num); - if (ret) - unic_err(unic_dev, - "failed to get dfx regs, ret = %d.\n", ret); - } else if (ret != -EPERM) { - unic_err(unic_dev, - "failed to get dfx reg num, ret = %d.\n", ret); + if (ret) { + kfree(reg_num); + return; } + ret = unic_get_dfx_regs(unic_dev, pdata, unic_dfx_reg_arr, + reg_arr_size, reg_num); + if (ret) + unic_err(unic_dev, "failed to get dfx regs, ret = %d.\n", ret); + kfree(reg_num); } diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index ad97d7a58188..bf49fc3fdc93 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -94,6 +94,7 @@ static void ubase_dbg_dump_caps_info(struct seq_file *s, struct ubase_dev *udev) {"\tdie_id: %u\n", dev_caps->die_id}, {"\tue_id: %u\n", dev_caps->ue_id}, {"\tnl_id: %u\n", dev_caps->nl_id}, + {"\tfw_version: %u\n", dev_caps->fw_version}, }; int i; @@ -123,29 +124,20 @@ static void ubase_dbg_dump_adev_caps(struct seq_file *s, u32 caps_info; } ubase_adev_caps_info[] = { {"\tjfs_max_cnt: %u\n", caps->jfs.max_cnt}, - {"\tjfs_reserved_cnt: %u\n", caps->jfs.reserved_cnt}, {"\tjfs_depth: %u\n", caps->jfs.depth}, {"\tjfr_max_cnt: %u\n", caps->jfr.max_cnt}, - {"\tjfr_reserved_cnt: %u\n", caps->jfr.reserved_cnt}, {"\tjfr_depth: %u\n", caps->jfr.depth}, {"\tjfc_max_cnt: %u\n", caps->jfc.max_cnt}, - {"\tjfc_reserved_cnt: %u\n", caps->jfc.reserved_cnt}, {"\tjfc_depth: %u\n", caps->jfc.depth}, {"\ttp_max_cnt: %u\n", caps->tp.max_cnt}, - {"\ttp_reserved_cnt: %u\n", caps->tp.reserved_cnt}, {"\ttp_depth: %u\n", caps->tp.depth}, {"\ttpg_max_cnt: %u\n", caps->tpg.max_cnt}, - {"\ttpg_reserved_cnt: %u\n", caps->tpg.reserved_cnt}, {"\ttpg_depth: %u\n", caps->tpg.depth}, {"\tcqe_size: %hu\n", caps->cqe_size}, {"\tutp_port_bitmap: 0x%x\n", caps->utp_port_bitmap}, {"\tjtg_max_cnt: %u\n", caps->jtg_max_cnt}, {"\trc_max_cnt: %u\n", caps->rc_max_cnt}, {"\trc_depth: %u\n", caps->rc_que_depth}, - {"\tccc_max_cnt: %u\n", caps->ccc_max_cnt}, - {"\tdest_addr_max_cnt: %u\n", caps->dest_addr_max_cnt}, - {"\tseid_upi_max_cnt: %u\n", caps->seid_upi_max_cnt}, - {"\ttpm_max_cnt: %u\n", caps->tpm_max_cnt}, {"\tprealloc_mem_dma_len: %llu\n", caps->pmem.dma_len}, }; int i; diff --git a/drivers/ub/ubase/ubase_cmd.c b/drivers/ub/ubase/ubase_cmd.c index 4526f92ac117..4eedb6eff530 100644 --- a/drivers/ub/ubase/ubase_cmd.c +++ b/drivers/ub/ubase/ubase_cmd.c @@ -306,10 +306,11 @@ int ubase_send_cmd(struct ubase_dev *udev, return ret; } -static int ubase_cmd_query_version(struct ubase_dev *udev, u32 *fw_version) +static int ubase_cmd_query_version(struct ubase_dev *udev) { struct 
ubase_query_version_cmd *resp; struct ubase_cmdq_desc desc; + u32 fw_ver; int ret; ubase_cmd_setup_basic_desc(&desc, UBASE_OPC_QUERY_FW_VER, true, 1); @@ -321,13 +322,14 @@ static int ubase_cmd_query_version(struct ubase_dev *udev, u32 *fw_version) } resp = (struct ubase_query_version_cmd *)desc.data; - *fw_version = le32_to_cpu(resp->firmware); + udev->caps.dev_caps.fw_version = le32_to_cpu(resp->fw_version); + fw_ver = udev->caps.dev_caps.fw_version; ubase_info(udev, "The firmware version is %u.%u.%u.%u\n", - u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE3_MASK), - u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE2_MASK), - u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE1_MASK), - u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE0_MASK)); + u32_get_bits(fw_ver, UBASE_FW_VERSION_BYTE3_MASK), + u32_get_bits(fw_ver, UBASE_FW_VERSION_BYTE2_MASK), + u32_get_bits(fw_ver, UBASE_FW_VERSION_BYTE1_MASK), + u32_get_bits(fw_ver, UBASE_FW_VERSION_BYTE0_MASK)); return 0; } @@ -366,7 +368,7 @@ int ubase_cmd_init(struct ubase_dev *udev) clear_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); - ret = ubase_cmd_query_version(udev, &udev->caps.dev_caps.fw_version); + ret = ubase_cmd_query_version(udev); if (ret) goto err_query_version; diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index 63b67179f2fb..263e88c9fa49 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -43,10 +43,16 @@ enum ubase_cmd_state { }; struct ubase_query_version_cmd { - __le32 firmware; - __le32 hardware; - __le32 rsv; - __le32 caps[UBASE_CAP_LEN]; + __le32 fw_version; + u8 rsv[20]; +}; + +enum ubase_drv_cap_bit { + UBASE_CAP_SUP_ACTIVATE_B = 0, +}; + +struct ubase_notify_drv_cap_cmd { + u8 cap_bits[24]; /* see ubase_drv_cap_bit */ }; #define UBASE_UBCL_CFG_DATA_ALIGN 4 @@ -106,10 +112,10 @@ struct ubase_cfg_ets_vl_sch_cmd { }; struct ubase_cfg_tm_vl_sch_cmd { - __le16 bus_ue_id; + u8 rsvd0[2]; __le16 vl_bitmap; __le16 vl_tsa; - u8 rsvd[2]; + u8 rsvd1[2]; u8 vl_bw[UBASE_MAX_VL_NUM]; }; diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index d590463b0efc..b8d7681dd518 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -742,6 +742,32 @@ static void ubase_ctrlq_read_msg_data(struct ubase_dev *udev, u8 num, u8 *msg) } } +static void ubase_ctrlq_send_unsupported_resp(struct ubase_dev *udev, + struct ubase_ctrlq_base_block *head, + void *msg_data, u16 msg_data_len, + u16 seq) +{ + struct ubase_ctrlq_msg msg = {0}; + int ret; + + msg.service_ver = head->service_ver; + msg.service_type = head->service_type; + msg.opcode = head->opcode; + msg.in_size = msg_data_len; + msg.in = msg_data; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = EOPNOTSUPP; + + ubase_info(udev, "ctrlq received unsupported req. seq=%u, ser_type=%d, ser_ver=%d, opc=%u.", + seq, head->service_type, head->service_ver, head->opcode); + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret) + ubase_warn(udev, "failed to send ctrlq unsupported resp. 
seq=%u, ser_type=%d, ser_ver=%d, opc=%u.", + seq, head->service_type, head->service_ver, head->opcode); +} + static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, struct ubase_ctrlq_base_block *head, void *msg_data, u16 msg_data_len, @@ -749,20 +775,25 @@ static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, { struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; struct ubase_ctrlq_crq_event_nbs *nbs; + int ret = -EOPNOTSUPP; mutex_lock(&crq_tab->lock); list_for_each_entry(nbs, &crq_tab->crq_nbs.list, list) { if (nbs->crq_nb.service_type == head->service_type && nbs->crq_nb.opcode == head->opcode) { - nbs->crq_nb.crq_handler(nbs->crq_nb.back, - head->service_ver, - msg_data, - msg_data_len, - seq); + ret = nbs->crq_nb.crq_handler(nbs->crq_nb.back, + head->service_ver, + msg_data, + msg_data_len, + seq); break; } } mutex_unlock(&crq_tab->lock); + + if (ret == -EOPNOTSUPP) + ubase_ctrlq_send_unsupported_resp(udev, head, msg_data, + msg_data_len, seq); } static void ubase_ctrlq_notify_completed(struct ubase_dev *udev, diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 3921cd0ff824..fa61159d4295 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -454,11 +454,6 @@ static int ubase_handle_ue2ue_ctrlq_req(struct ubase_dev *udev, return -EINVAL; } - if (cmd->in_size > (len - (sizeof(*cmd) + UBASE_CTRLQ_HDR_LEN))) { - ubase_err(udev, "ubase e2e cmd len = %u error.\n", cmd->in_size); - return -EINVAL; - } - msg.service_ver = head->service_ver; msg.service_type = head->service_type; msg.opcode = head->opcode; @@ -621,6 +616,19 @@ static int ubase_register_cmdq_crq_event(struct ubase_dev *udev) return ret; } +static int ubase_notify_drv_capbilities(struct ubase_dev *udev) +{ + struct ubase_notify_drv_cap_cmd req = {0}; + struct ubase_cmd_buf in; + + set_bit(UBASE_CAP_SUP_ACTIVATE_B, (unsigned long *)req.cap_bits); + + __ubase_fill_inout_buf(&in, UBASE_OPC_NOTIFY_DRV_CAPS, false, + sizeof(req), &req); + + return __ubase_cmd_send_in(udev, &in); +} + static const struct ubase_init_function ubase_init_func_map[] = { { "init work queue", UBASE_SUP_ALL, 0, @@ -630,6 +638,10 @@ static const struct ubase_init_function ubase_init_func_map[] = { "init cmd queue", UBASE_SUP_ALL, 1, ubase_cmd_init, ubase_cmd_uninit }, + { + "notify drv capbilities", UBASE_SUP_ALL, 0, + ubase_notify_drv_capbilities, NULL + }, { "query dev res", UBASE_SUP_ALL, 0, ubase_query_dev_res, NULL diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index d728dd47f116..290a03a74b0e 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -156,13 +156,10 @@ static void ubase_parse_dev_caps_unic(struct ubase_dev *udev, struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; unic_caps->jfs.max_cnt = le32_to_cpu(resp->nic_jfs_max_cnt); - unic_caps->jfs.reserved_cnt = le32_to_cpu(resp->nic_jfs_reserved_cnt); unic_caps->jfs.depth = le32_to_cpu(resp->nic_jfs_depth); unic_caps->jfr.max_cnt = le32_to_cpu(resp->nic_jfr_max_cnt); - unic_caps->jfr.reserved_cnt = le32_to_cpu(resp->nic_jfr_reserved_cnt); unic_caps->jfr.depth = le32_to_cpu(resp->nic_jfr_depth); unic_caps->jfc.max_cnt = le32_to_cpu(resp->nic_jfc_max_cnt); - unic_caps->jfc.reserved_cnt = le32_to_cpu(resp->nic_jfc_reserved_cnt); unic_caps->jfc.depth = le32_to_cpu(resp->nic_jfc_depth); unic_caps->cqe_size = le16_to_cpu(resp->nic_cqe_size); unic_caps->utp_port_bitmap = le32_to_cpu(resp->port_bitmap); @@ -174,13 +171,10 @@ static void 
ubase_parse_dev_caps_udma(struct ubase_dev *udev, struct ubase_adev_caps *udma_caps = &udev->caps.udma_caps; udma_caps->jfs.max_cnt = le32_to_cpu(resp->udma_jfs_max_cnt); - udma_caps->jfs.reserved_cnt = le32_to_cpu(resp->udma_jfs_reserved_cnt); udma_caps->jfs.depth = le32_to_cpu(resp->udma_jfs_depth); udma_caps->jfr.max_cnt = le32_to_cpu(resp->udma_jfr_max_cnt); - udma_caps->jfr.reserved_cnt = le32_to_cpu(resp->udma_jfr_reserved_cnt); udma_caps->jfr.depth = le32_to_cpu(resp->udma_jfr_depth); udma_caps->jfc.max_cnt = le32_to_cpu(resp->udma_jfc_max_cnt); - udma_caps->jfc.reserved_cnt = le32_to_cpu(resp->udma_jfc_reserved_cnt); udma_caps->jfc.depth = le32_to_cpu(resp->udma_jfc_depth); udma_caps->cqe_size = le16_to_cpu(resp->udma_cqe_size); udma_caps->jtg_max_cnt = le32_to_cpu(resp->jtg_max_cnt); @@ -951,7 +945,7 @@ int __ubase_perf_stats(struct ubase_dev *udev, u64 port_bitmap, u32 period, struct ubase_perf_stats_result *data, u32 data_size) { #define UBASE_MS_TO_US(ms) (1000 * (ms)) - struct ubase_stop_perf_stats_cmd resp = {0}; + struct ubase_stop_perf_stats_cmd resp; unsigned long logic_port_bitmap; int ret, j, k, port_num; u8 i; @@ -988,6 +982,8 @@ int __ubase_perf_stats(struct ubase_dev *udev, u64 port_bitmap, u32 period, for (i = 0, k = 0; i < UBASE_MAX_PORT_NUM && k < port_num; i++) { if (!test_bit(i, (unsigned long *)&port_bitmap)) continue; + + memset(&resp, 0, sizeof(resp)); ret = ubase_stop_perf_stats(udev, &resp, period, i); if (ret) goto unlock; diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index ee455ba317d1..12bc101cd04f 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -30,7 +30,7 @@ struct ubase_caps_item { struct ubase_res_cmd_resp { __le32 cap_bits[UBASE_CAP_LEN]; - __le32 rsvd[3]; + __le32 rsvd0[3]; u8 rsvd1[2]; __le16 ceq_vector_num; @@ -43,37 +43,32 @@ struct ubase_res_cmd_resp { __le32 aeqe_depth; __le32 ceqe_depth; __le32 udma_jfs_max_cnt; - __le32 udma_jfs_reserved_cnt; + u8 rsvd2[4]; __le32 udma_jfs_depth; __le32 udma_jfr_max_cnt; - __le32 udma_jfr_reserved_cnt; + u8 rsvd3[4]; __le32 udma_jfr_depth; u8 nic_vl_num; - u8 rsvd2[3]; + u8 rsvd4[3]; u8 nic_vl[UBASE_MAX_REQ_VL_NUM]; __le32 udma_jfc_max_cnt; - __le32 udma_jfc_reserved_cnt; + u8 rsvd5[4]; __le32 udma_jfc_depth; - __le32 udma_tp_max_cnt; - __le32 udma_tp_reserved_cnt; - __le32 udma_tp_depth; - __le32 udma_tpg_max_cnt; - __le32 udma_tpg_reserved_cnt; - __le32 udma_tpg_depth; + u8 rsvd6[24]; __le32 nic_jfs_max_cnt; - __le32 nic_jfs_reserved_cnt; + u8 rsvd7[4]; __le32 nic_jfs_depth; __le32 nic_jfr_max_cnt; - __le32 nic_jfr_reserved_cnt; + u8 rsvd8[4]; __le32 nic_jfr_depth; - __le32 rsvd3[2]; + __le32 rsvd9[2]; - __le32 rsvd4; + __le32 rsvd10; __le32 nic_jfc_max_cnt; - __le32 nic_jfc_reserved_cnt; + u8 rsvd11[4]; __le32 nic_jfc_depth; __le32 nic_tp_max_cnt; __le32 nic_tp_reserved_cnt; @@ -83,10 +78,7 @@ struct ubase_res_cmd_resp { __le32 nic_tpg_reserved_cnt; __le32 nic_tpg_depth; __le32 total_ue_num; - __le32 jfs_ctx_size; - __le32 jfr_ctx_size; - __le32 jfc_ctx_size; - __le32 tp_ctx_size; + u8 rsvd12[16]; __le16 rsvd_jetty_cnt; __le16 mac_stats_num; @@ -95,24 +87,22 @@ struct ubase_res_cmd_resp { __le32 public_jetty_cnt; __le32 tp_extdb_buf_size; __le32 tp_timer_buf_size; - u8 port_work_mode; + u8 resv13; u8 udma_vl_num; u8 udma_tp_resp_vl_offset; u8 ue_num; __le32 port_bitmap; - u8 rsvd5[4]; + u8 rsvd14[4]; /* include udma tp and ctp req vl */ u8 udma_req_vl[UBASE_MAX_REQ_VL_NUM]; __le32 udma_rc_depth; - u8 rsvd6[4]; + u8 rsvd15[4]; __le32 jtg_max_cnt; __le32 
rc_max_cnt_per_vl; - __le32 dest_addr_max_cnt; - __le32 seid_upi_max_cnt; + u8 rsvd16[8]; - __le32 tpm_max_cnt; - __le32 ccc_max_cnt; + u8 rsvd17[32]; }; struct ubase_query_oor_resp { diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index bfd5c4e0f5c1..8145edc4401c 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -195,13 +195,10 @@ static int __ubase_config_tm_vl_sch(struct ubase_dev *udev, u16 vl_bitmap, int ret; u8 i; - /* the configuration takes effect for all entities. */ - req.bus_ue_id = cpu_to_le16(USHRT_MAX); - req.vl_bitmap = cpu_to_le16(vl_bitmap); - for (i = 0; i < UBASE_MAX_VL_NUM; i++) tsa_bitmap |= vl_tsa[i] ? 1 << i : 0; + req.vl_bitmap = cpu_to_le16(vl_bitmap); req.vl_tsa = cpu_to_le16(tsa_bitmap); memcpy(req.vl_bw, vl_bw, UBASE_MAX_VL_NUM); @@ -209,7 +206,7 @@ static int __ubase_config_tm_vl_sch(struct ubase_dev *udev, u16 vl_bitmap, sizeof(req), &req); ret = __ubase_cmd_send_in(udev, &in); - if (ret) + if (ret && ret != -EPERM) ubase_err(udev, "failed to config tm vl sch, ret = %d", ret); return ret; @@ -230,7 +227,7 @@ static int __ubase_config_ets_vl_sch(struct ubase_dev *udev, u16 vl_bitmap, &req); ret = __ubase_cmd_send_in(udev, &in); - if (ret) + if (ret && ret != -EPERM) ubase_err(udev, "failed to cfg ets vl sch, ret = %d.", ret); return ret; diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 4eb3c435a8f9..492b5e8513ea 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -30,6 +30,7 @@ enum ubase_opcode_type { /* Generic commands */ UBASE_OPC_QUERY_FW_VER = 0x0001, UBASE_OPC_QUERY_CTL_INFO = 0x0003, + UBASE_OPC_NOTIFY_DRV_CAPS = 0x0007, UBASE_OPC_QUERY_COMM_RSRC_PARAM = 0x0030, UBASE_OPC_QUERY_NIC_RSRC_PARAM = 0x0031, UBASE_OPC_QUERY_LINK_STATUS = 0x0032, diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index ac85c20311dd..35e0496ac01d 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -103,7 +103,6 @@ struct ubase_caps { struct ubase_res_caps { u32 max_cnt; u32 start_idx; - u32 reserved_cnt; u32 depth; }; @@ -123,10 +122,6 @@ struct ubase_adev_caps { u32 jtg_max_cnt; u32 rc_max_cnt; u32 rc_que_depth; - u32 ccc_max_cnt; - u32 dest_addr_max_cnt; - u32 seid_upi_max_cnt; - u32 tpm_max_cnt; u16 cqe_size; }; -- Gitee From 0bc1563f7bc09440cff67f6b1e5cf5b138868793 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Mon, 10 Nov 2025 20:33:36 +0800 Subject: [PATCH 095/243] net: unic: Fix the ethtool stats and basic capability information query interface commit 15bde327170a99bf1cc5e4869d70872c53b69ba6 openEuler The ethtool tool supports querying stats information for the UB network port. This patch resolves issues with stats information collection and with failures to query FEC and speed-related configurations.
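For reference, the per-queue counters exposed here are named "<txq|rxq><channel>_<field>", matching the descriptors in unic_sq_stats_str and unic_rq_stats_str. A minimal standalone sketch of that naming scheme (illustrative host-side code, not driver code; the ETH_GSTRING_LEN value matches include/uapi/linux/ethtool.h):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	char name[ETH_GSTRING_LEN];

	/* channel 0, SQ field "packets" -> "txq0_packets" */
	snprintf(name, sizeof(name) - 1, "%s%u_%s", "txq", 0u, "packets");
	printf("%s\n", name);
	return 0;
}

With the get_strings/get_ethtool_stats/get_sset_count hooks wired up below, `ethtool -S <netdev>` prints one such block per channel (e.g. txq0_packets, rxq0_l2_err).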
Fixes: 4420dfeed31d ("net: unic: Support to query and clear historical NIC link status information") Signed-off-by: Xiongchuan Zhou Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_ethtool.c | 50 ++++++++++ drivers/net/ub/unic/unic_stats.c | 142 +++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_stats.h | 21 +++++ 3 files changed, 213 insertions(+) diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 22f530c45107..960477451df5 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -24,6 +24,46 @@ static u32 unic_get_link_status(struct net_device *netdev) return unic_dev->sw_link_status; } +static void unic_get_port_type(struct unic_dev *unic_dev, + struct ethtool_link_ksettings *cmd) +{ + u8 module_type = unic_dev->hw.mac.module_type; + u8 media_type = unic_dev->hw.mac.media_type; + + switch (media_type) { + case UNIC_MEDIA_TYPE_NONE: + case UNIC_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + break; + case UNIC_MEDIA_TYPE_FIBER: + if (module_type == UNIC_MODULE_TYPE_CR) + cmd->base.port = PORT_DA; + else + cmd->base.port = PORT_FIBRE; + break; + default: + cmd->base.port = PORT_NONE; + break; + } +} + +static void unic_get_ksettings(struct unic_dev *unic_dev, + struct ethtool_link_ksettings *cmd) +{ + struct unic_mac *mac = &unic_dev->hw.mac; + + unic_get_port_type(unic_dev, cmd); + + cmd->base.speed = mac->speed; + cmd->base.duplex = mac->duplex; + cmd->base.autoneg = mac->autoneg; + + linkmode_copy(cmd->link_modes.supported, mac->supported); + linkmode_copy(cmd->link_modes.advertising, mac->advertising); + + cmd->lanes = mac->lanes; +} + static int unic_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -32,6 +72,13 @@ static int unic_get_link_ksettings(struct net_device *netdev, /* Ensure that the latest information is obtained. 
*/ unic_update_port_info(unic_dev); + unic_get_ksettings(unic_dev, cmd); + + if (!unic_get_link_status(netdev)) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + return 0; } @@ -326,6 +373,9 @@ static const struct ethtool_ops unic_ethtool_ops = { .get_drvinfo = unic_get_driver_info, .get_regs_len = unic_get_regs_len, .get_regs = unic_get_regs, + .get_ethtool_stats = unic_get_stats, + .get_strings = unic_get_stats_strings, + .get_sset_count = unic_get_sset_count, .get_channels = unic_get_channels, .set_channels = unic_set_channels, .get_ringparam = unic_get_channels_param, diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index d8ccd1876500..4647ba5e3e5e 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -78,6 +78,36 @@ static struct unic_dfx_regs_group unic_dfx_reg_arr[] = { }, }; +static const struct unic_stats_desc unic_sq_stats_str[] = { + {"pad_err", UNIC_SQ_STATS_FIELD_OFF(pad_err)}, + {"packets", UNIC_SQ_STATS_FIELD_OFF(packets)}, + {"bytes", UNIC_SQ_STATS_FIELD_OFF(bytes)}, + {"busy", UNIC_SQ_STATS_FIELD_OFF(busy)}, + {"more", UNIC_SQ_STATS_FIELD_OFF(more)}, + {"restart_queue", UNIC_SQ_STATS_FIELD_OFF(restart_queue)}, + {"over_max_sge_num", UNIC_SQ_STATS_FIELD_OFF(over_max_sge_num)}, + {"csum_err", UNIC_SQ_STATS_FIELD_OFF(csum_err)}, + {"ci_mismatch", UNIC_SQ_STATS_FIELD_OFF(ci_mismatch)}, + {"vlan_err", UNIC_SQ_STATS_FIELD_OFF(vlan_err)}, + {"fd_cnt", UNIC_SQ_STATS_FIELD_OFF(fd_cnt)}, + {"drop_cnt", UNIC_SQ_STATS_FIELD_OFF(drop_cnt)}, + {"cfg5_drop_cnt", UNIC_SQ_STATS_FIELD_OFF(cfg5_drop_cnt)} +}; + +static const struct unic_stats_desc unic_rq_stats_str[] = { + {"alloc_skb_err", UNIC_RQ_STATS_FIELD_OFF(alloc_skb_err)}, + {"packets", UNIC_RQ_STATS_FIELD_OFF(packets)}, + {"bytes", UNIC_RQ_STATS_FIELD_OFF(bytes)}, + {"err_pkt_len_cnt", UNIC_RQ_STATS_FIELD_OFF(err_pkt_len_cnt)}, + {"doi_cnt", UNIC_RQ_STATS_FIELD_OFF(doi_cnt)}, + {"trunc_cnt", UNIC_RQ_STATS_FIELD_OFF(trunc_cnt)}, + {"multicast", UNIC_RQ_STATS_FIELD_OFF(multicast)}, + {"l2_err", UNIC_RQ_STATS_FIELD_OFF(l2_err)}, + {"l3_l4_csum_err", UNIC_RQ_STATS_FIELD_OFF(l3_l4_csum_err)}, + {"alloc_frag_err", UNIC_RQ_STATS_FIELD_OFF(alloc_frag_err)}, + {"csum_complete", UNIC_RQ_STATS_FIELD_OFF(csum_complete)}, +}; + static int unic_get_dfx_reg_num(struct unic_dev *unic_dev, u32 *reg_num, u32 reg_arr_size) { @@ -295,6 +325,118 @@ void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, kfree(reg_num); } +static u64 *unic_get_queues_stats(struct unic_dev *unic_dev, + const struct unic_stats_desc *stats, + u32 stats_size, enum unic_queue_type type, + u64 *data) +{ + struct unic_channel *c; + u32 i, j; + u8 *q; + + for (i = 0; i < unic_dev->channels.num; i++) { + c = &unic_dev->channels.c[i]; + q = (type == UNIC_QUEUE_TYPE_SQ) ? 
(u8 *)c->sq : (u8 *)c->rq; + for (j = 0; j < stats_size; j++) { + *data = UNIC_STATS_READ(q, stats[j].offset); + data++; + } + } + + return data; +} + +void unic_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u64 *p = data; + + if (unic_resetting(netdev) || !unic_dev->channels.c) { + unic_err(unic_dev, + "dev resetting or channel is null, could not get stats.\n"); + return; + } + + p = unic_get_queues_stats(unic_dev, unic_sq_stats_str, + ARRAY_SIZE(unic_sq_stats_str), + UNIC_QUEUE_TYPE_SQ, p); + + p = unic_get_queues_stats(unic_dev, unic_rq_stats_str, + ARRAY_SIZE(unic_rq_stats_str), + UNIC_QUEUE_TYPE_RQ, p); +} + +static u8 *unic_get_strings(u8 *data, const char *prefix, u32 num, + const struct unic_stats_desc *strs, u32 stats_size) +{ + u32 i, j; + + for (i = 0; i < num; i++) { + for (j = 0; j < stats_size; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + if (prefix) + scnprintf(data, ETH_GSTRING_LEN - 1, "%s%u_%s", + prefix, i, strs[j].desc); + else + scnprintf(data, ETH_GSTRING_LEN - 1, "%s", + strs[j].desc); + + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *unic_get_queues_strings(struct unic_dev *unic_dev, u8 *data) +{ + u32 channel_num = unic_dev->channels.num; + + /* get desc for Tx */ + data = unic_get_strings(data, "txq", channel_num, unic_sq_stats_str, + ARRAY_SIZE(unic_sq_stats_str)); + + /* get desc for Rx */ + data = unic_get_strings(data, "rxq", channel_num, unic_rq_stats_str, + ARRAY_SIZE(unic_rq_stats_str)); + + return data; +} + +void unic_get_stats_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + p = unic_get_queues_strings(unic_dev, p); + break; + default: + break; + } +} + +int unic_get_sset_count(struct net_device *netdev, int stringset) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + u32 channel_num = unic_dev->channels.num; + int count; + + switch (stringset) { + case ETH_SS_STATS: + count = ARRAY_SIZE(unic_sq_stats_str) * channel_num; + count += ARRAY_SIZE(unic_rq_stats_str) * channel_num; + break; + default: + return -EOPNOTSUPP; + } + + return count; +} + static void unic_get_fec_stats_total(struct unic_dev *unic_dev, u8 stats_flags, struct ethtool_fec_stats *fec_stats) { diff --git a/drivers/net/ub/unic/unic_stats.h b/drivers/net/ub/unic/unic_stats.h index 2a2a8746d838..623b732f3d8e 100644 --- a/drivers/net/ub/unic/unic_stats.h +++ b/drivers/net/ub/unic/unic_stats.h @@ -12,6 +12,13 @@ #include #include +#define UNIC_SQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_sq, stats) + \ + offsetof(struct unic_sq_stats, fld)) +#define UNIC_RQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_rq, stats) + \ + offsetof(struct unic_rq_stats, fld)) + +#define UNIC_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) + #define UNIC_FEC_CORR_BLOCKS BIT(0) #define UNIC_FEC_UNCORR_BLOCKS BIT(1) #define UNIC_FEC_CORR_BITS BIT(2) @@ -58,6 +65,11 @@ enum unic_reg_tag { UNIC_TAG_MAX, }; +enum unic_queue_type { + UNIC_QUEUE_TYPE_SQ = 0, + UNIC_QUEUE_TYPE_RQ, +}; + struct unic_res_regs_group { u16 tag; u32 *regs_addr; @@ -83,9 +95,18 @@ struct unic_dfx_regs_group { bool (*is_supported)(struct unic_dev *unic_dev, u32 property); }; +struct unic_stats_desc { + char desc[ETH_GSTRING_LEN]; + u16 offset; +}; + int unic_get_regs_len(struct net_device *netdev); void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, void *data); +void 
unic_get_stats_strings(struct net_device *netdev, u32 stringset, u8 *data); +int unic_get_sset_count(struct net_device *netdev, int stringset); +void unic_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); void unic_get_fec_stats(struct net_device *ndev, struct ethtool_fec_stats *fec_stats); -- Gitee From a07236c507f6b6f8cc6108c9cff048db6a598b67 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 12 Nov 2025 18:21:17 +0800 Subject: [PATCH 096/243] ub: ubase: Fix verification to ctrlq message seq commit e87c0f1d745bd8cea6ec52d5b79c163ba12d4a1d openEuler Data sent from one UE to another UE is accessed through seq, but the value of seq may not be trustworthy, and accessing data indexed by an untrusted seq can cause anomalies. A new check for seq values has been added. Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Yixi Shen Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 13 +++++++++---- drivers/ub/ubase/ubase_ctrlq.h | 1 + drivers/ub/ubase/ubase_dev.c | 5 +++++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index b8d7681dd518..c49f713226c5 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -569,6 +569,11 @@ static int ubase_ctrlq_msg_check(struct ubase_dev *udev, return -EINVAL; } + if (msg->is_resp && msg->need_resp) { + ubase_err(udev, "ctrlq input resp type is invalid.\n"); + return -EINVAL; + } + if (msg->is_resp && !(msg->resp_seq & UBASE_CTRLQ_SEQ_MASK)) { ubase_err(udev, "ctrlq input resp_seq(%u) is invalid.\n", msg->resp_seq); @@ -759,13 +764,13 @@ static void ubase_ctrlq_send_unsupported_resp(struct ubase_dev *udev, msg.resp_seq = seq; msg.resp_ret = EOPNOTSUPP; - ubase_info(udev, "ctrlq received unsupported req. seq=%u, ser_type=%d, ser_ver=%d, opc=%u.", + ubase_info(udev, "ctrlq received unsupported req. seq=%u, ser_type=%d, ser_ver=%d, opc=0x%x.", seq, head->service_type, head->service_ver, head->opcode); ret = __ubase_ctrlq_send(udev, &msg, NULL); if (ret) - ubase_warn(udev, "failed to send ctrlq unsupported resp. seq=%u, ser_type=%d, ser_ver=%d, opc=%u.", - seq, head->service_type, head->service_ver, head->opcode); + ubase_warn(udev, "failed to send ctrlq unsupported resp.
seq=%u, ser_type=%d, ser_ver=%d, opc=0x%x, ret=%d.", + seq, head->service_type, head->service_ver, head->opcode, ret); } static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, @@ -809,7 +814,7 @@ static void ubase_ctrlq_notify_completed(struct ubase_dev *udev, complete(&ctx->done); } -static bool ubase_ctrlq_check_seq(struct ubase_dev *udev, u16 seq) +bool ubase_ctrlq_check_seq(struct ubase_dev *udev, u16 seq) { bool is_pushed = !!(seq & UBASE_CTRLQ_SEQ_MASK); u16 max_seq = ubase_ctrlq_max_seq(udev); diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index 431253df5054..b868ace06ab1 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ b/drivers/ub/ubase/ubase_ctrlq.h @@ -94,6 +94,7 @@ void ubase_ctrlq_disable(struct ubase_dev *udev); int __ubase_ctrlq_send(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, struct ubase_ctrlq_ue_info *ue_info); +bool ubase_ctrlq_check_seq(struct ubase_dev *udev, u16 seq); void ubase_ctrlq_service_task(struct ubase_delay_work *ubase_work); void ubase_ctrlq_handle_crq_msg(struct ubase_dev *udev, struct ubase_ctrlq_base_block *head, diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index fa61159d4295..76d179093abe 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -492,6 +492,11 @@ static int ubase_handle_ue2ue_ctrlq_event(struct ubase_dev *udev, void *data, if (ubase_dev_ctrlq_supported(udev)) return ubase_handle_ue2ue_ctrlq_req(udev, cmd, len); + if (!ubase_ctrlq_check_seq(udev, cmd->seq)) { + ubase_err(udev, "invalid ue2ue ctrlq seq(%u).\n", cmd->seq); + return -EINVAL; + } + head = (struct ubase_ctrlq_base_block *)(cmd + 1); data_len = len - sizeof(*cmd) - UBASE_CTRLQ_HDR_LEN; ubase_ctrlq_handle_crq_msg(udev, head, cmd->seq, -- Gitee From 18ce4ddc14d2ee2e749b486ffe83c21faf88d7e9 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 12 Nov 2025 18:37:23 +0800 Subject: [PATCH 097/243] ub: ubase: Optimization of ubase_ctrlq_send_msg interface parameters commit 0ee149f4767688691a85b306b128d9d84f4232ec openEuler This patch optimizes the parameters of the ubase_ctrlq_send_msg interface by adding an is_async parameter that specifies whether the message is sent asynchronously. Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Guangwei Zhang Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_cmd.h | 3 +- drivers/ub/ubase/ubase_ctrlq.c | 96 +++++++++++++++++++---------- drivers/ub/ubase/ubase_ctrlq.h | 20 ++++++ drivers/ub/ubase/ubase_dev.c | 3 + include/ub/ubase/ubase_comm_ctrlq.h | 3 +- 5 files changed, 91 insertions(+), 34 deletions(-) diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index 263e88c9fa49..a99422b59dc4 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -83,7 +83,8 @@ struct ubase_ue2ue_ctrlq_head { u16 out_size; u8 need_resp : 1; u8 is_resp : 1; - u8 rsv : 6; + u8 is_async : 1; + u8 rsv : 5; }; struct ubase_start_perf_stats_cmd { diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index c49f713226c5..352342ee01dc 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -369,11 +369,13 @@ static void ubase_ctrlq_fill_first_bb(struct ubase_dev *udev, head->service_type = msg->service_type; head->opcode = msg->opcode; head->mbx_ue_id = ue_info ? ue_info->mbx_ue_id : 0; - head->ret = msg->is_resp ? msg->resp_ret : 0; + head->ret = ubase_ctrlq_msg_is_resp(msg) ?
msg->resp_ret : 0; head->bus_ue_id = cpu_to_le16(ue_info ? ue_info->bus_ue_id : ue->entity_idx); - memcpy(head->data, msg->in, min(msg->in_size, UBASE_CTRLQ_DATA_LEN)); + if (msg->in) + memcpy(head->data, msg->in, + min(msg->in_size, UBASE_CTRLQ_DATA_LEN)); } static inline void ubase_ctrlq_csq_report_irq(struct ubase_dev *udev) @@ -408,10 +410,12 @@ static int ubase_ctrlq_send_to_cmdq(struct ubase_dev *udev, ue2ue_head.out_size = msg->out_size; ue2ue_head.need_resp = msg->need_resp; ue2ue_head.is_resp = msg->is_resp; + ue2ue_head.is_async = msg->is_async; memcpy(req, &ue2ue_head, sizeof(ue2ue_head)); memcpy((u8 *)req + sizeof(ue2ue_head), head, UBASE_CTRLQ_HDR_LEN); - memcpy((u8 *)req + sizeof(ue2ue_head) + UBASE_CTRLQ_HDR_LEN, msg->in, - msg->in_size); + if (msg->in) + memcpy((u8 *)req + sizeof(ue2ue_head) + UBASE_CTRLQ_HDR_LEN, + msg->in, msg->in_size); __ubase_fill_inout_buf(&in, UBASE_OPC_UE2UE_UBASE, false, req_len, req); ret = __ubase_cmd_send_in(udev, &in); @@ -541,18 +545,17 @@ static void ubase_ctrlq_addto_msg_queue(struct ubase_dev *udev, u16 seq, { struct ubase_ctrlq_msg_ctx *ctx; - if (!msg->need_resp) + if (!(ubase_ctrlq_msg_is_sync_req(msg) || + ubase_ctrlq_msg_is_async_req(msg))) return; ctx = &udev->ctrlq.msg_queue[seq]; ctx->valid = 1; - ctx->is_sync = msg->need_resp && msg->out && msg->out_size ? 1 : 0; + ctx->is_sync = ubase_ctrlq_msg_is_sync_req(msg) ? 1 : 0; ctx->result = ETIME; ctx->dead_jiffies = jiffies + msecs_to_jiffies(UBASE_CTRLQ_DEAD_TIME); - if (ctx->is_sync) { - ctx->out = msg->out; - ctx->out_size = msg->out_size; - } + ctx->out = msg->out; + ctx->out_size = msg->out_size; if (ue_info) { ctx->ue_seq = ue_info->seq; @@ -564,30 +567,58 @@ static void ubase_ctrlq_addto_msg_queue(struct ubase_dev *udev, u16 seq, static int ubase_ctrlq_msg_check(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg) { - if (!msg || !msg->in || !msg->in_size) { - ubase_err(udev, "ctrlq input buf is invalid.\n"); - return -EINVAL; - } - - if (msg->is_resp && msg->need_resp) { - ubase_err(udev, "ctrlq input resp type is invalid.\n"); + if ((!msg->in && msg->in_size) || (msg->in && !msg->in_size)) { + ubase_err(udev, "ctrlq msg in param error.\n"); return -EINVAL; } - if (msg->is_resp && !(msg->resp_seq & UBASE_CTRLQ_SEQ_MASK)) { - ubase_err(udev, "ctrlq input resp_seq(%u) is invalid.\n", - msg->resp_seq); + if ((!msg->out && msg->out_size) || (msg->out && !msg->out_size)) { + ubase_err(udev, "ctrlq msg out param error.\n"); return -EINVAL; } if (msg->in_size > UBASE_CTRLQ_MAX_DATA_SIZE) { ubase_err(udev, - "requested ctrlq space(%u) exceeds the maximum(%u).\n", + "ctrlq msg in_size(%u) exceeds the maximum(%u).\n", msg->in_size, UBASE_CTRLQ_MAX_DATA_SIZE); return -EINVAL; } - return 0; + if (ubase_ctrlq_msg_is_sync_req(msg)) + return 0; + + if (ubase_ctrlq_msg_is_async_req(msg)) { + if (msg->out) { + ubase_err(udev, "ctrlq msg out is not NULL in async req.\n"); + return -EINVAL; + } + return 0; + } + + if (ubase_ctrlq_msg_is_notify_req(msg)) { + if (msg->out) { + ubase_err(udev, "ctrlq msg out is not NULL in notify req.\n"); + return -EINVAL; + } + return 0; + } + + if (ubase_ctrlq_msg_is_resp(msg)) { + if (msg->out) { + ubase_err(udev, "ctrlq msg out is not NULL in resp.\n"); + return -EINVAL; + } + if (!(msg->resp_seq & UBASE_CTRLQ_SEQ_MASK)) { + ubase_err(udev, "ctrlq msg resp_seq error, resp_seq=%u.\n", + msg->resp_seq); + return -EINVAL; + } + return 0; + } + + ubase_err(udev, "ctrlq msg param error, is_resp=%u, is_async=%u, need_resp=%u.\n", + msg->is_resp, msg->is_async, 
msg->need_resp); + return -EINVAL; } static int ubase_ctrlq_check_csq_enough(struct ubase_dev *udev, u16 num) @@ -612,8 +643,6 @@ static int ubase_ctrlq_send_real(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, struct ubase_ctrlq_ue_info *ue_info) { - bool sync_req = msg->out && msg->out_size && msg->need_resp; - bool no_resp = !msg->is_resp && !msg->need_resp; struct ubase_ctrlq_ring *csq = &udev->ctrlq.csq; struct ubase_ctrlq_base_block head = {0}; u16 seq, num; @@ -627,7 +656,7 @@ static int ubase_ctrlq_send_real(struct ubase_dev *udev, if (ret) goto unlock; - if (!msg->is_resp) { + if (!ubase_ctrlq_msg_is_resp(msg)) { ret = ubase_ctrlq_alloc_seq(udev, msg, &seq); if (ret) { ubase_warn(udev, "no enough seq in ctrlq.\n"); @@ -645,20 +674,22 @@ static int ubase_ctrlq_send_real(struct ubase_dev *udev, ret = ubase_ctrlq_send_msg_to_sq(udev, &head, msg, num); if (ret) { spin_unlock_bh(&csq->lock); - goto free_seq; + if (!ubase_ctrlq_msg_is_resp(msg)) + ubase_ctrlq_free_seq(udev, seq); + return ret; } spin_unlock_bh(&csq->lock); - if (sync_req) + if (ubase_ctrlq_msg_is_sync_req(msg)) ret = ubase_ctrlq_wait_completed(udev, seq, msg); -free_seq: - /* Only the seqs in synchronous requests and no response requests need to be released. */ - /* The seqs are released in periodic tasks of asynchronous requests. */ - if (sync_req || no_resp) + if (ubase_ctrlq_msg_is_sync_req(msg) || + ubase_ctrlq_msg_is_notify_req(msg)) ubase_ctrlq_free_seq(udev, seq); + return ret; + unlock: spin_unlock_bh(&csq->lock); return ret; @@ -809,7 +840,8 @@ static void ubase_ctrlq_notify_completed(struct ubase_dev *udev, ctx = &udev->ctrlq.msg_queue[seq]; ctx->result = head->ret; - memcpy(ctx->out, msg, min(msg_len, ctx->out_size)); + if (ctx->out) + memcpy(ctx->out, msg, min(msg_len, ctx->out_size)); complete(&ctx->done); } diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index b868ace06ab1..c3d5f55db87d 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ b/drivers/ub/ubase/ubase_ctrlq.h @@ -87,6 +87,26 @@ struct ubase_ctrlq_reset_ctrl_req { u8 rsv[3]; }; +static inline bool ubase_ctrlq_msg_is_sync_req(struct ubase_ctrlq_msg *msg) +{ + return !msg->is_resp && !msg->is_async && msg->need_resp; +} + +static inline bool ubase_ctrlq_msg_is_async_req(struct ubase_ctrlq_msg *msg) +{ + return !msg->is_resp && msg->is_async && msg->need_resp; +} + +static inline bool ubase_ctrlq_msg_is_notify_req(struct ubase_ctrlq_msg *msg) +{ + return !msg->is_resp && !msg->is_async && !msg->need_resp; +} + +static inline bool ubase_ctrlq_msg_is_resp(struct ubase_ctrlq_msg *msg) +{ + return msg->is_resp && !msg->is_async && !msg->need_resp; +} + int ubase_ctrlq_init(struct ubase_dev *udev); void ubase_ctrlq_uninit(struct ubase_dev *udev); void ubase_ctrlq_disable(struct ubase_dev *udev); diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 76d179093abe..7331afa03c80 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -459,11 +459,14 @@ static int ubase_handle_ue2ue_ctrlq_req(struct ubase_dev *udev, msg.opcode = head->opcode; msg.need_resp = cmd->need_resp; msg.is_resp = cmd->is_resp; + msg.is_async = cmd->is_async; msg.resp_seq = cmd->seq; msg.in = (u8 *)head + UBASE_CTRLQ_HDR_LEN; msg.in_size = cmd->in_size; msg.out = NULL; msg.out_size = 0; + if (ubase_ctrlq_msg_is_sync_req(&msg)) + msg.is_async = 1; ue_info.bus_ue_id = le16_to_cpu(cmd->head.bus_ue_id); ue_info.seq = cmd->seq; diff --git a/include/ub/ubase/ubase_comm_ctrlq.h b/include/ub/ubase/ubase_comm_ctrlq.h 
index 7e772dcb0746..da3337573b61 100644 --- a/include/ub/ubase/ubase_comm_ctrlq.h +++ b/include/ub/ubase/ubase_comm_ctrlq.h @@ -61,7 +61,8 @@ struct ubase_ctrlq_msg { u8 opcode; u8 need_resp : 1; u8 is_resp : 1; - u8 resv : 6; + u8 is_async : 1; + u8 resv : 5; u8 resp_ret; /* must set when the is_resp field is true. */ u16 resp_seq; /* must set when the is_resp field is true. */ u16 in_size; -- Gitee From 5bd8ee69879ea99dc0b0c91b770f1ed35198576b Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 20 Nov 2025 11:03:26 +0800 Subject: [PATCH 098/243] ub: ubase: Fix CTRLQ white list commit aca32b8e11f4d793529e2d06a7453d9dcbcf2f43 openEuler According to the design requirements for CtrlQ interface compatibility, when Ubase receives a message that it does not support, it needs to return a not-supported response to the peer. Currently, Ubase determines whether a message is supported by checking whether the corresponding callback function has been registered. This leads to an edge case: when an auxiliary driver has not been registered, the corresponding callback function is null, and in this case Ubase also returns not-supported. This causes the peer to mistakenly believe that we do not support the message, and subsequently the peer will no longer send it, which is not in line with the design expectations. Therefore, Ubase needs to distinguish between two scenarios: a message that is supported but whose callback is not yet registered, and a message that is not supported at all. So we add a message whitelist in Ubase. Messages not included in the whitelist return a not-supported response, while whitelisted messages without a registered handler return the specific error code 255, indicating "DRV NOT EXIST." Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Chuan Wu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 202 ++++++++++++++++++++-------- drivers/ub/ubase/ubase_dev.h | 8 +- include/ub/ubase/ubase_comm_ctrlq.h | 23 ++-- 3 files changed, 160 insertions(+), 73 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 352342ee01dc..9351ed2c70fd 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -11,19 +11,102 @@ #include "ubase_cmd.h" #include "ubase_ctrlq.h" -static inline void ubase_ctrlq_crq_table_init(struct ubase_dev *udev) +/* UNIC ctrlq msg white list */ +static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_unic[] = { + {UBASE_CTRLQ_SER_TYPE_IP_ACL, UBASE_CTRLQ_OPC_NOTIFY_IP, NULL, NULL}, +}; + +/* UDMA ctrlq msg white list */ +static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_udma[] = { + {UBASE_CTRLQ_SER_TYPE_TP_ACL, UBASE_CTRLQ_OPC_CHECK_TP_ACTIVE, NULL, NULL}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_SEID, NULL, NULL}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO, NULL, NULL}, +}; + +/* CDMA ctrlq msg white list */ +static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_cdma[] = { + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_SEID, NULL, NULL}, +}; + +static int ubase_ctrlq_alloc_crq_tbl_mem(struct ubase_dev *udev) { struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; + u16 cnt = 0; + + if (ubase_dev_cdma_supported(udev)) { + cnt = ARRAY_SIZE(ubase_ctrlq_wlist_cdma); + } else if (ubase_dev_urma_supported(udev)) { + if (ubase_dev_unic_supported(udev)) + cnt += ARRAY_SIZE(ubase_ctrlq_wlist_unic); + if (ubase_dev_udma_supported(udev)) + cnt +=
ARRAY_SIZE(ubase_ctrlq_wlist_udma); + } + + if (!cnt) + return -EINVAL; + + crq_tab->crq_nbs = kcalloc(cnt, sizeof(struct ubase_ctrlq_event_nb), GFP_KERNEL); + if (!crq_tab->crq_nbs) + return -ENOMEM; + + crq_tab->crq_nb_cnt = cnt; + + return 0; +} + +static void ubase_ctrlq_free_crq_tbl_mem(struct ubase_dev *udev) +{ + struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; + + kfree(crq_tab->crq_nbs); + crq_tab->crq_nbs = NULL; + crq_tab->crq_nb_cnt = 0; +} + +static void ubase_ctrlq_init_crq_wlist(struct ubase_dev *udev) +{ + struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; + u32 offset = 0; + + if (ubase_dev_cdma_supported(udev)) { + memcpy(crq_tab->crq_nbs, ubase_ctrlq_wlist_cdma, + sizeof(ubase_ctrlq_wlist_cdma)); + } else if (ubase_dev_urma_supported(udev)) { + if (ubase_dev_unic_supported(udev)) { + memcpy(crq_tab->crq_nbs, ubase_ctrlq_wlist_unic, + sizeof(ubase_ctrlq_wlist_unic)); + offset = ARRAY_SIZE(ubase_ctrlq_wlist_unic); + } + if (ubase_dev_udma_supported(udev)) { + memcpy(&crq_tab->crq_nbs[offset], ubase_ctrlq_wlist_udma, + sizeof(ubase_ctrlq_wlist_udma)); + } + } +} + +static int ubase_ctrlq_crq_table_init(struct ubase_dev *udev) +{ + struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; + int ret; + + ret = ubase_ctrlq_alloc_crq_tbl_mem(udev); + if (ret) + return ret; + + ubase_ctrlq_init_crq_wlist(udev); mutex_init(&crq_tab->lock); - INIT_LIST_HEAD(&crq_tab->crq_nbs.list); + + return 0; } -static inline void ubase_ctrlq_crq_table_uninit(struct ubase_dev *udev) +static void ubase_ctrlq_crq_table_uninit(struct ubase_dev *udev) { struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; mutex_destroy(&crq_tab->lock); + + ubase_ctrlq_free_crq_tbl_mem(udev); } static inline u16 ubase_ctrlq_msg_queue_depth(struct ubase_dev *udev) @@ -233,10 +316,15 @@ int ubase_ctrlq_init(struct ubase_dev *udev) if (ret) goto err_msg_queue_init; + ret = ubase_ctrlq_crq_table_init(udev); + if (ret) + goto err_crq_table_init; + udev->ctrlq.csq_next_seq = 1; atomic_set(&udev->ctrlq.req_cnt, 0); - ubase_ctrlq_crq_table_init(udev); +err_crq_table_init: + ubase_ctrlq_msg_queue_uninit(udev); success: set_bit(UBASE_CTRLQ_STATE_ENABLE, &udev->ctrlq.state); return 0; @@ -780,8 +868,7 @@ static void ubase_ctrlq_read_msg_data(struct ubase_dev *udev, u8 num, u8 *msg) static void ubase_ctrlq_send_unsupported_resp(struct ubase_dev *udev, struct ubase_ctrlq_base_block *head, - void *msg_data, u16 msg_data_len, - u16 seq) + u16 resp_seq, u8 resp_ret) { struct ubase_ctrlq_msg msg = {0}; int ret; @@ -789,19 +876,14 @@ static void ubase_ctrlq_send_unsupported_resp(struct ubase_dev *udev, msg.service_ver = head->service_ver; msg.service_type = head->service_type; msg.opcode = head->opcode; - msg.in_size = msg_data_len; - msg.in = msg_data; msg.is_resp = 1; - msg.resp_seq = seq; - msg.resp_ret = EOPNOTSUPP; - - ubase_info(udev, "ctrlq received unsupported req. seq=%u, ser_type=%d, ser_ver=%d, opc=0x%x.", - seq, head->service_type, head->service_ver, head->opcode); + msg.resp_seq = resp_seq; + msg.resp_ret = resp_ret; ret = __ubase_ctrlq_send(udev, &msg, NULL); if (ret) - ubase_warn(udev, "failed to send ctrlq unsupported resp. 
seq=%u, ser_type=%d, ser_ver=%d, opc=0x%x, ret=%d.", - seq, head->service_type, head->service_ver, head->opcode, ret); + ubase_warn(udev, "failed to send ctrlq unsupport resp, ret=%d.", + ret); } static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, @@ -809,27 +891,43 @@ static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, void *msg_data, u16 msg_data_len, u16 seq) { +#define EDRVNOEXIST 255 struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; - struct ubase_ctrlq_crq_event_nbs *nbs; - int ret = -EOPNOTSUPP; + int ret = -ENOENT; + u32 i; + + ubase_info(udev, + "ctrlq recv notice req: seq=%u, ser_type=%u, ser_ver=%u, opc=0x%x.", + seq, head->service_type, head->service_ver, head->opcode); mutex_lock(&crq_tab->lock); - list_for_each_entry(nbs, &crq_tab->crq_nbs.list, list) { - if (nbs->crq_nb.service_type == head->service_type && - nbs->crq_nb.opcode == head->opcode) { - ret = nbs->crq_nb.crq_handler(nbs->crq_nb.back, - head->service_ver, - msg_data, - msg_data_len, - seq); + for (i = 0; i < crq_tab->crq_nb_cnt; i++) { + if (crq_tab->crq_nbs[i].service_type == head->service_type && + crq_tab->crq_nbs[i].opcode == head->opcode) { + if (!crq_tab->crq_nbs[i].crq_handler) { + ret = -EDRVNOEXIST; + break; + } + ret = crq_tab->crq_nbs[i].crq_handler(crq_tab->crq_nbs[i].back, + head->service_ver, + msg_data, + msg_data_len, + seq); break; } } mutex_unlock(&crq_tab->lock); - if (ret == -EOPNOTSUPP) - ubase_ctrlq_send_unsupported_resp(udev, head, msg_data, - msg_data_len, seq); + if (ret == -ENOENT) { + ubase_info(udev, "this notice is not supported."); + ubase_ctrlq_send_unsupported_resp(udev, head, seq, EOPNOTSUPP); + } else if (ret == -EOPNOTSUPP) { + ubase_info(udev, "the notice processor return not support."); + ubase_ctrlq_send_unsupported_resp(udev, head, seq, EOPNOTSUPP); + } else if (ret == -EDRVNOEXIST) { + ubase_info(udev, "the notice processor is unregistered."); + ubase_ctrlq_send_unsupported_resp(udev, head, seq, EDRVNOEXIST); + } } static void ubase_ctrlq_notify_completed(struct ubase_dev *udev, @@ -1064,10 +1162,10 @@ void ubase_ctrlq_clean_service_task(struct ubase_delay_work *ubase_work) int ubase_ctrlq_register_crq_event(struct auxiliary_device *aux_dev, struct ubase_ctrlq_event_nb *nb) { - struct ubase_ctrlq_crq_event_nbs *nbs, *tmp, *new_nbs; struct ubase_ctrlq_crq_table *crq_tab; struct ubase_dev *udev; - int ret; + int ret = -ENOENT; + u32 i; if (!aux_dev || !nb || !nb->crq_handler) return -EINVAL; @@ -1075,31 +1173,21 @@ int ubase_ctrlq_register_crq_event(struct auxiliary_device *aux_dev, udev = __ubase_get_udev_by_adev(aux_dev); crq_tab = &udev->ctrlq.crq_table; mutex_lock(&crq_tab->lock); - list_for_each_entry_safe(nbs, tmp, &crq_tab->crq_nbs.list, list) { - if (nbs->crq_nb.service_type == nb->service_type && - nbs->crq_nb.opcode == nb->opcode) { - ret = -EEXIST; - goto err_crq_register; + for (i = 0; i < crq_tab->crq_nb_cnt; i++) { + if (crq_tab->crq_nbs[i].service_type == nb->service_type && + crq_tab->crq_nbs[i].opcode == nb->opcode) { + if (crq_tab->crq_nbs[i].crq_handler) { + ret = -EEXIST; + break; + } + crq_tab->crq_nbs[i].back = nb->back; + crq_tab->crq_nbs[i].crq_handler = nb->crq_handler; + ret = 0; + break; } } - new_nbs = kzalloc(sizeof(*new_nbs), GFP_KERNEL); - if (!new_nbs) { - ret = -ENOMEM; - goto err_crq_register; - } - - new_nbs->crq_nb = *nb; - list_add_tail(&new_nbs->list, &crq_tab->crq_nbs.list); - mutex_unlock(&crq_tab->lock); - - return 0; - -err_crq_register: mutex_unlock(&crq_tab->lock); - ubase_err(udev, - 
"failed to register ctrlq crq event, opcode = 0x%x, service_type = 0x%x, ret = %d.\n", - nb->opcode, nb->service_type, ret); return ret; } @@ -1108,9 +1196,9 @@ EXPORT_SYMBOL(ubase_ctrlq_register_crq_event); void ubase_ctrlq_unregister_crq_event(struct auxiliary_device *aux_dev, u8 service_type, u8 opcode) { - struct ubase_ctrlq_crq_event_nbs *nbs, *tmp; struct ubase_ctrlq_crq_table *crq_tab; struct ubase_dev *udev; + u32 i; if (!aux_dev) return; @@ -1118,11 +1206,11 @@ void ubase_ctrlq_unregister_crq_event(struct auxiliary_device *aux_dev, udev = __ubase_get_udev_by_adev(aux_dev); crq_tab = &udev->ctrlq.crq_table; mutex_lock(&crq_tab->lock); - list_for_each_entry_safe(nbs, tmp, &crq_tab->crq_nbs.list, list) { - if (nbs->crq_nb.service_type == service_type && - nbs->crq_nb.opcode == opcode) { - list_del(&nbs->list); - kfree(nbs); + for (i = 0; i < crq_tab->crq_nb_cnt; i++) { + if (crq_tab->crq_nbs[i].service_type == service_type && + crq_tab->crq_nbs[i].opcode == opcode) { + crq_tab->crq_nbs[i].back = NULL; + crq_tab->crq_nbs[i].crq_handler = NULL; break; } } diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index c8ccd5bd107a..8d21ff16ef19 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -178,15 +178,11 @@ struct ubase_ctrlq_msg_ctx { struct completion done; }; -struct ubase_ctrlq_crq_event_nbs { - struct list_head list; - struct ubase_ctrlq_event_nb crq_nb; -}; - struct ubase_ctrlq_crq_table { struct mutex lock; unsigned long last_crq_scheduled; - struct ubase_ctrlq_crq_event_nbs crq_nbs; + u16 crq_nb_cnt; + struct ubase_ctrlq_event_nb *crq_nbs; }; struct ubase_ctrlq { diff --git a/include/ub/ubase/ubase_comm_ctrlq.h b/include/ub/ubase/ubase_comm_ctrlq.h index da3337573b61..b1ee65524052 100644 --- a/include/ub/ubase/ubase_comm_ctrlq.h +++ b/include/ub/ubase/ubase_comm_ctrlq.h @@ -28,16 +28,17 @@ enum ubase_ctrlq_ser_ver { }; enum ubase_ctrlq_ser_type { - UBASE_CTRLQ_SER_TYPE_TP_ACL = 0x01, - UBASE_CTRLQ_SER_TYPE_DEV_REGISTER = 0x02, - UBASE_CTRLQ_SER_TYPE_IP_ACL = 0x03, - UBASE_CTRLQ_SER_TYPE_QOS = 0x04, + UBASE_CTRLQ_SER_TYPE_TP_ACL = 0x01, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER = 0x02, + UBASE_CTRLQ_SER_TYPE_IP_ACL = 0x03, + UBASE_CTRLQ_SER_TYPE_QOS = 0x04, }; -enum ubase_ctrlq_opc_type { +enum ubase_ctrlq_opc_type_tp { UBASE_CTRLQ_OPC_CREATE_TP = 0x11, UBASE_CTRLQ_OPC_DESTROY_TP = 0x12, UBASE_CTRLQ_OPC_TP_FLUSH_DONE = 0x14, + UBASE_CTRLQ_OPC_CHECK_TP_ACTIVE = 0x15, }; enum ubase_ctrlq_opc_type_qos { @@ -45,14 +46,16 @@ enum ubase_ctrlq_opc_type_qos { UBASE_CTRLQ_OPC_QUERY_SL = 0x02, }; -enum ubase_ctrlq_opc_ip { - UBASE_CTRLQ_OPC_NOTIFY_IP = 0x01, - UBASE_CTRLQ_OPC_QUERY_IP = 0x02, +enum ubase_ctrlq_opc_type_ip { + UBASE_CTRLQ_OPC_NOTIFY_IP = 0x01, + UBASE_CTRLQ_OPC_QUERY_IP = 0x02, }; enum ubase_ctrlq_opc_type_dev_register { - UBASE_CTRLQ_OPC_CTRLQ_CTRL = 0x14, - UBASE_CTRLQ_OPC_UE_RESET_CTRL = 0x15, + UBASE_CTRLQ_OPC_UPDATE_SEID = 0x02, + UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO = 0x13, + UBASE_CTRLQ_OPC_CTRLQ_CTRL = 0x14, + UBASE_CTRLQ_OPC_UE_RESET_CTRL = 0x15, }; struct ubase_ctrlq_msg { -- Gitee From ab244ec24ac8fd4e0e4753efd17bdd440489ecc4 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 13 Nov 2025 11:20:06 +0800 Subject: [PATCH 099/243] net: unic: Fix configure coal parameters without deactivate commit 300514e725420aa5309724a6f8e5f43845602ac3 openEuler When activating/deactivating, it is necessary to open/close the protocol stack with link. 
When configuring parameters, the network interface must be disabled before making changes. Previously, a flag was used to ensure that the two operations did not run concurrently. After analysis, the UNIC_ACTIVATE_FLAG check in the current code is redundant and can be removed. Fixes: e0ccc63cc72e ("net: unic: support querying and configuring coalesce parameters.") Signed-off-by: Zhenyu Wan Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_ethtool.c | 15 +--------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 960477451df5..f2cfa3df1126 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -276,12 +276,6 @@ static int unic_set_coalesce(struct net_device *netdev, struct unic_coalesce old_tx_coal, old_rx_coal; int ret, ret1; - if (test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) { - unic_err(unic_dev, - "failed to set coalesce, due to dev deacitve.\n"); - return -EBUSY; - } - if (unic_resetting(netdev)) return -EBUSY; @@ -298,7 +292,6 @@ static int unic_set_coalesce(struct net_device *netdev, tx_coal->int_ql = cmd->tx_max_coalesced_frames; rx_coal->int_ql = cmd->rx_max_coalesced_frames; - unic_net_stop_no_link_change(netdev); unic_uninit_channels(unic_dev); ret = unic_init_channels(unic_dev, unic_dev->channels.num); @@ -307,18 +300,12 @@ static int unic_set_coalesce(struct net_device *netdev, memcpy(tx_coal, &old_tx_coal, sizeof(struct unic_coalesce)); memcpy(rx_coal, &old_rx_coal, sizeof(struct unic_coalesce)); ret1 = unic_init_channels(unic_dev, unic_dev->channels.num); - if (ret1) { + if (ret1) unic_err(unic_dev, "failed to recover old channels, ret = %d.\n", ret1); - return ret; - } } - ret1 = unic_net_open_no_link_change(netdev); - if (ret1) - unic_err(unic_dev, "failed to set net open, ret = %d.\n", ret1); - return ret; } -- Gitee From 1570158d0886d19d9788f45e40ebb8706f8f3ec8 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 13 Nov 2025 17:52:07 +0800 Subject: [PATCH 100/243] ub: ubase: Fix link status timestamp information commit f348a2d07557f2bd87d672f1c006d68436a4d179 openEuler In the current version, the timestamp recorded in the link status record changes with the system time because the TIME is obtained through the ktime_get_real_seconds function when printing the link status record. This patch fixes the issue by changing the value of TIME to the historical moment recorded in the stats array. Fixes: 58daddb6f308 ("net: unic: support subscribes to the RX stream stop and recovery interface.") Signed-off-by: Xuanyu Pu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 0a6dbdaffedc..f58ed6f7567a 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -222,7 +222,7 @@ static int unic_dbg_query_link_record(struct seq_file *s, void *data) total--; idx = total % LINK_STAT_MAX_IDX; seq_printf(s, "\t%-2d\t", cnt); - ubase_dbg_format_time(ktime_get_real_seconds(), s); + ubase_dbg_format_time(record->stats[idx].link_tv_sec, s); seq_printf(s, "\t%s\n", record->stats[idx].link_status ?
"LINK UP" : "LINK DOWN"); cnt++; -- Gitee From fd494d8d4ae9b53611c8f1efbe337267582961f2 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Thu, 13 Nov 2025 15:08:53 +0800 Subject: [PATCH 101/243] net: unic: Fix missing vport_ctx and vport_buf information query in debugfs commit 0bc3e46d0f2d581c68f2825235a8ab10f92c71a9 openEuler Added vport_ctx and vport_buf queries in debugfs to facilitate viewing the related configuration information of the PF. Fixes: 4420dfeed31d ("net: unic: Support to query and clear historical NIC link status information") Signed-off-by: Xiongchuan Zhou Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 73 ++++++++++++++++++++++ drivers/net/ub/unic/debugfs/unic_debugfs.h | 1 + drivers/net/ub/unic/unic_cmd.h | 8 +++ drivers/net/ub/unic/unic_hw.c | 23 +++++++ drivers/net/ub/unic/unic_hw.h | 2 + include/ub/ubase/ubase_comm_cmd.h | 1 + 6 files changed, 108 insertions(+) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index f58ed6f7567a..4c7bf3bb83fc 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -39,6 +39,61 @@ static int unic_dbg_dump_dev_info(struct seq_file *s, void *data) return 0; } +static int unic_dbg_dump_vport_buf(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + + seq_printf(s, "vport buffer num: %u\n", unic_dev->caps.vport_buf_num); + seq_printf(s, "vport buffer size: %u\n", unic_dev->caps.vport_buf_size); + return 0; +} + +static void unic_dbg_fill_vport_ctx_content(struct unic_vport_ctx_cmd *resp, + struct seq_file *s) +{ + u32 i, j; + + for (i = 0; i < UNIC_VORT_CTX_DATA_NUM; i += UNIC_VORT_CTX_DATA_ALIGN) { + seq_printf(s, "%08X: ", i * UNIC_VORT_CTX_DATA_ALIGN); + for (j = 0; j < UNIC_VORT_CTX_DATA_ALIGN; j++) { + if ((i + j) == UNIC_VORT_CTX_DATA_NUM) + break; + seq_printf(s, "%08X ", resp->data[i + j]); + } + seq_puts(s, "\n"); + } +} + +static int unic_dbg_query_vport_ctx(struct seq_file *s) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_vport_ctx_cmd resp; + u16 offset = 0; + int ret; + + do { + memset(&resp, 0, sizeof(resp)); + ret = unic_query_vport_ctx(unic_dev, offset, &resp); + if (ret) + return ret; + offset = resp.offset; + + unic_dbg_fill_vport_ctx_content(&resp, s); + } while (resp.offset); + + return 0; +} + +static int unic_dbg_dump_vport_ctx(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + return unic_dbg_query_vport_ctx(s); +} + static const struct unic_dbg_cap_bit_info { const char *format; bool (*get_bit)(struct unic_dev *dev); @@ -265,6 +320,10 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { .name = "context", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, .support = unic_dbg_dentry_support, + }, { + .name = "vport", + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .support = unic_dbg_dentry_support, }, { .name = "qos", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, @@ -328,6 +387,20 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dev_info, + }, { + .name = "vport_buf", + .dentry_index = UNIC_DBG_DENTRY_VPORT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_vport_buf, + }, { +
.name = "vport_ctx", + .dentry_index = UNIC_DBG_DENTRY_VPORT, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_vport_ctx, }, { .name = "caps_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 73a75b091b4b..853597b90f45 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -15,6 +15,7 @@ enum unic_dbg_dentry_type { UNIC_DBG_DENTRY_IP = 0, UNIC_DBG_DENTRY_CONTEXT, + UNIC_DBG_DENTRY_VPORT, UNIC_DBG_DENTRY_QOS, /* must be the last entry. */ UNIC_DBG_DENTRY_ROOT diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index 125802234e6b..ac571815be6a 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -109,6 +109,14 @@ struct unic_cfg_vport_buf_cmd { __le32 buf_addr[UNIC_MAX_VPORT_BUF_NUM * U32S_PER_U64]; }; +#define UNIC_VORT_CTX_DATA_NUM 13 +#define UNIC_VORT_CTX_DATA_ALIGN 4 +struct unic_vport_ctx_cmd { + u8 resv[2]; + __le16 offset; + __le32 data[UNIC_VORT_CTX_DATA_NUM]; +}; + struct unic_cfg_fec_cmd { __le32 fec_mode; u8 rsv[20]; diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index be606bfb6495..565ac56fb638 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -674,6 +674,29 @@ int unic_cfg_vport_buf(struct unic_dev *unic_dev, bool init) return ret; } +int unic_query_vport_ctx(struct unic_dev *unic_dev, u16 offset, + struct unic_vport_ctx_cmd *resp) +{ + struct unic_vport_ctx_cmd req; + struct ubase_cmd_buf in, out; + int ret; + + memset(&req, 0, sizeof(req)); + req.offset = cpu_to_le16(offset); + + ubase_fill_inout_buf(&in, UBASE_OPC_VPORT_CTX, true, + sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_VPORT_CTX, true, + sizeof(*resp), resp); + + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) + unic_err(unic_dev, + "failed to query vport ctx, offset = %u, ret = %d.\n", + offset, ret); + return ret; +} + int unic_set_fec_mode(struct unic_dev *unic_dev, u32 fec_mode) { struct unic_cfg_fec_cmd req = {0}; diff --git a/drivers/net/ub/unic/unic_hw.h b/drivers/net/ub/unic/unic_hw.h index cfd46b6eadf4..ba64d398e44b 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -95,6 +95,8 @@ int unic_set_promisc_mode(struct unic_dev *unic_dev, struct unic_promisc_en *promisc_en); int unic_cfg_vport_buf(struct unic_dev *unic_dev, bool init); +int unic_query_vport_ctx(struct unic_dev *unic_dev, u16 offset, + struct unic_vport_ctx_cmd *resp); int unic_set_fec_mode(struct unic_dev *unic_dev, u32 fec_mode); int unic_update_fec_stats(struct unic_dev *unic_dev); int unic_set_rss_tc_mode(struct unic_dev *unic_dev, u8 tc_vaild); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 492b5e8513ea..b0f97fb6cba7 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -102,6 +102,7 @@ enum ubase_opcode_type { UBASE_OPC_NOTIFY_UE_RESET = 0xF006, UBASE_OPC_QUERY_UE_RST_RDY = 0xF007, UBASE_OPC_RESET_DONE = 0xF008, + UBASE_OPC_VPORT_CTX = 0xF009, UBASE_OPC_DESTROY_CTX_RESOURCE = 0xF00D, UBASE_OPC_UE2UE_UBASE = 0xF00E, UBASE_OPC_ACTIVATE_REQ = 0xF00F, -- Gitee From ba56a0c7da8911e63a596a95ab5aeac64abc4291 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 20 Nov 2025 11:29:44 +0800 Subject: [PATCH 102/243] ub: ubase: flush the work 
queue. commit b056b8147b7551de6c6a1652032f55d0b5f51a20 openEuler Fix an error that occurs when the ctrlq crq work queue accesses the ctrlq memory during the ELR reset. Wait for the work queues to complete in the suspend phase to ensure that no work item is executed during the reset. Fixes: 727362c0978c ("ub: ubase: Support for ELR and entity reset.") Signed-off-by: ShiLong Signed-off-by: Jianqiang Li Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_dev.c | 8 ++++++++ drivers/ub/ubase/ubase_dev.h | 2 ++ drivers/ub/ubase/ubase_reset.c | 1 + 3 files changed, 11 insertions(+) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 7331afa03c80..9ea0d14a4d39 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -1317,6 +1317,14 @@ int ubase_deactivate_handler(struct ubase_dev *udev, u32 bus_ue_id) return ubase_send_activate_dev_req(udev, false, (u16)bus_ue_id); } +void ubase_flush_workqueue(struct ubase_dev *udev) +{ + flush_workqueue(udev->ubase_wq); + flush_workqueue(udev->ubase_async_wq); + flush_workqueue(udev->ubase_period_wq); + flush_workqueue(udev->ubase_arq_wq); +} + int ubase_activate_dev(struct auxiliary_device *adev) { struct ubase_dev *udev; diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index 8d21ff16ef19..45605409de6b 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -451,4 +451,6 @@ void ubase_virt_handler(struct ubase_dev *udev, u16 bus_ue_id, bool is_en); int ubase_activate_handler(struct ubase_dev *udev, u32 bus_ue_id); int ubase_deactivate_handler(struct ubase_dev *udev, u32 bus_ue_id); +void ubase_flush_workqueue(struct ubase_dev *udev); + #endif diff --git a/drivers/ub/ubase/ubase_reset.c b/drivers/ub/ubase/ubase_reset.c index bb1c281c02d3..7da51b021026 100644 --- a/drivers/ub/ubase/ubase_reset.c +++ b/drivers/ub/ubase/ubase_reset.c @@ -229,6 +229,7 @@ void ubase_suspend(struct ubase_dev *udev) ubase_ctrlq_disable_remote(udev); ubase_ctrlq_disable(udev); ubase_irq_table_free(udev); + ubase_flush_workqueue(udev); } void ubase_resume(struct ubase_dev *udev) -- Gitee From 99b1a6d0b34fd45527caa06d782b1c348ec61954 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 14 Nov 2025 15:55:34 +0800 Subject: [PATCH 103/243] ub: ubase: Fix the loss of HIMAC reset. commit 6006cbfe442fd607d4042a4d6d82f8239d93aae4 openEuler Added support for HIMAC reset, including state detection, reset command sending, and debug information output. The modifications include: Added HIMAC reset handling logic to trigger reset operations when HIMAC anomalies are detected. Included HIMAC reset count in debug information for tracking and analysis. Defined a new command code UBASE_OPC_HIMAC_RESET for sending HIMAC reset commands. These changes enable comprehensive support for HIMAC reset, covering state management, command execution, and debug output.
Fixes: 727362c0978c ("ub: ubase: Support for ELR and entity reset.") Signed-off-by: Xu Wang Signed-off-by: Xiongchuan Zhou Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 1 + drivers/ub/ubase/ubase_err_handle.c | 20 ++++++++++++++++++++ include/ub/ubase/ubase_comm_cmd.h | 1 + 3 files changed, 22 insertions(+) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index bf49fc3fdc93..21fe4d1fa9b6 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -24,6 +24,7 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data) seq_printf(s, "ELR reset count: %u\n", udev->reset_stat.elr_reset_cnt); seq_printf(s, "port reset count: %u\n", udev->reset_stat.port_reset_cnt); + seq_printf(s, "himac reset count: %u\n", udev->reset_stat.himac_reset_cnt); seq_printf(s, "reset done count: %u\n", udev->reset_stat.reset_done_cnt); seq_printf(s, "HW reset done count: %u\n", udev->reset_stat.hw_reset_done_cnt); seq_printf(s, "reset fail count: %u\n", udev->reset_stat.reset_fail_cnt); diff --git a/drivers/ub/ubase/ubase_err_handle.c b/drivers/ub/ubase/ubase_err_handle.c index d91ab0f80b6b..179e59acec0a 100644 --- a/drivers/ub/ubase/ubase_err_handle.c +++ b/drivers/ub/ubase/ubase_err_handle.c @@ -9,6 +9,19 @@ #include "ubase_reset.h" #include "ubase_err_handle.h" +static void ubase_notify_himac_reset(struct ubase_dev *udev) +{ + struct ubase_cmd_buf in; + int ret; + + __ubase_fill_inout_buf(&in, UBASE_OPC_HIMAC_RESET, false, 0, NULL); + + ret = __ubase_cmd_send_in(udev, &in); + if (ret) + ubase_err(udev, + "failed to send himac reset cmd, ret = %d.\n", ret); +} + void ubase_errhandle_service_task(struct ubase_delay_work *ubase_work) { struct ubase_dev *udev; @@ -23,6 +36,13 @@ void ubase_errhandle_service_task(struct ubase_delay_work *ubase_work) return; } + if (test_and_clear_bit(UBASE_STATE_HIMAC_RESETTING_B, &udev->state_bits)) { + ubase_err(udev, "ras occurred, ubase need to reset himac.\n"); + ubase_notify_himac_reset(udev); + udev->reset_stat.himac_reset_cnt++; + return; + } + if (test_and_clear_bit(UBASE_STATE_PORT_RESETTING_B, &udev->state_bits)) { ubase_err(udev, "ras occurred, ubase need to reset port.\n"); ubase_port_reset(udev); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index b0f97fb6cba7..7ae969c897b9 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -87,6 +87,7 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_PORT_INFO = 0x6200, UBASE_OPC_QUERY_CHIP_INFO = 0x6201, UBASE_OPC_QUERY_FEC_STATS = 0x6202, + UBASE_OPC_HIMAC_RESET = 0x6302, /* Mailbox commands */ UBASE_OPC_POST_MB = 0x7000, -- Gitee From b8260e7d15f767dd99e1473ded7489651f4cbc19 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 20 Oct 2025 16:32:16 +0800 Subject: [PATCH 104/243] net: ubl: Add a doc for ubl module commit 5581b34fafcb3d4c4d1e98b6bf6a543ca47db91b openEuler Add a doc to describe the overview and API of ubl module in Documentation/networking/ub/ubl.rst. 
Signed-off-by: Yunsheng Lin Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/networking/index.rst | 1 + Documentation/networking/ub/index.rst | 14 ++++++ Documentation/networking/ub/ubl.rst | 69 +++++++++++++++++++++++++++ drivers/net/ub/dev/ubl.c | 17 ++++--- 4 files changed, 92 insertions(+), 9 deletions(-) create mode 100644 Documentation/networking/ub/index.rst create mode 100644 Documentation/networking/ub/ubl.rst diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 5b75c3f7a137..366e402b798d 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -114,6 +114,7 @@ Contents: tproxy tuntap udplite + ub/index vrf vxlan x25 diff --git a/Documentation/networking/ub/index.rst b/Documentation/networking/ub/index.rst new file mode 100644 index 000000000000..10ee2632f14b --- /dev/null +++ b/Documentation/networking/ub/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============================== +The UB Networking documentation +=============================== + +.. toctree:: + :maxdepth: 1 + + UB Link Layer diff --git a/Documentation/networking/ub/ubl.rst b/Documentation/networking/ub/ubl.rst new file mode 100644 index 000000000000..67abc16ba052 --- /dev/null +++ b/Documentation/networking/ub/ubl.rst @@ -0,0 +1,69 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============= +UB Link Layer +============= + +Overview +======== +The ubl module implements core UB (UnifiedBus) networking functionality that +serves as the foundation for all UB networking device drivers. This module +provides essential UB link protocol handling, device setup utilities, and +standard operations that are shared across different UB networking hardware +implementations. + +UB is a new interconnection protocol that defines its own Layer 2 protocol +when integrating into the networking stack for the Linux kernel, see more +in the UB spec . + +The format of a complete UB packet is as follows: +UB Link header (UBL HDR) includes UB LINK, CC and NPI. +UB Network header consists of CC, NPI, and traditional network packet headers. + +.. code-block:: none + + <-------- UBL HDR -----------> + +--------------+------+------+---------+----------+ + | UB LINK | CC | NPI | Network | Payload | + +--------------+------+------+---------+----------+ + <------ UB Network -----> + + UB LINK: Data link layer defined by UB protocol. + CC: Congestion Control. + NPI: Network Partition Identifier. + Network: Traditional L3 header, like IPv4, IPv6 or the network control header + defined in UB. + +What the ubl module sees is as follows. As the 'cfg' field is carried through a +BD (Buffer Description) for the hw to construct UB LINK, the 'sw_ctype' field is +used in the ubl module instead; it corresponds to the 'cfg' field defined in +UB LINK, indicating which kind of network packet is encapsulated. + +.. kernel-doc:: include/net/ub/ubl.h + :identifiers: ublhdr + +API interface +============= +Before registering `struct net_device` to the networking stack, a UB networking +driver is supposed to allocate and set up a `struct net_device` by calling +alloc_ubldev_mqs(). + +Before passing a skb to the driver for sending, the networking stack will insert +the necessary UB link layer header by calling ubl_create_header() through the +create ops in struct header_ops. + +Also, the driver is supposed to call ubl_type_trans() to set up the skb +correctly when it receives a packet.
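+
+A minimal sketch of the calling convention is shown below; the private
+structure, the queue counts and the receive path are illustrative only and
+are not taken from a real driver:
+
+.. code-block:: c
+
+	struct example_priv {
+		/* driver-private state */
+	};
+
+	/* allocate and set up a ubl netdev with 4 TX and 4 RX queues */
+	dev = alloc_ubldev_mqs(sizeof(struct example_priv), 4, 4);
+	if (!dev)
+		return -ENOMEM;
+
+	/* on receive, derive skb->protocol and insert sw_ptype before
+	 * handing the skb to the networking stack
+	 */
+	skb->protocol = ubl_type_trans(skb, dev, type);
+	netif_rx(skb);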
+
+.. kernel-doc:: drivers/net/ub/dev/ubl.c
+   :identifiers: alloc_ubldev_mqs ubl_create_header ubl_type_trans
+
+An example of using the above API is the unic driver; see more details in
+:ref:`Documentation/networking/ub/unic.rst`
+
+Technical Discussion
+====================
+If there is any technical question about UB link layer, please start a technical
+discussion by sending a mail to . diff --git a/drivers/net/ub/dev/ubl.c b/drivers/net/ub/dev/ubl.c index b3637476b1fb..453e4a1c9ff5 100644 --- a/drivers/net/ub/dev/ubl.c +++ b/drivers/net/ub/dev/ubl.c @@ -116,7 +116,7 @@ static const struct header_ops ubl_header_ops ____cacheline_aligned = { }; /** - * ubl_setup - setup ub link network device + * ubl_setup - set up a ubl netdev * @dev: network device * * Fill in the fields of the device structure with ubl-generic values. @@ -138,18 +138,17 @@ void ubl_setup(struct net_device *dev) EXPORT_SYMBOL(ubl_setup); /** - * alloc_ubldev_mqs - Allocates and sets up a ub-link device + * alloc_ubldev_mqs - allocate and set up a ubl netdev * @sizeof_priv: Size of additional driver-private structure to be allocated - * for this ubl device + * for this ubl netdev * @txqs: The number of TX queues this device has. * @rxqs: The number of RX queues this device has. * * Fill in the fields of the device structure with ubl-generic * values. Basically done everything except registering the device. * - * Constructs a new net device, completing with a private data area of - * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for - * this private data area. + * Allocate a new net device, with a private data area of size (sizeof_priv). + * A 32-byte alignment is enforced for this private data area. */ struct net_device *alloc_ubldev_mqs(int sizeof_priv, unsigned int txqs, @@ -161,13 +160,13 @@ EXPORT_SYMBOL(alloc_ubldev_mqs); /** - * ubl_type_trans - obtains skb->protocol and adds sw_ptype to the packet + * ubl_type_trans - obtain packet type and insert sw_ptype to the packet header * @skb: buffer to alter * @dev: source device * @type: packet type * - * Obtains the packet type and translates it to skb->protocol and adds sw_ptype - * to the packet data. + * Obtain the packet type, translate it to skb->protocol and insert sw_ptype + * to the packet header. */ __be16 ubl_type_trans(struct sk_buff *skb, struct net_device *dev, u8 type) { -- Gitee From 3803dc291308565815b62017cd5af826b8fe96f8 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 20 Oct 2025 16:34:33 +0800 Subject: [PATCH 105/243] net: unic: Add a doc for unic driver commit 22011d4678c0cbd6a7e136a52e3f8da53d3bccc9 openEuler Add a doc to describe the overview, additional features and configurations of the unic driver in Documentation/networking/ub/unic.rst.
Signed-off-by: Yunsheng Lin Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/networking/ub/index.rst | 1 + Documentation/networking/ub/unic.rst | 282 ++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 Documentation/networking/ub/unic.rst diff --git a/Documentation/networking/ub/index.rst b/Documentation/networking/ub/index.rst index 10ee2632f14b..a494d7c04c01 100644 --- a/Documentation/networking/ub/index.rst +++ b/Documentation/networking/ub/index.rst @@ -12,3 +12,4 @@ The UB Networking documentation :maxdepth: 1 UB Link Layer + UNIC Driver diff --git a/Documentation/networking/ub/unic.rst b/Documentation/networking/ub/unic.rst new file mode 100644 index 000000000000..7ccba33a20f9 --- /dev/null +++ b/Documentation/networking/ub/unic.rst @@ -0,0 +1,282 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=========== +UNIC Driver +=========== + +Overview +======== +unic is a UB (UnifiedBus) networking driver based on ubase driver's auxiliary +device through auxiliary bus, supporting both ethernet and UB link layer. +See :ref:`Documentation/ub/ubase/ubase.rst` for more information about ubase +driver and :ref:`Documentation/networking/ub/ubl.rst` for more information about +UB link layer. + +.. code-block:: none + + +---------------------------------------------------------------+ + | networking stack | + +---------------------------------------------------------------+ + ^ ^ ^ + | | | + | | | + v | v + +----------------+ | +---------------+ + | Ethernet Layer | | | UB Link Layer | + +----------------+ | +---------------+ + ^ | ^ + | | | + | | | + v v v + +---------------------------------------------------------------+ + | | + | unic | + | | + | +------+ +-----+ +--------+ +---------+ +-------+ +---------+ | + | | main | | dev | | netdev | | ethtool | | dcbnl | | tx & rx | | + | +------+ +-----+ +--------+ +---------+ +-------+ +---------+ | + | +---------+ +-----------+ +-----+ +---------+ +-------+ | + | | channel | | comm_addr | | crq | | rack_ip | | reset | | + | +---------+ +-----------+ +-----+ +---------+ +-------+. | + | +----+ +--------+ +-------+ +-------+ +------+ | + | | hw | | qos_hw | | event | | stats | | guid | | + | +----+ +--------+ +-------+ +-------+ +------+ | + +---------------------------------------------------------------+ + ^ ^ + | | + | | + v | + +-------------------------------+ | + | auxiliary_bus | | + +-------------------------------+ | + ^ | + | | + | | + v v + +---------------------------------------------------------------+ + | ubase | + +---------------------------------------------------------------+ + +The main submodules in unic driver: + +:: + + main + implement module_init(), module_exit() and 'struct auxiliary_driver' for + the auxiliary device of ubase driver. + + dev + implement init & uninit function and periodic task handling for unic's + netdev. + + netdev + implement 'struct net_device_ops' for unic's netdev. + + ethtool + implement 'struct ethtool_ops' for unic's netdev. + + dcbnl + implement 'dcbnl_rtnl_ops' for unic's netdev. + + tx & rx + implement packet send and receive handling. + + channel + implement channel handling for unic's netdev. + + comm_addr & rack_ip + implement the ip address handling in UB mode. + + reset + implement the entity reset handling. + + crq + implement link status change handling through ctrl (Control) receive queue. 
+
+    hw & qos_hw
+        implement generic hw and qos related configuration access function.
+
+    stats
+        implement hw statistics collecting function.
+
+    event
+        implement asynchronous event reporting interface.
+
+    guid
+        implement the GUID (Globally Unique Identifier) querying in UB mode.
+
+Hardware Supported
+==================
+
+This driver is compatible with below UB devices:
+
+.. code-block:: none
+
+    +--------------+--------------+
+    | Vendor ID    | Device ID    |
+    +==============+==============+
+    | 0xCC08       | 0xA001       |
+    +--------------+--------------+
+    | 0xCC08       | 0xD802       |
+    +--------------+--------------+
+    | 0xCC08       | 0xD80B       |
+    +--------------+--------------+
+
+Note 'lsub' from ubutils package can be used to tell if the above device is
+available in the system, see :
+
+::
+
+    # lsub
+    <00009> UB network controller <0002>: Huawei Technologies Co., Ltd. URMA management ub entity :
+
+Additional Features and Configurations
+======================================
+
+UB Link
+-------
+UB Link is a link layer defined by UB, which sits at the same layer as the
+existing ethernet, and firmware will report the mode of the hardware port to the
+driver through hardware capability reporting, UB_Link or ETH_MAC.
+
+In UB mode, the link layer is UB and its L2 address is the GUID as below
+example:
+
+::
+
+    # ip -s addr
+    1: ublc0d0e2: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ub cc:08:d8:02:d2:0a:00:00:00:00:00:00:00:00:00:01 peer 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
+
+Note: port speed auto-negotiation is not supported in UB mode.
+
+
+IP Address Configuration
+------------------------
+IP address configuration must be performed by the management software. After
+receiving the IP address configuration through a crq event, the driver will
+update the IP address configuration to the networking stack using the netlink API.
+
+ELR support
+-----------
+ELR (Entity Level Reset) is the error recovery defined in UB, which can be
+triggered by a packet transmitting timeout, see unic_tx_timeout(), or using the
+below cmd to trigger ELR manually:
+
+::
+
+    # ethtool --reset dedicated
+    # echo 1 > /sys/class/net//device/reset
+
+Debugging
+=========
+
+module parameters
+-----------------
+Enable more verbose unic driver-specific debug message log by setting **debug**
+to non-zero, and enable network interface debug message log by configuring
+**netif_debug**, for example:
+
+::
+
+    # insmod unic.ko debug=1 netif_debug=0xFFFFF
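+
+Both knobs follow the standard kernel module parameter pattern; the sketch
+below only illustrates how such parameters are conventionally declared, and
+the actual declarations in the unic driver may differ:
+
+.. code-block:: c
+
+	static int debug;
+	module_param(debug, int, 0644);
+	MODULE_PARM_DESC(debug, "driver debug log level (non-zero enables)");
+
+	static unsigned int netif_debug;
+	module_param(netif_debug, uint, 0644);
+	MODULE_PARM_DESC(netif_debug, "network interface message level bitmap");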
+
+Debugfs Interface
+-----------------
+When CONFIG_DEBUG_FS is enabled, below debug info is accessible through
+/sys/kernel/debug/ubase/$entity_num/unic/:
+
+.. code-block:: none
+
+    ├── clear_link_status_record: clear the link status record by reading
+    ├── link_status_record: show the link status record debug info
+    ├── promisc_cfg_hw: show the promisc configuration in hardware
+    ├── rss_cfg_hw: show the rss configuration in hardware
+    ├── page_pool_info: show the rx page_pool buffer debug info
+    ├── caps_info: show the capability debug info
+    ├── dev_info: show the device info, such as max MTU
+    ├── qos/: show the qos related debug info
+    ├── vport/: show the UE (UB Entity) debug info of MUE (Management UB Entity)
+    ├── context/: show object context debug info, such as JFS (Jetty For Send)
+    ├── ip_tbl/: show the IP address configuration debug info
+
+Note, the bus-info in the output of below cmd can be used to query the entity
+number for a unic driver's netdev, which has an entity number of "00002" as
+below example:
+
+::
+
+    # ethtool -i
+    driver: unic
+    version: 1.0
+    firmware-version: 1.0
+    expansion-rom-version:
+    bus-info: 00002
+    supports-statistics: yes
+    supports-test: yes
+    supports-eeprom-access: no
+    supports-register-dump: yes
+    supports-priv-flags: no
+
+Register Dumping
+----------------
+Dump the hardware registers and report the dumping log through vendor's support
+channel using below cmd:
+
+::
+
+    # ethtool -d
+
+Performance tuning
+==================
+For different workloads, the interrupt for the driver may have a different cpu
+pinning policy, the below cmd can be used to set the cpu pinning policy for unic
+driver's ceq (Completion Event Queue) interrupt, which is used to notify
+the driver about the arrival of rx packet and completion of tx packet:
+
+::
+
+    # irq_num_list=$(cat /proc/interrupts | grep "ubase$entity_num" | grep ceq)
+    # echo $cpu_num > /proc/irq/$irq_num/smp_affinity_list
+
+CPU Intensive Workload
+----------------------
+It is recommended to pin different cores to unic driver's interrupt and service
+process, adjust interrupt coalesce parameters appropriately to limit interrupts
+for lower CPU utilization:
+
+::
+
+    # ethtool -C rx-usecs XXX tx-usecs XXX
+
+Note, the **max_int_gl** in '/sys/kernel/debug/ubase/$entity_num/unic/caps_info'
+is the max value of the coalesce parameters.
+
+Latency-sensitive Workload
+--------------------------
+It is recommended to pin the same core to unic driver's interrupt and service
+process, disable unic driver's interrupt coalesce feature to ensure that
+interrupt is triggered as soon as possible:
+
+::
+
+    # ethtool -C rx-usecs 0 tx-usecs 0
+
+Management Software
+===================
+There is management software for UB; the QoS, object context and IP address
+functions of the unic driver depend on the configuration from that management
+software. Refer to the below debugfs entries for more detail on the configuration:
+
+::
+
+    QOS: /sys/kernel/debug/ubase/$entity_num/unic/qos/
+    Object Context: /sys/kernel/debug/ubase/$entity_num/unic/context
+    IP address: /sys/kernel/debug/ubase/$entity_num/unic/ip_tbl/ip_tbl_list
+
+Support
+=======
+If there is any issue or question, please email the specific information related
+to the issue or question to or vendor's support channel. -- Gitee From d8940b01e313c8b7615fb21ee20c24451a158112 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 29 Oct 2025 16:41:06 +0800 Subject: [PATCH 106/243] ub: ubase: Add ubase opensource document commit d19422bfe49bd504f56a19bcece9e8e7a7ce4de0 openEuler Add a ubase opensource document to introduce ubase-related functions and capabilities for users in the open-source community.
Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/subsystem-apis.rst | 1 + Documentation/ub/index.rst | 13 ++ Documentation/ub/ubase/index.rst | 14 ++ Documentation/ub/ubase/ubase.rst | 259 +++++++++++++++++++++++++++++++ 4 files changed, 287 insertions(+) create mode 100644 Documentation/ub/index.rst create mode 100644 Documentation/ub/ubase/index.rst create mode 100644 Documentation/ub/ubase/ubase.rst diff --git a/Documentation/subsystem-apis.rst b/Documentation/subsystem-apis.rst index 90a0535a932a..e2b286b79701 100644 --- a/Documentation/subsystem-apis.rst +++ b/Documentation/subsystem-apis.rst @@ -86,3 +86,4 @@ Storage interfaces misc-devices/index peci/index wmi/index + ub/index diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst new file mode 100644 index 000000000000..34fd8d871f19 --- /dev/null +++ b/Documentation/ub/index.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +===================== +UnifiedBus Subsystem +===================== + +.. toctree:: + :maxdepth: 2 + + ubase/index \ No newline at end of file diff --git a/Documentation/ub/ubase/index.rst b/Documentation/ub/ubase/index.rst new file mode 100644 index 000000000000..5fcc9347e1e9 --- /dev/null +++ b/Documentation/ub/ubase/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============= +UBASE Driver +============= + +.. toctree:: + :maxdepth: 2 + :numbered: + + ubase \ No newline at end of file diff --git a/Documentation/ub/ubase/ubase.rst b/Documentation/ub/ubase/ubase.rst new file mode 100644 index 000000000000..b4cb90820a53 --- /dev/null +++ b/Documentation/ub/ubase/ubase.rst @@ -0,0 +1,259 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============================== +UNIFIEDBUS BASE DRIVER (UBASE) +============================== + +UB is a new interconnection protocol and architecture designed for computing +systems, see more on the UB spec . + +This document will introduce the composition of the UBASE Driver and how to +write a UB auxiliary device driver under the Auxiliary Bus framework of UBASE; +it also includes related debugging information. + +Overview +======== + +UBASE driver is one of the base drivers for the UB network. Based on the UB
+hardware interface, it provides public resource management and abstraction of
+common interfaces for the upper layer UB device drivers such as unic, udma,
+cdma, fwctl, pmu and uvb, which are auxiliary device drivers. UBASE driver also
+offers device-driver matching interfaces for the upper-layer drivers based on
+the auxiliary bus, isolating different auxiliary device drivers, like the udma
+driver using urma core and the unic driver with the TCP/IP stack. ubase can
+also extend the auxiliary device list to expose the UB hardware for further
+use and richer features. + +UBASE includes the functionalities of ubus device management, resource management, +auxiliary device management, entity management, querying the specific capabilities +of the device and so on. It is also the base of the other auxiliary device drivers +and must be loaded before them. + +..
code-block:: none + + +----------+ +----------+ +----------+ +---------+ +-----------+ +---------+ + | unic | | udma | | cdma | | pmu | | ubctl | | uvb | + +----------+ +----------+ +----------+ +---------+ +-----------+ +---------+ + ^ ^ ^ ^ ^ ^ + | | | | | | + v v v v v v + +--------------------------------------------------------------------------+ + | auxiliary bus | + +--------------------------------------------------------------------------+ + ^ + | + v + +--------------------------------------------------------------------------+ + | ubase | + | +-----+ +------+ +---------+ +-------+ +----+ +-----+ +-----+ +------+ | + | | dev | | main | | debugfs | | ctrlq | | eq | | arq | | ras | | ubus | | + | +-----+ +------+ +---------+ +-------+ +----+ +-----+ +-----+ +------+ | + | +-----+ +----+ +---------+ +------+ +-----+ +-------+ +----+ +-------+ | + | | cmd | | hw | | mailbox | | pmem | | qos | | reset | | tp | | stats | | + | +-----+ +----+ +---------+ +------+ +-----+ +-------+ +----+ +-------+ | + +--------------------------------------------------------------------------+ + ^ ^ ^ + | | | + | v v + | +------------------+ +------------------+ + | | ubus | | ummu | + | +------------------+ +------------------+ + | ^ ^ + | | | + v v v + +--------------------------------------------------------------------------+ + | firmware | + +--------------------------------------------------------------------------+ + +Below is the summary for the submodules in ubase driver: + + - 1) main: implement module_init/exit(). + - 2) dev: implement auxiliary bus init/uninit function, resource creating and + auxiliary device enable/disable. + - 3) cmd: implement 'command queue' to interact with firmware. + - 4) ctrlq: implement 'control queue' to interact with management software. + - 5) mailbox: implement 'mailbox' configuration to interact with hardware + through `cmdq`. + - 6) hw: implement interaction with firmware and hardware for functions. + - 7) reset: implement hardware reset handling for ubase driver. + - 8) tp: implement tp layer context BA and context creation. + - 9) debugfs: implement kernel debugfs to obtain debugging information. + - 10) qos: implement quality of service for upper communication modules. + - 11) ras: implement hardware error handler. + - 12) ubus: implement interaction with module `ubus`. + - 13) eq: event queue including asynchronous and completion event. 
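+
+As an illustration of how an upper-layer auxiliary device driver talks to the
+firmware through the cmd submodule, the following sketch mirrors the query
+pattern used by the unic driver; the request and response structures here are
+placeholders, not a real ubase interface:
+
+.. code-block:: c
+
+	struct ubase_cmd_buf in, out;
+	struct example_req req = {0};	/* hypothetical request layout */
+	struct example_resp resp = {0};	/* hypothetical response layout */
+	int ret;
+
+	/* describe the request and response buffers for the firmware */
+	ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_PORT_INFO, true,
+			     sizeof(req), &req);
+	ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_PORT_INFO, true,
+			     sizeof(resp), &resp);
+
+	/* adev is the auxiliary device handed to the driver's probe() */
+	ret = ubase_cmd_send_inout(adev, &in, &out);
+	if (ret)
+		dev_err(&adev->dev, "cmdq query failed, ret = %d\n", ret);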
+
+Supported Hardware
+==================
+
+UBUS vendor/device pairs:
+
+========= =========== ======================================
+Vendor ID Device ID Description
+========= =========== ======================================
+0xCC08 0xA001 Kunpeng URMA MUE (Management UB Entity)
+0xCC08 0xA002 Kunpeng URMA UE (UB Entity)
+0xCC08 0xA003 Kunpeng CDMA MUE (Management UB Entity)
+0xCC08 0xA004 Kunpeng CDMA UE (UB Entity)
+0xCC08 0xA005 Kunpeng PMU MUE (Management UB Entity)
+0xCC08 0xA006 Kunpeng PMU UE (UB Entity)
+0xCC08 0xD802 Ascend URMA MUE (Management UB Entity)
+0xCC08 0xD803 Ascend URMA UE (UB Entity)
+0xCC08 0xD804 Ascend CDMA MUE (Management UB Entity)
+0xCC08 0xD805 Ascend CDMA UE (UB Entity)
+0xCC08 0xD806 Ascend PMU MUE (Management UB Entity)
+0xCC08 0xD807 Ascend PMU UE (UB Entity)
+0xCC08 0xD80B Ascend UBOE MUE (Management UB Entity)
+0xCC08 0xD80C Ascend UBOE UE (UB Entity)
+========= =========== ======================================
+
+Supported Auxiliary device
+==========================
+
+UB Auxiliary bus device/driver pairs:
+
+========= ==== ==== ==== ===== === ===
+Device ID unic udma cdma fwctl pmu uvb
+========= ==== ==== ==== ===== === ===
+0xA001 O O X X X O
+0xA002 X O X X X O
+0xA003 X X O X X X
+0xA004 X X O X X X
+0xA005 X X X O O X
+0xA006 X X X O O X
+0xD802 O O X X X O
+0xD803 X O X X X O
+0xD804 X X O X X X
+0xD805 X X O X X X
+0xD806 X X X O O X
+0xD807 X X X O O X
+0xD80B O O X X X O
+0xD80C X O X X X O
+========= ==== ==== ==== ===== === ===
+
+If anyone wants to support a new auxiliary device driver based on ubase, after
+adding a specific device id matched with the vendor id, extending the driver
+list is necessary as follows::
+ enum ubase_drv_type {
+ UBASE_DRV_UNIC,
+ UBASE_DRV_UDMA,
+ UBASE_DRV_CDMA,
+ UBASE_DRV_FWCTL,
+ UBASE_DRV_PMU,
+ UBASE_DRV_UVB,
+ UBASE_DRV_MAX,
+ };
+
+Next, `struct ubase_adev_device` is supposed to be extended by the new device
+driver, with its name filled in `suffix` and its capability-check function
+hooked up to the handler named `is_supported`. Following is an example for the
+driver `unic` in ``ubase_dev.c``::
+ static struct ubase_adev_device {
+ const char *suffix;
+ bool (*is_supported)(struct ubase_dev *dev);
+ } ubase_adev_devices[UBASE_DRV_MAX] = {
+ [UBASE_DRV_UNIC] = {
+ .suffix = "unic",
+ .is_supported = &ubase_dev_unic_supported
+ },
+ };
+
+Then the new driver can fill in the `struct auxiliary_driver` ops, allowing the
+auxiliary bus to call `probe` to initialize the new driver and `remove`
+to uninitialize it.
+
+Module parameters
+=================
+UBASE driver includes one module parameter for now, `debug`. The default
+value supports the full function of ubase and related drivers, but in some
+special scenarios, like locating problems, debug information is necessary.
+
+debug
+
+This parameter controls the print level of the ubase driver. By default it
+suppresses debug messages like `UBUS ELR start` that trace the position the
+driver is running at, which may be helpful during problem identification to
+clarify the line of code where the problem occurs.
+
+This parameter does not have to be set when loading the driver; it can also be
+changed through a system configuration file created by ubase. It is set to `0`
+(disabled) by default, so no debug information is shown.
If the user wants to enable debug printing, +the file `/sys/module/ubase/parameters/debug` can be set to a non-zero integer +value such as `1` through the command `echo`, as the following line shows:: + echo 1 > /sys/module/ubase/parameters/debug + +Or set the insmod parameter `debug` to `1`, like the following: + +.. code-block:: none + + insmod ubase.ko debug=1 + +Debugging +========= +UBASE driver supports obtaining debug-related information through the +`debug filesystem` provided by the Linux kernel, which helps a lot with problem +locating and a quick overview of the ubase driver. The ubase debugfs includes +`context querying in software and hardware`, `reset information`, +`capabilities information`, `activate record`, `qos information`, +`prealloc memory information`. + +Through debugfs interfaces when `CONFIG_DEBUG_FS` is enabled, users can obtain +this information from the following directory in the system:: + /sys/kernel/debug/ubase// + +1) context querying + + UBASE driver supports querying the context created in ubase for all auxiliary + device drivers, including aeq, ceq, tp and tpg context stored in both software + and hardware, to verify whether the configuration satisfies the usage demand. + Note, a context entry ending with `hw` means hardware, like `aeq_context_hw`, + and the one without `hw` means the context stored in software, such as + `aeq_context`. + +2) reset information + + UBASE driver supports querying all kinds of reset implementation counts, + including ELR reset, port reset, himac reset, total finish count in software + and hardware, and failed count. + +3) capabilities information + + UBASE driver supports querying the capabilities information of the device + itself, which can be used by upper-layer drivers, and also the resource size + for creating. + +4) activate record + + UBASE driver supports querying the activate record of the hardware, including + activate and deactivate counts and the exact time of these actions. + +5) qos information + + UBASE driver supports querying quality of service (qos) information configured + in hardware, about configuration set by ets, tm and `management software`, + including `tc & tc group`, `rqmt table`, `mapping in vl, sl and dscp`, + `tm port`, `tm priority`, `qset and queue information` and so on. + +6) prealloc memory information + + UBASE driver supports querying the hulk pages allocated by ubase for both + common use and udma use. + +Functionality dependencies +========================== +Some functions in the ubase driver rely on configuration of `management software` +as the manager; the following shows the dependencies of ubase: + - 1) Qos configuration: `management software` takes the responsibility of + `entity creation and TQMS queue distribution` and `mapping from sl to vl`. + - 2) TP context: `management software` takes the responsibility of creating the + TP layer context for common use, including tp context basic address (BA), + tp group(tpg) context BA, TP extdb buff, TP timer buff, CC context BA, + Dest address BA, Seid_upi BA, TPM BA and so on. + - 3) Reset: The reset process needs collaboration between the ubase + driver and `management software`, including stopping flows, resuming flows, + reconstructing the TP layer context and so on. + +Support +======= +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel.
\ No newline at end of file -- Gitee From fd886d2146de178128a5a89324d7b6a5489115ee Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Fri, 21 Nov 2025 15:56:01 +0800 Subject: [PATCH 107/243] ub: ubase: Introduce the functions and data structures exposed by the ubase driver commit c7286df8de9606db5221782319ac1a0e103f9524 openEuler This patch adds comments for the functions and data structures exposed by the ubase driver, and adds a documentation file ub/ubase.rst under the Documentation/driver-api path to help users in the open-source community use them correctly during development. Signed-off-by: Haiqing Fang Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/driver-api/index.rst | 1 + Documentation/driver-api/ub/index.rst | 16 ++ Documentation/driver-api/ub/ubase.rst | 67 ++++++ drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c | 13 ++ drivers/ub/ubase/debugfs/ubase_debugfs.c | 57 ++++++ drivers/ub/ubase/ubase_cmd.c | 98 ++++++++- drivers/ub/ubase/ubase_ctrlq.c | 46 +++++ drivers/ub/ubase/ubase_dev.c | 204 +++++++++++++++++++ drivers/ub/ubase/ubase_eq.c | 46 +++++ drivers/ub/ubase/ubase_hw.c | 16 ++ drivers/ub/ubase/ubase_mailbox.c | 29 +++ drivers/ub/ubase/ubase_pmem.c | 9 + drivers/ub/ubase/ubase_qos_hw.c | 78 +++++++ drivers/ub/ubase/ubase_reset.c | 9 + drivers/ub/ubase/ubase_stats.c | 11 + include/ub/ubase/ubase_comm_cmd.h | 31 +++ include/ub/ubase/ubase_comm_ctrlq.h | 23 ++- include/ub/ubase/ubase_comm_debugfs.h | 28 ++- include/ub/ubase/ubase_comm_dev.h | 146 ++++++++++++- include/ub/ubase/ubase_comm_eq.h | 28 ++- include/ub/ubase/ubase_comm_hw.h | 40 ++++ include/ub/ubase/ubase_comm_mbx.h | 24 ++- include/ub/ubase/ubase_comm_qos.h | 7 + include/ub/ubase/ubase_comm_stats.h | 11 + 24 files changed, 1023 insertions(+), 15 deletions(-) create mode 100644 Documentation/driver-api/ub/index.rst create mode 100644 Documentation/driver-api/ub/ubase.rst diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index f0f8f521f65b..f2c5d67d47e9 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -115,6 +115,7 @@ available subsections can be seen below. hte/index wmi crypto/index + ub/index .. only:: subproject and html diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst new file mode 100644 index 000000000000..85be3e89ea81 --- /dev/null +++ b/Documentation/driver-api/ub/index.rst @@ -0,0 +1,16 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============================================= +The Linux UnifiedBus implementer's API guide +============================================= + +.. class:: toc-title + + Table of contents + +.. toctree:: + :maxdepth: 1 + + ubase diff --git a/Documentation/driver-api/ub/ubase.rst b/Documentation/driver-api/ub/ubase.rst new file mode 100644 index 000000000000..bb43d31d1949 --- /dev/null +++ b/Documentation/driver-api/ub/ubase.rst @@ -0,0 +1,67 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +UBASE Driver Support Library +----------------------------- + +.. kernel-doc:: include/ub/ubase/ubase_comm_cmd.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_ctrlq.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_debugfs.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_dev.h + :functions: + +..
kernel-doc:: include/ub/ubase/ubase_comm_eq.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_hw.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_mbx.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_qos.h + :functions: + +.. kernel-doc:: include/ub/ubase/ubase_comm_stats.h + :functions: + +.. kernel-doc:: drivers/ub/ubase/ubase_cmd.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_ctrlq.c + :export: + +.. kernel-doc:: drivers/ub/ubase/debugfs/ubase_debugfs.c + :export: + +.. kernel-doc:: drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_dev.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_pmem.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_eq.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_mailbox.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_qos_hw.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_stats.c + :export: + +.. kernel-doc:: drivers/ub/ubase/ubase_hw.c + :export: diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c index 33221a90edd9..63f27093aec1 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c @@ -252,6 +252,19 @@ static void __ubase_print_context_hw(struct seq_file *s, void *ctx_addr, } } +/** + * ubase_print_context_hw() - format the context output to a seq file + * @s: seq_file + * @ctx_addr: context address + * @ctx_len: context length + * + * This function outputs the contents of `ctx_addr` to a seq_file according to + * the specified format. + * Each line in the file is 32 bits, and the number of lines is `ctx_len / sizeof(u32)`. + * If `ctx_len` is not an integer multiple of 4, there will be truncation at the end. + * + * Context: Any context. + */ void ubase_print_context_hw(struct seq_file *s, void *ctx_addr, u32 ctx_len) { if (!s || !ctx_addr) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 21fe4d1fa9b6..9476cf17fa01 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -399,6 +399,18 @@ static bool __ubase_dbg_dentry_support(struct device *dev, u32 property) return false; } +/** + * ubase_dbg_dentry_support() - determine whether to create debugfs dentries and debugfs cmd files + * @adev: auxiliary device + * @property: property of debugfs dentry or debugfs cmd file + * + * The function is used in the 'support' functions of 'struct ubase_dbg_dentry_info' + * and 'struct ubase_dbg_cmd_info' to determine whether to create debugfs dentries + * and debugfs cmd files. + * + * Context: Any context. + * Return: true or false + */ bool ubase_dbg_dentry_support(struct auxiliary_device *adev, u32 property) { if (!adev) @@ -426,6 +438,19 @@ static int __ubase_dbg_seq_file_init(struct device *dev, return 0; } +/** + * ubase_dbg_seq_file_init() - init a ubase debugfs cmd file + * @dev: the device + * @dirs: ubase debugfs dentry information + * @dbgfs: ubase debugfs data structure + * @idx: index of dirs + * + * This function is used in the 'init' function within 'struct ubase_dbg_cmd_info' + * to create a ubase debugfs cmd file. + * + * Context: Any context.
+ * Return: 0 on success, negative error code otherwise + */ int ubase_dbg_seq_file_init(struct device *dev, struct ubase_dbg_dentry_info *dirs, struct ubase_dbgfs *dbgfs, u32 idx) @@ -714,6 +739,18 @@ static int ubase_dbg_create_file(struct device *dev, struct ubase_dbgfs *dbgfs, return 0; } +/** + * ubase_dbg_create_dentry() - ubase debugfs create dentry + * @dev: the device + * @dbgfs: ubase debugfs data structure + * @dirs: ubase debugfs dentry information + * @root_idx: index of the root dentry in dirs, and the root dentry must be the last one in the path + * + * This function is used to create a ubase debugfs dentry. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ int ubase_dbg_create_dentry(struct device *dev, struct ubase_dbgfs *dbgfs, struct ubase_dbg_dentry_info *dirs, u32 root_idx) { @@ -794,6 +831,15 @@ void ubase_dbg_unregister_debugfs(void) debugfs_remove_recursive(ubase_dbgfs_root); } +/** + * ubase_diag_debugfs_root() - get ubase debugfs root dentry + * @adev: auxiliary device + * + * This function is used to get ubase debugfs root dentry. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct dentry + */ struct dentry *ubase_diag_debugfs_root(struct auxiliary_device *adev) { if (!adev) @@ -803,6 +849,17 @@ struct dentry *ubase_diag_debugfs_root(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_diag_debugfs_root); +/** + * ubase_dbg_format_time() - format the time output to a seq file + * @time: time value + * @s: seq_file + * + * The function outputs the time in the format of + * 'week month day hour:minute:second year' to the seq_file. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ int ubase_dbg_format_time(time64_t time, struct seq_file *s) { #define YEAR_OFFSET 1900 diff --git a/drivers/ub/ubase/ubase_cmd.c b/drivers/ub/ubase/ubase_cmd.c index 4eedb6eff530..ab554343136f 100644 --- a/drivers/ub/ubase/ubase_cmd.c +++ b/drivers/ub/ubase/ubase_cmd.c @@ -777,9 +777,21 @@ int __ubase_cmd_send_in(struct ubase_dev *udev, struct ubase_cmd_buf *in) } /** - * When uninstalling, cmdq needs to be successfully sended as much as possible, - * but the cmd may be disabled during reset, this interface attempts to send cmd - * when it is enabled. + * ubase_cmd_send_inout_ex() - query (and write) cmd extension function + * @aux_dev: auxiliary device + * @in: the input cmd buff + * @out: the output cmd buff + * @time_out: timeout duration, unit: ms + * + * When the timeout parameter is set to 0, this function behaves the same as + * 'ubase_cmd_send_inout'. When the timeout parameter is not 0, if the cmdq + * channel is disabled and it recovers within the timeout period, the cmdq can + * still process commands normally. + * This function is applicable to scenarios such as concurrent resets, where the + * cmdq channel is first set to be disabled and then restored to normal operation. + * + * Context: Process context. Takes and releases , BH-safe. May sleep.
+ * Return: 0 on success, negative error code otherwise + */ int ubase_cmd_send_inout_ex(struct auxiliary_device *aux_dev, struct ubase_cmd_buf *in, struct ubase_cmd_buf *out, @@ -812,6 +824,22 @@ } EXPORT_SYMBOL(ubase_cmd_send_inout_ex); +/** + * ubase_cmd_send_inout() - query (and write) cmd function + * @aux_dev: auxiliary device + * @in: the input cmd buff + * @out: the output cmd buff + * + * The firmware determines the processing behavior based on 'in->opcode'. + * 'in->data_size' and 'in->data' represent the length of valid data and the + * address where the data is stored for interaction with the firmware. + * 'in->is_read' determines whether to read the query results. + * 'out->data_size' and 'out->data' represent the length of valid data and the + * address where the data is stored for reading the query results. + * + * Context: Process context. Takes and releases , BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_cmd_send_inout(struct auxiliary_device *aux_dev, struct ubase_cmd_buf *in, struct ubase_cmd_buf *out) @@ -823,6 +851,22 @@ } EXPORT_SYMBOL(ubase_cmd_send_inout); +/** + * ubase_cmd_send_in_ex() - write cmd extension function + * @aux_dev: auxiliary device + * @in: the input cmd buff + * @time_out: timeout duration, unit: ms + * + * When the timeout parameter is set to 0, this function behaves the same as + * 'ubase_cmd_send_in'. When the timeout parameter is not 0, if the cmdq + * channel is disabled and it recovers within the timeout period, the cmdq can + * still process commands normally. + * This function is applicable to scenarios such as concurrent resets, where the + * cmdq channel is first set to disabled and then restored to normal operation. + * + * Context: Process context. Takes and releases , BH-safe. May sleep. + * Return: 0 on success, negative error code otherwise + */ int ubase_cmd_send_in_ex(struct auxiliary_device *aux_dev, struct ubase_cmd_buf *in, u32 time_out) { @@ -837,6 +881,19 @@ } EXPORT_SYMBOL(ubase_cmd_send_in_ex); +/** + * ubase_cmd_send_in() - write cmd function + * @aux_dev: auxiliary device + * @in: the input cmd buff + * + * This function is only used for writing cmdq opcodes. 'in->is_read' must be + * false. The firmware determines the processing behavior based on 'in->opcode'. + * 'in->data_size' and 'in->data' represent the length of valid data and the + * address where the data is stored for interaction with the firmware. + * + * Context: Process context. Takes and releases , BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_cmd_send_in(struct auxiliary_device *aux_dev, struct ubase_cmd_buf *in) { @@ -878,6 +935,19 @@ static int __ubase_cmd_get_data_size(struct ubase_dev *udev, u16 opcode, return 0; } +/** + * ubase_cmd_get_data_size() - obtain the valid data length from cmdq opcode + * @aux_dev: auxiliary device + * @opcode: cmdq opcode + * @data_size: where to save the valid data length of the cmdq opcode + * + * For each opcode, the firmware has a corresponding valid data length. + * This function queries the firmware to obtain the valid data length + * corresponding to the opcode. + * + * Context: Process context. Takes and releases , BH-safe.
+ * Return: 0 on success, negative error code otherwise + */ int ubase_cmd_get_data_size(struct auxiliary_device *aux_dev, u16 opcode, u16 *data_size) { @@ -925,6 +995,18 @@ int __ubase_register_crq_event(struct ubase_dev *udev, return ret; } +/** + * ubase_register_crq_event() - register crq event processing function + * @aux_dev: auxiliary device + * @nb: the crq event notification block + * + * Register the crq handler function. When the firmware reports a crq event, + * if the opcode reported by the firmware matches the registered 'nb->opcode', + * the 'nb->crq_handler' function will be called to process it. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ int ubase_register_crq_event(struct auxiliary_device *aux_dev, struct ubase_crq_event_nb *nb) { @@ -955,6 +1037,16 @@ void __ubase_unregister_crq_event(struct ubase_dev *udev, u16 opcode) mutex_unlock(&crq_table->lock); } +/** + * ubase_unregister_crq_event() - unregister crq event processing function + * @aux_dev: auxiliary device + * @opcode: cmdq crq opcode + * + * Unregisters the crq processing function. This function is called when the user + * no longer wants to handle the crq opcode events reported by the firmware. + * + * Context: Any context. + */ void ubase_unregister_crq_event(struct auxiliary_device *aux_dev, u16 opcode) { struct ubase_dev *udev; diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 9351ed2c70fd..5a0332667885 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -824,6 +824,28 @@ int __ubase_ctrlq_send(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, return ret; } +/** + * ubase_ctrlq_send_msg() - ctrlq message send function + * @aux_dev: auxiliary device + * @msg: the message to be sent + * + * The driver uses this function to send a ctrlq message to the management software. + * The management software determines the module responsible for processing the message + * based on 'msg->service_ver', 'msg->service_type', and 'msg->opcode'; + * it also retrieves the length and content of the data to be sent from + * 'msg->in_size' and 'msg->in'. + * When 'msg->is_resp' is set to 1, it indicates that the message is a response + * to a ctrlq message from the management software. 'msg->resp_seq' and 'msg->resp_ret' + * represent the sequence number and processing result of the ctrlq message from + * the management software. + * When 'msg->need_resp' is set to 1, it indicates that the management software needs + * to respond to the driver's ctrlq message. If 'msg->out_size' is not zero and + * 'msg->out' is not empty, this function will wait synchronously for the management + * software's response, and the response information will be stored in 'msg->out'. + * + * Context: Process context. Takes and releases , BH-safe. May sleep. + * Return: 0 on success, negative error code otherwise + */ int ubase_ctrlq_send_msg(struct auxiliary_device *aux_dev, struct ubase_ctrlq_msg *msg) { @@ -1159,6 +1181,19 @@ void ubase_ctrlq_clean_service_task(struct ubase_delay_work *ubase_work) spin_unlock_bh(&csq->lock); } + +/** + * ubase_ctrlq_register_crq_event() - register ctrlq crq event processing function + * @aux_dev: auxiliary device + * @nb: the ctrlq crq event notification block + * + * Register the ctrlq crq handler function. When the management software reports + * a ctrlq crq event, if the registered 'nb->opcode' and 'nb->service_type' match + * the crq, the 'nb->crq_handler' function will be called to process it.
+ * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ int ubase_ctrlq_register_crq_event(struct auxiliary_device *aux_dev, struct ubase_ctrlq_event_nb *nb) { @@ -1193,6 +1228,17 @@ int ubase_ctrlq_register_crq_event(struct auxiliary_device *aux_dev, } EXPORT_SYMBOL(ubase_ctrlq_register_crq_event); +/** + * ubase_ctrlq_unregister_crq_event() - unregister ctrlq crq event processing function + * @aux_dev: auxiliary device + * @service_type: the ctrlq service type + * @opcode: the ctrlq opcode + * + * Unregisters the ctrlq crq processing function. This function is called when the user + * no longer wants to handle the 'service_type' and 'opcode' ctrlq crq events. + * + * Context: Any context. + */ void ubase_ctrlq_unregister_crq_event(struct auxiliary_device *aux_dev, u8 service_type, u8 opcode) { diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 9ea0d14a4d39..d48582bf90a9 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -882,6 +882,15 @@ void ubase_resume_aux_devices(struct ubase_dev *udev) mutex_unlock(&priv->uadev_lock); } +/** + * ubase_adev_ubl_supported() - determine whether ub link is supported + * @adev: auxiliary device + * + * This function is used to determine whether ub link is supported. + * + * Context: Any context. + * Return: true or false + */ bool ubase_adev_ubl_supported(struct auxiliary_device *adev) { if (!adev) @@ -891,6 +900,15 @@ } EXPORT_SYMBOL(ubase_adev_ubl_supported); +/** + * ubase_adev_ctrlq_supported() - determine whether to support ctrlq + * @adev: auxiliary device + * + * This function is used to determine whether to support ctrlq. + * + * Context: Any context. + * Return: true or false + */ bool ubase_adev_ctrlq_supported(struct auxiliary_device *adev) { if (!adev) @@ -900,6 +918,15 @@ } EXPORT_SYMBOL(ubase_adev_ctrlq_supported); +/** + * ubase_adev_eth_mac_supported() - determine whether eth link is supported + * @adev: auxiliary device + * + * This function is used to determine whether eth link is supported. + * + * Context: Any context. + * Return: true or false + */ bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev) { if (!adev) @@ -909,6 +936,15 @@ } EXPORT_SYMBOL(ubase_adev_eth_mac_supported); +/** + * ubase_get_io_base() - get io space base address + * @adev: auxiliary device + * + * The function is used to get io space base address. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_resource_space + */ struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev) { if (!adev) @@ -918,6 +954,15 @@ struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_io_base); +/** + * ubase_get_mem_base() - get memory space base address + * @adev: auxiliary device + * + * The function is used to get memory space base address. + * + * Context: Any context.
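+ *
+ * A minimal usage sketch (illustrative only; UBASE_EXAMPLE_REG is a
+ * hypothetical register offset):
+ *
+ *	struct ubase_resource_space *mem = ubase_get_mem_base(adev);
+ *	u32 val;
+ *
+ *	if (mem)
+ *		val = readl(mem->addr + UBASE_EXAMPLE_REG);
+ *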
+ * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_resource_space + */ struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev) { if (!adev) @@ -927,6 +972,15 @@ struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_mem_base); +/** + * ubase_get_dev_caps() - get ubase capabilities + * @adev: auxiliary device + * + * The function is used to get ubase capabilities. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_caps + */ struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev) { if (!adev) @@ -936,6 +990,15 @@ struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_dev_caps); +/** + * ubase_get_udma_caps() - get udma auxiliary device capabilities + * @adev: udma auxiliary device pointer + * + * The function is used to get udma auxiliary device capabilities. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_adev_caps + */ struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -949,12 +1012,30 @@ struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_udma_caps); +/** + * ubase_get_cdma_caps() - get cdma auxiliary device capabilities + * @adev: cdma auxiliary device pointer + * + * The function is used to get cdma auxiliary device capabilities. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_adev_caps + */ struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev) { return ubase_get_udma_caps(adev); } EXPORT_SYMBOL(ubase_get_cdma_caps); +/** + * ubase_get_reset_stage() - get current reset stage + * @adev: auxiliary device + * + * The function is used to get current reset stage. + * + * Context: Any context. + * Return: enum ubase_reset_stage + */ enum ubase_reset_stage ubase_get_reset_stage(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -968,6 +1049,17 @@ enum ubase_reset_stage ubase_get_reset_stage(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_reset_stage); +/** + * ubase_virt_register() - register auxiliary device virtualization handling function + * @adev: auxiliary device + * @virt_handler: the function pointer to handle virtualization. adev: the same as the + * parameter 'adev', bus_ue_id: bus ub entity id, is_en: true - enable virtualization, + * false - disable virtualization. + * + * The function is used to register auxiliary device virtualization handling function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_virt_register(struct auxiliary_device *adev, void (*virt_handler)(struct auxiliary_device *adev, u16 bus_ue_id, bool is_en)) { @@ -986,6 +1078,14 @@ void ubase_virt_register(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_virt_register); +/** + * ubase_virt_unregister() - unregister auxiliary device virtualization handling function + * @adev: auxiliary device + * + * The function is used to unregister auxiliary device virtualization handling function. + * + * Context: Process context. Takes and releases internal locks.
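+ *
+ * A typical register/unregister pairing sketch (illustrative only;
+ * my_virt_handler is a hypothetical callback):
+ *
+ *	static void my_virt_handler(struct auxiliary_device *adev,
+ *				    u16 bus_ue_id, bool is_en)
+ *	{
+ *		...
+ *	}
+ *
+ *	ubase_virt_register(adev, my_virt_handler);
+ *	...
+ *	ubase_virt_unregister(adev);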
+ */ void ubase_virt_unregister(struct auxiliary_device *adev) { struct ubase_adev *uadev; @@ -1001,6 +1101,16 @@ void ubase_virt_unregister(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_virt_unregister); +/** + * ubase_port_register() - register auxiliary device port handling function + * @adev: auxiliary device + * @port_handler: the function pointer to port handling. adev: the same as the + * parameter 'adev', link_up: true - link up, false - link down. + * + * The function is used to register auxiliary device port handling function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_port_register(struct auxiliary_device *adev, void (*port_handler)(struct auxiliary_device *adev, bool link_up)) { @@ -1019,6 +1129,14 @@ void ubase_port_register(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_port_register); +/** + * ubase_port_unregister() - unregister auxiliary device port handling function + * @adev: auxiliary device + * + * The function is used to unregister auxiliary device port handling function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_port_unregister(struct auxiliary_device *adev) { struct ubase_adev *uadev; @@ -1034,6 +1152,16 @@ void ubase_port_unregister(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_port_unregister); +/** + * ubase_reset_register() - register auxiliary device reset function + * @adev: auxiliary device + * @reset_handler: the function pointer to reset. adev: the same as the parameter + * 'adev', stage: enum ubase_reset_stage. + * + * The function is used to register auxiliary device reset function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_reset_register(struct auxiliary_device *adev, void (*reset_handler)(struct auxiliary_device *adev, enum ubase_reset_stage stage)) { @@ -1052,6 +1180,14 @@ void ubase_reset_register(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_reset_register); +/** + * ubase_reset_unregister() - unregister auxiliary device reset function + * @adev: auxiliary device + * + * The function is used to unregister auxiliary device reset function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_reset_unregister(struct auxiliary_device *adev) { struct ubase_adev *uadev; @@ -1067,6 +1203,15 @@ void ubase_reset_unregister(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_reset_unregister); +/** + * ubase_get_unic_caps() - get unic auxiliary device capabilities + * @adev: unic auxiliary device pointer + * + * The function is used to get unic auxiliary device capabilities. + * + * Context: Any context. + * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_adev_caps + */ struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -1162,6 +1307,15 @@ bool ubase_dbg_default(void) return ubase_debug; } +/** + * ubase_get_adev_qos() - get auxiliary device qos information + * @adev: auxiliary device + * + * The function is used to get auxiliary device qos information. + * + * Context: Any context.
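+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ *	struct ubase_adev_qos *qos = ubase_get_adev_qos(adev);
+ *
+ *	if (qos)
+ *		dev_dbg(&adev->dev, "sl_num: %u\n", qos->sl_num);
+ *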
+ * Return: NULL if the adev is empty, otherwise the pointer to struct ubase_adev_qos + */ struct ubase_adev_qos *ubase_get_adev_qos(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -1199,6 +1353,16 @@ static void ubase_activate_notify(struct ubase_dev *udev, mutex_unlock(&udev->priv.uadev_lock); } +/** + * ubase_activate_register() - register auxiliary device activate handling function + * @adev: auxiliary device + * @activate_handler: the function pointer to activate handling. adev: the same + * as the parameter 'adev', activate: true - activate, false - deactivate. + * + * The function is used to register auxiliary device activate handling function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_activate_register(struct auxiliary_device *adev, void (*activate_handler)(struct auxiliary_device *adev, bool activate)) { @@ -1217,6 +1381,14 @@ void ubase_activate_register(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_activate_register); +/** + * ubase_activate_unregister() - unregister auxiliary device activate handling function + * @adev: auxiliary device + * + * The function is used to unregister auxiliary device activate handling function. + * + * Context: Process context. Takes and releases internal locks. + */ void ubase_activate_unregister(struct auxiliary_device *adev) { struct ubase_adev *uadev; @@ -1325,6 +1497,17 @@ void ubase_flush_workqueue(struct ubase_dev *udev) flush_workqueue(udev->ubase_arq_wq); } +/** + * ubase_activate_dev() - activate device + * @adev: auxiliary device + * + * The auxiliary device actively initiates the device activation process. + * This function will call the activate handling functions registered by all + * auxiliary devices under the same ub entity. + * + * Context: Process context. Takes and releases internal locks, BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_activate_dev(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -1358,6 +1541,17 @@ } EXPORT_SYMBOL(ubase_activate_dev); +/** + * ubase_deactivate_dev() - deactivate device + * @adev: auxiliary device + * + * The auxiliary device actively initiates the device deactivation process. + * This function will call the activate handling functions registered by all + * auxiliary devices under the same ub entity. + * + * Context: Process context. Takes and releases internal locks, BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_deactivate_dev(struct auxiliary_device *adev) { struct ubase_dev *udev; @@ -1417,6 +1611,16 @@ static int __ubase_get_bus_eid(struct ubase_dev *udev, struct ubase_bus_eid *eid return ubase_query_bus_eid(udev, eid); } +/** + * ubase_get_bus_eid() - get bus entity id + * @adev: auxiliary device + * @eid: save the bus entity id + * + * The function is used to get bus entity id. + * + * Context: Process context. Takes and releases internal locks, BH-safe.
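+ *
+ * A minimal usage sketch (illustrative only; error handling is simplified):
+ *
+ *	struct ubase_bus_eid eid;
+ *
+ *	if (!ubase_get_bus_eid(adev, &eid))
+ *		dev_dbg(&adev->dev, "bus eid[0]: 0x%x\n", eid.eid[0]);
+ *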
+ * Return: 0 on success, negative error code otherwise + */ int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid) { struct ubase_dev *udev; diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 754f62011420..db1dc72f2caf 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -1003,6 +1003,19 @@ static int __ubase_event_register(struct ubase_dev *udev, return ret; } +/** + * ubase_event_register() - register asynchronous event processing function + * @adev: auxiliary device + * @cb: asynchronous event notification block + * + * This function uses `blocking_notifier_chain_register` to register the + * asynchronous event handling function. When the ubase driver receives an + * asynchronous event and matches it with the registered event notification + * block, it calls the registered function via `blocking_notifier_call_chain`. + * + * Context: Process context. Takes and releases the RCU lock. + * Return: 0 on success, negative error code otherwise + */ int ubase_event_register(struct auxiliary_device *adev, struct ubase_event_nb *cb) { @@ -1038,6 +1051,16 @@ static void __ubase_event_unregister(struct ubase_dev *udev, cb->event_type, ret); } +/** + * ubase_event_unregister() - unregister asynchronous event processing function + * @adev: auxiliary device + * @cb: ubase asynchronous event notification block + * + * This function uses `blocking_notifier_chain_unregister` to unregister the + * asynchronous event handling function. + * + * Context: Process context. Takes and releases the RCU lock. + */ void ubase_event_unregister(struct auxiliary_device *adev, struct ubase_event_nb *cb) { @@ -1048,6 +1071,20 @@ void ubase_event_unregister(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_event_unregister); +/** + * ubase_comp_register() - register completion event processing function + * @adev: auxiliary device + * @comp_handler: completion event processing function. nb: struct notifier_block, + * jfcn: jfc index, data: self-defined data pointer. + * + * This function uses `atomic_notifier_chain_register` to register the + * completion event handling function. When the ubase driver receives a + * completion event that matches a registered auxiliary device, it calls the + * registered function via `atomic_notifier_call_chain`. + * + * Context: Process context. May sleep. + * Return: 0 on success, negative error code otherwise + */ int ubase_comp_register(struct auxiliary_device *adev, int (*comp_handler)(struct notifier_block *nb, unsigned long jfcn, void *data)) { @@ -1071,6 +1108,15 @@ int ubase_comp_register(struct auxiliary_device *adev, } EXPORT_SYMBOL(ubase_comp_register); +/** + * ubase_comp_unregister() - unregister completion event processing function + * @adev: auxiliary device + * + * This function uses `atomic_notifier_chain_unregister` to unregister the + * completion event handling function. + * + * Context: Process context. Takes and releases the RCU lock.
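+ *
+ * A typical register/unregister pairing sketch (illustrative only;
+ * my_comp_handler is a hypothetical callback):
+ *
+ *	static int my_comp_handler(struct notifier_block *nb,
+ *				   unsigned long jfcn, void *data)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	ubase_comp_register(adev, my_comp_handler);
+ *	...
+ *	ubase_comp_unregister(adev);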
+ */ void ubase_comp_unregister(struct auxiliary_device *adev) { struct ubase_adev *uadev; diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index 290a03a74b0e..2b68ba5884f5 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -1007,6 +1007,22 @@ int __ubase_perf_stats(struct ubase_dev *udev, u64 port_bitmap, u32 period, return ret; } +/** + * ubase_perf_stats() - get ub port stats + * @adev: auxiliary device + * @port_bitmap: port bitmap + * @period: period, unit: ms + * @data: stats data + * @data_size: data size + * + * The function is used to query the port bandwidth and the bandwidth of each vl + * under the port. The bandwidth statistics collection duration is 'period'. + * The larger the 'period', the longer the time required, and the more accurate + * the bandwidth measurement. + * + * Context: Process context. Takes and releases internal locks, BH-safe. May sleep. + * Return: 0 on success, negative error code otherwise + */ int ubase_perf_stats(struct auxiliary_device *adev, u64 port_bitmap, u32 period, struct ubase_perf_stats_result *data, u32 data_size) { diff --git a/drivers/ub/ubase/ubase_mailbox.c b/drivers/ub/ubase/ubase_mailbox.c index 9951929a7f8d..86da27cdf828 100644 --- a/drivers/ub/ubase/ubase_mailbox.c +++ b/drivers/ub/ubase/ubase_mailbox.c @@ -66,6 +66,15 @@ struct ubase_cmd_mailbox *__ubase_alloc_cmd_mailbox(struct ubase_dev *udev) return NULL; } +/** + * ubase_alloc_cmd_mailbox() - Alloc mailbox buffer + * @aux_dev: auxiliary device + * + * The function is used to alloc mailbox buffer. + * + * Context: Process context. + * Return: NULL if the aux_dev is empty, otherwise the pointer to struct ubase_cmd_mailbox + */ struct ubase_cmd_mailbox *ubase_alloc_cmd_mailbox(struct auxiliary_device *aux_dev) { struct ubase_dev *udev; @@ -91,6 +100,15 @@ void __ubase_free_cmd_mailbox(struct ubase_dev *udev, kfree(mailbox); } +/** + * ubase_free_cmd_mailbox() - Free mailbox buffer + * @aux_dev: auxiliary device + * @mailbox: mailbox command address + * + * The function is used to free mailbox buffer. + * + * Context: Process context. + */ void ubase_free_cmd_mailbox(struct auxiliary_device *aux_dev, struct ubase_cmd_mailbox *mailbox) { @@ -524,6 +542,17 @@ int __ubase_hw_upgrade_ctx_ex(struct ubase_dev *udev, return ret; } +/** + * ubase_hw_upgrade_ctx_ex() - upgrade hardware context + * @aux_dev: auxiliary device + * @attr: the mailbox attribute pointer + * @mailbox: mailbox command address + * + * The function is used to upgrade hardware context. + * + * Context: Process context. Takes and releases internal locks, BH-safe. May sleep. + * Return: 0 on success, negative error code otherwise + */ int ubase_hw_upgrade_ctx_ex(struct auxiliary_device *aux_dev, struct ubase_mbx_attr *attr, struct ubase_cmd_mailbox *mailbox) { diff --git a/drivers/ub/ubase/ubase_pmem.c b/drivers/ub/ubase/ubase_pmem.c index a43f1c39c0be..71218dc25d94 100644 --- a/drivers/ub/ubase/ubase_pmem.c +++ b/drivers/ub/ubase/ubase_pmem.c @@ -336,6 +336,15 @@ void ubase_prealloc_mem_uninit(struct ubase_dev *udev) ubase_pmem_init_map[i].uninit(udev); } +/** + * ubase_adev_prealloc_supported() - determine whether to prealloc buffer + * @adev: auxiliary device + * + * This function is used to determine whether to prealloc buffer. + * + * Context: Any context.
+ * Return: true or false + */ bool ubase_adev_prealloc_supported(struct auxiliary_device *adev) { struct ubase_dev *udev; diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index 8145edc4401c..bb1a9db877d8 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -436,6 +436,25 @@ static int ubase_check_sl_bitmap(struct ubase_dev *udev, unsigned long sl_bitmap return 0; } +/** + * ubase_check_qos_sch_param() - check qos schedule parameters + * @adev: auxiliary device + * @vl_bitmap: vl bitmap + * @vl_bw: vl bandwidth weight + * @vl_tsa: vl schedule mode + * @is_ets: is ETS flow control mode + * + * The function is used to check qos schedule parameters. + * Obtain valid vls through 'vl_bitmap'. The vl scheduling mode 'vl_tsa' supports + * two types: dwrr and sp. The sum of the vl scheduling weights 'vl_bw' must be + * 100. When 'is_ets' is true, it indicates ETS flow control, and the scheduling + * weight for vls with sp scheduling mode must be 0; when 'is_ets' is false, it + * indicates TM flow control, and the scheduling weight for vls with sp + * scheduling mode cannot be 0. + * + * Context: Process context. Takes and releases internal locks, BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_check_qos_sch_param(struct auxiliary_device *adev, u16 vl_bitmap, u8 *vl_bw, u8 *vl_tsa, bool is_ets) { @@ -451,6 +470,20 @@ int ubase_check_qos_sch_param(struct auxiliary_device *adev, u16 vl_bitmap, } EXPORT_SYMBOL(ubase_check_qos_sch_param); +/** + * ubase_config_tm_vl_sch() - configure TM flow control scheduling + * @adev: auxiliary device + * @vl_bitmap: vl bitmap + * @vl_bw: vl bandwidth weight + * @vl_tsa: vl schedule mode + * + * The function is used to configure TM flow control scheduling. + * Configure the scheduling weight 'vl_bw' and scheduling mode 'vl_tsa' + * corresponding to the valid vl in 'vl_bitmap' to the TM flow control. + * + * Context: Process context. Takes and releases internal locks, BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_config_tm_vl_sch(struct auxiliary_device *adev, u16 vl_bitmap, u8 *vl_bw, u8 *vl_tsa) { @@ -465,6 +498,25 @@ int ubase_config_tm_vl_sch(struct auxiliary_device *adev, u16 vl_bitmap, } EXPORT_SYMBOL(ubase_config_tm_vl_sch); +/** + * ubase_set_priqos_info() - set priority qos information + * @dev: device + * @sl_priqos: priority qos + * + * The function is used to set priority qos information. + * Through 'sl_priqos->sl_bitmap', obtain the valid priority sl, use sl as an + * index to get the corresponding bandwidth weight and scheduling mode from + * 'sl_priqos->weight' and 'sl_priqos->sch_mode', and configure them to the hardware. + * Specifically, when 'sl_priqos->port_bitmap' is 0, it configures the TM flow + * control; when 'port_bitmap' is not 0, it configures the ETS flow control for + * the corresponding port. + * The SP scheduling weight for TM flow control cannot be 0; multiple SP traffic + * flows are scheduled according to their weights. For ETS flow control, the SP + * scheduling weight must be 0. + * + * Context: Process context. Takes and releases internal locks, BH-safe.
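+ *
+ * A minimal usage sketch (illustrative only; the bitmap value is
+ * hypothetical). 'port_bitmap' is left 0 to configure TM flow control, and
+ * 'weight'/'sch_mode' must be filled for each sl selected in 'sl_bitmap':
+ *
+ *	struct ubase_sl_priqos priqos = { 0 };
+ *
+ *	priqos.sl_bitmap = BIT(0);
+ *	... fill priqos.weight and priqos.sch_mode for sl 0 ...
+ *	ret = ubase_set_priqos_info(dev, &priqos);
+ *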
+ * Return: 0 on success, negative error code otherwise + */ int ubase_set_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) { struct ubase_dev *udev; @@ -484,6 +536,21 @@ int ubase_set_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) } EXPORT_SYMBOL(ubase_set_priqos_info); +/** + * ubase_get_priqos_info() - get priority qos information + * @dev: device + * @sl_priqos: save the queried priority QoS information + * + * The function is used to get priority qos information. + * Obtain the priority sl available for the device, as well as the corresponding + * bandwidth weight and scheduling mode. + * When port_bitmap is 0, the obtained values are the bandwidth weight and + * scheduling mode for TM flow control; when port_bitmap is not 0, the obtained + * values are the bandwidth weight and scheduling mode for ETS flow control. + * + * Context: Process context. Takes and releases internal locks, BH-safe. + * Return: 0 on success, negative error code otherwise + */ int ubase_get_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) { struct ubase_dev *udev; @@ -1088,6 +1155,17 @@ static bool ubase_is_udma_tp_vl(struct ubase_adev_qos *qos, u8 vl) return false; } +/** + * ubase_update_udma_dscp_vl() - update udma's dscp to vl mapping + * @adev: auxiliary device + * @dscp_vl: dscp to vl mapping + * @dscp_num: dscp number + * + * The function updates the dscp to vl mapping based on 'dscp_vl' and saves it + * to 'udma_dscp_vl' in 'struct ubase_adev_qos'. + * + * Context: Any context. + */ void ubase_update_udma_dscp_vl(struct auxiliary_device *adev, u8 *dscp_vl, u8 dscp_num) { diff --git a/drivers/ub/ubase/ubase_reset.c b/drivers/ub/ubase/ubase_reset.c index 7da51b021026..b2fe6d9ef559 100644 --- a/drivers/ub/ubase/ubase_reset.c +++ b/drivers/ub/ubase/ubase_reset.c @@ -86,6 +86,15 @@ void __ubase_reset_event(struct ubase_dev *udev, } } +/** + * ubase_reset_event() - reset event processing + * @adev: auxiliary device + * @reset_type: reset type + * + * The function performs corresponding reset processing based on different 'reset_type'. + * + * Context: Any context. + */ void ubase_reset_event(struct auxiliary_device *adev, enum ubase_reset_type reset_type) { diff --git a/drivers/ub/ubase/ubase_stats.c b/drivers/ub/ubase/ubase_stats.c index 7f536e0cd537..4d6e4678686d 100644 --- a/drivers/ub/ubase/ubase_stats.c +++ b/drivers/ub/ubase/ubase_stats.c @@ -51,6 +51,17 @@ static int ubase_update_mac_stats(struct ubase_dev *udev, u16 port_id, u64 *data return ret; } +/** + * ubase_get_ub_port_stats() - get ub port stats + * @adev: auxiliary device + * @port_id: port id + * @data: ub data link layer stats + * + * The function is used to get ub port stats. + * + * Context: Process context. Takes and releases internal locks, BH-safe. May sleep.
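+ *
+ * A minimal usage sketch (illustrative only; port 0 is assumed to exist and
+ * error handling is simplified):
+ *
+ *	struct ubase_ub_dl_stats stats;
+ *
+ *	ret = ubase_get_ub_port_stats(adev, 0, &stats);
+ *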
+ * Return: 0 on success, negative error code otherwise + */ int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, struct ubase_ub_dl_stats *data) { diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 7ae969c897b9..cadc707a23e6 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -110,6 +110,23 @@ enum ubase_opcode_type { UBASE_OPC_ACTIVATE_RESP = 0xF010, }; +/** + * union ubase_mbox - ubase mailbox structure + * @in_param_l: input data storage address lower 32 bits + * @in_param_h: input data storage address upper 32 bits + * @cmd: mailbox command + * @tag: queue id + * @seq_num: sequence number + * @event_en: 0-poll mode, 1-event mode + * @mbx_ue_id: mailbox ub entity id + * @rsv: reserved bits + * @status: mailbox command execution completion status, 0-success, 1-fail + * @hw_run: hardware running status, 0-not running, 1-running + * @rsv1: reserved bits + * @query_status: execution result of the mailbox query command, 0-success, 1-fail + * @query_hw_run: hardware running status of the mailbox query command, 0-not running, 1-running + * @query_rsv: reserved bits + */ union ubase_mbox { struct { /* MB 0 */ @@ -137,6 +154,13 @@ union ubase_mbox { }; }; +/** + * struct ubase_cmd_buf - ubase cmd buffer structure + * @opcode: cmdq opcode + * @is_read: read or write, true for read, false for write + * @data_size: valid length of data + * @data: data buffer + */ struct ubase_cmd_buf { u16 opcode; bool is_read; @@ -144,6 +168,13 @@ struct ubase_cmd_buf { void *data; }; +/** + * struct ubase_crq_event_nb - ubase crq event notification block structure + * @opcode: cmdq crq opcode + * @back: arbitrary registered pointer + * @crq_handler: cmdq crq handle function. dev: the struct member variable 'back', + * data: the crq message data, len: the crq message data length.
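+ *
+ * A minimal registration sketch (illustrative only; the opcode and handler
+ * are hypothetical, and the handler prototype is sketched from the
+ * @crq_handler description above - see this structure for the exact types):
+ *
+ *	static int my_crq_handler(void *dev, void *data, u32 len)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	struct ubase_crq_event_nb nb = {
+ *		.opcode = MY_CRQ_OPCODE,
+ *		.back = my_ctx,
+ *		.crq_handler = my_crq_handler,
+ *	};
+ *
+ *	ret = ubase_register_crq_event(aux_dev, &nb);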
+ */ struct ubase_crq_event_nb { u16 opcode; void *back; diff --git a/include/ub/ubase/ubase_comm_ctrlq.h b/include/ub/ubase/ubase_comm_ctrlq.h index b1ee65524052..3e08a5ab5a4f 100644 --- a/include/ub/ubase/ubase_comm_ctrlq.h +++ b/include/ub/ubase/ubase_comm_ctrlq.h @@ -58,6 +58,21 @@ enum ubase_ctrlq_opc_type_dev_register { UBASE_CTRLQ_OPC_UE_RESET_CTRL = 0x15, }; +/** + * struct ubase_ctrlq_msg - ubase ctrlq msg structure + * @service_ver: ctrlq service version + * @service_type: ctrlq service type + * @opcode: ctrlq opcode + * @need_resp: whether the message needs a response + * @is_resp: whether the message is a response + * @resv: reserved bits + * @resp_ret: the return value of the response message + * @resp_seq: response message sequence + * @in_size: input data buffer size + * @out_size: output data buffer size + * @in: input data buffer + * @out: output data buffer + */ struct ubase_ctrlq_msg { enum ubase_ctrlq_ser_ver service_ver; enum ubase_ctrlq_ser_type service_type; @@ -74,6 +89,13 @@ struct ubase_ctrlq_msg { void *out; }; +/** + * struct ubase_ctrlq_event_nb - ubase ctrlq event notification block structure + * @service_type: ctrlq service type + * @opcode: ctrlq crq opcode + * @back: arbitrary registered pointer + * @crq_handler: ctrlq crq handle function + */ struct ubase_ctrlq_event_nb { u8 service_type; u8 opcode; @@ -84,7 +106,6 @@ struct ubase_ctrlq_event_nb { int ubase_ctrlq_send_msg(struct auxiliary_device *aux_dev, struct ubase_ctrlq_msg *msg); - int ubase_ctrlq_register_crq_event(struct auxiliary_device *aux_dev, struct ubase_ctrlq_event_nb *nb); void ubase_ctrlq_unregister_crq_event(struct auxiliary_device *aux_dev, diff --git a/include/ub/ubase/ubase_comm_debugfs.h b/include/ub/ubase/ubase_comm_debugfs.h index dc0bd30aa93b..4d17c98bdfba 100644 --- a/include/ub/ubase/ubase_comm_debugfs.h +++ b/include/ub/ubase/ubase_comm_debugfs.h @@ -11,6 +11,14 @@ struct ubase_dbgfs; +/** + * struct ubase_dbg_dentry_info - ubase debugfs dentry information + * @name: dentry name + * @dentry: the dentry for this file + * @property: property supported by this dentry + * @support: function to determine whether to create this dentry. dev: struct device, + * property: the struct member variable 'property'. + */ struct ubase_dbg_dentry_info { const char *name; struct dentry *dentry; @@ -18,6 +26,18 @@ struct ubase_dbg_dentry_info { bool (*support)(struct device *dev, u32 property); }; +/** + * struct ubase_dbg_cmd_info - ubase debugfs cmd information + * @name: file name + * @dentry_index: dentry index + * @property: property supported by this file. + * @support: function to determine whether to create this file. dev: struct device, + * property: the struct member variable 'property'. + * @init: init debugfs cmd file function. dev: struct device, dirs: struct + * ubase_dbg_dentry_info, dbgfs: struct ubase_dbgfs, idx: the idxth file. + * @read_func: the read implementation function for this debugfs file.
s: struct seq_file, + data: data buffer + */ struct ubase_dbg_cmd_info { const char *name; int dentry_index; @@ -28,8 +48,14 @@ struct ubase_dbg_cmd_info { int (*read_func)(struct seq_file *s, void *data); }; +/** + * struct ubase_dbgfs - ubase debugfs data structure + * @dentry: debugfs root path + * @cmd_info: ubase debugfs cmd information + * @cmd_info_size: the size of cmd_info + */ struct ubase_dbgfs { - struct dentry *dentry; /* dbgfs root path */ + struct dentry *dentry; struct ubase_dbg_cmd_info *cmd_info; int cmd_info_size; }; diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 35e0496ac01d..8dfbb2dc91bd 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -53,6 +53,46 @@ enum ubase_reset_stage { UBASE_RESET_STAGE_UP, }; +/** + * struct ubase_caps - ubase capabilities + * @num_ceq_vectors: completion event vectors number + * @num_aeq_vectors: asynchronous event vectors number + * @num_misc_vectors: misc event vectors number + * @aeqe_depth: the depth of asynchronous event vector queue + * @ceqe_depth: the depth of completion event vector queue + * @aeqe_size: the size of asynchronous event vector queue element + * @ceqe_size: the size of completion event vector queue element + * @total_ue_num: ue number + * @public_jetty_cnt: public jetty count + * @vl_num: vl number + * @rsvd_jetty_cnt: reserved jetty count + * @req_vl: requested vl + * @resp_vl: response vl + * @packet_pattern_mode: packet pattern mode + * @ack_queue_num: ack queue number + * @oor_en: out of order receive, 0: disable 1: enable + * @reorder_queue_en: reorder queue enable, 0: disable 1: enable + * @on_flight_size: on flight packets size + * @reorder_cap: reorder capability + * @reorder_queue_shift: reorder queue shift + * @at_times: ack timeout + * @ue_num: the total number of ue and mue + * @mac_stats_num: mac stats number + * @logic_port_bitmap: logic port bitmap + * @ub_port_logic_id: ub port logic id + * @io_port_logic_id: io port logic id + * @io_port_id: io port id + * @nl_port_id: nl port id + * @chip_id: chip id + * @die_id: die id + * @ue_id: ub entity id + * @nl_id: nl id + * @tid: ub entity tid + * @eid: ub entity eid + * @upi: ub entity upi + * @ctl_no: ub controller id + * @fw_version: firmware version + */ struct ubase_caps { u16 num_ceq_vectors; u16 num_aeq_vectors; @@ -100,17 +140,47 @@ struct ubase_caps { u32 fw_version; }; +/** + * struct ubase_res_caps - ubase resource capabilities + * @max_cnt: the resource max count + * @start_idx: start index + * @depth: the queue depth of the resource + */ struct ubase_res_caps { u32 max_cnt; u32 start_idx; u32 depth; }; +/** + * struct ubase_pmem_caps - ubase physical memory capabilities + * @dma_len: iova address space length + * @dma_addr: iova address + */ struct ubase_pmem_caps { u64 dma_len; dma_addr_t dma_addr; }; +/** + * struct ubase_adev_caps - ubase auxiliary device capabilities + * @jfs: jfs resource capabilities + * @jfr: jfr resource capabilities + * @jfc: jfc resource capabilities + * @tp: tp resource capabilities + * @tpg: tp group resource capabilities + * @pmem: physical memory capabilities + * @utp_port_bitmap: utp port bitmap + * @jtg_max_cnt: jetty group max count + * @rc_max_cnt: rc max count + * @rc_que_depth: rc queue depth + * @ccc_max_cnt: ccc max count + * @dest_addr_max_cnt: dest addr max count + * @seid_upi_max_cnt: seid upi max count + * @tpm_max_cnt: tpm max count + * @cqe_size: cqe size + */ struct ubase_adev_caps {
struct ubase_res_caps jfs; struct ubase_res_caps jfr; @@ -125,6 +195,16 @@ struct ubase_adev_caps { u16 cqe_size; }; +/** + * struct ubase_ctx_buf_cap - ubase context buffer capabilities + * @dma_ctx_buf_ba: context buffer iova address + * @slot: iova slot + * @entry_size: context entry size + * @entry_cnt: context entry count + * @cnt_per_page_shift: context entry count per page shift + * @ctx_xa: context array + * @ctx_mutex: context mutex + */ struct ubase_ctx_buf_cap { dma_addr_t dma_ctx_buf_ba; /* pass to hw */ struct iova_slot *slot; @@ -135,6 +215,16 @@ struct ubase_ctx_buf_cap { struct mutex ctx_mutex; }; +/** + * struct ubase_ctx_buf - ubase context buffer information + * @jfs: jfs context buffer capability + * @jfr: jfr context buffer capability + * @jfc: jfc context buffer capability + * @jtg: jetty group context buffer capability + * @rc: rc context buffer capability + * @tp: tp context buffer capability + * @tpg: tp group context buffer capability + */ struct ubase_ctx_buf { struct ubase_ctx_buf_cap jfs; struct ubase_ctx_buf_cap jfr; @@ -147,16 +237,54 @@ struct ubase_ctx_buf { }; struct net_device; + +/** + * struct ubase_adev_com - ubase auxiliary device common information + * @adev: auxiliary device + * @netdev: network device + */ struct ubase_adev_com { struct auxiliary_device *adev; struct net_device *netdev; }; +/** + * struct ubase_resource_space - ubase resource space + * @addr_unmapped: unmapped address + * @addr: mapped address + */ struct ubase_resource_space { resource_size_t addr_unmapped; void __iomem *addr; }; +/** + * struct ubase_adev_qos - ubase auxiliary device qos information + * @rdma_vl_num: rdma vl number + * @rdma_tp_vl_num: rdma tp vl number + * @rdma_ctp_vl_num: rdma ctp vl number + * @rdma_tp_resp_vl_offset: rdma tp response vl offset, + * rdma_tp_resp_vl = rdma_ctp_resp_vl + rdma_tp_resp_vl_offset + * @rdma_ctp_resp_vl_offset: rdma ctp response vl offset, + * rdma_ctp_resp_vl = rdma_ctp_req_vl + rdma_ctp_resp_vl_offset + * @max_vl: max vl number + * @resv: reserved bits + * @rdma_sl_num: rdma sl number + * @rdma_tp_sl_num: rdma tp sl number + * @rdma_ctp_sl_num: rdma ctp sl number + * @nic_sl_num: nic sl number + * @nic_vl_num: nic vl number + * @rdma_vl: rdma vl + * @rdma_tp_req_vl: rdma tp request vl + * @rdma_ctp_req_vl: rdma ctp request vl + * @rdma_sl: rdma sl + * @rdma_tp_sl: rdma tp sl + * @rdma_ctp_sl: rdma ctp sl + * @nic_sl: nic sl + * @nic_vl: nic vl + * @sl_vl: sl to vl mapping + * @rdma_dscp_vl: rdma dscp to vl mapping + */ struct ubase_adev_qos { /* udma/cdma resource */ u8 sl_num; @@ -189,6 +317,11 @@ struct ubase_adev_qos { u8 ue_sl_vl[UBASE_MAX_SL_NUM]; }; +/** + * struct ubase_ue_node - ubase ub entity list node + * @list: list head + * @bus_ue_id: bus ub entity id + */ struct ubase_ue_node { struct list_head list; u16 bus_ue_id; @@ -211,6 +344,11 @@ struct ubase_ue_caps { }; #define UBASE_BUS_EID_LEN 4 + +/** + * struct ubase_bus_eid - bus eid + * @eid: bus eid + */ struct ubase_bus_eid { u32 eid[UBASE_BUS_EID_LEN]; }; @@ -219,7 +357,6 @@ bool ubase_adev_ubl_supported(struct auxiliary_device *adev); bool ubase_adev_ctrlq_supported(struct auxiliary_device *adev); bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev); bool ubase_adev_prealloc_supported(struct auxiliary_device *aux_dev); - struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev); @@
-227,34 +364,27 @@ struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev); struct ubase_adev_qos *ubase_get_adev_qos(struct auxiliary_device *adev); - void ubase_reset_event(struct auxiliary_device *adev, enum ubase_reset_type reset_type); enum ubase_reset_stage ubase_get_reset_stage(struct auxiliary_device *adev); - void ubase_virt_register(struct auxiliary_device *adev, void (*virt_handler)(struct auxiliary_device *adev, u16 bus_ue_id, bool is_en)); void ubase_virt_unregister(struct auxiliary_device *adev); - void ubase_port_register(struct auxiliary_device *adev, void (*port_handler)(struct auxiliary_device *adev, bool link_up)); void ubase_port_unregister(struct auxiliary_device *adev); - void ubase_reset_register(struct auxiliary_device *adev, void (*reset_handler)(struct auxiliary_device *adev, enum ubase_reset_stage stage)); void ubase_reset_unregister(struct auxiliary_device *adev); - void ubase_activate_register(struct auxiliary_device *adev, void (*activate_handler)(struct auxiliary_device *adev, bool activate)); void ubase_activate_unregister(struct auxiliary_device *adev); - int ubase_activate_dev(struct auxiliary_device *adev); int ubase_deactivate_dev(struct auxiliary_device *adev); - int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid); #endif diff --git a/include/ub/ubase/ubase_comm_eq.h b/include/ub/ubase/ubase_comm_eq.h index fe97de1dd1ac..d1efad0a79b3 100644 --- a/include/ub/ubase/ubase_comm_eq.h +++ b/include/ub/ubase/ubase_comm_eq.h @@ -50,6 +50,13 @@ enum ubase_event_type { UBASE_EVENT_TYPE_MAX }; +/** + * struct ubase_event_nb - ubase event notification block + * @drv_type: auxiliary device driver type + * @event_type: event type + * @nb: notifier block + * @back: arbitrary registered pointer + */ struct ubase_event_nb { enum ubase_drv_type drv_type; u8 event_type; @@ -57,6 +64,20 @@ struct ubase_event_nb { void *back; }; +/** + * struct ubase_aeqe - asynchronous event interrupt queue elements + * @event_type: event type + * @sub_type: sub event type + * @rsv0: reserved bits + * @owner: owner bit + * @num: jfsn/jettyn/jfrn/jfcn/jtgn/tpn + * @rsv1: reserved bits + * @out_param: mailbox output parameter + * @seq_num: mailbox sequence number + * @status: mailbox status + * @event: aeqe event information + * @rsv: reserved bits + */ struct ubase_aeqe { u32 event_type : 8; u32 sub_type : 8; @@ -82,6 +103,12 @@ struct ubase_aeqe { u32 rsv[12]; }; +/** + * struct ubase_aeq_notify_info - aeq notification information + * @event_type: event type + * @sub_type: sub event type + * @aeqe: aeq elements + */ struct ubase_aeq_notify_info { u8 event_type; u8 sub_type; @@ -92,7 +119,6 @@ int ubase_event_register(struct auxiliary_device *adev, struct ubase_event_nb *cb); void ubase_event_unregister(struct auxiliary_device *adev, struct ubase_event_nb *cb); - int ubase_comp_register(struct auxiliary_device *adev, int (*comp_handler)(struct notifier_block *nb, unsigned long jfcn, void *data)); diff --git a/include/ub/ubase/ubase_comm_hw.h b/include/ub/ubase/ubase_comm_hw.h index ba3717fb16b3..2efac24e3268 100644 --- a/include/ub/ubase/ubase_comm_hw.h +++ b/include/ub/ubase/ubase_comm_hw.h @@ -19,6 +19,16 @@ #define UBASE_JTG_CTX_SIZE 8 #define UBASE_DESC_DATA_LEN 6 + +/** + * struct ubase_cmdq_desc - Command queue descriptor + * @opcode: Command opcode + * @flag: Command flag + * @bd_num: bd 
number. One bd is 32 bytes, and the first bd carries 24 bytes of data. + * @ret: Command return value + * @rsv: reserved + * @data: Command data + */ struct ubase_cmdq_desc { __le16 opcode; u8 flag; @@ -28,6 +38,16 @@ struct ubase_cmdq_desc { __le32 data[UBASE_DESC_DATA_LEN]; }; +/** + * struct ubase_cmdq_ring - Command ring queue information + * @ci: consumer indicator + * @pi: producer indicator + * @desc_num: descriptors number + * @tx_timeout: transmit timeout interval + * @desc_dma_addr: dma address of descriptors + * @desc: Command queue descriptor + * @lock: spinlock + */ struct ubase_cmdq_ring { u32 ci; u32 pi; @@ -38,11 +58,24 @@ struct ubase_cmdq_ring { spinlock_t lock; }; +/** + * struct ubase_cmdq - command queue + * @csq: command send queue + * @crq: command receive queue + */ struct ubase_cmdq { struct ubase_cmdq_ring csq; struct ubase_cmdq_ring crq; }; +/** + * struct ubase_hw - hardware information + * @rs0_base: resource0 space base addr + * @io_base: io space base addr + * @mem_base: memory space base addr + * @cmdq: command queue + * @state: state of the hardware + */ struct ubase_hw { struct ubase_resource_space rs0_base; struct ubase_resource_space io_base; @@ -51,6 +84,13 @@ struct ubase_hw { unsigned long state; }; +/** + * struct ubase_mbx_event_context - mailbox event context + * @done: completion object to wait for event + * @result: mailbox execution result + * @out_param: mailbox output parameter + * @seq_num: mailbox sequence number + */ struct ubase_mbx_event_context { struct completion done; int result; diff --git a/include/ub/ubase/ubase_comm_mbx.h b/include/ub/ubase/ubase_comm_mbx.h index 26c625f80a77..9e7bbf1faf9e 100644 --- a/include/ub/ubase/ubase_comm_mbx.h +++ b/include/ub/ubase/ubase_comm_mbx.h @@ -10,11 +10,23 @@ #include #include +/** + * struct ubase_cmd_mailbox - mailbox command address + * @buf: virtual address + * @dma: dma address + */ struct ubase_cmd_mailbox { void *buf; dma_addr_t dma; }; +/** + * struct ubase_mbx_attr - mailbox attribute + * @tag: queue id + * @rsv: reserved bits + * @op: mailbox opcode + * @mbx_ue_id: mailbox ub entity id + */ struct ubase_mbx_attr { __le32 tag : 24; __le32 rsv : 8; @@ -78,11 +90,21 @@ enum ubase_mbox_opcode { struct ubase_cmd_mailbox *ubase_alloc_cmd_mailbox(struct auxiliary_device *aux_dev); void ubase_free_cmd_mailbox(struct auxiliary_device *aux_dev, struct ubase_cmd_mailbox *mailbox); - int ubase_hw_upgrade_ctx_ex(struct auxiliary_device *aux_dev, struct ubase_mbx_attr *attr, struct ubase_cmd_mailbox *mailbox); +/** + * ubase_fill_mbx_attr() - fill mailbox attribute + * @attr: mailbox attribute + * @tag: queue id + * @op: mailbox opcode + * @mbx_ue_id: mailbox ub entity id + * + * The function is used to assign 'tag', 'op', and 'mbx_ue_id' to 'struct ubase_mbx_attr'. + * + * Context: Process context.
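+ *
+ * A minimal usage sketch (illustrative only; the tag, opcode and ue id
+ * values are hypothetical):
+ *
+ *	struct ubase_mbx_attr attr;
+ *
+ *	ubase_fill_mbx_attr(&attr, tag, op, 0);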
+ */ static inline void ubase_fill_mbx_attr(struct ubase_mbx_attr *attr, u32 tag, u8 op, u8 mbx_ue_id) { diff --git a/include/ub/ubase/ubase_comm_qos.h b/include/ub/ubase/ubase_comm_qos.h index 01fc6942ca01..daacd537658b 100644 --- a/include/ub/ubase/ubase_comm_qos.h +++ b/include/ub/ubase/ubase_comm_qos.h @@ -16,6 +16,13 @@ enum ubase_sl_sched_mode { UBASE_SL_DWRR = IEEE_8021QAZ_TSA_ETS, }; +/** + * struct ubase_sl_priqos - priority qos + * @port_bitmap: port bitmap + * @sl_bitmap: sl bitmap + * @weight: bandwidth weight + * @sch_mode: schedule mode + */ struct ubase_sl_priqos { u32 port_bitmap; u32 sl_bitmap; diff --git a/include/ub/ubase/ubase_comm_stats.h b/include/ub/ubase/ubase_comm_stats.h index fdafc18e6700..52a766e7bab0 100644 --- a/include/ub/ubase/ubase_comm_stats.h +++ b/include/ub/ubase/ubase_comm_stats.h @@ -210,6 +210,17 @@ struct ubase_eth_mac_stats { u64 rx_merge_frame_smd_error_pkts; }; +/** + * struct ubase_perf_stats_result - traffic bandwidth statistics results + * @valid: data valid flag, 0-invalid, 1-valid + * @resv0: reserved bits + * @port_id: port id + * @resv1: reserved bits + * @tx_port_bw: tx port bandwidth + * @rx_port_bw: rx port bandwidth + * @tx_vl_bw: tx vl bandwidth + * @rx_vl_bw: rx vl bandwidth + */ struct ubase_perf_stats_result { u8 valid : 1; u8 resv0 : 7; -- Gitee From f95d3584f51053a16ae9f841fd162fcd626dce03 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Thu, 27 Nov 2025 09:27:24 +0800 Subject: [PATCH 108/243] ub:ubus: Add ubus and ubfi opensource document commit e40360b68c42fb79a086799873391e728c29c0de openEuler Add the ubus and ubfi open source documents to introduce ubus and ubfi related functions and capabilities to users in the open source community. Signed-off-by: Yahui Liu Signed-off-by: Yuhao Xiang Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: zhaolichang <943677312@qq.com> --- .../ABI/testing/debugfs-ub-hisi-ubus | 37 ++ Documentation/ABI/testing/sysfs-bus-ub | 427 ++++++++++++++++++ Documentation/driver-api/ub/index.rst | 6 +- Documentation/driver-api/ub/ubfi.rst | 7 + Documentation/driver-api/ub/ubus.rst | 7 + Documentation/ub/index.rst | 4 +- Documentation/ub/ubfi/index.rst | 11 + Documentation/ub/ubfi/ubfi.rst | 178 ++++++++ Documentation/ub/ubus/hisi_ubus.rst | 95 ++++ Documentation/ub/ubus/index.rst | 13 + Documentation/ub/ubus/ubus-service.rst | 60 +++ Documentation/ub/ubus/ubus.rst | 312 +++++++++++++ 12 files changed, 1154 insertions(+), 3 deletions(-) create mode 100644 Documentation/ABI/testing/debugfs-ub-hisi-ubus create mode 100644 Documentation/ABI/testing/sysfs-bus-ub create mode 100644 Documentation/driver-api/ub/ubfi.rst create mode 100644 Documentation/driver-api/ub/ubus.rst create mode 100644 Documentation/ub/ubfi/index.rst create mode 100644 Documentation/ub/ubfi/ubfi.rst create mode 100644 Documentation/ub/ubus/hisi_ubus.rst create mode 100644 Documentation/ub/ubus/index.rst create mode 100644 Documentation/ub/ubus/ubus-service.rst create mode 100644 Documentation/ub/ubus/ubus.rst diff --git a/Documentation/ABI/testing/debugfs-ub-hisi-ubus b/Documentation/ABI/testing/debugfs-ub-hisi-ubus new file mode 100644 index 000000000000..69ba558bfc02 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-ub-hisi-ubus @@ -0,0 +1,37 @@ +What: /sys/kernel/debug/UB_BUS_CTL/eu_table +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the contents of the EID-UPI entry. + By default, the EID and UPI key-value pair for entry 0 is displayed.
+ By writing an entry index to the properties file, you can retrieve + the content of the corresponding entry. + + Example:: + + Display the content of entry 5: + # echo 5 > /sys/kernel/debug/UB_BUS_CTL/eu_table + # cat /sys/kernel/debug/UB_BUS_CTL/eu_table + +What: /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/reg_info +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the register information for the specified queue of the designated + UB Bus controller. + +What: /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the SQE and CQE contents of the specified MSGQ for the designated + UB Bus controller. By default, the content of SQ entry 0 is displayed. + By writing the queue type and entry index to the properties file, you can + retrieve the content of the corresponding entry. + + Example:: + + Output the content of SQ entry 3: + # echo 0 3 > /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + # cat /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + + Output the content of CQ entry 5: + # echo 2 5 > /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + # cat /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info \ No newline at end of file diff --git a/Documentation/ABI/testing/sysfs-bus-ub b/Documentation/ABI/testing/sysfs-bus-ub new file mode 100644 index 000000000000..f7b3193958b7 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-ub @@ -0,0 +1,427 @@ +What: /sys/bus/ub/cluster +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates the current system operating mode: + 1 for cluster mode, 0 for standalone mode. + +What: /sys/bus/ub/instance +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the list of bus instances created in the current system. + By default, it starts from the first one. Writing a number into + the file changes the starting position of the output bus + instance list. + +What: /sys/bus/ub/drivers/.../bind +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing an entity number to this file will cause the driver + to attempt to bind to the entity. This is useful for + overriding default bindings. The entity number is + the same as found in /sys/bus/ub/devices/. + For example:: + + # echo 00002 > /sys/bus/ub/drivers/sample/bind + +What: /sys/bus/ub/drivers/.../unbind +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing an entity number to this file will cause the + driver to attempt to unbind from the entity. This may be + useful when overriding default bindings. The entity + number is the same as found in /sys/bus/ub/devices/. + For example:: + + # echo 00002 > /sys/bus/ub/drivers/sample/unbind + +What: /sys/bus/ub/drivers/.../new_id +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing a device ID to this file will attempt to + dynamically add a new device ID to a UB device driver. + This may allow the driver to support more hardware than + was included in the driver's static device ID support + table at compile time. The format for the device ID is: + VVVV DDDD MVVV MMMM CCCC MMMM PPPP. That is Vendor ID, + Device ID, Module Vendor ID, Module ID, Class, Class Mask + and Private Driver Data. The Vendor ID and Device ID fields + are required, the rest are optional. Upon successfully + adding an ID, the driver will probe for the device and + attempt to bind to it.
+ For example:: + + # echo cc08 a001 > /sys/bus/ub/drivers/sample/new_id + +What: /sys/bus/ub/drivers/.../remove_id +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing a device ID to this file will remove an ID + that was dynamically added via the new_id sysfs entry. + The format for the device ID is: + VVVV DDDD MVVV MMMM CCCC MMMM. That is Vendor ID, Device + ID, Module Vendor ID, Module ID, Class and Class Mask. + The Vendor ID and Device ID fields are required, the rest + are optional. After successfully removing an ID, + the driver will no longer support the device. + This is useful to ensure auto probing won't + match the driver to the device. + For example:: + + # echo cc08 a001 > /sys/bus/ub/drivers/sample/remove_id + +What: /sys/bus/ub/devices/.../class_code +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the class code type of the entity. + +What: /sys/bus/ub/devices/.../config +Date: Oct 2025 +Contact: Junlong Zheng +Description: + A channel is provided for user-mode programs to access the + entity configuration space. User programs can open the file + using the open system call and then perform read/write + operations on the configuration space using the pread/pwrite + system calls. + For details, please refer to the implementation of ubutils. + +What: /sys/bus/ub/devices/.../device +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the Device ID of the entity. + +What: /sys/bus/ub/devices/.../device_reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing 1 to this file can trigger a device-level reset. All + entities below it will be reset. + Supported only by Entity0. + +What: /sys/bus/ub/devices/.../direct_link +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the link connection relationships and the peer + information of the ports connected to this entity. + +What: /sys/bus/ub/devices/.../driver_override +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This file allows the driver for a device to be specified which + will override standard static and dynamic ID matching. When + specified, only a driver with a name matching the value written + to driver_override will have an opportunity to bind to the + device. The override is specified by writing a string to the + driver_override file (echo sample > driver_override) and + may be cleared with an empty string (echo > driver_override). + This returns the device to standard matching rules binding. + Writing to driver_override does not automatically unbind the + device from its current driver or make any attempt to + automatically load the specified driver. If no driver with a + matching name is currently loaded in the kernel, the device + will not bind to any driver. This also allows devices to + opt-out of driver binding using a driver_override name such as + "none". Only a single driver may be specified in the override, + there is no support for parsing delimiters. + +What: /sys/bus/ub/devices/.../eid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity's EID. + +What: /sys/bus/ub/devices/.../entity_idx +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the index of the entity, numbered starting from 0. + +What: /sys/bus/ub/devices/.../guid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the GUID of the entity. + +What: /sys/bus/ub/devices/.../instance +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the EID of the bus instance bound to the entity.
+ +What: /sys/bus/ub/devices/.../kref +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the current reference count of the entity. + +What: /sys/bus/ub/devices/.../match_driver +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the current entity is ready for driver + matching. Some entities require additional initialization work, + so this entry is provided to control the entity separately. + In this case, it is necessary to ensure a certain timing + sequence. For example, the driver should be loaded only after + this status of the entity is set to 1 to ensure that the driver + probe is correctly initiated. + +What: /sys/bus/ub/devices/.../numa +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This file contains the NUMA node to which the UB Entity is + attached, or -1 if the node is unknown. The initial value + comes from the UBRT table, defined in the UB firmware specification. + +What: /sys/bus/ub/devices/.../primary_cna +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the primary compact network address of the entity. + +What: /sys/bus/ub/devices/.../primary_entity +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity number of the entity0 to which this entity + belongs. + +What: /sys/bus/ub/devices/.../mue_list +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display a list of all MUEs under this entity, excluding itself. + Only Entity0 has this attribute file. + +What: /sys/bus/ub/devices/.../reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing 1 to this file can trigger an entity-level reset that + resets only this entity; other entities are not affected. + +What: /sys/bus/ub/devices/.../resource +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Information about the resource space of the entity is displayed, + with a total of 3 entries, each consisting of the following + three components: start_address, end_address, flags. + If all values are 0, it indicates that the resource space is + not supported. + +What: /sys/bus/ub/devices/.../resource +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Provide attribute files to the user-mode driver. Through the + open and mmap system calls, the resource space of an entity can + be mapped into the process space for direct access, thereby + improving the efficiency of cross-mode resource space access. + The memory attribute mapped by this interface is the device + attribute. + +What: /sys/bus/ub/devices/.../resource_wc +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Provide attribute files to the user-mode driver. Through the + open and mmap system calls, the resource space of an entity can + be mapped into the process space for direct access, thereby + improving the efficiency of cross-mode resource space access. + The memory attribute mapped by this interface is the + write-combine attribute. + +What: /sys/bus/ub/devices/.../sw_cap +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display whether forwarding capability is supported. + Only UBC supports it. + +What: /sys/bus/ub/devices/.../tid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the Token ID of the entity. The entity uses this + Token ID to access system memory. + +What: /sys/bus/ub/devices/.../type +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the type of the entity. The type is a subdomain segment + of GUID.
+ +What: /sys/bus/ub/devices/.../ubc +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity number of the UB controller associated with + the entity. + +What: /sys/bus/ub/devices/.../ub_numues +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of UEs that have been enabled for this + entity. Writing a value to the file enables the UEs. The written + value must be within the range of ub_totalues. Writing 0 + disables all UEs. + Only MUE supports this file. + +What: /sys/bus/ub/devices/.../ub_total_entities +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of all entities supported by this entity. + Only Entity0 supports this file. + +What: /sys/bus/ub/devices/.../ub_totalues +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of UEs owned by this entity. + Only MUE supports this file. + +What: /sys/bus/ub/devices/.../ue_list +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display a list of all UEs under this entity, excluding itself. + Only MUE has this attribute file. + +What: /sys/bus/ub/devices/.../upi +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display UPI of the entity. + +What: /sys/bus/ub/devices/.../vendor +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display Vendor ID of the entity. + +What: /sys/bus/ub/devices/.../msi_irqs/ +Date: Oct 2025 +Contact: Junlong Zheng +Description: + The /sys/bus/ub/devices/.../msi_irqs/ directory contains a + variable set of files, with each file being named after a + corresponding msi irq vector allocated to that entity. + +What: /sys/bus/ub/devices/.../msi_irqs/ +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This attribute indicates the mode that the irq vector named by + the file is in (msi vs. msix). + +What: /sys/bus/ub/devices/.../port/asy_link_width +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the port supports asymmetric link width. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../port/boundary +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the port is a boundary port. + +What: /sys/bus/ub/devices/.../port/cna +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the CNA of this port. + +What: /sys/bus/ub/devices/.../port/glb_qdlws +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Write 1 to enable global dynamic lane adjustment, + write 0 to disable this function. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../port/linkup +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display whether the port has established a connection. + +What: /sys/bus/ub/devices/.../port/neighbor +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity number of the peer entity at the port. + If no link is established, it displays "No Neighbor". + +What: /sys/bus/ub/devices/.../port/neighbor_guid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + If there is a peer entity, display the GUID of the peer entity. + Otherwise, display "No Neighbor". + +What: /sys/bus/ub/devices/.../port/neighbor_port_idx +Date: Oct 2025 +Contact: Junlong Zheng +Description: + If there is a peer entity, the port index of the peer entity is + displayed. Otherwise, display "No Neighbor". + +What: /sys/bus/ub/devices/.../port/port_reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Supports individual port reset, triggered by writing 1. + Supported only on physical port.
+
+What: /sys/bus/ub/devices/.../port/qdlws_exec_state
+Date: Oct 2025
+Contact: Junlong Zheng
+Description:
+ Obtain the hardware execution status of the current dynamically
+ adjustable lane.
+ Supported only on physical ports.
+
+What: /sys/bus/ub/devices/.../port/rx_qdlws
+Date: Oct 2025
+Contact: Junlong Zheng
+Description:
+ Enable/Disable dynamic lane adjustment in the RX direction.
+ Supported only on physical ports.
+
+What: /sys/bus/ub/devices/.../port/tx_qdlws
+Date: Oct 2025
+Contact: Junlong Zheng
+Description:
+ Enable/Disable dynamic lane adjustment in the TX direction.
+ Supported only on physical ports.
+
+What: /sys/bus/ub/devices/.../slot/power
+Date: Oct 2025
+Contact: Junlong Zheng
+Description:
+ This feature supports hot-plug notification.
+ Display the current slot status; the value can be "on",
+ "poweron", "poweroff", "off" or "unknown state". Write 1 to
+ power on the slot and write 0 to power it off.
+ This file is supported only by entities that support
+ hot-plug features.
diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst
index 85be3e89ea81..d3a5969e6e94 100644
--- a/Documentation/driver-api/ub/index.rst
+++ b/Documentation/driver-api/ub/index.rst
@@ -11,6 +11,8 @@ The Linux UnifiedBus implementer's API guide
 Table of contents
 
 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 2
 
-   ubase
+   ubfi
+   ubus
+   ubase
\ No newline at end of file
diff --git a/Documentation/driver-api/ub/ubfi.rst b/Documentation/driver-api/ub/ubfi.rst
new file mode 100644
index 000000000000..a7b0466bdca4
--- /dev/null
+++ b/Documentation/driver-api/ub/ubfi.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+UBFI Driver Support Library
+---------------------------
+
+.. kernel-doc:: include/ub/ubfi/ubfi.h
+   :functions:
diff --git a/Documentation/driver-api/ub/ubus.rst b/Documentation/driver-api/ub/ubus.rst
new file mode 100644
index 000000000000..4e39b79e9e5a
--- /dev/null
+++ b/Documentation/driver-api/ub/ubus.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+UBUS Driver Support Library
+---------------------------
+
+.. kernel-doc:: include/ub/ubus/ubus.h
+   :functions:
diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst
index 34fd8d871f19..8e939a2ba8fe 100644
--- a/Documentation/ub/index.rst
+++ b/Documentation/ub/index.rst
@@ -10,4 +10,6 @@ UnifiedBus Subsystem
 .. toctree::
    :maxdepth: 2
 
-   ubase/index
\ No newline at end of file
+   ubase/index
+   ubfi/index
+   ubus/index
diff --git a/Documentation/ub/ubfi/index.rst b/Documentation/ub/ubfi/index.rst
new file mode 100644
index 000000000000..2dd11600f4f7
--- /dev/null
+++ b/Documentation/ub/ubfi/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+UB Firmware Spec Driver
+=======================
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
+
+   ubfi
diff --git a/Documentation/ub/ubfi/ubfi.rst b/Documentation/ub/ubfi/ubfi.rst
new file mode 100644
index 000000000000..efea335726b8
--- /dev/null
+++ b/Documentation/ub/ubfi/ubfi.rst
@@ -0,0 +1,178 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+UBFI Driver
+===========
+
+What is UBFI
+============
+
+When the BIOS boots the OS with UB firmware, it reports the UB-related
+information in the system so that the OS can obtain it, including UBC, UMMU,
+and all other information required for UB enabling.
+
+Startup information is related to chip specifications and is static
+information that can be reported through a static information table.
+There are three reporting methods: UBIOS, ACPI, and Device Tree. The only
+difference among these three methods lies in the entry points for obtaining
+the UB-related information tables. The contents of each information table
+remain consistent.
+
+The UnifiedBus Firmware Interface (UBFI) driver obtains UB-related
+information from the BIOS via the ACPI table or the device tree, and creates
+software instances of UBCs and UMMUs in the OS.
+
+The UBFI driver is one of the fundamental UB drivers and implements the
+functions described above.
+
+.. code-block:: none
+
+    +--------------------------------------------------------------+
+    |                             BIOS                             |
+    +--------------------------------------------------------------+
+                  ^                               ^
+                  |acpi                         of|
+                  v                               v
+    +--------------------------------------------------------------+
+    |                            kernel                            |
+    +--------------------------------------------------------------+
+                                  ^
+                                  |
+                                  v
+    +--------------------------------------------------------------+
+    |                             ubfi                             |
+    +--------------------------------------------------------------+
+                  ^                               ^
+                  |                               |
+                  v                               v
+    +-----------------+                 +-----------------+
+    |      ubus       |                 |      ummu       |
+    +-----------------+                 +-----------------+
+
+What does UBFI do
+=================
+
+When the ubfi driver is loaded, it detects the current OS boot mode and
+retrieves the physical address of the UBRT (UB root table) from the BIOS.
+
+ - ACPI (UBRT table)
+ - device tree (node: chosen: ubios-information-table)
+
+For the structure of the UBRT, please refer to https://www.unifiedbus.com/
+
+Create UBC
+----------
+
+The BIOS may report information about multiple UBCs. Some of this
+information is shared among multiple UBCs and is reported in
+``struct ubrt_ubc_table``.
+
+.. kernel-doc:: drivers/ub/ubfi/ubc.h
+   :functions: ubrt_ubc_table
+
+Attributes such as ``ubc_cna_start``, ``ubc_cna_end``, ``ubc_eid_start``,
+``ubc_eid_end``, ``ubc_feature``, and ``cluster_mode`` belong to the entire
+UBPU node and are shared by all UBCs.
+
+For a single UBC, its information is reported in ``struct ubc_node``.
+
+.. kernel-doc:: drivers/ub/ubfi/ubc.h
+   :functions: ubc_node
+
+For a single UBC, the driver performs the following work:
+
+ - Create the UBC structure and record the UBC information
+ - Register the UBC irq with the kernel
+ - Initialize the UBC and register the UBC device with the kernel
+ - Register the MMIO address space of the UBC with the kernel
+ - Set the MSI domain for all UBCs
+
+After these steps are completed, ``struct list_head ubc_list`` is provided
+externally; it records all UBCs within the node for subsequent
+interconnection and communication purposes.
+
+Set MSI domain for UBC
+~~~~~~~~~~~~~~~~~~~~~~
+
+The UBFI driver requests interrupts from the interrupt management subsystem on
+behalf of the entity and delivers the interrupt configuration to the entity.
+When reporting an interrupt, the entity writes the interrupt information into
+the interrupt controller, which then calls back the interrupt management
+subsystem. The interrupt management subsystem subsequently invokes the UB
+driver to handle the corresponding interrupt.
+
+UB introduces a new Message Signaled Interrupt domain called USI (UB Signaled
+Interrupt).
+
+UB adds a platform device in the DSDT and IORT tables to associate the UBC
+with the USI domain. If booting with a device tree, a new UBC node is added
+in the DTS for binding the USI domain. For each UBC, a corresponding number
+of platform devices should be created, and the USI domain of these platform
+devices is set to the USI domain of each UBC.
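+
+A minimal sketch of this association is shown below. The helper name and the
+way the USI irq domain is obtained are illustrative assumptions, not the
+actual UBFI implementation:
+
+.. code-block:: c
+
+    #include <linux/device.h>
+    #include <linux/irqdomain.h>
+    #include <linux/platform_device.h>
+
+    /* Hypothetical helper: attach one UBC platform device to the USI irq
+     * domain created for that UBC, so that subsequent MSI allocations for
+     * the device are routed through the USI domain.
+     */
+    static void ubc_attach_usi_domain(struct platform_device *pdev,
+                                      struct irq_domain *usi_domain)
+    {
+        dev_set_msi_domain(&pdev->dev, usi_domain);
+    }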
+
+Example in DTS for UBC::
+
+    ubc@N {
+        compatible = "ub,ubc";
+        #interrupt-cells = <0x3>;
+        interrupt-parent = <0x01>;
+        interrupts = <0x0 0xa 0x4>;
+        index = <0x00>;
+        msi-parent = <0x1 0xabcd>;
+    };
+
+Parse UMMU and PMU
+------------------
+
+Both UMMU and UMMU-PMU devices are platform devices and support creation via
+ACPI and DTS.
+
+ACPI method:
+ - The device information for UMMU and UMMU-PMU has been added to the DSDT
+   and IORT tables.
+ - When the OS enables ACPI functionality, the ACPI system will recognize
+   the device information in the DSDT and IORT tables and automatically
+   create platform devices for UMMU and UMMU-PMU.
+ - The number of platform devices for UMMU and UMMU-PMU depends on the
+   number of device information nodes described in the DSDT and IORT tables.
+
+DTS method:
+ - The DTB file has added device tree nodes for UMMU and UMMU-PMU.
+ - When the OS enables the device tree functionality, the DTS system will
+   recognize the device tree nodes for UMMU and UMMU-PMU, and then
+   automatically create platform devices for them.
+ - The number of platform devices for UMMU and UMMU-PMU depends on the
+   number of corresponding device tree nodes described in the device tree.
+
+ Example in DTS for UMMU and UMMU-PMU::
+
+    ummu@N {
+        compatible = "ub,ummu";
+        index = <0x0>;
+        msi-parent = <&its>;
+    };
+
+    ummu-pmu@N {
+        compatible = "ub,ummu_pmu";
+        index = <0x0>;
+        msi-parent = <&its>;
+    };
+
+Obtain UMMU nodes from the UBRT table:
+ - The UBRT table can be parsed to extract the UMMU sub-table, which contains
+   several UMMU nodes. Each UMMU node describes the hardware information of a
+   UMMU device and its corresponding UMMU-PMU device. The specific content of
+   UMMU nodes can be found in ``struct ummu_node``.
+
+ - The number of UMMU platform devices created via ACPI or DTS should match
+   the number of UMMU nodes in the UBRT table, as they have a one-to-one
+   correspondence. The same one-to-one correspondence applies to UMMU-PMU
+   devices and UMMU nodes.
+
+Configure UMMU and PMU devices:
+ - For each UMMU node parsed from the UBRT table, the register information and
+   NUMA affinity described in the UMMU node can be configured for the
+   corresponding UMMU and UMMU-PMU devices.
+ - Each UMMU node's content is stored in the ``ubrt_fwnode_list`` linked list.
+   Subsequently, the corresponding UMMU node can be found by using the fwnode
+   property of the UMMU and UMMU-PMU devices, making it convenient to obtain
+   the hardware information during the initialization of the UMMU and
+   UMMU-PMU drivers.
\ No newline at end of file
diff --git a/Documentation/ub/ubus/hisi_ubus.rst b/Documentation/ub/ubus/hisi_ubus.rst
new file mode 100644
index 000000000000..b384b058129f
--- /dev/null
+++ b/Documentation/ub/ubus/hisi_ubus.rst
@@ -0,0 +1,95 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Hisilicon UBUS Driver
+=====================
+
+The Hisilicon UBUS Driver (abbreviated as Hisi UBUS) is a UnifiedBus (UB)
+specification management subsystem implemented specifically for HiSilicon
+chips. It provides an implementation of the subsystem operation interfaces::
+
+    static const struct ub_manage_subsystem_ops hisi_ub_manage_subsystem_ops = {
+        .vendor = HISI_VENDOR_ID,
+        .controller_probe = ub_bus_controller_probe,
+        .controller_remove = ub_bus_controller_remove,
+        .ras_handler_probe = ub_ras_handler_probe,
+        .ras_handler_remove = ub_ras_handler_remove
+    };
+
+including probe/remove methods for the UB bus controller and the UB RAS
+handler.
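+
+For orientation, the following minimal sketch shows how these operations
+could be registered when the module loads (the function naming here is
+illustrative; the exact interfaces are described below):
+
+.. code-block:: c
+
+    #include <linux/module.h>
+
+    static int __init hisi_ubus_sample_init(void)
+    {
+        /* Hand the vendor-specific ops to the common Ubus driver. */
+        return register_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops);
+    }
+
+    static void __exit hisi_ubus_sample_exit(void)
+    {
+        unregister_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops);
+    }
+
+    module_init(hisi_ubus_sample_init);
+    module_exit(hisi_ubus_sample_exit);
+    MODULE_LICENSE("GPL");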
+Each specification management subsystem has a unique vendor id to identify the
+provider. This vendor id is set in the vendor field of the
+``ub_manage_subsystem_ops`` implementation. During UB bus controller probe, a
+ub_bus_controller_ops is set on the UB bus controller, and the message device
+and debug file system are initialized. During UB bus controller remove, the
+ops are unset, the message device is removed, and the debug file system is
+uninitialized.
+
+During module init, hisi_ub_manage_subsystem_ops is registered with the Ubus
+driver via the ``register_ub_manage_subsystem_ops()`` method provided by the
+Ubus driver::
+
+    int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops)
+
+When the module is unloaded, the Ubus driver's
+``unregister_ub_manage_subsystem_ops()`` is called to unregister the subsystem
+operation interfaces::
+
+    void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops)
+
+Hisi UBUS Controller Driver
+===========================
+Hisi UBUS provides an implementation of the UB bus controller operation
+interfaces::
+
+    static struct ub_bus_controller_ops hi_ubc_ops = {
+        .eu_table_init = hi_eu_table_init,
+        .eu_table_uninit = hi_eu_table_uninit,
+        .eu_cfg = hi_eu_cfg,
+        .mem_decoder_create = hi_mem_decoder_create,
+        .mem_decoder_remove = hi_mem_decoder_remove,
+        .register_ubmem_irq = hi_register_ubmem_irq,
+        .unregister_ubmem_irq = hi_unregister_ubmem_irq,
+        .register_decoder_base_addr = hi_register_decoder_base_addr,
+        .entity_enable = hi_send_entity_enable_msg,
+    };
+
+including init/uninit methods for the EID/UPI table, create/remove methods for
+the UB memory decoder, register/unregister methods for UB memory decoder
+interrupts, and so on.
+
+UB Message Core Driver
+======================
+Hisi UBUS implements a message device that provides a set of operations::
+
+    static struct message_ops hi_message_ops = {
+        .probe_dev = hi_message_probe_dev,
+        .remove_dev = hi_message_remove_dev,
+        .sync_request = hi_message_sync_request,
+        .response = hi_message_response,
+        .sync_enum = hi_message_sync_enum,
+        .vdm_rx_handler = hi_vdm_rx_msg_handler,
+        .send = hi_message_send,
+    };
+
+including synchronous message sending, synchronous enumeration message
+sending, response message sending, vendor-defined message reception handling,
+and so on. After device creation, the ``message_device_register()`` method of
+the Ubus driver is called to register the device with the Ubus driver message
+framework::
+
+    int message_device_register(struct message_device *mdev)
+
+This framework provides a unified external interface for message transmission
+and reception.
+
+Hisi UBUS Local Ras Error Handler
+=================================
+Hisi UBUS provides a local RAS handling module to detect and process errors
+reported on the UB bus. It offers error printing and register dump, determines
+whether recovery is needed based on the error type and severity, and can reset
+ports for port issues in cluster environments.
+
+UB Vendor-Defined Messages Manager
+==================================
+Hisi UBUS defines several vendor-defined messages and implements their
+transmission and processing. These private messages are mainly used for
+managing the registration, release, and state control of physical and
+virtual devices.
\ No newline at end of file
diff --git a/Documentation/ub/ubus/index.rst b/Documentation/ub/ubus/index.rst
new file mode 100644
index 000000000000..a4c2a58324cf
--- /dev/null
+++ b/Documentation/ub/ubus/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+UB BUS Driver
+=============
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
+
+   ubus
+   ubus-service
+   hisi_ubus
\ No newline at end of file
diff --git a/Documentation/ub/ubus/ubus-service.rst b/Documentation/ub/ubus/ubus-service.rst
new file mode 100644
index 000000000000..fd347fff959a
--- /dev/null
+++ b/Documentation/ub/ubus/ubus-service.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================
+UnifiedBus Bus Driver (UBUS Driver) Service
+===========================================
+
+The UnifiedBus (UB) specification describes RAS-related error handling and
+notification-based hot-plug functionalities. The UBUS driver implements these
+two types of functionalities as two independent services in software. This
+document introduces these two services separately.
+
+UB Device Driver Error Service
+==============================
+The UB specification defines three categories of protocol errors: A, B, and C.
+Among these, A and B category protocol errors are directly handled by the
+UB device driver, and thus will not be further discussed in this document.
+C category protocol errors are reported to the UBUS Driver via the APEI
+mechanism. The UBUS Driver provides a set of mechanisms for handling C category
+protocol errors, which work in conjunction with the UB device driver to
+complete the error handling process.
+
+The UBUS driver provides the ``struct ub_error_handlers`` structure, which
+includes multiple callback functions related to error handling. The UB device
+driver needs to implement these callback functions::
+
+    struct ub_error_handlers {
+        void (*ub_reset_prepare)(struct ub_entity *uent);
+        void (*ub_reset_done)(struct ub_entity *uent);
+        ub_ers_result_t (*ub_error_detected)(struct ub_entity *uent, ub_channel_state_t state);
+        ub_ers_result_t (*ub_resource_enabled)(struct ub_entity *uent);
+    };
+
+For the UB device driver:
+
+ - ub_reset_prepare is called before ELR to notify the device driver to
+   prepare for the upcoming reset
+ - ub_reset_done is called after ELR to notify the device driver that
+   ELR has completed and services can be resumed
+ - ub_error_detected is called when the UB bus driver detects an error, to
+   notify the UB device driver that an error has occurred
+ - ub_resource_enabled is called after the UB bus driver has completed error
+   handling, to notify the UB device driver that error handling has
+   completed
+
+Hot-Plug Service
+================
+The UB specification defines the hot-plug functionality for devices, which
+requires coordination between software and hardware. The UBUS driver
+implements the hot removal and hot insertion of external devices on a
+per-slot basis. For detailed procedures, please refer to the UB specification
+document. The main functional points implemented by the UBUS driver include:
+
+ - Button event handling, completing the processing of hot-plug and
+   hot-unplug button messages
+ - Indicator control, switching the indicator state based on the
+   device status
+ - Power control, performing power on/off operations for slots based on the
+   device status
+ - Providing a user-space sysfs interface to simulate button effects
+   according to user commands
diff --git a/Documentation/ub/ubus/ubus.rst b/Documentation/ub/ubus/ubus.rst
new file mode 100644
index 000000000000..e7176e98732e
--- /dev/null
+++ b/Documentation/ub/ubus/ubus.rst
@@ -0,0 +1,312 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================
+How To Write Linux UB Device Drivers
+====================================
+
+UnifiedBus (abbreviated as UB) is an interconnection technology and
+architecture designed for computing systems. It unifies the communication
+between IO, memory access, and various processing units within the same
+interconnection technology framework, enabling high-performance data transfer,
+unified resource management, efficient collaboration, and effective
+programming in computing systems. Resource management is one of its key
+features, implemented through a combination of software and hardware. The UB
+Bus Driver (referred to as the UBUS Driver) implements the software portion of
+this feature. This document provides a brief overview of the components within
+the UBUS Driver framework and how to develop UB device drivers within this
+driver framework. See the UB specification for more details.
+
+Composition of the UBUS Driver
+==============================
+The UBUS Driver consists of two parts. The first part is the common
+implementation, developed according to the UB specification requirements. The
+second part is the proprietary implementation of each manufacturer, based on
+the specific circuit design of each host manufacturer. Each host manufacturer
+can provide differentiated functionality in this part of the code.
+
+If the UBUS subsystem is not configured (CONFIG_UB_UBUS is not set), most of
+the UBUS functions described below are defined as inline functions that are
+either completely empty or just return an appropriate error code, to avoid
+lots of ifdefs in the drivers.
+
+The figure below illustrates the internal composition and system boundaries of
+the UBUS Driver.
+
+.. code-block:: none
+
+    +----------------------------------------------------------+
+    |                     ub device driver                     |
+    +----------------------------------------------------------+
+                                 ^
+                                 |
+                                 v
+    +----------------------------------------------------------+
+    |                       ubus driver                        |
+    |                                                          |
+    |  +--------------------------------------------------+    |
+    |  |           ubus driver vendor-specific            |    |
+    |  +--------------------------------------------------+    |
+    |                                                          |
+    |  +--------------------------------------------------+    |
+    |  |                ubus driver common                |    |
+    |  |                                                  |    |
+    |  | +------+ +--------+ +------+ +-------+ +-----+   |    |     +---------+
+    |  | | enum | | config | | port | | route | | msg |   |    | <-> | GIC/ITS |
+    |  | +------+ +--------+ +------+ +-------+ +-----+   |    |     +---------+
+    |  | +------------+ +--------+ +-----------+          |    |
+    |  | | controller | | entity | | interrupt |          |    |     +------------+
+    |  | +------------+ +--------+ +-----------+          |    | <-> | IOMMU/UMMU |
+    |  | +---------+ +----------+ +------+ +----------+   |    |     +------------+
+    |  | | decoder | | resource | | pool | | instance |   |    |
+    |  | +---------+ +----------+ +------+ +----------+   |    |
+    |  | +-----+ +------+ +-------+ +---------+ +-------+ |    |
+    |  | | ras | | link | | reset | | hotplug | | sysfs | |    |
+    |  | +-----+ +------+ +-------+ +---------+ +-------+ |    |
+    |  | +------+ +---------------+                       |    |
+    |  | | ubfi | | bus framework |                       |    |
+    |  | +------+ +---------------+                       |    |
+    |  +--------------------------------------------------+    |
+    +----------------------------------------------------------+
+                                 ^
+                                 |
+                                 v
+    +----------------------------------------------------------+
+    |                    hardware/firmware                     |
+    +----------------------------------------------------------+
+
+The following briefly describes the functions of each submodule within the
+UBUS driver:
+
+ - enum: implement network topology scanning and device enumeration
+   functionality
+ - config: enable access to the device configuration space
+ - port: manage device ports
+ - route: implement the configuration of the routing table
+ - msg: implement message assembly and transmission/reception processing
+   for management messages
+ - controller: initialization and de-initialization of the UB controller
+ - entity: enable device configuration, multi-entity management, and other
+   functionalities
+ - interrupt: implement USI interrupt functionality
+ - decoder: implement address decoding functionality for MMIO access to
+   device resource space
+ - resource: manage the MMIO address space allocated by the user host to
+   the device
+ - pool: implementation of pooled message processing
+ - instance: implement bus instance management
+ - ras: implement handling for RAS exceptions
+ - link: implement processing of link messages
+ - reset: implement the reset function
+ - hotplug: enable hot-plug functionality for the device
+ - sysfs: implement sysfs attribute files
+ - ubfi: implement parsing of the UBRT table
+ - bus framework: implementation of the Ubus Driver Framework
+
+Structure of UB device driver
+=============================
+In Linux, ``struct ub_driver`` is used to describe a UB device driver. The
+structure definition is as follows.
+
+.. kernel-doc:: include/ub/ubus/ubus.h
+   :functions: ub_driver
+
+This structure includes a matchable device table (`id_table`), a probe
+function, a remove function, a shutdown function, error handling callbacks,
+and other functionality. The following sections provide a reference for the
+implementation of these features.
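+
+A minimal skeleton of such a driver is sketched below. This is only an
+illustration: the probe/remove prototypes and the field names are assumptions
+modelled on the fields listed above, not signatures taken from ubus.h, and
+``UB_ENTITY()`` is described in the next section.
+
+.. code-block:: c
+
+    #include <ub/ubus/ubus.h>
+
+    static const struct ub_device_id sample_ids[] = {
+        { UB_ENTITY(0xCC08, 0xA001), 0, 0 },
+        { 0, }          /* terminating all-zero entry */
+    };
+
+    /* Assumed callback prototypes; check ubus.h for the real ones. */
+    static int sample_probe(struct ub_entity *uent,
+                            const struct ub_device_id *id)
+    {
+        /* set user info, enable the entity, map resources, ... */
+        return 0;
+    }
+
+    static void sample_remove(struct ub_entity *uent)
+    {
+        /* undo everything that was done in probe */
+    }
+
+    static struct ub_driver sample_driver = {
+        .name = "sample",
+        .id_table = sample_ids,
+        .probe = sample_probe,
+        .remove = sample_remove,
+    };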
+
+Rules for Device and Driver Matching
+------------------------------------
+The matching rules for UnifiedBus devices and drivers are relatively flexible,
+allowing for any combination of the following five matching entries in the
+`struct ub_device_id` within the device driver to achieve the target matching
+rule:
+
+ - GUID's Vendor ID
+ - GUID's Device ID
+ - Configuration Space Module Vendor ID
+ - Configuration Space Module ID
+ - Configuration Space Class Code
+
+The ID table is an array of ``struct ub_device_id`` entries ending with an
+all-zero entry. Definitions with static const are generally preferred.
+
+.. kernel-doc:: include/linux/mod_devicetable.h
+   :functions: ub_device_id
+
+Most drivers need only ``UB_ENTITY()``, ``UB_ENTITY_MODULE()``, or
+``UB_ENTITY_CLASS()`` to set up a ub_device_id table.
+
+The following is an example::
+
+    static const struct ub_device_id sample_tbl[] = {
+        { 0xCC08, 0xA001, UB_ANY_ID, UB_ANY_ID, 0, 0 },
+        { UB_ENTITY(0xCC08, 0xA001), 0, 0 },
+        { UB_ENTITY_MODULE(0xCC08, 0xA001, 0xCC08, 0xA001), 0, 0 },
+        { UB_ENTITY_CLASS(0x0200, 0xffff) },
+        { 0, }
+    };
+
+New UB IDs may be added to a device driver ub_ids table at runtime
+as shown below::
+
+    echo "vendor device modulevendor moduleid class class_mask driver_data" > \
+    /sys/bus/ub/drivers/sample/new_id
+
+All fields are passed in as hexadecimal values (no leading 0x).
+The vendor and device fields are mandatory, the others are optional. Users
+need to pass only as many optional fields as necessary:
+
+ - modulevendor and moduleid fields default to UB_ANY_ID (FFFFFFFF)
+ - class and class_mask fields default to 0
+ - driver_data defaults to 0UL.
+ - override_only field defaults to 0.
+
+Note that driver_data must match the value used by any of the ub_device_id
+entries defined in the driver. This makes the driver_data field mandatory
+if all the ub_device_id entries have a non-zero driver_data value.
+
+Once added, the driver probe routine will be invoked for any unclaimed
+UB devices listed in its (newly updated) ub_ids list.
+
+Register UB Device Driver
+-------------------------
+The UB device driver uses `ub_register_driver` to register the device driver.
+During the registration process, the matching between the device and the
+driver will be triggered, with the matching rules referenced in the previous
+section.
+
+UB Device Driver Probe Process Reference
+----------------------------------------
+- Call `ub_set_user_info` to configure the user host information into the entity
+    Each entity's configuration space has corresponding user register
+    information, such as the user EID, token ID, etc. Before the device driver
+    starts using the device, it needs to configure the user host information
+    for the device.
+
+- Call `ub_entity_enable` to configure the access path between the host and the device
+    Before using the device, you need to enable the bidirectional channel
+    switch for accessing the device from the user host and vice versa.
+    This is achieved by configuring the device configuration space registers.
+
+- Set the DMA mask size
+    The device driver can reconfigure this field based on the device's DMA
+    addressing capability. The default configuration is 32-bit.
+
+- Call the kernel DMA interface to request DMA memory
+    The device driver requests DMA memory through the DMA interface provided
+    by the kernel to prepare for subsequent device DMA operations.
+
+- Call `ub_iomap` to complete the MMIO access mapping for the resource space
+    The device resource space stores private configurations related to device
+    driver capabilities. Before accessing the device resource space, you need
+    to call the ioremap interface to complete the address mapping. The
+    ub_iomap interface uses the device attribute, while the ub_iomap_wc
+    interface uses the write-combine attribute.
+
+- Call `ub_alloc_irq_vectors` or `ub_alloc_irq_vectors_affinity` to complete
+  the interrupt request, and then call the kernel's interrupt registration API.
+
+- Initiate specific business functions
+
+UB Device Driver Removal Process Reference
+------------------------------------------
+- Stop specific business functions
+- Invoke the kernel's interrupt unregistration API and call ub_disable_intr to
+  unregister the interrupt handler and release the interrupt
+- Call ub_iounmap to unmap the MMIO access space
+- Invoke the kernel's DMA interface to release DMA memory
+- Call ub_entity_enable to close the access path between the host and the device
+- Call ub_unset_user_info to clear the user host information configured to the
+  entity
+
+UB Device Driver Shutdown
+-------------------------
+The UB device shutdown is triggered during the system shutdown or restart
+process, and the UB device driver needs to stop the service flow in the
+shutdown interface.
+
+UB Device Driver Virtual Configure
+----------------------------------
+
+If the MUE supports multiple UEs, the device driver needs to provide the
+`virt_configure` callback. The UEs can then be enabled or disabled to
+facilitate direct assignment to virtual machines. The bus driver will
+cyclically call the virt_configure callback of the device driver to enable
+and disable each UE in sequence. Within the virt_configure function, the
+device driver needs to call `ub_enable_ue` and `ub_disable_ue` provided by
+the bus driver to create and destroy UEs; private processing logic can be
+executed at the same time.
+
+UEs can be enabled and disabled through sysfs. The process is as follows::
+
+    1. Check the number of UEs currently supported by the MUE
+    # cat /sys/bus/ub/devices/.../ub_totalues
+    2. Specify the number of enabled UEs within the maximum UE quantity range
+    # echo 3 > /sys/bus/ub/devices/.../ub_numues
+    3. Disable UEs
+    # echo 0 > /sys/bus/ub/devices/.../ub_numues
+
+UB Device Driver Virtual Notify
+-------------------------------
+
+If the device supports multiple UEs and the MUE device driver wants to be
+aware of UE state changes, a `virt_notify` hook function can be implemented
+to capture the UE state.
+
+UB Device Driver Activate and Deactivate
+----------------------------------------
+
+The bus driver supports maintaining the working status of entities, indicating
+whether an entity is in operation. It also provides corresponding interfaces
+for controlling devices to enter or exit the working state, such as
+`ub_activate_entity` and `ub_deactivate_entity`. If the device driver needs
+to perform any special procedures, it must implement the corresponding
+activate and deactivate hook functions.
+
+UB Device Driver RAS handler
+----------------------------
+
+The bus driver provides a set of hooks for RAS processing, providing a window
+to notify device drivers when events such as resets and RAS errors are
+handled, allowing them to execute corresponding processing measures.
+Currently implemented hooks include `reset_prepare`, `reset_done`,
+`error_detected`, and `resource_enabled`.
+Device drivers can optionally provide corresponding implementations to
+execute their own private processing.
+
+Uninstall UB Device Driver
+--------------------------
+The UB device driver uses `ub_unregister_driver` to unregister the driver.
+This interface call will perform a remove operation on all devices matched by
+the driver, ultimately removing the UB device driver from the system.
+
+How to find UB devices manually
+===============================
+
+UBUS provides several interfaces to obtain ub_entities. You can search for
+them using keywords such as the GUID, EID, or entity number, or you can find
+an entire class of devices using the vendor ID and device ID.
+
+How to access UB Configuration space
+====================================
+
+You can use `ub_config_(read|write)_(byte|word|dword)` to access the config
+space of an entity represented by `struct ub_entity *`. All these functions
+return 0 on success or an error code on failure. Most drivers expect that
+accesses to valid UB entities don't fail.
+
+The macros for configuration space registers are defined in the header file
+include/uapi/ub/ubus/ubus_regs.h.
+
+Vendor and device identifications
+=================================
+
+Do not add new device or vendor IDs to include/ub/ubus/ubus_ids.h unless they
+are shared across multiple drivers. You can add private definitions in
+your driver if they're helpful, or just use plain hex constants.
+
+The device IDs are arbitrary hex numbers (vendor controlled) and normally used
+only in a single location, the ub_device_id table.
+
+Please DO submit new vendor/device IDs to .
+There's a mirror of the ub.ids file at
+https://gitee.com/openeuler/ubutils/ub.ids.
-- 
Gitee

From 4c19d1d2d316abe494b9a51e7f9c81c9781f3371 Mon Sep 17 00:00:00 2001
From: Jianquan Lin
Date: Thu, 27 Nov 2025 09:28:01 +0800
Subject: [PATCH 109/243] ub: ubus: Add ubus devicetree file

commit 575bfc910e2219b710f60f49ea1fc22543e85b46 openEuler

Define the attributes and attribute values of device nodes such as the UB
Controller to ensure that the kernel can correctly parse and use these
attributes.

Signed-off-by: Yahui Liu
Signed-off-by: Yuhao Xiang
Signed-off-by: Junlong Zheng
Signed-off-by: Jianquan Lin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 .../devicetree/bindings/ub/hisi,ubc.yaml      | 35 ++++++++++++
 .../devicetree/bindings/ub/ub,ubc.yaml        | 55 +++++++++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/ub/hisi,ubc.yaml
 create mode 100644 Documentation/devicetree/bindings/ub/ub,ubc.yaml

diff --git a/Documentation/devicetree/bindings/ub/hisi,ubc.yaml b/Documentation/devicetree/bindings/ub/hisi,ubc.yaml
new file mode 100644
index 000000000000..2219dd7902c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/ub/hisi,ubc.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ub/hisi,ubc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon UBC (Unified Bus Controller) platform device
+
+maintainers:
+  - Yuhao Xiang
+
+description: |+
+  This platform device was added to enable the automatic loading of the
+  hisi_ubus driver. If this feature is not needed, you can omit adding this
+  device and manually load the driver instead.
+
+properties:
+  $nodename:
+    pattern: "^hisi-ubc$"
+    description: |
+      The node name should be "hisi-ubc".
+
+  compatible:
+    const: "hisi,ubc"
+    description: |
+      The compatible property should be "hisi,ubc" to identify the device as a
+      HiSilicon UBC platform device.
+
+unevaluatedProperties: false
+
+examples:
+  - |+
+    hisi-ubc {
+        compatible = "hisi,ubc";
+    };
diff --git a/Documentation/devicetree/bindings/ub/ub,ubc.yaml b/Documentation/devicetree/bindings/ub/ub,ubc.yaml
new file mode 100644
index 000000000000..012a293cf9e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/ub/ub,ubc.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ub/ub,ubc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UBC (Unified Bus Controller) platform device
+
+maintainers:
+  - Yuhao Xiang
+
+description: |
+  The UBC platform device reports the UBC interrupt number and the
+  association between the UBC and the interrupt controller.
+
+properties:
+  $nodename:
+    pattern: "^ubc@[0-9a-f]*"
+
+  compatible:
+    const: "ub,ubc"
+
+  interrupts:
+    maxItems: 1
+    description: |
+      The interrupt specifier for the UBC. Used by the msgq of the UB
+      controller.
+
+  index:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      The index of the UBC. This is used to identify the specific UBC
+      in a system with multiple UBC devices. Starts from 0.
+
+  msi-parent:
+    description: The msi interrupt parent for the UBC. Used by the UB entity
+      connected to the UBC.
+
+required:
+  - compatible
+  - interrupts
+  - index
+  - msi-parent
+
+unevaluatedProperties: true
+
+examples:
+  - |+
+    ubc@0 {
+        compatible = "ub,ubc";
+        #interrupt-cells = <0x3>;
+        interrupt-parent = <0x01>;
+        interrupts = <0x00 0xcb 0x4>;
+        index = <0x00>;
+        msi-parent = <&its 0x54c0>;
+    };
-- 
Gitee

From 2619d8d6996d0bcc83fbb28182703d07d48a82ed Mon Sep 17 00:00:00 2001
From: Zhipeng Lu
Date: Mon, 24 Nov 2025 14:03:22 +0800
Subject: [PATCH 110/243] ub: cdma: add CDMA driver-api documentation
 description

commit 27d2c29e4512954c7efcee4fa62b13fd67ce0f93 openEuler

This patch adds the CDMA driver-api documentation description.

Signed-off-by: Zhipeng Lu
Signed-off-by: Sunyi Nan
Signed-off-by: zhaolichang <943677312@qq.com>
---
 Documentation/driver-api/ub/cdma.rst  | 190 ++++++++++++++++++++
 Documentation/driver-api/ub/index.rst |   5 +-
 drivers/ub/cdma/cdma_api.c            | 219 ++++++++++++++++++++++++++
 include/ub/cdma/cdma_api.h            |  84 +++++++++-
 4 files changed, 492 insertions(+), 6 deletions(-)
 create mode 100644 Documentation/driver-api/ub/cdma.rst

diff --git a/Documentation/driver-api/ub/cdma.rst b/Documentation/driver-api/ub/cdma.rst
new file mode 100644
index 000000000000..784962a71af5
--- /dev/null
+++ b/Documentation/driver-api/ub/cdma.rst
@@ -0,0 +1,190 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved.
+
+===========================
+CDMA Driver Support Library
+===========================
+
+Overview
+========
+CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory
+read and write operations between hosts or between a host and devices.
+
+The key features are described as follows:
+
++ 1. Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write.
++ 2. Asynchronous memory read and write between the host and devices via DMA.
++ 3. Asynchronous memory read and write between devices and the host via DMA.
+
+This document aims to provide a guide for device driver developers on the
+CDMA driver API, as well as on how to use it for asynchronous memory read
+and write operations between hosts.
+
+CDMA Interface Operation
+========================
+The API of the CDMA framework does not support arbitrary concurrent calls.
+For example, using a queue object while concurrently destroying that queue
+can lead to unexpected exceptions.
+Users are required to ensure the correctness of the calling logic. These
+objects include the context, segment, queue, etc.
+
+.. kernel-doc:: include/ub/cdma/cdma_api.h
+   :functions:
+
+.. kernel-doc:: drivers/ub/cdma/cdma_api.c
+   :export:
+
+CDMA API Sample
+===============
+
+DMA Resource Sample
+-------------------
+.. code-block:: c
+
+    #define POLL_LOOP_EXAMP 100
+    #define POLL_MSLEEP_EXAMP 1
+    #define QUEUE_DEPTH_EXAMP 512
+    #define QUEUE_RMT_EID_EXAMP 2
+    #define QUEUE_DCNA_EXAMP 1
+
+    struct dma_seg_cfg local_seg_cfg = {};
+    struct dma_seg_cfg rmt_seg_cfg = {};
+    struct dma_seg *local_seg, *rmt_seg;
+    struct queue_cfg queue_cfg = {};
+    int ctx_handle, queue_handle;
+    struct dma_device *dev_list;
+    struct dma_device *dma_dev;
+    u32 loop = POLL_LOOP_EXAMP;
+    struct dma_cr ret_cr = {};
+    enum dma_status status;
+    int ret = -EINVAL;
+    u32 dev_num = 0;
+
+    dev_list = dma_get_device_list(&dev_num);
+    if (!dev_list || !dev_num) {
+        printk("get device list failed\n");
+        return;
+    }
+    dma_dev = &dev_list[0];
+
+    ctx_handle = dma_create_context(dma_dev);
+    if (ctx_handle < 0) {
+        printk("create context failed, ctx_handle: %d.\n", ctx_handle);
+        goto free_dev_list;
+    }
+
+    queue_cfg.queue_depth = QUEUE_DEPTH_EXAMP;
+    queue_cfg.rmt_eid.dw0 = QUEUE_RMT_EID_EXAMP;
+    queue_cfg.dcna = QUEUE_DCNA_EXAMP;
+    queue_handle = dma_alloc_queue(dma_dev, ctx_handle, &queue_cfg);
+    if (queue_handle < 0) {
+        printk("allocate queue failed, queue_handle: %d.\n", queue_handle);
+        goto delete_ctx;
+    }
+
+    /* Input parameter, local payload address */
+    local_seg_cfg.sva = (u64)local_buf_addr;
+    /* Input parameter, local payload memory length */
+    local_seg_cfg.len = local_buf_len;
+
+    local_seg = dma_register_seg(dma_dev, ctx_handle, &local_seg_cfg);
+    if (!local_seg) {
+        printk("register local segment failed.\n");
+        goto free_queue;
+    }
+
+    /* Input parameter, remote payload address */
+    rmt_seg_cfg.sva = (u64)rmt_buf_addr;
+    /* Input parameter, remote payload memory length */
+    rmt_seg_cfg.len = rmt_buf_len;
+
+    rmt_seg = dma_import_seg(&rmt_seg_cfg);
+    if (!rmt_seg) {
+        printk("import rmt segment failed.\n");
+        goto unregister_seg;
+    }
+
+    status = dma_write(dma_dev, rmt_seg, local_seg, queue_handle);
+    if (status != DMA_STATUS_OK) {
+        printk("write failed, status = %d.\n", status);
+        goto unimport_seg;
+    }
+
+    while (loop > 0) {
+        ret = dma_poll_queue(dma_dev, queue_handle, 1, &ret_cr);
+        if (ret == 1)
+            break;
+        msleep(POLL_MSLEEP_EXAMP);
+        loop--;
+    }
+    ...
+
+    unimport_seg:
+        dma_unimport_seg(rmt_seg);
+    unregister_seg:
+        dma_unregister_seg(dma_dev, local_seg);
+    free_queue:
+        dma_free_queue(dma_dev, queue_handle);
+    delete_ctx:
+        dma_delete_context(dma_dev, ctx_handle);
+    free_dev_list:
+        dma_free_device_list(dev_list, dev_num);
+    ...
+
+DMA Client Sample
+-----------------
+
+.. code-block:: c
+
+    /* After the driver is loaded or restarted upon reset, the add
+     * interface is called to allow users to request resources
+     * required for DMA.
+     */
+    static int example_add(u32 eid)
+    {
+        /* Refer to DMA Resource Sample, create context, queue, segment
+         * dma_get_device_list, dma_create_context, dma_alloc_queue etc.
+         */
+        return 0;
+    }
+
+    /* The remove interface is used to notify users to delete resources
+     * under DMA.
+     */
+    static void example_remove(u32 eid)
+    {
+        /* Refer to DMA Resource Sample, delete context, queue, segment
+         * dma_free_queue, dma_delete_context, dma_free_device_list etc.
+         */
+    }
+
+    /* The stop interface is used to notify users to stop using the
+     * DMA channel.
+     */
+    static void example_stop(u32 eid)
+    {
+        /* Stop read and write operations through status control */
+    }
+
+    static struct dma_client example_client = {
+        .client_name = "example",
+        .add = example_add,
+        .remove = example_remove,
+        .stop = example_stop,
+    };
+
+    /* Register the virtual kernel online interface to notify users that
+     * the kernel-mode CDMA driver is online.
+     */
+    static void example_register_client(u32 eid)
+    {
+        ...
+        dma_register_client(&example_client);
+        ...
+    }
+
+Support
+=======
+If there is any issue or question, please email the specific information
+related to the issue or question to or vendor's support channel.
diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst
index d3a5969e6e94..5738694649be 100644
--- a/Documentation/driver-api/ub/index.rst
+++ b/Documentation/driver-api/ub/index.rst
@@ -11,8 +11,9 @@ The Linux UnifiedBus implementer's API guide
 Table of contents
 
 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 4
 
    ubfi
    ubus
-   ubase
\ No newline at end of file
+   ubase
+   cdma
diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c
index cc3aa6ce4921..ae84210c1f97 100644
--- a/drivers/ub/cdma/cdma_api.c
+++ b/drivers/ub/cdma/cdma_api.c
@@ -20,6 +20,16 @@
 LIST_HEAD(g_client_list);
 DECLARE_RWSEM(g_clients_rwsem);
 DECLARE_RWSEM(g_device_rwsem);
 
+/**
+ * dma_get_device_list - Get DMA device list
+ * @num_devices: DMA device number
+ *
+ * Users can perform subsequent resource creation operations using a pointer
+ * to a DMA device in the list.
+ *
+ * Context: Process context.
+ * Return: address of the first device in the list
+ */
 struct dma_device *dma_get_device_list(u32 *num_devices)
 {
 	struct cdma_device_attr *attr;
@@ -73,6 +83,16 @@
 }
 EXPORT_SYMBOL_GPL(dma_get_device_list);
 
+/**
+ * dma_free_device_list - Free DMA device list
+ * @dev_list: DMA device list
+ * @num_devices: DMA device number
+ *
+ * It must be called once dev_list is no longer in use.
+ *
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_free_device_list(struct dma_device *dev_list, u32 num_devices)
 {
 	int ref_cnt;
@@ -97,6 +117,15 @@
 }
 EXPORT_SYMBOL_GPL(dma_free_device_list);
 
+/**
+ * dma_get_device_by_eid - Get the specified EID DMA device
+ * @eid: Device eid pointer
+ *
+ * Use either this function or dma_get_device_list to obtain a device.
+ *
+ * Context: Process context.
+ * Return: DMA device structure pointer
+ */
 struct dma_device *dma_get_device_by_eid(struct dev_eid *eid)
 {
 	struct cdma_device_attr *attr;
@@ -146,6 +175,16 @@
 }
 EXPORT_SYMBOL_GPL(dma_get_device_by_eid);
 
+/**
+ * dma_create_context - Create DMA context
+ * @dma_dev: DMA device pointer
+ *
+ * The context is used to store resources such as the queue and segment.
+ * A context ID is returned.
+ *
+ * Context: Process context.
+ * Return: DMA context ID value
+ */
 int dma_create_context(struct dma_device *dma_dev)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -189,6 +228,13 @@
 }
 EXPORT_SYMBOL_GPL(dma_create_context);
 
+/**
+ * dma_delete_context - Delete DMA context
+ * @dma_dev: DMA device pointer
+ * @handle: DMA context ID value
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_delete_context(struct dma_device *dma_dev, int handle)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -227,6 +273,17 @@
 }
 EXPORT_SYMBOL_GPL(dma_delete_context);
 
+/**
+ * dma_alloc_queue - Alloc DMA queue
+ * @dma_dev: DMA device pointer
+ * @ctx_id: DMA context ID
+ * @cfg: DMA queue configuration information pointer
+ *
+ * The user uses the queue for DMA read and write operations.
+ *
+ * Context: Process context.
+ * Return: DMA queue ID value
+ */
 int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cfg)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -285,6 +342,13 @@
 }
 EXPORT_SYMBOL_GPL(dma_alloc_queue);
 
+/**
+ * dma_free_queue - Free DMA queue
+ * @dma_dev: DMA device pointer
+ * @queue_id: DMA queue ID
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_free_queue(struct dma_device *dma_dev, int queue_id)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -318,6 +382,18 @@
 }
 EXPORT_SYMBOL_GPL(dma_free_queue);
 
+/**
+ * dma_register_seg - Register local segment
+ * @dma_dev: DMA device pointer
+ * @ctx_id: DMA context ID
+ * @cfg: DMA segment configuration information pointer
+ *
+ * The segment stores local payload information for operations such as DMA
+ * read and write. A pointer to the segment information is returned.
+ *
+ * Context: Process context.
+ * Return: DMA segment structure pointer
+ */
 struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id,
 				 struct dma_seg_cfg *cfg)
 {
@@ -390,6 +466,13 @@
 }
 EXPORT_SYMBOL_GPL(dma_register_seg);
 
+/**
+ * dma_unregister_seg - Unregister local segment
+ * @dma_dev: DMA device pointer
+ * @dma_seg: DMA segment pointer
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg)
 {
 	struct cdma_ctx_res *ctx_res;
@@ -426,6 +509,16 @@
 }
 EXPORT_SYMBOL_GPL(dma_unregister_seg);
 
+/**
+ * dma_import_seg - Import the remote segment
+ * @cfg: DMA segment configuration information pointer
+ *
+ * The segment stores the remote payload information for operations such as
+ * DMA read and write. A pointer to the segment information is returned.
+ *
+ * Context: Process context.
+ * Return: DMA segment structure pointer
+ */
 struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg)
 {
 	if (!cfg || !cfg->sva || !cfg->len)
@@ -435,6 +528,12 @@
 }
 EXPORT_SYMBOL_GPL(dma_import_seg);
 
+/**
+ * dma_unimport_seg - Unimport the remote segment
+ * @dma_seg: DMA segment pointer
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_unimport_seg(struct dma_seg *dma_seg)
 {
 	if (!dma_seg)
@@ -481,6 +580,22 @@ static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id,
 	return 0;
 }
 
+/**
+ * dma_write - DMA write operation
+ * @dma_dev: DMA device pointer
+ * @rmt_seg: the remote segment pointer
+ * @local_seg: the local segment pointer
+ * @queue_id: DMA queue ID
+ *
+ * Invoke this interface to initiate a unilateral write operation request,
+ * sending the specified number of bytes of data from the designated local
+ * memory starting position to the specified destination address.
+ * Once the data is successfully written to the remote node, the application
+ * can poll the queue to obtain the completion message.
+ *
+ * Context: Process context. Takes and releases the spin_lock.
+ * Return: operation result, DMA_STATUS_OK on success
+ */
 enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
 			  struct dma_seg *local_seg, int queue_id)
 {
@@ -505,6 +620,23 @@
 }
 EXPORT_SYMBOL_GPL(dma_write);
 
+/**
+ * dma_write_with_notify - DMA write with notify operation
+ * @dma_dev: DMA device pointer
+ * @rmt_seg: the remote segment pointer
+ * @local_seg: the local segment pointer
+ * @queue_id: DMA queue ID
+ * @data: notify data for the write with notify operation
+ *
+ * Invoke this interface to initiate a unilateral write with notify operation
+ * request, which sends a specified number of bytes of data from a designated
+ * starting position in local memory to a specified destination address.
+ * Once the data is successfully written to the remote node, the application
+ * can poll the queue to obtain the completion message.
+ *
+ * Context: Process context. Takes and releases the spin_lock.
+ * Return: operation result, DMA_STATUS_OK on success
+ */
 enum dma_status dma_write_with_notify(struct dma_device *dma_dev,
 				      struct dma_seg *rmt_seg,
 				      struct dma_seg *local_seg, int queue_id,
@@ -531,6 +663,22 @@
 }
 EXPORT_SYMBOL_GPL(dma_write_with_notify);
 
+/**
+ * dma_read - DMA read operation
+ * @dma_dev: DMA device pointer
+ * @rmt_seg: the remote segment pointer
+ * @local_seg: the local segment pointer
+ * @queue_id: DMA queue ID
+ *
+ * Invoke this interface to initiate a unilateral read operation request,
+ * reading data from the specified remote address to the designated local
+ * cache starting position.
+ * Once the data is successfully read from the remote node to the local
+ * memory, the application can poll the queue to obtain the completion
+ * message.
+ *
+ * Context: Process context. Takes and releases the spin_lock.
+ * Return: operation result, DMA_STATUS_OK on success
+ */
 enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
 			 struct dma_seg *local_seg, int queue_id)
 {
@@ -555,6 +703,21 @@
 }
 EXPORT_SYMBOL_GPL(dma_read);
 
+/**
+ * dma_cas - DMA cas operation
+ * @dma_dev: DMA device pointer
+ * @rmt_seg: the remote segment pointer
+ * @local_seg: the local segment pointer
+ * @queue_id: DMA queue ID
+ * @data: compare data and swap data for the cas operation
+ *
+ * Initiate a request for a unilateral atomic CAS operation. Once the
+ * operation is successful, the application can poll the queue to obtain the
+ * completion message.
+ *
+ * Context: Process context. Takes and releases the spin_lock.
+ * Return: operation result, DMA_STATUS_OK on success
+ */
 enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
 			struct dma_seg *local_seg, int queue_id,
 			struct dma_cas_data *data)
@@ -580,6 +743,21 @@
 }
 EXPORT_SYMBOL_GPL(dma_cas);
 
+/**
+ * dma_faa - DMA faa operation
+ * @dma_dev: DMA device pointer
+ * @rmt_seg: the remote segment pointer
+ * @local_seg: the local segment pointer
+ * @queue_id: DMA queue ID
+ * @add: add data for the faa operation
+ *
+ * Initiate a request for a unilateral atomic FAA operation. Once the
+ * operation is successful, the application can poll the queue to obtain the
+ * completion message.
+ *
+ * Context: Process context. Takes and releases the spin_lock.
+ * Return: operation result, DMA_STATUS_OK on success
+ */
 enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg,
 			struct dma_seg *local_seg, int queue_id, u64 add)
 {
@@ -604,6 +782,23 @@
 }
 EXPORT_SYMBOL_GPL(dma_faa);
 
+/**
+ * dma_poll_queue - DMA polling queue
+ * @dma_dev: DMA device pointer
+ * @queue_id: DMA queue ID
+ * @cr_cnt: number of completion records
+ * @cr: completion record pointer
+ *
+ * Poll the DMA channel completion event; the polling result is returned to
+ * the address specified by the parameter cr.
+ * The cr data structure includes information such as the result of the
+ * request execution, the length of data transferred, and the type of error.
+ * The caller must ensure that the parameter cr_cnt matches the number of
+ * completion records that the buffer specified by cr can hold.
+ *
+ * Context: Process context.
+ * Return: polling operation result, >0 on success, others on failure
+ */
 int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt,
 		   struct dma_cr *cr)
 {
@@ -639,6 +834,21 @@
 }
 EXPORT_SYMBOL_GPL(dma_poll_queue);
 
+/**
+ * dma_register_client - DMA register client
+ * @client: DMA device client pointer
+ *
+ * Register the management software interface to notify the management
+ * software that the DMA driver is online. After loading or resetting and
+ * restarting the driver, call the add interface to notify the management
+ * software to request the resources required by DMA. When the driver is
+ * reset, deregistered, or unloaded, call the stop interface to notify the
+ * management software to stop using the DMA channel, and then call the
+ * remove interface to notify the management software to delete the DMA
+ * resources.
+ *
+ * Context: Process context.
+ * Return: operation result, 0 on success, others on failure
+ */
 int dma_register_client(struct dma_client *client)
 {
 	struct cdma_dev *cdev = NULL;
@@ -677,6 +887,15 @@
 }
 EXPORT_SYMBOL_GPL(dma_register_client);
 
+/**
+ * dma_unregister_client - DMA unregister client
+ * @client: DMA device client pointer
+ *
+ * Unregister the management software interface and delete client resources.
+ *
+ * Context: Process context.
+ * Return: NA
+ */
 void dma_unregister_client(struct dma_client *client)
 {
 	struct cdma_dev *cdev = NULL;
diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h
index 61449ab9ee26..51acd722a74d 100644
--- a/include/ub/cdma/cdma_api.h
+++ b/include/ub/cdma/cdma_api.h
@@ -7,6 +7,14 @@
 #include
 #include
 
+/**
+ * struct dma_device - DMA device structure
+ * @attr: CDMA device attribute info: EID, UPI etc
+ * @ref_cnt: reference count for adding a context to device
+ * @private_data: cdma context resources pointer
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
+ */
 struct dma_device {
 	struct cdma_device_attr attr;
 	atomic_t ref_cnt;
@@ -22,17 +30,35 @@ enum dma_cr_opcode {
 	DMA_CR_OPC_WRITE_WITH_IMM,
 };
 
+/**
+ * union dma_cr_flag - DMA completion record flag
+ * @bs: flag bit value structure
+ * @value: flag value
+ */
 union dma_cr_flag {
 	struct {
-		u8 s_r : 1;
-		u8 jetty : 1;
-		u8 suspend_done : 1;
-		u8 flush_err_done : 1;
+		u8 s_r : 1; /* indicate CR stands for sending or receiving */
+		u8 jetty : 1; /* indicate id in the CR stands for jetty or JFS */
+		u8 suspend_done : 1; /* suspend done flag */
+		u8 flush_err_done : 1; /* flush error done flag */
 		u8 reserved : 4;
 	} bs;
 	u8 value;
};
 
+/**
+ * struct dma_cr - DMA completion record structure
+ * @status: completion record status
+ * @user_ctx: user private data information, optional
+ * @opcode: DMA operation code
+ * @flag: completion record flag
+ * @completion_len: the number of bytes transferred
+ * @local_id: local JFS ID
+ * @remote_id: remote JFS ID, not in use for now
+ * @tpn: transport number
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
+ */
 struct dma_cr {
 	enum dma_cr_status status;
 	u64 user_ctx;
@@ -46,6 +72,16 @@ struct dma_cr {
 	u32 rsvd[4];
 };
 
+/**
+ * struct queue_cfg - DMA queue config structure
+ * @queue_depth: queue depth
+ * @priority: the priority of JFS, ranging from [0, 15]
+ * @user_ctx: user private data information, optional
+ * @dcna: remote device CNA
+ * @rmt_eid: remote device EID
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
+ */
 struct queue_cfg {
 	u32 queue_depth;
 	u8 priority;
@@ -57,6 +93,17 @@ struct queue_cfg {
 	u32 rsvd[6];
 };
 
+/**
+ * struct dma_seg - DMA segment structure
+ * @handle: segment resource handle
+ * @sva: payload virtual address
+ * @len: payload data length
+ * @tid: payload token id
+ * @token_value: not used for now
+ * @token_value_valid: not used for now
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
+ */
 struct dma_seg {
 	u64 handle;
 	u64 sva;
@@ -77,6 +124,11 @@ struct dma_seg_cfg {
 	u32 rsvd[4];
 };
 
+/**
+ * struct dma_context - DMA context structure
+ * @dma_dev: DMA device pointer
+ * @tid: token id for segment
+ */
 struct dma_context {
 	struct dma_device *dma_dev;
 	u32 tid; /* data valid only in bit 0-19 */
@@ -87,6 +139,13 @@ enum dma_status {
 	DMA_STATUS_INVAL,
 };
 
+/**
+ * struct dma_cas_data - DMA CAS data structure
+ * @compare_data: compare data, length <= 8B: CMP value, length > 8B: data address
+ * @swap_data: swap data, length <= 8B: swap value, length > 8B: data address
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
+ */
 struct dma_cas_data {
 	u64 compare_data;
 	u64 swap_data;
@@ -94,6 +153,13 @@ struct dma_cas_data {
 	u32 rsvd[4];
 };
 
+/**
+ * struct dma_notify_data - DMA write with notify data structure
+ * @notify_seg: notify segment pointer
+ * @notify_data: notify data value
+ * @rsv_bitmap: reserved field bitmap
+ * @rsvd: reserved field array
*/ struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; @@ -101,6 +167,16 @@ struct dma_notify_data { u32 rsvd[4]; }; +/** + * struct dma_client - DMA register client structure + * @list_node: client list + * @client_name: client name pointer + * @add: add DMA resource function pointer + * @remove: remove DMA resource function pointer + * @stop: stop DMA operation function pointer + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_client { struct list_head list_node; char *client_name; -- Gitee From 1d9cff43b106a229f7e2d0546bd709c2a1581e17 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 24 Nov 2025 14:11:48 +0800 Subject: [PATCH 111/243] ub: cdma: add CDMA kernel driver design document specification commit 7daf8d5283858f1a484e8bc7ef510f3e205001e2 openEuler This patch add CDMA kernel driver design document specification Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/cdma/cdma.rst | 312 ++++++++++++++++++++++++++++++++ Documentation/ub/cdma/index.rst | 14 ++ Documentation/ub/index.rst | 3 +- 3 files changed, 328 insertions(+), 1 deletion(-) create mode 100644 Documentation/ub/cdma/cdma.rst create mode 100644 Documentation/ub/cdma/index.rst diff --git a/Documentation/ub/cdma/cdma.rst b/Documentation/ub/cdma/cdma.rst new file mode 100644 index 000000000000..39be57652426 --- /dev/null +++ b/Documentation/ub/cdma/cdma.rst @@ -0,0 +1,312 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +====================================== +Crystal Direct Memory Access (CDMA) +====================================== + +Overview +========= +CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory read +and write operations between hosts or between host and devices. + +The key features are described as follows: + ++ 1. Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write. ++ 2. Asynchronous memory read and write between host and devices via DMA. ++ 3. Asynchronous memory read and write between devices and host via DMA. + +Overall Structure +=================== + +Driver Modules +--------------- + +The CDMA driver is divided into three modules: UBASE, K-DMA, and U-DMA: + +.. code-block:: none + + +---------------------------+ + | APP | + +---------------------------+ + | + +---------------------------+ + | U-DMA | + +---------------------------+ + | | + | +-------------------+ + | | K-DMA | + | +-------------------+ + | | | + | +----------------+ | + | | Auxiliary Bus | | + | +----------------+ | + | | | + | +-------------------+ + | | UBASE | + | +-------------------+ + | | + +---------------------------+ + | CDMA Hardware | + +---------------------------+ + ++ Figure 1: CDMA Module Relationship Diagram + +UBASE provides management of hardware public resources, including CMD, mailbox +management, event management, and device reset. +It also provides a device and driver matching interface for the CDMA driver based +on the kernel auxiliary bus. + +Within the K-DMA module, functional blocks are divided according to different data +objects: Device Management is responsible for device attribute configuration +(such as EID, UPI, etc.) 
and device capability queries (such as Jetty specifications); +Event Management handles events reported by the controller, including completion +events and asynchronous events; +Queue Management is responsible for JFS(Jetty For Send)/JFC(Jetty For Completion) +resource management. + +Within the U-DMA module, functional blocks are divided according to data plane +functions: Memory verbs, which are unidirectional operations including read, +write, and atomic operations. +Event verbs register callback functions with K-DMA for post-processing of +asynchronous events. + +Interaction Timing +------------------- + +.. code-block:: none + + +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ + | APP | | U-DMA | | K-DMA | | UBASE | | MS | | HW | + +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ + | CDMA API | | | | | + |---------->| ioctl | | | | + | |---------->| UBASE Func| | | + | | |----------->| | | + | | |<-----------| | | + | | | HW Interface | | + | | |----------------------------------->| + | | |<-----------------------------------| + | | | UBASE Func | | | + | | |----------->| MS MSG | | + | | | |---------->| | + | | | |<----------| | + | | |<-----------| | | + | |<----------| | | | + |<----------| | | | | + | | | | | | + | CDMA API | | | | | + |---------->| HW Interface | | | + | DMA OPT |----------------------------------------------->| + | |<-----------------------------------------------| + |<----------| | | | | + | | | | | | + ++ Figure 2: CDMA Interaction Timing + +The 'Figure 2' shows the runtime sequence of interactions between the CDMA driver, +the UBASE driver, the MS(Management Software), and the hardware. + +Functionality +=============== + +CDMA device creation and reset +--------------------------------- +The CDMA devices are dynamically created by the resource management on the +management software, and the reset operation is also performed by the management +software. +Files involved: cdma_main; + +CDMA device and context management +------------------------------------ +The CDMA driver supports lifecycle management of CDMA devices and enables +applications to create device contexts based on these devices. +Files involved: cdma_context, cdma_main; + +CDMA queue management +--------------------------- +The CDMA queue includes the CDMA JFS and JFC defined on the chip, and encompasses +the management of JFS, JFC, and CTP(Compact Transport) resources. +When a remote memory read/write request is initiated, the JFS is used to fill the +corresponding WQE(Work Queue Entry), and the request execution result is received +through the JFC. +Files involved: cdma_queue, cdma_jfs, cdma_jfc, cdma_tp, cdma_db; + +CDMA segment management +----------------------------- +The CDMA driver uses local and remote segment resources for read and write operations. +These operations primarily include the register and unregister functions for +local segment resources, as well as the import and export functions for remote +segment resources. +Files involved: cdma_segment; + +CDMA read/write semantics +--------------------------- +The CDMA communication capability is implemented on the chip side as CTP mode +communication, supporting transaction operations including: +write, write with notify, read, CAS(Compare And Swap), and FAA(Fetch And Add). +Files involved: cdma_handle; + +Processing and reporting of EQE events +--------------------------------------- +The CDMA communication device supports the reporting of transaction operation +results in interrupt mode. 
The reported events are classified into two types: +CE(Completion Event) and AE(Asynchronous Event). +The two types of events trigger the event callback processing function registered +by the CDMA driver in advance in the interrupt context. +Files involved: cdma_event, cdma_eq; + +Supported Hardware +==================== + +CDMA driver supported hardware: + +=========== ============= +Vendor ID Device ID +=========== ============= +0xCC08 0xA003 +0xCC08 0xA004 +0xCC08 0xD804 +0xCC08 0xD805 +=========== ============= + +You can use the ``lsub`` command on your host OS to query devices. +Below is an example output: + +.. code-block:: shell + + Class <000X>: Device : + <00004> Class <0002>: Device : + +Debugging +========== + +Device Info +----------- + +.. code-block:: none + + Query CDMA device information. + Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/dev_info + The 'CDMA_ENO' value represents the ENO (Entity Number) information for + CDMA devices. You can use the 'lsub' command on your host OS to query devices. + +Capability Info +---------------- + +.. code-block:: none + + Query CDMA device capability information. + Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/cap_info + +Queue Info +----------- + +.. code-block:: none + + Query current queue configuration information. + Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/queue_info + Set the queue ID value for the current query using 'queue_id' command, like + $ echo 0 > /sys/kernel/debug/ubase//cdma/resource_info/queue_id. + +Reset Info +------------ + +.. code-block:: none + + Query CDMA device reset operation records. + Example: + $ cat /sys/kernel/debug/ubase//reset_info + +JFS Context +-------------- + +.. code-block:: none + + Query the current JFS channel context information on the software side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/jfs_context + The channel ID is configured by setting the queue ID command, like + $ echo 0 > /sys/kernel/debug/ubase//cdma/context/queue_id. + +JFS Context HW +--------------- + +.. code-block:: none + + Query the current JFS channel context information on the hardware side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/jfs_context_hw + +JFC Context +--------------- + +.. code-block:: none + + Query the current channel JFC context information on the software side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/sq_jfc_context + +JFC Context HW +------------------ + +.. code-block:: none + + Query the current JFC channel context information on the hardware side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/sq_jfc_context_hw + +JFS Entity PI +------------------ + +.. code-block:: none + + Set or query the PI value of the current JFS channel, used for querying + specific SQE information of the JFS. + Example: + $ echo 0 > /sys/kernel/debug/ubase//cdma/entry_info/entry_pi + $ cat /sys/kernel/debug/ubase//cdma/entry_info/entry_pi + +JFS Entity Info +---------------- + +.. code-block:: none + + Query the information of a specific SQE for the current channel JFS. + Example: + $ cat /sys/kernel/debug/ubase//cdma/entry_info/sqe + The channel ID is configured through the queue ID command. + The SQE ID is set by configuring the 'entry_pi' as described above. + Supports kernel-space resources only. + +JFC Entity CI +---------------- + +.. code-block:: none + + Set or query the CI value of the current JFC channel, used for querying + specific CQE information of the JFC. 
+ Example: + $ echo 0 > /sys/kernel/debug/ubase//cdma/entry_info/entry_ci + $ cat /sys/kernel/debug/ubase//cdma/entry_info/entry_ci + +JFC Entity Info +---------------- + +.. code-block:: none + + Query the information of a specific CQE for the current channel JFC. + Example: + $ cat /sys/kernel/debug/ubase//cdma/entry_info/cqe + The channel ID is configured through the Queue ID command. + The CQE ID is set by configuring the 'entry_ci' as described above. + Supports kernel-space resources only. + +Support +======== +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. \ No newline at end of file diff --git a/Documentation/ub/cdma/index.rst b/Documentation/ub/cdma/index.rst new file mode 100644 index 000000000000..368403170e0d --- /dev/null +++ b/Documentation/ub/cdma/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============== +CDMA Driver +=============== + +.. toctree:: + :maxdepth: 2 + :numbered: + + cdma \ No newline at end of file diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 8e939a2ba8fe..c59089129f12 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -8,8 +8,9 @@ UnifiedBus Subsystem ===================== .. toctree:: - :maxdepth: 2 + :maxdepth: 4 ubase/index ubfi/index ubus/index + cdma/index -- Gitee From ab0e92e773acee67c8400cca1017b116f9b02530 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 24 Nov 2025 14:18:52 +0800 Subject: [PATCH 112/243] ub: cdma: add CDMA userspace-api documentation description commit d328ba25db0ec022a3e4885e770a3dcc3a101f0a openEuler This patch add CDMA userspace-api documentation description Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/userspace-api/index.rst | 1 + Documentation/userspace-api/ub/cdma.rst | 51 ++++++++++++++++++++++++ Documentation/userspace-api/ub/index.rst | 13 ++++++ 3 files changed, 65 insertions(+) create mode 100644 Documentation/userspace-api/ub/cdma.rst create mode 100644 Documentation/userspace-api/ub/index.rst diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 2125bb520e52..c02b2bc235db 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -33,6 +33,7 @@ place where this information is gathered. sysfs-platform_profile vduse futex2 + ub/index .. only:: subproject and html diff --git a/Documentation/userspace-api/ub/cdma.rst b/Documentation/userspace-api/ub/cdma.rst new file mode 100644 index 000000000000..e5b9a1e9de76 --- /dev/null +++ b/Documentation/userspace-api/ub/cdma.rst @@ -0,0 +1,51 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============================== +CDMA Userspace Support Library +=============================== + +Overview +========= +CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory read +and write operations between hosts or between host and devices. + +The key features are described as follows: + ++ 1. Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write. ++ 2. Asynchronous memory read and write between host and devices via DMA. ++ 3. Asynchronous memory read and write between devices and host via DMA. 
+ +Char Device +============= +The driver creates one char device per CDMA found on the physical device. +Char devices can be found in /dev/cdma/ and are named as: +/dev/cdma/dev. + +User API +========= + +ioctl +------ +========================= ==================================================== +CDMA_CMD_QUERY_DEV_INFO Query CDMA device information. +CDMA_CMD_CREATE_CTX Create user context resource. +CDMA_CMD_DELETE_CTX Delete user context resource. +CDMA_CMD_CREATE_CTP Create CTP(Compact Transport) channel resource. +CDMA_CMD_DELETE_CTP Delete CTP channel resource. +CDMA_CMD_CREATE_JFS Create JFS(Jetty For Send) resource. +CDMA_CMD_DELETE_JFS Delete JFS resource. +CDMA_CMD_REGISTER_SEG Register local segment resource. +CDMA_CMD_UNREGISTER_SEG Unregister local segment resource. +CDMA_CMD_CREATE_QUEUE Create queue resource. +CDMA_CMD_DELETE_QUEUE Delete queue resource. +CDMA_CMD_CREATE_JFC Create JFC(Jetty For Completion) resource. +CDMA_CMD_DELETE_JFC Delete JFC resource. +CDMA_CMD_CREATE_JFCE Create JFCE(Jetty For Completion Event) resource. +========================= ==================================================== + +Support +======== +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. \ No newline at end of file diff --git a/Documentation/userspace-api/ub/index.rst b/Documentation/userspace-api/ub/index.rst new file mode 100644 index 000000000000..3206a2cf64c7 --- /dev/null +++ b/Documentation/userspace-api/ub/index.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +===================== +UnifiedBus Subsystem +===================== + +.. toctree:: + :maxdepth: 1 + + cdma \ No newline at end of file -- Gitee From 30d90f41aa1b92f7a046ad17462c62ca0bf0dda0 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Wed, 26 Nov 2025 19:44:05 +0800 Subject: [PATCH 113/243] ub: ub_fwctl: add ub_fwctl driver-api documentation description commit f632e7e84ac26e324f4c4a43bc72947d44590d1d openEuler This patch add ub_fwctl driver-api documentation description Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 1 + Documentation/ub/ub_fwctl/index.rst | 11 ++ Documentation/ub/ub_fwctl/ub_fwctl.rst | 112 +++++++++++ Documentation/userspace-api/fwctl/fwctl.rst | 1 + Documentation/userspace-api/fwctl/index.rst | 1 + .../userspace-api/fwctl/ub_fwctl.rst | 51 +++++ drivers/fwctl/ub/ub_common.h | 54 +++++ include/uapi/fwctl/ub_fwctl.h | 184 ++++++++++++++++++ 8 files changed, 415 insertions(+) create mode 100644 Documentation/ub/ub_fwctl/index.rst create mode 100644 Documentation/ub/ub_fwctl/ub_fwctl.rst create mode 100644 Documentation/userspace-api/fwctl/ub_fwctl.rst diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index c59089129f12..0a3973b98512 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -10,6 +10,7 @@ UnifiedBus Subsystem .. toctree:: :maxdepth: 4 + ub_fwctl/index ubase/index ubfi/index ubus/index diff --git a/Documentation/ub/ub_fwctl/index.rst b/Documentation/ub/ub_fwctl/index.rst new file mode 100644 index 000000000000..4274b33be65a --- /dev/null +++ b/Documentation/ub/ub_fwctl/index.rst @@ -0,0 +1,11 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============== +UB FWCTL Driver +=============== + +.. 
toctree::
+   :maxdepth: 2
+   :numbered:
+
+   ub_fwctl
\ No newline at end of file
diff --git a/Documentation/ub/ub_fwctl/ub_fwctl.rst b/Documentation/ub/ub_fwctl/ub_fwctl.rst
new file mode 100644
index 000000000000..5256ff8d122f
--- /dev/null
+++ b/Documentation/ub/ub_fwctl/ub_fwctl.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+UB FWCTL Kernel Design
+======================
+
+Overview
+========
+
+UB_FWCTL is an auxiliary bus device driver based on the PMU IDEV.
+It isolates user-mode debug (operation and maintenance) functions from chip
+implementation details: user debug commands are converted into CMDQ commands
+and sent to the software through the CMDQ channel of the PMU IDEV device.
+
+Description of the Design
+=========================
+
+The public debug tool introduced in this document, ub_fwctl, is primarily
+designed to query UB public function configurations, the status and statistics
+of many modules, and die-level information.
+
+The debug functions provided by this module are shared among multiple UB
+subsystems and do not belong to any single feature. ub_fwctl interfaces with
+the open-source fwctl framework and provides a user-defined command format for
+UB, supporting the public DFX functions of the UB system.
+
+Currently, ub_fwctl only provides query functions and does not support
+configuration functions. The DFX tools for each feature are described in
+detail in the corresponding feature design documents. This design document
+focuses on the design of the ub_fwctl tool::
+
+    Purpose: As an auxiliary device driver, it implements the debug function
+    ops defined by the fwctl module and calls the CMDQ interface to pass debug
+    messages to the software.
+
+    Function List:
+    1) Serve as an auxiliary device driver to match auxiliary devices.
+    2) Register the fwctl device and the ub_fwctl function implementation.
+    3) Provide CMD queue management interfaces.
+
+Data structure design of UB FWCTL
+=================================
+
+.. kernel-doc:: drivers/fwctl/ub/ub_common.h
+
+
+System Function Design Description
+==================================
+
+Loading and unloading the ub_fwctl driver
+-----------------------------------------
+
+Feature Introduction::
+
+    FWCTL is a debug framework scheduled for integration into the mainline
+    Linux kernel. It provides a command pathway from userspace to kernel
+    space, requiring device manufacturers to implement their own driver
+    plugins registered with the FWCTL kernel framework.
+    UB has implemented a driver called ub_fwctl, which consists of both a
+    userspace command-line tool (ubctl) and a kernel-space driver (ub_fwctl).
+    After loading the ub_fwctl driver, the sysfs system exposes a device file
+    (such as /dev/ubcl) in the OS's /dev directory. The userspace program
+    ubctl can then open this device file via open(/dev/ubcl) to obtain a file
+    descriptor, and subsequently communicate with the driver through ioctl
+    calls.
+
+Implementation Method of Function::
+
+    1. ub_fwctl registers itself with the fwctl framework.
+    2. As an auxiliary device, ub_fwctl connects to ubase through the
+    auxiliary bus and uses the CMDQ (command queue) of the PMU IDEV to call
+    the software programming interface for reading and writing registers.
+    3. ubctl provides command-line commands for users to invoke.
+    During operation, ubctl first opens the /dev/fwctl/fwctlNN device file.
+    It then assembles a corresponding data structure based on user input.
+    Next, it invokes the ioctl() system call to enter kernel mode.
+    Upon receiving a command from ubctl, the ub_fwctl driver first validates
+    the command. It then communicates with the ubase software module by
+    calling its interface to access the CMDQ. The software returns the
+    register access result to ub_fwctl via the CMDQ, and ub_fwctl returns
+    this data to user space. Finally, after completing its operation, ubctl
+    closes the opened /dev/ubcl file descriptor.
+
+.. code-block:: none
+
+    +-------+            +----------+                     +-------+           +-----+
+    | ubctl | --ioctl--> | ub_fwctl | --ubase_send_cmd--> | ubase | --cmdq--> | imp |
+    +-------+            +----------+                     +-------+           +-----+
+
+Querying UB link and chip info by ub_fwctl
+------------------------------------------
+
+Feature Introduction::
+
+    After a failure occurs in the production environment, further
+    troubleshooting is required to identify the root cause, including checks
+    of abnormal interrupts, statistical counters, key FIFO status, and key
+    state machine status. ubctl needs to let users query the chip's debug
+    information from the command-line tool and output that information in a
+    form that users can understand.
+
+Implementation Method of Function::
+
+    ubctl receives input from the command line, assembles it into the
+    corresponding commands, and invokes ioctl to enter kernel space. The fwctl
+    driver copies the data into kernel space, assembles the corresponding
+    opcode, and sends the command to the software for processing via the CMDQ
+    of the PMU IDEV. After reading the corresponding registers according to
+    the chip's rules, the software returns the data to ub_fwctl, which then
+    returns the data to user space. Finally, ubctl displays the data.
+
+    The following types of registers are supported for query:
+    1. Querying information about the UB link.
+    2. Querying QoS memory access information.
+    3. Querying port link status.
+    4. Querying DL layer service packet statistics.
+    5. Querying NL layer service packet statistics.
+    6. Querying SSU packet statistics.
+    7. Querying BA layer packet statistics.
diff --git a/Documentation/userspace-api/fwctl/fwctl.rst b/Documentation/userspace-api/fwctl/fwctl.rst
index 8c586a8f677d..8c4472f98065 100644
--- a/Documentation/userspace-api/fwctl/fwctl.rst
+++ b/Documentation/userspace-api/fwctl/fwctl.rst
@@ -149,6 +149,7 @@ fwctl User API
 ==============
 
 .. kernel-doc:: include/uapi/fwctl/fwctl.h
+.. kernel-doc:: include/uapi/fwctl/ub_fwctl.h
 
 sysfs Class
 -----------
diff --git a/Documentation/userspace-api/fwctl/index.rst b/Documentation/userspace-api/fwctl/index.rst
index 06959fbf1547..be74da876cae 100644
--- a/Documentation/userspace-api/fwctl/index.rst
+++ b/Documentation/userspace-api/fwctl/index.rst
@@ -10,3 +10,4 @@ to securely construct and execute RPCs inside device firmware.
    :maxdepth: 1
 
    fwctl
+   ub_fwctl
diff --git a/Documentation/userspace-api/fwctl/ub_fwctl.rst b/Documentation/userspace-api/fwctl/ub_fwctl.rst
new file mode 100644
index 000000000000..bdc0d3a5a7b6
--- /dev/null
+++ b/Documentation/userspace-api/fwctl/ub_fwctl.rst
@@ -0,0 +1,51 @@
+..
SPDX-License-Identifier: GPL-2.0
+
+================
+fwctl ub driver
+================
+
+Overview
+========
+
+The ub_fwctl tool is primarily designed to provide functions including
+querying the configuration of UB common functions, the status and statistics
+of common modules, and information at the die level. ub_fwctl is integrated
+with the open-source fwctl framework, providing a custom user-mode command
+format for UB and supporting the common functionality of UB systems.
+
+The implemented driver is ub_fwctl, which includes the user-mode command-line
+tool ubctl and the kernel-mode driver ub_fwctl. After the ub_fwctl driver is
+loaded, a device file such as ub_ctl appears in the OS's /dev directory
+through the sysfs system. The user-mode program ubctl obtains a file
+descriptor by calling open(/dev/fwctl/fwctlNN), and then communicates with
+the driver by calling ioctl.
+
+Function implementation scheme::
+
+    1. ub_fwctl registers itself with the fwctl framework.
+
+    2. As an auxiliary device, ub_fwctl is connected to ubase through an
+    auxiliary bus and uses the PMU IDEV's CMDQ to call the software
+    programming interface to read and write registers.
+
+    3. ubctl provides command-line commands for users to call. At startup,
+    ubctl opens the ubctl device file, assembles the corresponding data
+    structure based on input, and calls ioctl to enter kernel mode. After
+    receiving the ubctl command, ub_fwctl first checks the validity of the
+    command, and then communicates with the software by calling the interface
+    provided by ubase to access the CMDQ. The software returns the result of
+    accessing the register to ub_fwctl through the CMDQ, and ub_fwctl then
+    returns the data to user space. Finally, ubctl closes the opened device
+    file.
+
+ub_fwctl User API
+==================
+
+The first step for the app is to issue ioctl(UBCTL_IOCTL_CMDRPC). Each RPC
+request includes the operation id, and the in and out buffer lengths and
+pointers. The driver verifies the operation, then checks the request scope
+against the operation's required scope. The request is put together with the
+request data and sent through the software's message queue to the firmware,
+and the results are returned to the caller.
+
+The RPC endpoints, operations, and buffer contents are defined by the
+particular firmware package in the device, which varies across the
+available product configurations. The details are available in the
+specific product SDK documentation.
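+
+The sketch below illustrates this flow from userspace. It is a minimal,
+hedged example: the device node index, the RPC struct layout, and the exact
+ioctl number are defined by the uAPI and SDK headers, so only the enum value
+``UTOOL_CMD_QUERY_DL_LINK_STATUS`` is taken from this document; the rest is
+placeholder scaffolding.
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        /* Device node created by the fwctl core; the index is an example. */
+        int fd = open("/dev/fwctl/fwctl0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        /*
+         * Assemble a request carrying the operation id (for example
+         * UTOOL_CMD_QUERY_DL_LINK_STATUS) plus the in/out buffer lengths
+         * and pointers, then issue ioctl(fd, UBCTL_IOCTL_CMDRPC, &rpc).
+         * The struct layout and the ioctl number come from the uAPI
+         * header and are omitted here.
+         */
+        close(fd);
+        return 0;
+    }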
diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h
index ab6761ffaad8..ab10576e3914 100644
--- a/drivers/fwctl/ub/ub_common.h
+++ b/drivers/fwctl/ub/ub_common.h
@@ -30,12 +30,27 @@
 #define UBCTL_EXTRACT_BITS(value, start, end) \
 	(((value) >> (start)) & ((1UL << ((end) - (start) + 1)) - 1))
 
+/**
+ * struct ubctl_dev - Device struct of the framework
+ * @fwctl: the embedded fwctl device
+ * @ioctl_fifo: FIFO of ioctl entries
+ * @adev: the auxiliary device this ubctl device is bound to
+ */
 struct ubctl_dev {
 	struct fwctl_device fwctl;
 	DECLARE_KFIFO_PTR(ioctl_fifo, unsigned long);
 	struct auxiliary_device *adev;
 };
 
+/**
+ * struct ubctl_query_cmd_param - Parameters of a userspace RPC
+ * @in_len: Length of @in
+ * @in: Input data
+ * @out_len: Length of @out
+ * @out: Output data
+ *
+ * Used to receive the parameters passed from a userspace RPC.
+ */
 struct ubctl_query_cmd_param {
 	size_t in_len;
 	struct fwctl_rpc_ub_in *in;
@@ -43,6 +58,17 @@ struct ubctl_query_cmd_param {
 	struct fwctl_rpc_ub_out *out;
 };
 
+/**
+ * struct ubctl_cmd - Parameters of a query command
+ * @op_code: The operation code
+ * @is_read: Whether the command is read-only or read-write
+ * @in_len: Length of @in
+ * @out_len: Length of @out
+ * @in: Input data
+ * @out: Output data
+ *
+ * Used for sending commands to and receiving results from the software.
+ */
 struct ubctl_cmd {
 	u32 op_code;
 	u32 is_read;
@@ -76,16 +102,44 @@ struct ubctl_query_cmd_dp {
 	void *cmd_out;
 };
 
+/**
+ * ubctl_ubase_cmd_send - ubase interface for issuing a CMDQ command
+ * @adev: The auxiliary framework device
+ * @cmd: Command information of ubctl
+ */
 int ubctl_ubase_cmd_send(struct auxiliary_device *adev, struct ubctl_cmd *cmd);
 int ubctl_fill_cmd(struct ubctl_cmd *cmd, void *cmd_in, void *cmd_out,
 		   u32 out_len, u32 is_read);
+
+/**
+ * ubctl_query_data - Package and deliver CMDQ parameters
+ * @ucdev: ubctl device
+ * @query_cmd_param: Parameters passed from the userspace RPC
+ * @query_func: Callback functions for issuing and processing data
+ * @query_dp: Parameters related to the CMDQ
+ * @query_dp_num: Number of elements in @query_dp
+ */
 int ubctl_query_data(struct ubctl_dev *ucdev,
 		     struct ubctl_query_cmd_param *query_cmd_param,
 		     struct ubctl_func_dispatch *query_func,
 		     struct ubctl_query_dp *query_dp, u32 query_dp_num);
+
+/**
+ * ubctl_query_data_deal - Default callback for processing returned data
+ * @ucdev: ubctl device
+ * @query_cmd_param: Parameters passed from the userspace RPC and the IMP
+ * @cmd: Command information of ubctl
+ * @out_len: Data length of the 'out' in @query_cmd_param
+ * @offset: Data offset of the 'out' in @query_cmd_param
+ */
 int ubctl_query_data_deal(struct ubctl_dev *ucdev,
 			  struct ubctl_query_cmd_param *query_cmd_param,
 			  struct ubctl_cmd *cmd, u32 out_len, u32 offset);
 #endif
diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h
index 05d7be4d7f8f..38787e5cc8ca 100644
--- a/include/uapi/fwctl/ub_fwctl.h
+++ b/include/uapi/fwctl/ub_fwctl.h
@@ -36,95 +36,279 @@ struct fwctl_rpc_ub_out {
 	__u32 data[];
 };
 
+/**
+ * enum ub_fwctl_cmdrpc_type - Operation codes for the UB fwctl RPC
+ *
+ * Refer to fwctl.rst for a more detailed discussion of the RPC interface.
+ */ enum ub_fwctl_cmdrpc_type { + /** + * @UTOOL_CMD_QUERY_NL: Query all registers at the NL layer + */ UTOOL_CMD_QUERY_NL = 0x0001, + /** + * @UTOOL_CMD_QUERY_NL_PKT_STATS: Query NL layer PKT_STATE related registers + */ UTOOL_CMD_QUERY_NL_PKT_STATS = 0x0002, + /** + * @UTOOL_CMD_QUERY_NL_SSU_STATS: Query NL layer SSU_STATS related registers + */ UTOOL_CMD_QUERY_NL_SSU_STATS = 0x0003, + /** + * @UTOOL_CMD_QUERY_NL_ABN: Query NL layer NL_ABN related registers + */ UTOOL_CMD_QUERY_NL_ABN = 0x0004, + /** + * @UTOOL_CMD_QUERY_TP: Query all registers at the TP layer + */ UTOOL_CMD_QUERY_TP = 0x0021, + /** + * @UTOOL_CMD_QUERY_TP_PKT_STATS: Query TP layer PKT_STATE related registers + */ UTOOL_CMD_QUERY_TP_PKT_STATS = 0x0022, + /** + * @UTOOL_CMD_QUERY_TP_TX_ROUTE: Query TP layer TX_ROUTE related registers + */ UTOOL_CMD_QUERY_TP_TX_ROUTE = 0x0023, + /** + * @UTOOL_CMD_QUERY_TP_ABN_STATS: Query TP layer ABN_STATS related registers + */ UTOOL_CMD_QUERY_TP_ABN_STATS = 0x0024, + /** + * @UTOOL_CMD_QUERY_TP_RX_BANK: Query TP layer RX_BANK related registers + */ UTOOL_CMD_QUERY_TP_RX_BANK = 0x0025, + /** + * @UTOOL_CMD_QUERY_DL: Query all registers at the DL layer + */ UTOOL_CMD_QUERY_DL = 0x0011, + /** + * @UTOOL_CMD_QUERY_DL_PKT_STATS: Query DL layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_DL_PKT_STATS = 0x0012, + /** + * @UTOOL_CMD_QUERY_DL_LINK_STATUS: Query DL layer LINK_STATUS related registers + */ UTOOL_CMD_QUERY_DL_LINK_STATUS = 0x0013, + /** + * @UTOOL_CMD_QUERY_DL_LANE: Query DL layer LANE related registers + */ UTOOL_CMD_QUERY_DL_LANE = 0x0014, + /** + * @UTOOL_CMD_QUERY_DL_BIT_ERR: Query DL layer BIT_ERR related registers + */ UTOOL_CMD_QUERY_DL_BIT_ERR = 0x0015, + /** + * @UTOOL_CMD_QUERY_DL_LINK_TRACE: Query DL layer LINK_TRACE related registers + */ UTOOL_CMD_QUERY_DL_LINK_TRACE = 0x0016, + /** + * @UTOOL_CMD_QUERY_DL_BIST: Query DL layer BIST related registers + */ UTOOL_CMD_QUERY_DL_BIST = 0x0017, + /** + * @UTOOL_CMD_CONF_DL_BIST: Config DL layer BIST related registers + */ UTOOL_CMD_CONF_DL_BIST = 0x0018, + /** + * @UTOOL_CMD_QUERY_DL_BIST_ERR: Query DL layer BIST_ERR related registers + */ UTOOL_CMD_QUERY_DL_BIST_ERR = 0x0019, + /** + * @UTOOL_CMD_QUERY_TA: Query all registers at the TA layer + */ UTOOL_CMD_QUERY_TA = 0x0031, + /** + * @UTOOL_CMD_QUERY_TA_PKT_STATS: Query TA layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_TA_PKT_STATS = 0x0032, + /** + * @UTOOL_CMD_QUERY_TA_ABN_STATS: Query TA layer ABN_STATS related registers + */ UTOOL_CMD_QUERY_TA_ABN_STATS = 0x0033, + /** + * @UTOOL_CMD_QUERY_BA: Query all registers at the BA layer + */ UTOOL_CMD_QUERY_BA = 0x0041, + /** + * @UTOOL_CMD_QUERY_BA_PKT_STATS: Query BA layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_BA_PKT_STATS = 0x0042, + /** + * @UTOOL_CMD_QUERY_BA_MAR: Query BA layer MAR related registers + */ UTOOL_CMD_QUERY_BA_MAR = 0x0043, + /** + * @UTOOL_CMD_QUERY_BA_MAR_TABLE: Query BA layer MAR_TABLE related registers + */ UTOOL_CMD_QUERY_BA_MAR_TABLE = 0x0044, + /** + * @UTOOL_CMD_QUERY_BA_MAR_CYC_EN: Query BA layer MAR_CYC_EN related registers + */ UTOOL_CMD_QUERY_BA_MAR_CYC_EN = 0x0045, + /** + * @UTOOL_CMD_CONF_BA_MAR_CYC_EN: Config BA layer MAR_CYC_EN related registers + */ UTOOL_CMD_CONF_BA_MAR_CYC_EN = 0x0046, + /** + * @UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS: Config BA layer MAR_PEFR_STATS related registers + */ UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS = 0x0047, + /** + * @UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS: Query BA layer MAR_PEFR_STATS related registers + */ UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS = 
0x0048,
+	/**
+	 * @UTOOL_CMD_QUERY_QOS: Query QOS related registers
+	 */
+	UTOOL_CMD_QUERY_QOS = 0x0051,
+	/**
+	 * @UTOOL_CMD_QUERY_SCC_VERSION: Query the SCC version
+	 */
+	UTOOL_CMD_QUERY_SCC_VERSION = 0x0061,
+	/**
+	 * @UTOOL_CMD_QUERY_SCC_LOG: Query the SCC log
+	 */
+	UTOOL_CMD_QUERY_SCC_LOG = 0x0062,
+	/**
+	 * @UTOOL_CMD_QUERY_SCC_DEBUG_EN: Query the SCC debug switch
+	 */
+	UTOOL_CMD_QUERY_SCC_DEBUG_EN = 0x0063,
+	/**
+	 * @UTOOL_CMD_CONF_SCC_DEBUG_EN: Config the SCC debug switch
+	 */
+	UTOOL_CMD_CONF_SCC_DEBUG_EN = 0x0064,
+	/**
+	 * @UTOOL_CMD_QUERY_MSGQ_QUE_STATS: Query MSGQ layer QUE_STATS related registers
+	 */
+	UTOOL_CMD_QUERY_MSGQ_QUE_STATS = 0x0071,
+	/**
+	 * @UTOOL_CMD_QUERY_MSGQ_ENTRY: Query MSGQ layer ENTRY related registers
+	 */
+	UTOOL_CMD_QUERY_MSGQ_ENTRY = 0x0072,
+
+	/**
+	 * @UTOOL_CMD_QUERY_QUEUE: Query QUEUE information
+	 */
+	UTOOL_CMD_QUERY_QUEUE = 0x0073,
+	/**
+	 * @UTOOL_CMD_QUERY_PORT_INFO: Query information about the specified port
+	 */
+	UTOOL_CMD_QUERY_PORT_INFO = 0x0081,
+	/**
+	 * @UTOOL_CMD_QUERY_IO_DIE_PORT_INFO: Query port-related information about the specified
+	 * IO die
+	 */
+	UTOOL_CMD_QUERY_IO_DIE_PORT_INFO = 0x0082,
+	/**
+	 * @UTOOL_CMD_QUERY_UBOMMU: Query UBOMMU related information
+	 */
+	UTOOL_CMD_QUERY_UBOMMU = 0x0091,
+	/**
+	 * @UTOOL_CMD_QUERY_UMMU_ALL: Query all information of UMMU
+	 */
+	UTOOL_CMD_QUERY_UMMU_ALL = 0x00A1,
+	/**
+	 * @UTOOL_CMD_QUERY_UMMU_SYNC: Query information of UMMU SYNC
+	 */
+	UTOOL_CMD_QUERY_UMMU_SYNC = 0x00A2,
+	/**
+	 * @UTOOL_CMD_CONFIG_UMMU_SYNC: Config information of UMMU SYNC
+	 */
+	UTOOL_CMD_CONFIG_UMMU_SYNC = 0x00A3,
+	/**
+	 * @UTOOL_CMD_QUERY_ECC_2B: Query information of ECC 2B
+	 */
+	UTOOL_CMD_QUERY_ECC_2B = 0x00B1,
+	/**
+	 * @UTOOL_CMD_QUERY_LOOPBACK: Query information of loopback
+	 */
+	UTOOL_CMD_QUERY_LOOPBACK = 0x00D1,
+	/**
+	 * @UTOOL_CMD_CONF_LOOPBACK: Configure the specified loopback mode
+	 */
+	UTOOL_CMD_CONF_LOOPBACK = 0x00D2,
+	/**
+	 * @UTOOL_CMD_QUERY_PRBS_EN: Query PRBS switch status
+	 */
+	UTOOL_CMD_QUERY_PRBS_EN = 0x00D3,
+	/**
+	 * @UTOOL_CMD_CONF_PRBS_EN: Config PRBS switch
+	 */
+	UTOOL_CMD_CONF_PRBS_EN = 0x00D4,
+	/**
+	 * @UTOOL_CMD_QUERY_PRBS_RESULT: Query PRBS error count result
+	 */
+	UTOOL_CMD_QUERY_PRBS_RESULT = 0x00D5,
+	/**
+	 * @UTOOL_CMD_QUERY_DUMP: Dump all register data
+	 */
+	UTOOL_CMD_QUERY_DUMP = 0xFFFE,
+	/**
+	 * @UTOOL_CMD_QUERY_MAX: Maximum command code
+	 */
+	UTOOL_CMD_QUERY_MAX,
 };
 
+/**
+ * struct fwctl_pkt_in_enable - ioctl(FWCTL_RPC) input
+ * @enable: The value of param '-e'
+ */
 struct fwctl_pkt_in_enable {
 	__u8 enable;
 };
 
+/**
+ * struct fwctl_pkt_in_table - ioctl(FWCTL_RPC) input
+ * @port_id: The value of param '-p'
+ * @table_num: Length of the table
+ * @index: The value of param '-i'
+ */
 struct fwctl_pkt_in_table {
 	__u32 port_id;
 	__u32 table_num;
 	__u32 index;
};
 
+/**
+ * struct fwctl_pkt_in_port - ioctl(FWCTL_RPC) input
+ * @port_id: The value of param '-p'
+ */
 struct fwctl_pkt_in_port {
 	__u32 port_id;
 };
 
+/**
+ * struct fwctl_pkt_in_index - ioctl(FWCTL_RPC) input
+ * @index: The value of param '-i'
+ */
 struct fwctl_pkt_in_index {
 	__u32 index;
 };
 
+/**
+ * struct fwctl_pkt_in_ummuid_value - ioctl(FWCTL_RPC) input
+ * @ummu_id: The value of param '-u'
+ * @value: The value of param '-e'
+ */
 struct fwctl_pkt_in_ummuid_value {
 	__u32 ummu_id;
 	__u32 value;
 };
 
 #endif
--
Gitee

From 4bfe42d8336e6a3d9575283b603faec7687532d6 Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 21 Aug 2025 10:48:36 +0800
Subject: [PATCH 114/243] ub: udma: Support import and unimport jfr and jetty.
commit 5ebd5f51fdc2e797a8fc1cb185bbce1aa062a8bf openEuler This patch adds the ability to import and unimport jfr and jetty. During the chain establishment process, urma software stack will invoke the import interface to establish the chain. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 72 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 17 +++++++ drivers/ub/urma/hw/udma/udma_jfr.c | 35 ++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 5 ++ drivers/ub/urma/hw/udma/udma_main.c | 4 ++ 5 files changed, 133 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 914ef33b81d9..385bc9b5605b 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -832,6 +832,44 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +static int udma_check_jetty_grp_info(struct ubcore_tjetty_cfg *cfg, struct udma_dev *dev) +{ + if (cfg->type == UBCORE_JETTY_GROUP) { + if (cfg->trans_mode != UBCORE_TP_RM) { + dev_err(dev->dev, "import jg only support RM, transmode is %u.\n", + cfg->trans_mode); + return -EINVAL; + } + + if (cfg->policy != UBCORE_JETTY_GRP_POLICY_HASH_HINT) { + dev_err(dev->dev, "import jg only support hint, policy is %u.\n", + cfg->policy); + return -EINVAL; + } + } + + return 0; +} + +int udma_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct udma_target_jetty *udma_tjetty = to_udma_tjetty(tjetty); + struct udma_dev *udma_dev = to_udma_dev(tjetty->ub_dev); + + if (!IS_ERR_OR_NULL(tjetty->vtpn)) { + dev_err(udma_dev->dev, + "the target jetty is still being used, id = %u.\n", + tjetty->cfg.id.id); + return -EINVAL; + } + + udma_tjetty->token_value = 0; + tjetty->cfg.token_value.token = 0; + kfree(udma_tjetty); + + return 0; +} + bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, enum ubcore_jetty_state attr_state) { @@ -1095,3 +1133,37 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } + +struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_target_jetty *tjetty; + int ret = 0; + + if (cfg->type != UBCORE_JETTY_GROUP && cfg->type != UBCORE_JETTY) { + dev_err(udma_dev->dev, + "the jetty of the type %u cannot be imported in exp.\n", + cfg->type); + return NULL; + } + + ret = udma_check_jetty_grp_info(cfg, udma_dev); + if (ret) + return NULL; + + tjetty = kzalloc(sizeof(*tjetty), GFP_KERNEL); + if (!tjetty) + return NULL; + + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) { + tjetty->token_value = cfg->token_value.token; + tjetty->token_value_valid = true; + } + + udma_swap_endian(cfg->id.eid.raw, tjetty->le_eid.raw, UBCORE_EID_SIZE); + + return &tjetty->ubcore_tjetty; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5b428e999ff1..dba8fa2a05a5 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -45,6 +45,13 @@ struct udma_jetty { bool ue_rx_closed; }; +struct udma_target_jetty { + struct ubcore_tjetty ubcore_tjetty; + union ubcore_eid le_eid; + uint32_t token_value; + bool token_value_valid; +}; + enum jfsc_mode { JFS, JETTY, @@ -214,6 +221,11 @@ static inline struct udma_jetty_grp *to_udma_jetty_grp(struct ubcore_jetty_group return container_of(jetty_grp, struct udma_jetty_grp, 
ubcore_jetty_grp); } +static inline struct udma_target_jetty *to_udma_tjetty(struct ubcore_tjetty *tjetty) +{ + return container_of(tjetty, struct udma_target_jetty, ubcore_tjetty); +} + static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queue *queue) { return container_of(queue, struct udma_jetty, sq); @@ -229,6 +241,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_unimport_jetty(struct ubcore_tjetty *tjetty); int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, @@ -244,5 +257,9 @@ void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); +struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 953fcffc5001..7462d75f1fba 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -790,3 +790,38 @@ int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, return 0; } + +int udma_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct udma_target_jetty *udma_tjfr = to_udma_tjetty(tjfr); + + udma_tjfr->token_value = 0; + tjfr->cfg.token_value.token = 0; + + kfree(udma_tjfr); + + return 0; +} + +struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct udma_target_jetty *udma_tjfr; + + udma_tjfr = kzalloc(sizeof(*udma_tjfr), GFP_KERNEL); + if (!udma_tjfr) + return NULL; + + if (!udata) { + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) { + udma_tjfr->token_value = cfg->token_value.token; + udma_tjfr->token_value_valid = true; + } + } + + udma_swap_endian(cfg->id.eid.raw, udma_tjfr->le_eid.raw, UBCORE_EID_SIZE); + + return &udma_tjfr->ubcore_tjetty; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index ae6d0d97f460..9a90e60bd391 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -161,5 +161,10 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_ struct ubcore_udata *udata); int udma_destroy_jfr(struct ubcore_jfr *jfr); int udma_destroy_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, int *bad_jfr_index); +int udma_unimport_jfr(struct ubcore_tjetty *tjfr); +struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b1fad9e31f38..b116b514ee3b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -190,10 +190,14 @@ static struct ubcore_ops g_dev_ops = { .query_jfr = udma_query_jfr, .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, + .import_jfr_ex = udma_import_jfr_ex, 
+ .unimport_jfr = udma_unimport_jfr, .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .import_jetty_ex = udma_import_jetty_ex, + .unimport_jetty = udma_unimport_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, }; -- Gitee From 1ed8de670cf0008e39af7ba895e08c796ca7b74a Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 11:17:15 +0800 Subject: [PATCH 115/243] ub: udma: Add and remove jetty to jetty group. commit e5a126ae4f2494f8f60f14db00500714490f715b openEuler This patch adds the ability to Add and remove jetty to jetty group. During the process of creating/destroying Jetty, Jetty can be added or removed from the Jetty group. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_jetty.c | 115 +++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index d9b10ab28028..58c5da4a2234 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -23,6 +23,8 @@ extern bool dump_aux_info; #define UDMA_CTX_NUM 2 +#define UDMA_BITS_PER_INT 32 + #define MAX_JETTY_IN_JETTY_GRP 32 #define UDMA_USER_DATA_H_OFFSET 32U diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 385bc9b5605b..4b4e924f9111 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -172,6 +172,112 @@ static void udma_init_jettyc(struct udma_dev *dev, struct ubcore_jetty_cfg *cfg, ctx->next_rcv_ssn = ctx->next_send_ssn; } +static int update_jetty_grp_ctx_valid(struct udma_dev *udma_dev, + struct udma_jetty_grp *jetty_grp) +{ + struct udma_jetty_grp_ctx ctx[UDMA_CTX_NUM]; + struct ubase_mbx_attr mbox_attr = {}; + int ret; + + ctx[0].valid = jetty_grp->valid; + /* jetty number indicates the location of the jetty with the largest ID. */ + ctx[0].jetty_number = fls(jetty_grp->valid) - 1; + memset(ctx + 1, 0xff, sizeof(ctx[1])); + ctx[1].valid = 0; + ctx[1].jetty_number = 0; + + mbox_attr.tag = jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_MODIFY_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, ctx, sizeof(ctx), &mbox_attr); + if (ret) + dev_err(udma_dev->dev, + "post mailbox update jetty grp ctx failed, ret = %d.\n", + ret); + + return ret; +} + +static uint32_t udma_get_jetty_grp_jetty_id(uint32_t *valid, uint32_t *next) +{ + uint32_t bit_idx; + + bit_idx = find_next_zero_bit((unsigned long *)valid, UDMA_BITS_PER_INT, *next); + if (bit_idx >= UDMA_BITS_PER_INT) + bit_idx = find_next_zero_bit((unsigned long *)valid, UDMA_BITS_PER_INT, 0); + + *next = (*next + 1) >= UDMA_BITS_PER_INT ? 
0 : *next + 1;
+
+	return bit_idx;
+}
+
+static int add_jetty_to_grp(struct udma_dev *udma_dev, struct ubcore_jetty_group *jetty_grp,
+			    struct udma_jetty_queue *sq, uint32_t cfg_id)
+{
+	struct udma_jetty_grp *udma_jetty_grp = to_udma_jetty_grp(jetty_grp);
+	uint32_t bit_idx = cfg_id - udma_jetty_grp->start_jetty_id;
+	int ret = 0;
+
+	mutex_lock(&udma_jetty_grp->valid_lock);
+
+	if (cfg_id == 0)
+		bit_idx = udma_get_jetty_grp_jetty_id(&udma_jetty_grp->valid,
+						      &udma_jetty_grp->next_jetty_id);
+
+	if (bit_idx >= UDMA_BITS_PER_INT || (udma_jetty_grp->valid & BIT(bit_idx))) {
+		dev_err(udma_dev->dev,
+			"jg(%u.%u) valid %u is full or user id(%u) error",
+			udma_jetty_grp->jetty_grp_id, udma_jetty_grp->start_jetty_id,
+			udma_jetty_grp->valid, cfg_id);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	udma_jetty_grp->valid |= BIT(bit_idx);
+	sq->id = udma_jetty_grp->start_jetty_id + bit_idx;
+	sq->jetty_grp = udma_jetty_grp;
+
+	ret = update_jetty_grp_ctx_valid(udma_dev, udma_jetty_grp);
+	if (ret) {
+		dev_err(udma_dev->dev,
+			"update jetty grp ctx valid failed, jetty_grp id is %u.\n",
+			udma_jetty_grp->jetty_grp_id);
+
+		udma_jetty_grp->valid &= ~BIT(bit_idx);
+	}
+out:
+	mutex_unlock(&udma_jetty_grp->valid_lock);
+
+	return ret;
+}
+
+static void remove_jetty_from_grp(struct udma_dev *udma_dev,
+				  struct udma_jetty *jetty)
+{
+	struct udma_jetty_grp *jetty_grp = jetty->sq.jetty_grp;
+	uint32_t bit_idx;
+	int ret;
+
+	bit_idx = jetty->sq.id - jetty_grp->start_jetty_id;
+	if (bit_idx >= UDMA_BITS_PER_INT) {
+		dev_err(udma_dev->dev,
+			"jetty_id(%u) is not in jetty grp, start_jetty_id(%u).\n",
+			jetty->sq.id, jetty_grp->start_jetty_id);
+		return;
+	}
+
+	mutex_lock(&jetty_grp->valid_lock);
+	jetty_grp->valid &= ~BIT(bit_idx);
+	jetty->sq.jetty_grp = NULL;
+
+	ret = update_jetty_grp_ctx_valid(udma_dev, jetty_grp);
+	if (ret)
+		dev_err(udma_dev->dev,
+			"update jetty grp ctx valid failed, jetty_grp id is %u.\n",
+			jetty_grp->jetty_grp_id);
+
+	mutex_unlock(&jetty_grp->valid_lock);
+}
+
 static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id)
 {
 	struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table;
@@ -391,6 +497,13 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq,
 			return ret;
 
 		sq->id = cfg_id;
+	} else if (jetty_grp) {
+		ret = add_jetty_to_grp(udma_dev, jetty_grp, sq, cfg_id);
+		if (ret) {
+			dev_err(udma_dev->dev,
+				"add jetty to grp failed, ret = %d.\n", ret);
+			return ret;
+		}
 	} else {
 		ret = udma_alloc_jetty_id_own(udma_dev, &sq->id, sq->jetty_type);
 	}
@@ -403,6 +516,8 @@ static void free_jetty_id(struct udma_dev *udma_dev,
 {
 	if (udma_jetty->sq.id < udma_dev->caps.jetty.start_idx)
 		udma_id_free(&udma_dev->rsvd_jetty_ida_table, udma_jetty->sq.id);
+	else if (is_grp)
+		remove_jetty_from_grp(udma_dev, udma_jetty);
 	else
 		udma_adv_id_free(&udma_dev->jetty_table.bitmap_table,
 				 udma_jetty->sq.id, false);
--
Gitee

From 6b42f813ba1fa9ccb02fbc7dbaf0d736effb899d Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 21 Aug 2025 12:00:24 +0800
Subject: [PATCH 116/243] ub: udma: Support post jfs work request.

commit a13bf671c3242a5a4d215e12b9a01525d3ef238a openEuler

This patch adds the ability to post a JFS work request. After a user
posts a request, the driver assembles the WQE and updates the doorbell.
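
For reference, the wrap-around copy used when a batch of WQEBBs runs past
the end of the submission ring works as in the standalone sketch below
(illustrative only; the names and the 64-byte entry size are assumptions,
not driver code):

	#include <stdint.h>
	#include <string.h>

	struct wqebb { uint8_t data[64]; };

	/* entry_cnt must be a power of two. */
	static void ring_copy(struct wqebb *ring, uint32_t entry_cnt,
			      uint32_t pi, const struct wqebb *src, uint32_t cnt)
	{
		uint32_t head = pi & (entry_cnt - 1);  /* slot of the first entry */
		uint32_t first = entry_cnt - head;     /* contiguous room at the tail */

		if (first > cnt)
			first = cnt;
		memcpy(&ring[head], src, first * sizeof(*src));
		if (cnt > first)  /* the remainder wraps to the ring start */
			memcpy(ring, src + first, (cnt - first) * sizeof(*src));
	}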
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfs.c | 584 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 90 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 675 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index e770bc5f6a2f..978465d77672 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -486,3 +486,587 @@ int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, return 0; } + +static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) +{ + uint32_t opcode = sqe_ctl->opcode; + uint32_t sqe_ctl_len = get_ctl_len(opcode); + + switch (opcode) { + case UDMA_OPC_SEND: + case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + case UDMA_OPC_WRITE: + case UDMA_OPC_WRITE_WITH_IMM: + if (sqe_ctl->inline_en) + return (sqe_ctl_len + sqe_ctl->inline_msg_len - 1) / + UDMA_JFS_WQEBB_SIZE + 1; + break; + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + return ATOMIC_WQEBB_CNT; + case UDMA_OPC_NOP: + return NOP_WQEBB_CNT; + default: + break; + } + + return sq_cal_wqebb_num(sqe_ctl_len, sqe_ctl->sge_num); +} + +static uint8_t udma_get_jfs_opcode(enum ubcore_opcode opcode) +{ + switch (opcode) { + case UBCORE_OPC_SEND: + return UDMA_OPC_SEND; + case UBCORE_OPC_SEND_IMM: + return UDMA_OPC_SEND_WITH_IMM; + case UBCORE_OPC_SEND_INVALIDATE: + return UDMA_OPC_SEND_WITH_INVALID; + case UBCORE_OPC_WRITE: + return UDMA_OPC_WRITE; + case UBCORE_OPC_WRITE_IMM: + return UDMA_OPC_WRITE_WITH_IMM; + case UBCORE_OPC_READ: + return UDMA_OPC_READ; + case UBCORE_OPC_CAS: + return UDMA_OPC_CAS; + case UBCORE_OPC_FADD: + return UDMA_OPC_FAA; + case UBCORE_OPC_NOP: + return UDMA_OPC_NOP; + default: + return UDMA_OPC_INVALID; + } +} + +static int +udma_fill_sw_sge(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, uint32_t max_inline_size, + struct udma_normal_sge *sge) +{ + struct ubcore_sge *sge_info; + uint32_t total_len = 0; + uint32_t sge_num = 0; + uint32_t num_sge; + uint32_t i; + + switch (wr->opcode) { + case UBCORE_OPC_SEND: + case UBCORE_OPC_SEND_IMM: + case UBCORE_OPC_SEND_INVALIDATE: + sge_info = wr->send.src.sge; + num_sge = wr->send.src.num_sge; + break; + case UBCORE_OPC_WRITE: + case UBCORE_OPC_WRITE_IMM: + sge_info = wr->rw.src.sge; + num_sge = wr->rw.src.num_sge; + break; + default: + return -EINVAL; + } + + if (wr->flag.bs.inline_flag) { + for (i = 0; i < num_sge; i++) { + if (total_len + sge_info[i].len > max_inline_size) { + dev_info(dev->dev, "inline_size %u is over max_size %u.\n", + total_len + sge_info[i].len, max_inline_size); + return -EINVAL; + } + + memcpy((void *)(uintptr_t)sge + total_len, + (void *)(uintptr_t)sge_info[i].addr, + sge_info[i].len); + total_len += sge_info[i].len; + } + sqe_ctl->inline_msg_len = total_len; + } else { + for (i = 0; i < num_sge; i++) { + if (sge_info[i].len == 0) + continue; + sge->va = sge_info[i].addr; + sge->length = sge_info[i].len; + sge++; + sge_num++; + } + sqe_ctl->sge_num = sge_num; + } + + return 0; +} + +static int +udma_k_fill_send_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, struct ubcore_tjetty *tjetty, + uint32_t max_inline_size) +{ + struct udma_target_jetty *udma_tjetty; + struct udma_token_info *token_info; + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + if (udma_fill_sw_sge(dev, sqe_ctl, wr, max_inline_size, 
sge)) + return -EINVAL; + + udma_tjetty = to_udma_tjetty(tjetty); + sqe_ctl->target_hint = wr->send.target_hint; + sqe_ctl->rmt_obj_id = tjetty->cfg.id.id; + sqe_ctl->token_en = udma_tjetty->token_value_valid; + sqe_ctl->rmt_token_value = udma_tjetty->token_value; + + if (wr->opcode == UBCORE_OPC_SEND_IMM) { + memcpy((void *)sqe_ctl + SQE_SEND_IMM_FIELD, &wr->send.imm_data, + sizeof(uint64_t)); + } else if (wr->opcode == UBCORE_OPC_SEND_INVALIDATE) { + udma_seg = to_udma_seg(wr->send.tseg); + token_info = (struct udma_token_info *)&sqe_ctl->rmt_addr_l_or_token_id; + token_info->token_id = udma_seg->tid; + token_info->token_value = udma_seg->token_value; + } + + return 0; +} + +static int +udma_k_fill_write_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, struct ubcore_tjetty *tjetty, + uint32_t max_inline_size) +{ + struct udma_token_info *token_info; + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + uint32_t ctrl_len; + + ctrl_len = get_ctl_len(sqe_ctl->opcode); + sge = (struct udma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + if (udma_fill_sw_sge(dev, sqe_ctl, wr, max_inline_size, sge)) + return -EINVAL; + + sge_info = wr->rw.dst.sge; + udma_seg = to_udma_seg(sge_info[0].tseg); + + sqe_ctl->target_hint = wr->rw.target_hint; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info[0].addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sqe_ctl->opcode == UDMA_OPC_WRITE_WITH_IMM) { + memcpy((void *)sqe_ctl + SQE_WRITE_IMM_FIELD, &wr->rw.notify_data, + sizeof(uint64_t)); + token_info = (struct udma_token_info *) + ((void *)sqe_ctl + WRITE_IMM_TOKEN_FIELD); + token_info->token_id = tjetty->cfg.id.id; + token_info->token_value = tjetty->cfg.token_value.token; + } + + return 0; +} + +static int udma_k_fill_read_sqe(struct udma_sqe_ctl *sqe_ctl, struct ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + uint32_t sge_num = 0; + uint32_t num; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + sge_info = wr->rw.dst.sge; + + for (num = 0; num < wr->rw.dst.num_sge; num++) { + if (sge_info[num].len == 0) + continue; + sge->va = sge_info[num].addr; + sge->length = sge_info[num].len; + sge++; + sge_num++; + } + + sge_info = wr->rw.src.sge; + udma_seg = to_udma_seg(sge_info[0].tseg); + + sqe_ctl->sge_num = sge_num; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info[0].addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + +static bool +udma_k_check_atomic_len(struct udma_dev *dev, uint32_t len, uint8_t opcode) +{ + switch (len) { + case UDMA_ATOMIC_LEN_4: + case UDMA_ATOMIC_LEN_8: + return true; + case UDMA_ATOMIC_LEN_16: + if (opcode == UBCORE_OPC_CAS) + return true; + dev_err(dev->dev, "the atomic opcode must be CAS when len is 16.\n"); + return false; + default: + dev_err(dev->dev, "invalid atomic len %u.\n", len); + return false; + } +} + +static int +udma_k_fill_cas_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct 
ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + + sge_info = wr->cas.src; + if (!udma_k_check_atomic_len(dev, sge_info->len, wr->opcode)) + return -EINVAL; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + sge->va = sge_info->addr; + sge->length = sge_info->len; + + sge_info = wr->cas.dst; + udma_seg = to_udma_seg(sge_info->tseg); + + sqe_ctl->sge_num = UDMA_ATOMIC_SGE_NUM; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = (sge_info->addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= UDMA_ATOMIC_LEN_8) { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->cas.swap_data, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + &wr->cas.cmp_data, sge->length); + } else { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (void *)(uintptr_t)wr->cas.swap_addr, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + (void *)(uintptr_t)wr->cas.cmp_addr, sge->length); + } + + return 0; +} + +static int +udma_k_fill_faa_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + + sge_info = wr->faa.src; + if (!udma_k_check_atomic_len(dev, sge_info->len, wr->opcode)) + return -EINVAL; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + sge->va = sge_info->addr; + sge->length = sge_info->len; + + sge_info = wr->faa.dst; + udma_seg = to_udma_seg(sge_info->tseg); + + sqe_ctl->sge_num = UDMA_ATOMIC_SGE_NUM; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = (sge_info->addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= UDMA_ATOMIC_LEN_8) + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, &wr->faa.operand, + sge->length); + else + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (void *)(uintptr_t)wr->faa.operand_addr, sge->length); + + return 0; +} + +static int udma_fill_normal_sge(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + uint32_t max_inline_size, struct ubcore_jfs_wr *wr, + struct ubcore_tjetty *tjetty) +{ + switch (sqe_ctl->opcode) { + case UDMA_OPC_SEND: + case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + return udma_k_fill_send_sqe(dev, sqe_ctl, wr, tjetty, + max_inline_size); + case UDMA_OPC_WRITE: + return udma_k_fill_write_sqe(dev, sqe_ctl, wr, tjetty, max_inline_size); + case UDMA_OPC_WRITE_WITH_IMM: + return udma_k_fill_write_sqe(dev, sqe_ctl, wr, tjetty, + max_inline_size > SQE_WRITE_IMM_INLINE_SIZE ? 
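+					/*
+					 * WRITE_WITH_IMM uses the 64-byte extended control
+					 * segment, so of the 4 * 64-byte WQEBB budget only
+					 * SQE_WRITE_IMM_INLINE_SIZE (192) bytes remain for
+					 * inline payload; clamp the caller's limit to that.
+					 */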
+ SQE_WRITE_IMM_INLINE_SIZE : max_inline_size); + case UDMA_OPC_READ: + return udma_k_fill_read_sqe(sqe_ctl, wr); + case UDMA_OPC_CAS: + return udma_k_fill_cas_sqe(dev, sqe_ctl, wr); + case UDMA_OPC_FAA: + return udma_k_fill_faa_sqe(dev, sqe_ctl, wr); + default: + return -EINVAL; + } +} + +static int udma_k_set_sqe(struct udma_sqe_ctl *sqe_ctl, struct ubcore_jfs_wr *wr, + struct udma_jetty_queue *sq, uint8_t opcode, + struct udma_dev *dev) +{ + struct udma_target_jetty *udma_tjetty; + struct ubcore_tjetty *tjetty; + int ret = 0; + + sqe_ctl->cqe = wr->flag.bs.complete_enable; + sqe_ctl->owner = (sq->pi & sq->buf.entry_cnt) == 0 ? 1 : 0; + sqe_ctl->opcode = opcode; + sqe_ctl->place_odr = wr->flag.bs.place_order; + + if (opcode == UDMA_OPC_NOP) + return 0; + + if (sq->trans_mode == UBCORE_TP_RC) + tjetty = sq->rc_tjetty; + else + tjetty = wr->tjetty; + + udma_tjetty = to_udma_tjetty(tjetty); + + sqe_ctl->tpn = tjetty->vtpn->vtpn; + sqe_ctl->fence = wr->flag.bs.fence; + sqe_ctl->comp_order = wr->flag.bs.comp_order; + sqe_ctl->se = wr->flag.bs.solicited_enable; + sqe_ctl->inline_en = wr->flag.bs.inline_flag; + sqe_ctl->rmt_jetty_type = tjetty->cfg.type; + memcpy(sqe_ctl->rmt_eid, &udma_tjetty->le_eid.raw, sizeof(uint8_t) * + UDMA_SQE_RMT_EID_SIZE); + + ret = udma_fill_normal_sge(dev, sqe_ctl, sq->max_inline_size, wr, tjetty); + if (ret) + dev_err(dev->dev, "Failed to fill normal sge, opcode :%u in wr.\n", + (uint8_t)wr->opcode); + + return ret; +} + +static bool udma_k_check_sge_num(uint8_t opcode, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr) +{ + switch (opcode) { + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + return sq->max_sge_num == 0; + case UDMA_OPC_READ: + return wr->rw.dst.num_sge > UDMA_JFS_MAX_SGE_READ || + wr->rw.dst.num_sge > sq->max_sge_num; + case UDMA_OPC_WRITE_WITH_IMM: + return wr->rw.src.num_sge > UDMA_JFS_MAX_SGE_WRITE_IMM || + wr->rw.src.num_sge > sq->max_sge_num; + case UDMA_OPC_SEND: + case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + return wr->send.src.num_sge > sq->max_sge_num; + default: + return wr->rw.src.num_sge > sq->max_sge_num; + } +} + +static void udma_copy_to_sq(struct udma_jetty_queue *sq, uint32_t wqebb_cnt, + struct udma_jfs_wqebb *tmp_sq) +{ + uint32_t remain = sq->buf.entry_cnt - (sq->pi & (sq->buf.entry_cnt - 1)); + uint32_t field_h; + uint32_t field_l; + + field_h = remain > wqebb_cnt ? wqebb_cnt : remain; + field_l = wqebb_cnt > field_h ? wqebb_cnt - field_h : 0; + + memcpy(sq->kva_curr, tmp_sq, field_h * sizeof(*tmp_sq)); + + if (field_l) + memcpy(sq->buf.kva, tmp_sq + field_h, field_l * sizeof(*tmp_sq)); +} + +static void *udma_k_inc_ptr_wrap(uint32_t sq_buf_size, uint32_t wqebb_size, + uint8_t *sq_base_addr, uint8_t *sq_buf_curr) +{ + uint8_t *sq_buf_end; + + sq_buf_end = (uint8_t *)(sq_buf_size + sq_base_addr); + + sq_buf_curr = ((sq_buf_curr + wqebb_size) < sq_buf_end) ? 
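+		/*
+		 * The SQ buffer is a ring of WQEBBs: step forward by one WQE,
+		 * wrapping back past sq_buf_end to the base address.
+		 */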
+ (sq_buf_curr + wqebb_size) : sq_base_addr + (sq_buf_curr + + wqebb_size - sq_buf_end); + + return sq_buf_curr; +} + +static int udma_post_one_wr(struct udma_jetty_queue *sq, struct ubcore_jfs_wr *wr, + struct udma_dev *udma_dev, struct udma_sqe_ctl **wqe_addr, + bool *dwqe_enable) +{ + struct udma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = {}; + uint32_t wqebb_cnt; + uint8_t opcode; + uint32_t i; + int ret; + + opcode = udma_get_jfs_opcode(wr->opcode); + if (unlikely(opcode == UDMA_OPC_INVALID)) { + dev_err(udma_dev->dev, "Invalid opcode :%u.\n", wr->opcode); + return -EINVAL; + } + + if (unlikely(udma_k_check_sge_num(opcode, sq, wr))) { + dev_err(udma_dev->dev, "WR sge num invalid.\n"); + return -EINVAL; + } + + ret = udma_k_set_sqe((struct udma_sqe_ctl *)(void *)tmp_sq, wr, sq, + opcode, udma_dev); + if (ret) + return ret; + + wqebb_cnt = get_wqebb_num((struct udma_sqe_ctl *)(void *)tmp_sq); + if (wqebb_cnt == 1 && !!(udma_dev->caps.feature & UDMA_CAP_FEATURE_DIRECT_WQE)) + *dwqe_enable = true; + + if (to_check_sq_overflow(sq, wqebb_cnt)) { + dev_err(udma_dev->dev, "JFS overflow, wqebb_cnt:%u.\n", wqebb_cnt); + return -ENOMEM; + } + + udma_copy_to_sq(sq, wqebb_cnt, tmp_sq); + + *wqe_addr = (struct udma_sqe_ctl *)sq->kva_curr; + + sq->kva_curr = udma_k_inc_ptr_wrap(sq->buf.entry_cnt * sq->buf.entry_size, + wqebb_cnt * sq->buf.entry_size, + (uint8_t *)sq->buf.kva, + (uint8_t *)sq->kva_curr); + + for (i = 0; i < wqebb_cnt; i++) + sq->wrid[(sq->pi + i) & (sq->buf.entry_cnt - 1)] = wr->user_ctx; + + sq->pi += wqebb_cnt; + + return 0; +} + +static inline void udma_k_update_sq_db(struct udma_jetty_queue *sq) +{ + uint32_t *db_addr = sq->db_addr; + *db_addr = sq->pi; +} + +#ifdef ST64B +static void st64b(uint64_t *src, uint64_t *dst) +{ + asm volatile ( + "mov x9, %0\n" + "mov x10, %1\n" + "ldr x0, [x9]\n" + "ldr x1, [x9, #8]\n" + "ldr x2, [x9, #16]\n" + "ldr x3, [x9, #24]\n" + "ldr x4, [x9, #32]\n" + "ldr x5, [x9, #40]\n" + "ldr x6, [x9, #48]\n" + "ldr x7, [x9, #56]\n" + ".inst 0xf83f9140\n" + ::"r" (src), "r"(dst):"cc", "memory" + ); +} +#endif + +static void udma_write_dsqe(struct udma_jetty_queue *sq, + struct udma_sqe_ctl *ctrl) +{ +#define DWQE_SIZE 8 + int i; + + ctrl->sqe_bb_idx = sq->pi; + +#ifdef ST64B + st64b(((uint64_t *)ctrl), (uint64_t *)sq->dwqe_addr); +#else + for (i = 0; i < DWQE_SIZE; i++) + writeq_relaxed(*((uint64_t *)ctrl + i), + (uint64_t *)sq->dwqe_addr + i); +#endif +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) +{ + struct udma_sqe_ctl *wqe_addr; + bool dwqe_enable = false; + struct ubcore_jfs_wr *it; + int wr_cnt = 0; + int ret = 0; + + if (!sq->lock_free) + spin_lock(&sq->lock); + + for (it = wr; it != NULL; it = (struct ubcore_jfs_wr *)(void *)it->next) { + ret = udma_post_one_wr(sq, it, udma_dev, &wqe_addr, &dwqe_enable); + if (ret) { + *bad_wr = it; + goto err_post_wr; + } + wr_cnt++; + } + +err_post_wr: + if (likely(wr_cnt && udma_dev->status != UDMA_SUSPEND)) { + wmb(); /* set sqe before doorbell */ + if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) + udma_write_dsqe(sq, wqe_addr); + else + udma_k_update_sq_db(sq); + } + + if (!sq->lock_free) + spin_unlock(&sq->lock); + + return ret; +} + +int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + int ret; + + 
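+	/*
+	 * JFS shares the common SQ post path with jetty; direct-WQE and
+	 * doorbell handling are done inside udma_post_sq_wr().
+	 */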
ret = udma_post_sq_wr(udma_dev, &udma_jfs->sq, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, "Failed to post jfs wr, sq_id = %u.\n", + udma_jfs->sq.id); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 6cdc281e53c3..65d8e2ac52f2 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -7,10 +7,28 @@ #include "udma_common.h" #define MAX_WQEBB_NUM 4 +#define UDMA_SQE_RMT_EID_SIZE 16 +#define SQE_WRITE_IMM_CTL_LEN 64 +#define SQE_NORMAL_CTL_LEN 48 +#define ATOMIC_WQEBB_CNT 2 +#define NOP_WQEBB_CNT 1 #define UDMA_JFS_WQEBB_SIZE 64 #define UDMA_JFS_SGE_SIZE 16 +#define UDMA_JFS_MAX_SGE_READ 6 +#define UDMA_JFS_MAX_SGE_WRITE_IMM 12 +#define UDMA_ATOMIC_SGE_NUM 1 +#define UDMA_ATOMIC_LEN_4 4 +#define UDMA_ATOMIC_LEN_8 8 +#define UDMA_ATOMIC_LEN_16 16 +#define SQE_CTL_RMA_ADDR_OFFSET 32 +#define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) +#define SQE_ATOMIC_DATA_FIELD 64 +#define SQE_SEND_IMM_FIELD 40 +#define WRITE_IMM_TOKEN_FIELD 56 +#define SQE_WRITE_IMM_FIELD 48 #define SQE_WRITE_NOTIFY_CTL_LEN 80 +#define SQE_WRITE_IMM_INLINE_SIZE 192 enum udma_jfs_type { UDMA_NORMAL_JFS_TYPE, @@ -28,6 +46,63 @@ struct udma_jfs { bool ue_rx_closed; }; +/* thanks to include/rdma/ib_verbs.h */ +enum udma_sq_opcode { + UDMA_OPC_SEND, + UDMA_OPC_SEND_WITH_IMM, + UDMA_OPC_SEND_WITH_INVALID, + UDMA_OPC_WRITE, + UDMA_OPC_WRITE_WITH_IMM, + UDMA_OPC_READ = 0x6, + UDMA_OPC_CAS, + UDMA_OPC_FAA = 0xb, + UDMA_OPC_NOP = 0x11, + UDMA_OPC_INVALID = 0x12, +}; + +struct udma_jfs_wqebb { + uint32_t value[16]; +}; + +struct udma_sqe_ctl { + uint32_t sqe_bb_idx : 16; + uint32_t place_odr : 2; + uint32_t comp_order : 1; + uint32_t fence : 1; + uint32_t se : 1; + uint32_t cqe : 1; + uint32_t inline_en : 1; + uint32_t rsv : 5; + uint32_t token_en : 1; + uint32_t rmt_jetty_type : 2; + uint32_t owner : 1; + uint32_t target_hint : 8; + uint32_t opcode : 8; + uint32_t rsv1 : 6; + uint32_t inline_msg_len : 10; + uint32_t tpn : 24; + uint32_t sge_num : 8; + uint32_t rmt_obj_id : 20; + uint32_t rsv2 : 12; + uint8_t rmt_eid[UDMA_SQE_RMT_EID_SIZE]; + uint32_t rmt_token_value; + uint32_t rsv3; + uint32_t rmt_addr_l_or_token_id; + uint32_t rmt_addr_h_or_token_value; +}; + +struct udma_normal_sge { + uint32_t length; + uint32_t token_id; + uint64_t va; +}; + +struct udma_token_info { + uint32_t token_id : 20; + uint32_t rsv : 12; + uint32_t token_value; +}; + static inline struct udma_jfs *to_udma_jfs(struct ubcore_jfs *jfs) { return container_of(jfs, struct udma_jfs, ubcore_jfs); @@ -38,12 +113,23 @@ static inline struct udma_jfs *to_udma_jfs_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfs, sq); } +static inline bool to_check_sq_overflow(struct udma_jetty_queue *sq, + uint32_t wqebb_cnt) +{ + return sq->pi - sq->ci + wqebb_cnt > sq->buf.entry_cnt; +} + static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) { return (sqe_ctl_len + (sge_num - 1) * UDMA_JFS_SGE_SIZE) / UDMA_JFS_WQEBB_SIZE + 1; } +static inline uint32_t get_ctl_len(uint8_t opcode) +{ + return opcode == UDMA_OPC_WRITE_WITH_IMM ? 
SQE_WRITE_IMM_CTL_LEN : SQE_NORMAL_CTL_LEN; +} + struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); @@ -55,5 +141,9 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); +int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); +int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b116b514ee3b..bfbaf9d07908 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -200,6 +200,7 @@ static struct ubcore_ops g_dev_ops = { .unimport_jetty = udma_unimport_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, + .post_jfs_wr = udma_post_jfs_wr, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From d3be18954399b9bfb02ceeaf364c55d7f421e124 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 14:20:51 +0800 Subject: [PATCH 117/243] ub: udma: Support post jfr work request. commit 45af039f81ccb00d0189bf03f875c2870deccb55 openEuler This patch adds the ability to post jfr work request. After user post a request, driver will assemble the wqe and update doorbell. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 7 ++ drivers/ub/urma/hw/udma/udma_jfr.c | 100 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 19 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 127 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index b1b129ee4449..f8bab657aa6a 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -91,6 +91,13 @@ static inline void udma_alloc_kernel_db(struct udma_dev *dev, queue->db_addr = queue->dwqe_addr + UDMA_DOORBELL_OFFSET; } +static inline void *get_buf_entry(struct udma_buf *buf, uint32_t n) +{ + uint32_t entry_index = n & (buf->entry_cnt - 1); + + return (char *)buf->kva + (entry_index * buf->entry_size); +} + static inline uint8_t to_ta_timeout(uint32_t err_timeout) { #define TA_TIMEOUT_DIVISOR 8 diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 7462d75f1fba..5e01e6a8f141 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -803,6 +803,106 @@ int udma_unimport_jfr(struct ubcore_tjetty *tjfr) return 0; } +static void fill_wqe_idx(struct udma_jfr *jfr, uint32_t wqe_idx) +{ + uint32_t *idx_buf; + + idx_buf = (uint32_t *)get_buf_entry(&jfr->idx_que.buf, jfr->rq.pi); + *idx_buf = cpu_to_le32(wqe_idx); + + jfr->rq.pi++; +} + +static void fill_recv_sge_to_wqe(struct ubcore_jfr_wr *wr, void *wqe, + uint32_t max_sge) +{ + struct udma_wqe_sge *sge = (struct udma_wqe_sge *)wqe; + uint32_t i, cnt; + + for (i = 0, cnt = 0; i < wr->src.num_sge; i++) { + if (!wr->src.sge[i].len) + continue; + set_data_of_sge(sge + cnt, wr->src.sge + i); + ++cnt; + } + + if (cnt < max_sge) + memset(sge + cnt, 0, (max_sge - cnt) * UDMA_SGE_SIZE); +} + +static int post_recv_one(struct udma_dev *dev, struct udma_jfr 
*jfr, + struct ubcore_jfr_wr *wr) +{ + uint32_t wqe_idx; + int ret = 0; + void *wqe; + + if (unlikely(wr->src.num_sge > jfr->max_sge)) { + dev_err(dev->dev, + "failed to check sge, wr_num_sge = %u, max_sge = %u, jfrn = %u.\n", + wr->src.num_sge, jfr->max_sge, jfr->rq.id); + return -EINVAL; + } + + if (udma_jfrwq_overflow(jfr)) { + dev_err(dev->dev, "failed to check jfrwq, jfrwq is full, jfrn = %u.\n", + jfr->rq.id); + return -ENOMEM; + } + + ret = udma_id_alloc(dev, &jfr->idx_que.jfr_idx_table.ida_table, + &wqe_idx); + if (ret) { + dev_err(dev->dev, "failed to get jfr wqe idx.\n"); + return ret; + } + wqe = get_buf_entry(&jfr->rq.buf, wqe_idx); + + fill_recv_sge_to_wqe(wr, wqe, jfr->max_sge); + + fill_wqe_idx(jfr, wqe_idx); + + jfr->rq.wrid[wqe_idx] = wr->user_ctx; + + return ret; +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct udma_dev *dev = to_udma_dev(ubcore_jfr->ub_dev); + struct udma_jfr *jfr = to_udma_jfr(ubcore_jfr); + uint32_t nreq; + int ret = 0; + + if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) + spin_lock(&jfr->lock); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + ret = post_recv_one(dev, jfr, wr); + if (ret) { + *bad_wr = wr; + break; + } + } + + if (likely(nreq)) { + /* + * Ensure that the pipeline fills all RQEs into the RQ queue, + * then updating the PI pointer. + */ + wmb(); + *jfr->sw_db.db_record = jfr->rq.pi & + (uint32_t)UDMA_JFR_DB_PI_M; + } + + if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) + spin_unlock(&jfr->lock); + + return ret; +} + struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index 9a90e60bd391..c446eaedee1d 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -95,6 +95,12 @@ struct udma_jfr { struct completion ae_comp; }; +struct udma_wqe_sge { + uint32_t length; + uint32_t token_id; + uint64_t va; +}; + struct udma_jfr_ctx { /* DW0 */ uint32_t state : 2; @@ -150,6 +156,17 @@ static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *jfr) return container_of(jfr, struct udma_jfr, ubcore_jfr); } +static inline bool udma_jfrwq_overflow(struct udma_jfr *jfr) +{ + return (jfr->rq.pi - jfr->rq.ci) >= jfr->wqe_cnt; +} + +static inline void set_data_of_sge(struct udma_wqe_sge *sge, struct ubcore_sge *sg) +{ + sge->va = cpu_to_le64(sg->addr); + sge->length = cpu_to_le32(sg->len); +} + static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *queue) { return container_of(queue, struct udma_jfr, rq); @@ -166,5 +183,7 @@ struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata); +int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index bfbaf9d07908..5f8de12c21ea 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -201,6 +201,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, .post_jfs_wr = udma_post_jfs_wr, + .post_jfr_wr = udma_post_jfr_wr, }; static void udma_uninit_group_table(struct 
udma_dev *dev, struct udma_group_table *table) -- Gitee From 422ae9674a7d56967b57c95ad687f64e1cd2058f Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 15:15:38 +0800 Subject: [PATCH 118/243] ub: udma: Support post jetty work request. commit 6465886ae8e08630d4f8fd5b440347f8c3bdafc0 openEuler This patch adds the ability to post jetty work request. After user post a request, driver will assemble the wqe and update doorbell. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 34 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 5 +++- drivers/ub/urma/hw/udma/udma_main.c | 2 ++ 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 4b4e924f9111..61fc94c0898c 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -1249,6 +1249,40 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } +int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + ret = udma_post_sq_wr(udma_dev, &udma_jetty->sq, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, + "jetty post sq wr failed, ret = %d, jetty id = %u.\n", + ret, udma_jetty->sq.id); + + return ret; +} + +int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct ubcore_jfr *jfr; + int ret; + + jfr = &udma_jetty->jfr->ubcore_jfr; + ret = udma_post_jfr_wr(jfr, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, + "jetty post jfr wr failed, ret = %d, jetty id = %u.\n", + ret, udma_jetty->sq.id); + + return ret; +} + struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index dba8fa2a05a5..4b9749afb64e 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -250,7 +250,10 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, enum jetty_state state); - +int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5f8de12c21ea..ba5d2b7996f8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -202,6 +202,8 @@ static struct ubcore_ops g_dev_ops = { .delete_jetty_grp = udma_delete_jetty_grp, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, + .post_jetty_send_wr = udma_post_jetty_send_wr, + .post_jetty_recv_wr = udma_post_jetty_recv_wr, }; static void 
udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From cd109d5584993241781adb26c550fe281bc79820 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 15:55:24 +0800 Subject: [PATCH 119/243] ub: udma: Support poll jfc. commit d72435589dce4f196c7993c40a20b5ff7bf5cb45 openEuler This patch adds the ability to poll jfc. When the hardware completes the sending task, it generates a completion event (CQE), which needs to be polled by the driver. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 163 ++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.c | 385 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 44 ++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 593 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 8b709dc10a20..b06ff3ea61cf 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,169 @@ #include #include "udma_def.h" +static int send_cmd_query_cqe_aux_info(struct udma_dev *udma_dev, + struct udma_cmd_query_cqe_aux_info *info) +{ + struct ubase_cmd_buf cmd_in, cmd_out; + int ret; + + udma_fill_buf(&cmd_in, UDMA_CMD_GET_CQE_AUX_INFO, true, + sizeof(struct udma_cmd_query_cqe_aux_info), info); + udma_fill_buf(&cmd_out, UDMA_CMD_GET_CQE_AUX_INFO, true, + sizeof(struct udma_cmd_query_cqe_aux_info), info); + + ret = ubase_cmd_send_inout(udma_dev->comdev.adev, &cmd_in, &cmd_out); + if (ret) + dev_err(udma_dev->dev, + "failed to query cqe aux info, ret = %d.\n", ret); + + return ret; +} + +static void free_kernel_cqe_aux_info(struct udma_cqe_aux_info_out *user_aux_info_out, + struct udma_cqe_aux_info_out *aux_info_out) +{ + if (!user_aux_info_out->aux_info_type) + return; + + kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; + + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_value = NULL; +} + +static int copy_out_cqe_data_from_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_cqe_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_cqe_aux_info_out *user_aux_info_out) +{ + if (out->addr != 0 && out->len == sizeof(struct udma_cqe_aux_info_out)) { + memcpy(aux_info_out, (void *)(uintptr_t)out->addr, + sizeof(struct udma_cqe_aux_info_out)); + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + if (aux_info_out->aux_info_num > MAX_CQE_AUX_INFO_TYPE_NUM) { + dev_err(udma_dev->dev, + "invalid cqe aux info num %u.\n", + aux_info_out->aux_info_num); + return -EINVAL; + } + + user_aux_info_out->aux_info_type = aux_info_out->aux_info_type; + user_aux_info_out->aux_info_value = aux_info_out->aux_info_value; + aux_info_out->aux_info_type = + kcalloc(aux_info_out->aux_info_num, + sizeof(enum udma_cqe_aux_info_type), GFP_KERNEL); + if (!aux_info_out->aux_info_type) + return -ENOMEM; + + aux_info_out->aux_info_value = + kcalloc(aux_info_out->aux_info_num, + sizeof(uint32_t), GFP_KERNEL); + if (!aux_info_out->aux_info_value) { + kfree(aux_info_out->aux_info_type); + return -ENOMEM; + } + } + } + + return 0; +} + +static int copy_out_cqe_data_to_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_cqe_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_cqe_aux_info_out *user_aux_info_out) +{ + unsigned long byte; + + if (out->addr != 0 && out->len == sizeof(struct 
udma_cqe_aux_info_out)) { + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_type, + (void *)aux_info_out->aux_info_type, + aux_info_out->aux_info_num * + sizeof(enum udma_cqe_aux_info_type)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info type failed, byte = %lu.\n", byte); + return -EFAULT; + } + + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_value, + (void *)aux_info_out->aux_info_value, + aux_info_out->aux_info_num * + sizeof(uint32_t)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info value failed, byte = %lu.\n", byte); + return -EFAULT; + } + + kfree(aux_info_out->aux_info_type); + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_type = user_aux_info_out->aux_info_type; + aux_info_out->aux_info_value = user_aux_info_out->aux_info_value; + } + memcpy((void *)(uintptr_t)out->addr, aux_info_out, + sizeof(struct udma_cqe_aux_info_out)); + } + + return 0; +} + +int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_cqe_aux_info_out user_aux_info_out = {}; + struct udma_cqe_aux_info_out aux_info_out = {}; + struct udma_cmd_query_cqe_aux_info info = {}; + struct udma_cqe_info_in cqe_info_in = {}; + struct udma_dev *udev = to_udma_dev(dev); + int ret; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_cqe_info_in))) { + dev_err(udev->dev, "parameter invalid in query cqe aux info, in_len = %u.\n", + in->len); + return -EINVAL; + } + memcpy(&cqe_info_in, (void *)(uintptr_t)in->addr, + sizeof(struct udma_cqe_info_in)); + + ret = copy_out_cqe_data_from_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udev->dev, + "copy out data from user failed, ret = %d.\n", ret); + return ret; + } + + info.status = cqe_info_in.status; + info.is_client = !(cqe_info_in.s_r & 1); + + ret = send_cmd_query_cqe_aux_info(udev, &info); + if (ret) { + dev_err(udev->dev, + "send cmd query aux info failed, ret = %d.\n", + ret); + free_kernel_cqe_aux_info(&user_aux_info_out, &aux_info_out); + return ret; + } + + ret = copy_out_cqe_data_to_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udev->dev, + "copy out data to user failed, ret = %d.\n", ret); + free_kernel_cqe_aux_info(&user_aux_info_out, &aux_info_out); + } + + return ret; +} + static int to_hw_ae_event_type(struct udma_dev *udma_dev, uint32_t event_type, struct udma_cmd_query_ae_aux_info *info) { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 5067b3c52104..d6a3b53cfe79 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -645,3 +645,388 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, return ret; } + +static enum jfc_poll_state udma_get_cr_status(struct udma_dev *dev, + uint8_t src_status, + uint8_t substatus, + enum ubcore_cr_status *dst_status) +{ +#define UDMA_SRC_STATUS_NUM 7 +#define UDMA_SUB_STATUS_NUM 5 + +struct udma_cr_status { + bool is_valid; + enum ubcore_cr_status cr_status; +}; + + static struct udma_cr_status map[UDMA_SRC_STATUS_NUM][UDMA_SUB_STATUS_NUM] = { + {{true, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, 
UBCORE_CR_UNSUPPORTED_OPCODE_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{false, UBCORE_CR_SUCCESS}, {true, UBCORE_CR_LOC_LEN_ERR}, + {true, UBCORE_CR_LOC_ACCESS_ERR}, {true, UBCORE_CR_REM_RESP_LEN_ERR}, + {true, UBCORE_CR_LOC_DATA_POISON}}, + {{false, UBCORE_CR_SUCCESS}, {true, UBCORE_CR_REM_UNSUPPORTED_REQ_ERR}, + {true, UBCORE_CR_REM_ACCESS_ABORT_ERR}, {false, UBCORE_CR_SUCCESS}, + {true, UBCORE_CR_REM_DATA_POISON}}, + {{true, UBCORE_CR_RNR_RETRY_CNT_EXC_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, UBCORE_CR_ACK_TIMEOUT_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, UBCORE_CR_FLUSH_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}} + }; + + if ((src_status < UDMA_SRC_STATUS_NUM) && (substatus < UDMA_SUB_STATUS_NUM) && + map[src_status][substatus].is_valid) { + *dst_status = map[src_status][substatus].cr_status; + return JFC_OK; + } + + dev_err(dev->dev, "cqe status is error, status = %u, substatus = %u.\n", + src_status, substatus); + + return JFC_POLL_ERR; +} + +static void udma_handle_inline_cqe(struct udma_jfc_cqe *cqe, uint8_t opcode, + struct udma_jetty_queue *queue, + struct ubcore_cr *cr) +{ + struct udma_jfr *jfr = to_udma_jfr_from_queue(queue); + uint32_t rqe_idx, data_len, sge_idx, size; + struct udma_wqe_sge *sge_list; + void *cqe_inline_buf; + + rqe_idx = cqe->entry_idx; + sge_list = (struct udma_wqe_sge *)(jfr->rq.buf.kva + + rqe_idx * jfr->rq.buf.entry_size); + data_len = cqe->byte_cnt; + cqe_inline_buf = opcode == HW_CQE_OPC_SEND ? + (void *)&cqe->data_l : (void *)&cqe->inline_data; + + for (sge_idx = 0; (sge_idx < jfr->max_sge) && data_len; sge_idx++) { + size = sge_list[sge_idx].length < data_len ? 
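+			/* copy at most what this receive SGE can hold */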
+ sge_list[sge_idx].length : data_len; + memcpy((void *)(uintptr_t)sge_list[sge_idx].va, + cqe_inline_buf, size); + data_len -= size; + cqe_inline_buf += size; + } + cr->completion_len = cqe->byte_cnt - data_len; + + if (data_len) { + cqe->status = UDMA_CQE_LOCAL_OP_ERR; + cqe->substatus = UDMA_CQE_LOCAL_LENGTH_ERR; + } +} + +static void udma_parse_opcode_for_res(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + uint8_t opcode = cqe->opcode; + struct udma_inv_tid *inv_tid; + + switch (opcode) { + case HW_CQE_OPC_SEND: + cr->opcode = UBCORE_CR_OPC_SEND; + break; + case HW_CQE_OPC_SEND_WITH_IMM: + cr->imm_data = (uint64_t)cqe->data_h << UDMA_IMM_DATA_SHIFT | + cqe->data_l; + cr->opcode = UBCORE_CR_OPC_SEND_WITH_IMM; + break; + case HW_CQE_OPC_SEND_WITH_INV: + cr->invalid_token.token_id = cqe->data_l & (uint32_t)UDMA_CQE_INV_TOKEN_ID; + cr->invalid_token.token_id <<= UDMA_TID_SHIFT; + cr->invalid_token.token_value.token = cqe->data_h; + cr->opcode = UBCORE_CR_OPC_SEND_WITH_INV; + + inv_tid = kzalloc(sizeof(*inv_tid), GFP_ATOMIC); + if (!inv_tid) + return; + + inv_tid->tid = cr->invalid_token.token_id >> UDMA_TID_SHIFT; + list_add(&inv_tid->list, tid_list); + + break; + case HW_CQE_OPC_WRITE_WITH_IMM: + cr->imm_data = (uint64_t)cqe->data_h << UDMA_IMM_DATA_SHIFT | + cqe->data_l; + cr->opcode = UBCORE_CR_OPC_WRITE_WITH_IMM; + break; + default: + cr->opcode = (enum ubcore_cr_opcode)HW_CQE_OPC_ERR; + dev_err(dev->dev, "receive invalid opcode :%u.\n", opcode); + cr->status = UBCORE_CR_UNSUPPORTED_OPCODE_ERR; + break; + } +} + +static struct udma_jfr *udma_get_jfr(struct udma_dev *udma_dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr) +{ + struct udma_jetty_queue *udma_sq; + struct udma_jetty *jetty = NULL; + struct udma_jfr *jfr = NULL; + uint32_t local_id; + + local_id = cr->local_id; + if (cqe->is_jetty) { + udma_sq = (struct udma_jetty_queue *)xa_load(&udma_dev->jetty_table.xa, local_id); + if (!udma_sq) { + dev_warn(udma_dev->dev, + "get jetty failed, jetty_id = %u.\n", local_id); + return NULL; + } + jetty = to_udma_jetty_from_queue(udma_sq); + jfr = jetty->jfr; + cr->user_data = (uintptr_t)&jetty->ubcore_jetty; + } else { + jfr = (struct udma_jfr *)xa_load(&udma_dev->jfr_table.xa, local_id); + if (!jfr) { + dev_warn(udma_dev->dev, + "get jfr failed jfr id = %u.\n", local_id); + return NULL; + } + cr->user_data = (uintptr_t)&jfr->ubcore_jfr; + } + + return jfr; +} + +static bool udma_update_jfr_idx(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + bool is_clean) +{ + struct udma_jetty_queue *queue; + uint8_t opcode = cqe->opcode; + struct udma_jfr *jfr; + uint32_t entry_idx; + + jfr = udma_get_jfr(dev, cqe, cr); + if (!jfr) + return true; + + queue = &jfr->rq; + entry_idx = cqe->entry_idx; + cr->user_ctx = queue->wrid[entry_idx & (queue->buf.entry_cnt - (uint32_t)1)]; + + if (!is_clean && cqe->inline_en) + udma_handle_inline_cqe(cqe, opcode, queue, cr); + + if (!jfr->ubcore_jfr.jfr_cfg.flag.bs.lock_free) + spin_lock(&jfr->lock); + + udma_id_free(&jfr->idx_que.jfr_idx_table.ida_table, entry_idx); + queue->ci++; + + if (!jfr->ubcore_jfr.jfr_cfg.flag.bs.lock_free) + spin_unlock(&jfr->lock); + + return false; +} + +static enum jfc_poll_state udma_parse_cqe_for_send(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr) +{ + struct udma_jetty_queue *queue; + struct udma_jetty *jetty; + struct udma_jfs *jfs; + + queue = (struct udma_jetty_queue *)(uintptr_t)( + (uint64_t)cqe->user_data_h 
<< UDMA_ADDR_SHIFT | + cqe->user_data_l); + if (!queue) { + dev_err(dev->dev, "jetty queue addr is null, jetty_id = %u.\n", cr->local_id); + return JFC_POLL_ERR; + } + + if (unlikely(udma_get_cr_status(dev, cqe->status, cqe->substatus, &cr->status))) + return JFC_POLL_ERR; + + if (!!cqe->fd) { + cr->status = UBCORE_CR_WR_FLUSH_ERR_DONE; + queue->flush_flag = true; + } else { + queue->ci += (cqe->entry_idx - queue->ci) & (queue->buf.entry_cnt - 1); + cr->user_ctx = queue->wrid[queue->ci & (queue->buf.entry_cnt - 1)]; + queue->ci++; + } + + if (!!cr->flag.bs.jetty) { + jetty = to_udma_jetty_from_queue(queue); + cr->user_data = (uintptr_t)&jetty->ubcore_jetty; + } else { + jfs = container_of(queue, struct udma_jfs, sq); + cr->user_data = (uintptr_t)&jfs->ubcore_jfs; + } + + return JFC_OK; +} + +static enum jfc_poll_state udma_parse_cqe_for_recv(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + uint8_t substatus; + uint8_t status; + + if (unlikely(udma_update_jfr_idx(dev, cqe, cr, false))) + return JFC_POLL_ERR; + + udma_parse_opcode_for_res(dev, cqe, cr, tid_list); + status = cqe->status; + substatus = cqe->substatus; + if (unlikely(udma_get_cr_status(dev, status, substatus, &cr->status))) + return JFC_POLL_ERR; + + return JFC_OK; +} + +static enum jfc_poll_state parse_cqe_for_jfc(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + enum jfc_poll_state ret; + + cr->flag.bs.s_r = cqe->s_r; + cr->flag.bs.jetty = cqe->is_jetty; + cr->completion_len = cqe->byte_cnt; + cr->tpn = cqe->tpn; + cr->local_id = cqe->local_num_h << UDMA_SRC_IDX_SHIFT | cqe->local_num_l; + cr->remote_id.id = cqe->rmt_idx; + udma_swap_endian((uint8_t *)(cqe->rmt_eid), cr->remote_id.eid.raw, UBCORE_EID_SIZE); + + if (cqe->s_r == CQE_FOR_RECEIVE) + ret = udma_parse_cqe_for_recv(dev, cqe, cr, tid_list); + else + ret = udma_parse_cqe_for_send(dev, cqe, cr); + + return ret; +} + +static struct udma_jfc_cqe *get_next_cqe(struct udma_jfc *jfc, uint32_t n) +{ + struct udma_jfc_cqe *cqe; + uint32_t valid_owner; + + cqe = (struct udma_jfc_cqe *)get_buf_entry(&jfc->buf, n); + + valid_owner = (n >> jfc->cq_shift) & UDMA_JFC_DB_VALID_OWNER_M; + if (!(cqe->owner ^ valid_owner)) + return NULL; + + return cqe; +} + +static void dump_cqe_aux_info(struct udma_dev *dev, struct ubcore_cr *cr) +{ + struct ubcore_user_ctl_out out = {}; + struct ubcore_user_ctl_in in = {}; + struct udma_cqe_info_in info_in; + + info_in.status = cr->status; + info_in.s_r = cr->flag.bs.s_r; + in.addr = (uint64_t)&info_in; + in.len = sizeof(struct udma_cqe_info_in); + in.opcode = UDMA_USER_CTL_QUERY_CQE_AUX_INFO; + + (void)udma_query_cqe_aux_info(&dev->ub_dev, NULL, &in, &out); +} + +static enum jfc_poll_state udma_poll_one(struct udma_dev *dev, + struct udma_jfc *jfc, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + struct udma_jfc_cqe *cqe; + + cqe = get_next_cqe(jfc, jfc->ci); + if (!cqe) + return JFC_EMPTY; + + ++jfc->ci; + /* Memory barrier */ + rmb(); + + if (parse_cqe_for_jfc(dev, cqe, cr, tid_list)) + return JFC_POLL_ERR; + + if (unlikely(cr->status != UBCORE_CR_SUCCESS) && dump_aux_info) + dump_cqe_aux_info(dev, cr); + + return JFC_OK; +} + +static void udma_inv_tid(struct udma_dev *dev, struct list_head *tid_list) +{ + struct udma_inv_tid *tid_node; + struct udma_inv_tid *tmp; + struct iommu_sva *ksva; + uint32_t tid; + + mutex_lock(&dev->ksva_mutex); + list_for_each_entry_safe(tid_node, tmp, tid_list, list) { + tid = tid_node->tid; 
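+		/*
+		 * Look up the kernel SVA handle bound to this TID; a missing
+		 * entry means the TID has already been unbound and released.
+		 */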
+ ksva = (struct iommu_sva *)xa_load(&dev->ksva_table, tid); + if (!ksva) { + dev_warn(dev->dev, "tid may have been released.\n"); + } else { + ummu_ksva_unbind_device(ksva); + __xa_erase(&dev->ksva_table, tid); + } + + list_del(&tid_node->list); + kfree(tid_node); + } + mutex_unlock(&dev->ksva_mutex); +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *dev = to_udma_dev(jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + enum jfc_poll_state err = JFC_OK; + struct list_head tid_list; + uint32_t ci; + int npolled; + + INIT_LIST_HEAD(&tid_list); + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_lock(&udma_jfc->lock); + + for (npolled = 0; npolled < cr_cnt; ++npolled) { + err = udma_poll_one(dev, udma_jfc, cr + npolled, &tid_list); + if (err != JFC_OK) + break; + } + + if (npolled) { + ci = udma_jfc->ci; + *udma_jfc->db.db_record = ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + } + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_unlock(&udma_jfc->lock); + + if (!list_empty(&tid_list)) + udma_inv_tid(dev, &tid_list); + + return err == JFC_POLL_ERR ? -UDMA_INTER_ERR : npolled; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 29db1243623e..1b9476c1206a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -18,6 +18,9 @@ #define UDMA_STARS_SWITCH 1 +#define UDMA_JFC_DB_CI_IDX_M GENMASK(21, 0) +#define UDMA_CQE_INV_TOKEN_ID GENMASK(19, 0) + enum udma_jfc_state { UDMA_JFC_STATE_INVALID, UDMA_JFC_STATE_VALID, @@ -131,6 +134,46 @@ struct udma_jfc_ctx { uint32_t rsv11[12]; }; +struct udma_jfc_cqe { + /* DW0 */ + uint32_t s_r : 1; + uint32_t is_jetty : 1; + uint32_t owner : 1; + uint32_t inline_en : 1; + uint32_t opcode : 3; + uint32_t fd : 1; + uint32_t rsv : 8; + uint32_t substatus : 8; + uint32_t status : 8; + /* DW1 */ + uint32_t entry_idx : 16; + uint32_t local_num_l : 16; + /* DW2 */ + uint32_t local_num_h : 4; + uint32_t rmt_idx : 20; + uint32_t rsv1 : 8; + /* DW3 */ + uint32_t tpn : 24; + uint32_t rsv2 : 8; + /* DW4 */ + uint32_t byte_cnt; + /* DW5 ~ DW6 */ + uint32_t user_data_l; + uint32_t user_data_h; + /* DW7 ~ DW10 */ + uint32_t rmt_eid[4]; + /* DW11 ~ DW12 */ + uint32_t data_l; + uint32_t data_h; + /* DW13 ~ DW15 */ + uint32_t inline_data[3]; +}; + +struct udma_inv_tid { + uint32_t tid; + struct list_head list; +}; + static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) { return container_of(jfc, struct udma_jfc, base); @@ -144,5 +187,6 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); +int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index ba5d2b7996f8..d2467aef2f47 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -204,6 +204,7 @@ static struct ubcore_ops g_dev_ops = { .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, .post_jetty_recv_wr = udma_post_jetty_recv_wr, + .poll_jfc = udma_poll_jfc, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 90c3f01e9b987243badd104baee2412f205d5eed Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 16:55:43 +0800 Subject: 
[PATCH 120/243] ub: udma: Support rearm jfc and clean jfc. commit 1d17cd5f0a17c47a983524cea1677386e9f20bf3 openEuler This patch adds the ability to rearm jfc and clean jfc. When the user uses the interrupt mode for poll jfc, it needs to be rearmed after each poll. When destroying jetty/jfs/jfr, it is necessary to clean jfc. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 6 +++ drivers/ub/urma/hw/udma/udma_jetty.c | 16 ++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 ++ drivers/ub/urma/hw/udma/udma_jfc.c | 71 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.c | 3 ++ drivers/ub/urma/hw/udma/udma_jfs.c | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 8 files changed, 104 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index f8bab657aa6a..c38cb43b2ec6 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -83,6 +83,12 @@ void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); +static inline void udma_write64(struct udma_dev *udma_dev, + uint64_t *val, void __iomem *dest) +{ + writeq(*val, dest); +} + static inline void udma_alloc_kernel_db(struct udma_dev *dev, struct udma_jetty_queue *queue) { diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 61fc94c0898c..87174be534ba 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -768,6 +768,19 @@ static int udma_query_jetty_ctx(struct udma_dev *dev, return 0; } +void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfc *send_jfc, + struct ubcore_jfc *recv_jfc) +{ + if (sq->buf.kva) { + if (send_jfc) + udma_clean_jfc(send_jfc, sq->id, dev); + + if (recv_jfc && recv_jfc != send_jfc) + udma_clean_jfc(recv_jfc, sq->id, dev); + } +} + static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_timeout) { uint32_t wait_time; @@ -906,6 +919,9 @@ static void udma_free_jetty(struct ubcore_jetty *jetty) struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + udma_clean_cqe_for_jetty(udma_dev, &udma_jetty->sq, jetty->jetty_cfg.send_jfc, + jetty->jetty_cfg.recv_jfc); + if (dfx_switch) udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty, udma_jetty->sq.id); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 4b9749afb64e..f9b3b8f60885 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -264,5 +264,8 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata); +void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfc *send_jfc, + struct ubcore_jfc *recv_jfc); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index d6a3b53cfe79..12c2f143a376 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -646,6 +646,24 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, return ret; } +int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only) +{ + 
struct udma_dev *dev = to_udma_dev(jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + struct udma_jfc_db db; + + db.ci = udma_jfc->ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + db.notify = solicited_only; + db.arm_sn = udma_jfc->arm_sn; + db.type = UDMA_CQ_ARM_DB; + db.jfcn = udma_jfc->jfcn; + + udma_write64(dev, (uint64_t *)&db, (void __iomem *)(dev->k_db_base + + UDMA_JFC_HW_DB_OFFSET)); + + return 0; +} + static enum jfc_poll_state udma_get_cr_status(struct udma_dev *dev, uint8_t src_status, uint8_t substatus, @@ -1030,3 +1048,56 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) return err == JFC_POLL_ERR ? -UDMA_INTER_ERR : npolled; } + +void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev) +{ + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + struct udma_jfc_cqe *dest; + struct udma_jfc_cqe *cqe; + struct ubcore_cr cr; + uint32_t nfreed = 0; + uint32_t local_id; + uint8_t owner_bit; + uint32_t pi; + + if (udma_jfc->mode != (uint32_t)UDMA_NORMAL_JFC_TYPE) + return; + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_lock(&udma_jfc->lock); + + for (pi = udma_jfc->ci; get_next_cqe(udma_jfc, pi) != NULL; ++pi) { + if (pi > udma_jfc->ci + udma_jfc->buf.entry_cnt) + break; + } + while ((int) --pi - (int) udma_jfc->ci >= 0) { + cqe = get_buf_entry(&udma_jfc->buf, pi); + /* make sure cqe buffer is valid */ + rmb(); + local_id = (cqe->local_num_h << UDMA_SRC_IDX_SHIFT) | cqe->local_num_l; + if (local_id == jetty_id) { + if (cqe->s_r == CQE_FOR_RECEIVE) { + cr.local_id = local_id; + (void)udma_update_jfr_idx(udma_dev, cqe, &cr, true); + } + + ++nfreed; + } else if (!!nfreed) { + dest = get_buf_entry(&udma_jfc->buf, pi + nfreed); + /* make sure owner bit is valid */ + rmb(); + owner_bit = dest->owner; + (void)memcpy(dest, cqe, udma_dev->caps.cqe_size); + dest->owner = owner_bit; + } + } + + if (!!nfreed) { + udma_jfc->ci += nfreed; + wmb(); /* be sure software get cqe data before update doorbell */ + *udma_jfc->db.db_record = udma_jfc->ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + } + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_unlock(&udma_jfc->lock); +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 1b9476c1206a..02b17b6011d2 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -187,6 +187,8 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); +int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); +void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 5e01e6a8f141..5de0fc62c6e7 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -569,6 +569,9 @@ static void udma_free_jfr(struct ubcore_jfr *jfr) struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + if (udma_jfr->rq.buf.kva && jfr->jfr_cfg.jfc) + udma_clean_jfc(jfr->jfr_cfg.jfc, udma_jfr->rq.id, udma_dev); + if (dfx_switch) udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id); diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 978465d77672..a7b9576ea87e 100644 --- 
a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -361,6 +361,8 @@ static void udma_free_jfs(struct ubcore_jfs *jfs) struct udma_dev *dev = to_udma_dev(jfs->ub_dev); struct udma_jfs *ujfs = to_udma_jfs(jfs); + udma_clean_cqe_for_jetty(dev, &ujfs->sq, jfs->jfs_cfg.jfc, NULL); + xa_erase(&dev->jetty_table.xa, ujfs->sq.id); if (refcount_dec_and_test(&ujfs->ae_refcount)) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index d2467aef2f47..b6bb3e240d1e 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -181,6 +181,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfc = udma_create_jfc, .modify_jfc = udma_modify_jfc, .destroy_jfc = udma_destroy_jfc, + .rearm_jfc = udma_rearm_jfc, .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, -- Gitee From 297f1cd9a4ef1c41dca5a07a43046593dc70cea8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 19:35:14 +0800 Subject: [PATCH 121/243] ub: udma: Support get tp list. commit 534649e2be8ee2134415780cefa3a0bd0a5c3e1e openEuler This patch adds the ability to get tp list. During the chain construction process, the driver will obtain the tp list from UBEngine mgmt driver. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 231 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 33 ++++ drivers/ub/urma/hw/udma/udma_dev.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 3 + 4 files changed, 268 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index af1732e1629b..f08830f06fa5 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -115,6 +115,237 @@ int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn) return ret; } +static int udma_ctrlq_get_trans_type(struct udma_dev *dev, + enum ubcore_transport_mode trans_mode, + enum udma_ctrlq_trans_type *tp_type) +{ +#define UDMA_TRANS_MODE_NUM 5 + +struct udma_ctrlq_trans_map { + bool is_valid; + enum udma_ctrlq_trans_type tp_type; +}; + static struct udma_ctrlq_trans_map ctrlq_trans_map[UDMA_TRANS_MODE_NUM] = { + {false, UDMA_CTRLQ_TRANS_TYPE_MAX}, + {true, UDMA_CTRLQ_TRANS_TYPE_TP_RM}, + {true, UDMA_CTRLQ_TRANS_TYPE_TP_RC}, + {false, UDMA_CTRLQ_TRANS_TYPE_MAX}, + {true, UDMA_CTRLQ_TRANS_TYPE_TP_UM}, + }; + uint8_t transport_mode = (uint8_t)trans_mode; + + if ((transport_mode < UDMA_TRANS_MODE_NUM) && + ctrlq_trans_map[transport_mode].is_valid) { + *tp_type = ctrlq_trans_map[transport_mode].tp_type; + return 0; + } + + dev_err(dev->dev, "the trans_mode %u is not support.\n", trans_mode); + + return -EINVAL; +} + +static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq_tpid_table, + struct udma_ctrlq_tpid *tpid) +{ + struct udma_ctrlq_tpid *tpid_entity; + int ret; + + if (debug_switch) + dev_info(udev->dev, "udma ctrlq store one tpid start. 
tpid %u\n", tpid->tpid); + + if (xa_load(ctrlq_tpid_table, tpid->tpid)) { + dev_warn(udev->dev, + "the tpid already exists in ctrlq tpid table, tpid = %u.\n", + tpid->tpid); + return 0; + } + + tpid_entity = kzalloc(sizeof(*tpid_entity), GFP_KERNEL); + if (!tpid_entity) + return -ENOMEM; + + memcpy(tpid_entity, tpid, sizeof(*tpid)); + + ret = xa_err(xa_store(ctrlq_tpid_table, tpid->tpid, tpid_entity, GFP_KERNEL)); + if (ret) { + dev_err(udev->dev, + "store tpid entity failed, ret = %d, tpid = %u.\n", + ret, tpid->tpid); + kfree(tpid_entity); + } + + return ret; +} + +static void udma_ctrlq_erase_one_tpid(struct xarray *ctrlq_tpid_table, + uint32_t tpid) +{ + struct udma_ctrlq_tpid *tpid_entity; + + xa_lock(ctrlq_tpid_table); + tpid_entity = xa_load(ctrlq_tpid_table, tpid); + if (!tpid_entity) { + xa_unlock(ctrlq_tpid_table); + return; + } + __xa_erase(ctrlq_tpid_table, tpid); + kfree(tpid_entity); + xa_unlock(ctrlq_tpid_table); +} + +static int udma_ctrlq_get_tpid_list(struct udma_dev *udev, + struct udma_ctrlq_get_tp_list_req_data *tp_cfg_req, + struct ubcore_get_tp_cfg *tpid_cfg, + struct udma_ctrlq_tpid_list_rsp *tpid_list_resp) +{ + enum udma_ctrlq_trans_type trans_type; + struct ubase_ctrlq_msg msg = {}; + int ret; + + if (!tpid_cfg->flag.bs.ctp) { + if (udma_ctrlq_get_trans_type(udev, tpid_cfg->trans_mode, &trans_type) != 0) { + dev_err(udev->dev, "udma get ctrlq trans_type failed, trans_mode = %d.\n", + tpid_cfg->trans_mode); + return -EINVAL; + } + + tp_cfg_req->trans_type = (uint32_t)trans_type; + } else { + tp_cfg_req->trans_type = UDMA_CTRLQ_TRANS_TYPE_CTP; + } + + udma_swap_endian(tpid_cfg->local_eid.raw, tp_cfg_req->seid, + UDMA_EID_SIZE); + udma_swap_endian(tpid_cfg->peer_eid.raw, tp_cfg_req->deid, + UDMA_EID_SIZE); + + udma_ctrlq_set_tp_msg(&msg, (void *)tp_cfg_req, sizeof(*tp_cfg_req), + (void *)tpid_list_resp, sizeof(*tpid_list_resp)); + msg.opcode = UDMA_CMD_CTRLQ_GET_TP_LIST; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "ctrlq send msg failed, ret = %d.\n", ret); + + return ret; +} + +static int udma_ctrlq_store_tpid_list(struct udma_dev *udev, + struct xarray *ctrlq_tpid_table, + struct udma_ctrlq_tpid_list_rsp *tpid_list_resp) +{ + int ret; + int i; + + if (debug_switch) + dev_info(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", + tpid_list_resp->tp_list_cnt); + + for (i = 0; i < (int)tpid_list_resp->tp_list_cnt; i++) { + ret = udma_ctrlq_store_one_tpid(udev, ctrlq_tpid_table, + &tpid_list_resp->tpid_list[i]); + if (ret) + goto err_store_one_tpid; + } + + return 0; + +err_store_one_tpid: + for (i--; i >= 0; i--) + udma_ctrlq_erase_one_tpid(ctrlq_tpid_table, tpid_list_resp->tpid_list[i].tpid); + + return ret; +} + +int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata) +{ + struct udma_ctrlq_get_tp_list_req_data tp_cfg_req = {}; + struct udma_ctrlq_tpid_list_rsp tpid_list_resp = {}; + struct udma_dev *udev = to_udma_dev(dev); + int ret; + int i; + + if (!udata) + tp_cfg_req.flag = UDMA_DEFAULT_PID; + else + tp_cfg_req.flag = (uint32_t)current->tgid & UDMA_PID_MASK; + + ret = udma_ctrlq_get_tpid_list(udev, &tp_cfg_req, tpid_cfg, &tpid_list_resp); + if (ret) { + dev_err(udev->dev, "udma ctrlq get tpid list failed, ret = %d.\n", ret); + return ret; + } + + if (tpid_list_resp.tp_list_cnt == 0 || tpid_list_resp.tp_list_cnt > *tp_cnt) { + dev_err(udev->dev, + "check tp list count failed, count = %u.\n", + 
tpid_list_resp.tp_list_cnt); + return -EINVAL; + } + + for (i = 0; i < tpid_list_resp.tp_list_cnt; i++) { + tp_list[i].tp_handle.bs.tpid = tpid_list_resp.tpid_list[i].tpid; + tp_list[i].tp_handle.bs.tpn_start = tpid_list_resp.tpid_list[i].tpn_start; + tp_list[i].tp_handle.bs.tp_cnt = + tpid_list_resp.tpid_list[i].tpn_cnt & UDMA_TPN_CNT_MASK; + } + *tp_cnt = tpid_list_resp.tp_list_cnt; + + ret = udma_ctrlq_store_tpid_list(udev, &udev->ctrlq_tpid_table, &tpid_list_resp); + if (ret) + dev_err(udev->dev, "udma ctrlq store list failed, ret = %d.\n", ret); + + return ret; +} + +void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, + bool is_need_flush) +{ + struct udma_ctrlq_tpid *tpid_entity = NULL; + unsigned long tpid = 0; + + xa_lock(ctrlq_tpid_table); + if (!xa_empty(ctrlq_tpid_table)) { + xa_for_each(ctrlq_tpid_table, tpid, tpid_entity) { + __xa_erase(ctrlq_tpid_table, tpid); + kfree(tpid_entity); + } + } + xa_unlock(ctrlq_tpid_table); + xa_destroy(ctrlq_tpid_table); +} + +int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) +{ + struct udma_req_msg *req_msg; + struct ubase_cmd_buf in; + uint32_t msg_len; + int ret; + + msg_len = sizeof(*req_msg) + req->len; + req_msg = kzalloc(msg_len, GFP_KERNEL); + if (!req_msg) + return -ENOMEM; + + req_msg->resp_code = opcode; + + (void)memcpy(&req_msg->req, req, sizeof(*req)); + (void)memcpy(req_msg->req.data, req->data, req->len); + udma_fill_buf(&in, UBASE_OPC_UE_TO_MUE, false, msg_len, req_msg); + + ret = ubase_cmd_send_in(udma_dev->comdev.adev, &in); + if (ret) + dev_err(udma_dev->dev, + "send req msg cmd failed, ret is %d.\n", ret); + + kfree(req_msg); + + return ret; +} + int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode) { diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 6672f8ea01ec..170597ae4b20 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -8,7 +8,12 @@ #define UDMA_EID_SIZE 16 #define UDMA_CNA_SIZE 16 +#define UDMA_PID_MASK 24 +#define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 +#define UDMA_MAX_TPID_NUM 5 + +#define UDMA_TPN_CNT_MASK 0x1F enum udma_ctrlq_cmd_code_type { UDMA_CMD_CTRLQ_REMOVE_SINGLE_TP = 0x13, @@ -36,6 +41,19 @@ enum udma_ctrlq_tpid_status { UDMA_CTRLQ_TPID_IDLE, }; +struct udma_ctrlq_tpid { + uint32_t tpid : 24; + uint32_t tpn_cnt : 8; + uint32_t tpn_start : 24; + uint32_t rsv : 8; +}; + +struct udma_ctrlq_tpid_list_rsp { + uint32_t tp_list_cnt : 16; + uint32_t rsv : 16; + struct udma_ctrlq_tpid tpid_list[UDMA_MAX_TPID_NUM]; +}; + struct udma_ctrlq_tp_flush_done_req_data { uint32_t tpn : 24; uint32_t rsv : 8; @@ -79,6 +97,14 @@ struct udma_ctrlq_check_tp_active_rsp_info { struct udma_ctrlq_check_tp_active_rsp_data data[]; }; +struct udma_ctrlq_get_tp_list_req_data { + uint8_t seid[UDMA_EID_SIZE]; + uint8_t deid[UDMA_EID_SIZE]; + uint32_t trans_type : 4; + uint32_t rsv : 4; + uint32_t flag : 24; +}; + enum udma_cmd_ue_opcode { UDMA_CMD_UBCORE_COMMAND = 0x1, UDMA_CMD_NOTIFY_MUE_SAVE_TP = 0x2, @@ -101,7 +127,14 @@ struct udma_notify_flush_done { int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn); int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status); +int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata); + +void 
udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table,
+				  bool is_need_flush);
 int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host,
 		    uint8_t dst_ue_idx, uint16_t opcode);
+int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode);
 
 #endif /* __UDMA_CTRLQ_TP_H__ */
diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h
index 58c5da4a2234..f4ddf294b769 100644
--- a/drivers/ub/urma/hw/udma/udma_dev.h
+++ b/drivers/ub/urma/hw/udma/udma_dev.h
@@ -115,6 +115,7 @@ struct udma_dev {
 	struct xarray crq_nb_table;
 	struct xarray npu_nb_table;
 	struct mutex npu_nb_mutex;
+	struct xarray ctrlq_tpid_table;
 	struct xarray tpn_ue_idx_table;
 	struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX];
 	resource_size_t db_base;
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index b6bb3e240d1e..e1c2f1ab359d 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -201,6 +201,7 @@ static struct ubcore_ops g_dev_ops = {
 	.unimport_jetty = udma_unimport_jetty,
 	.create_jetty_grp = udma_create_jetty_grp,
 	.delete_jetty_grp = udma_delete_jetty_grp,
+	.get_tp_list = udma_get_tp_list,
 	.post_jfs_wr = udma_post_jfs_wr,
 	.post_jfr_wr = udma_post_jfr_wr,
 	.post_jetty_send_wr = udma_post_jetty_send_wr,
@@ -238,6 +239,7 @@ static void udma_destroy_tp_ue_idx_table(struct udma_dev *udma_dev)
 
 void udma_destroy_tables(struct udma_dev *udma_dev)
 {
+	udma_ctrlq_destroy_tpid_list(udma_dev, &udma_dev->ctrlq_tpid_table, false);
 	udma_destroy_eid_table(udma_dev);
 	mutex_destroy(&udma_dev->disable_ue_rx_mutex);
 	if (!ida_is_empty(&udma_dev->rsvd_jetty_ida_table.ida))
@@ -299,6 +301,7 @@ static void udma_init_managed_by_ctrl_cpu_table(struct udma_dev *udma_dev)
 {
 	mutex_init(&udma_dev->eid_mutex);
 	xa_init(&udma_dev->eid_table);
+	xa_init(&udma_dev->ctrlq_tpid_table);
 }
 
 int udma_init_tables(struct udma_dev *udma_dev)
-- 
Gitee

From b6d6db8a575e881ab26a808ecb4e65a745eb1ba2 Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 21 Aug 2025 21:27:14 +0800
Subject: [PATCH 122/243] ub: udma: Support active tp.

commit a315631fec986888615f469189c30799aab3c610 openEuler

This patch adds the ability to activate a tp. During the chain
construction process, the driver posts a ctrlq command to activate
the tp.
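For reference, a minimal sketch of the intended calling sequence,
assuming the local and peer tp handles and PSNs have already been
exchanged out of band; the field names follow struct
ubcore_active_tp_cfg as used by this patch, while the local_*/remote_*
variables and ub_dev are illustrative only:

	struct ubcore_active_tp_cfg cfg = {};
	int ret;

	/* local handle, e.g. obtained earlier via get_tp_list() */
	cfg.tp_handle.bs.tpid = local_tpid;
	cfg.tp_handle.bs.tpn_start = local_tpn_start;
	cfg.tp_handle.bs.tp_cnt = local_tp_cnt;
	cfg.tp_attr.tx_psn = tx_psn;

	/* peer handle received during chain construction */
	cfg.peer_tp_handle.bs.tpid = remote_tpid;
	cfg.peer_tp_handle.bs.tpn_start = remote_tpn_start;
	cfg.peer_tp_handle.bs.tp_cnt = remote_tp_cnt;
	cfg.tp_attr.rx_psn = rx_psn;

	ret = udma_active_tp(ub_dev, &cfg);
	/* on success, cfg.tp_handle.bs.tpid is rewritten with the tp id
	 * returned in the ctrlq response; on a UE the driver additionally
	 * notifies the MUE to save the tp range
	 */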
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 96 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 21 ++++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 118 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index f08830f06fa5..df84c748a866 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -145,6 +145,30 @@ struct udma_ctrlq_trans_map { return -EINVAL; } +static int udma_send_req_to_mue(struct udma_dev *dev, union ubcore_tp_handle *tp_handle) +{ + uint32_t data_len = (uint32_t)sizeof(struct udma_ue_tp_info); + struct udma_ue_tp_info *data; + struct ubcore_req *req_msg; + int ret; + + req_msg = kzalloc(sizeof(*req_msg) + data_len, GFP_KERNEL); + if (!req_msg) + return -ENOMEM; + + data = (struct udma_ue_tp_info *)req_msg->data; + data->start_tpn = tp_handle->bs.tpn_start; + data->tp_cnt = tp_handle->bs.tp_cnt; + req_msg->len = data_len; + ret = send_req_to_mue(dev, req_msg, UDMA_CMD_NOTIFY_MUE_SAVE_TP); + if (ret) + dev_err(dev->dev, "fail to notify mue save tp, ret %d.\n", ret); + + kfree(req_msg); + + return ret; +} + static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq_tpid_table, struct udma_ctrlq_tpid *tpid) { @@ -318,6 +342,62 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi xa_destroy(ctrlq_tpid_table); } +static int udma_k_ctrlq_create_active_tp_msg(struct udma_dev *udev, + struct ubcore_active_tp_cfg *active_cfg, + uint32_t *tp_id) +{ + struct udma_ctrlq_active_tp_resp_data active_tp_resp = {}; + struct udma_ctrlq_active_tp_req_data active_tp_req = {}; + struct ubase_ctrlq_msg msg = {}; + int ret; + + active_tp_req.local_tp_id = active_cfg->tp_handle.bs.tpid; + active_tp_req.local_tpn_cnt = active_cfg->tp_handle.bs.tp_cnt; + active_tp_req.local_tpn_start = active_cfg->tp_handle.bs.tpn_start; + active_tp_req.local_psn = active_cfg->tp_attr.tx_psn; + + active_tp_req.remote_tp_id = active_cfg->peer_tp_handle.bs.tpid; + active_tp_req.remote_tpn_cnt = active_cfg->peer_tp_handle.bs.tp_cnt; + active_tp_req.remote_tpn_start = active_cfg->peer_tp_handle.bs.tpn_start; + active_tp_req.remote_psn = active_cfg->tp_attr.rx_psn; + + if (debug_switch) + udma_dfx_ctx_print(udev, "udma create active tp msg info", + active_tp_req.local_tp_id, + sizeof(struct udma_ctrlq_active_tp_req_data) / sizeof(uint32_t), + (uint32_t *)&active_tp_req); + + msg.opcode = UDMA_CMD_CTRLQ_ACTIVE_TP; + udma_ctrlq_set_tp_msg(&msg, (void *)&active_tp_req, sizeof(active_tp_req), + (void *)&active_tp_resp, sizeof(active_tp_resp)); + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "udma active tp send failed, ret = %d.\n", ret); + + *tp_id = active_tp_resp.local_tp_id; + + return ret; +} + +int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, + struct ubcore_active_tp_cfg *active_cfg) +{ + uint32_t tp_id = active_cfg->tp_handle.bs.tpid; + int ret; + + ret = udma_k_ctrlq_create_active_tp_msg(dev, active_cfg, &tp_id); + if (ret) + return ret; + + active_cfg->tp_handle.bs.tpid = tp_id; + + if (dev->is_ue) + (void)udma_send_req_to_mue(dev, &(active_cfg->tp_handle)); + + return 0; +} + int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) { struct udma_req_msg *req_msg; @@ -376,3 +456,19 @@ int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, return ret; } + +int 
udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg)
+{
+	struct udma_dev *udma_dev = to_udma_dev(dev);
+	int ret;
+
+	if (debug_switch)
+		udma_dfx_ctx_print(udma_dev, "udma active tp ex", active_cfg->tp_handle.bs.tpid,
+				   sizeof(struct ubcore_active_tp_cfg) / sizeof(uint32_t),
+				   (uint32_t *)active_cfg);
+	ret = udma_ctrlq_set_active_tp_ex(udma_dev, active_cfg);
+	if (ret)
+		dev_err(udma_dev->dev, "Failed to set active tp msg, ret %d.\n", ret);
+
+	return ret;
+}
diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
index 170597ae4b20..248d20a272d1 100644
--- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
+++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
@@ -54,6 +54,26 @@ struct udma_ctrlq_tpid_list_rsp {
 	struct udma_ctrlq_tpid tpid_list[UDMA_MAX_TPID_NUM];
 };
 
+struct udma_ctrlq_active_tp_req_data {
+	uint32_t local_tp_id : 24;
+	uint32_t local_tpn_cnt : 8;
+	uint32_t local_tpn_start : 24;
+	uint32_t rsv : 8;
+	uint32_t remote_tp_id : 24;
+	uint32_t remote_tpn_cnt : 8;
+	uint32_t remote_tpn_start : 24;
+	uint32_t rsv1 : 8;
+	uint32_t local_psn;
+	uint32_t remote_psn;
+};
+
+struct udma_ctrlq_active_tp_resp_data {
+	uint32_t local_tp_id : 24;
+	uint32_t local_tpn_cnt : 8;
+	uint32_t local_tpn_start : 24;
+	uint32_t rsv : 8;
+};
+
 struct udma_ctrlq_tp_flush_done_req_data {
 	uint32_t tpn : 24;
 	uint32_t rsv : 8;
@@ -136,5 +156,6 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi
 int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host,
 		    uint8_t dst_ue_idx, uint16_t opcode);
 int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode);
+int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg);
 
 #endif /* __UDMA_CTRLQ_TP__H__ */
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index e1c2f1ab359d..f9bac04ba1a2 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -202,6 +202,7 @@ static struct ubcore_ops g_dev_ops = {
 	.create_jetty_grp = udma_create_jetty_grp,
 	.delete_jetty_grp = udma_delete_jetty_grp,
 	.get_tp_list = udma_get_tp_list,
+	.active_tp = udma_active_tp,
 	.post_jfs_wr = udma_post_jfs_wr,
 	.post_jfr_wr = udma_post_jfr_wr,
 	.post_jetty_send_wr = udma_post_jetty_send_wr,
-- 
Gitee

From 2bf160c850d62f09802bd89dd7a4e596c0dea94f Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 21 Aug 2025 21:50:18 +0800
Subject: [PATCH 123/243] ub: udma: Support deactivate tp.

commit 9545e058c4f723a72d434b7ddd0d137b9ff012b6 openEuler

This patch adds the ability to deactivate a tp. During the chain
teardown process, the driver posts a ctrlq command to deactivate the
tp.
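A matching sketch of the tear-down side, paired with the activate call
above (kernel caller shown; passing a NULL udata makes the driver tag
the request with UDMA_DEFAULT_PID instead of the caller's tgid; cfg and
ub_dev are the illustrative variables from the previous example):

	union ubcore_tp_handle h = cfg.tp_handle;	/* from udma_active_tp() */
	int ret;

	ret = udma_deactive_tp(ub_dev, h, NULL);
	/* internally the driver first closes ue rx (by 2 * tp_cnt), posts
	 * UDMA_CMD_CTRLQ_DEACTIVE_TP, then erases the tpid from
	 * ctrlq_tpid_table; an -EAGAIN from the ctrlq is treated as success
	 */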
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 2 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 54 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 68 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index c38cb43b2ec6..d7f7312d2b4c 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -9,6 +9,8 @@ #include "udma_ctx.h" #include "udma_dev.h" +#define UDMA_TPHANDLE_TPID_SHIFT 0xFFFFFF + struct udma_jetty_grp { struct ubcore_jetty_group ubcore_jetty_grp; uint32_t start_jetty_id; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index df84c748a866..396880ddb7d7 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -398,6 +398,49 @@ int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, return 0; } +static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata) +{ +#define UDMA_RSP_TP_MUL 2 + uint32_t tp_id = tp_handle.bs.tpid & UDMA_TPHANDLE_TPID_SHIFT; + struct udma_ctrlq_deactive_tp_req_data deactive_tp_req = {}; + uint32_t tp_num = tp_handle.bs.tp_cnt; + struct ubase_ctrlq_msg msg = {}; + int ret; + + if (tp_num) { + ret = udma_close_ue_rx(udev, true, false, false, tp_num * UDMA_RSP_TP_MUL); + if (ret) { + dev_err(udev->dev, "close ue rx failed in deactivate tp.\n"); + return ret; + } + } + + msg.opcode = UDMA_CMD_CTRLQ_DEACTIVE_TP; + deactive_tp_req.tp_id = tp_id; + deactive_tp_req.tpn_cnt = tp_handle.bs.tp_cnt; + deactive_tp_req.start_tpn = tp_handle.bs.tpn_start; + if (!udata) + deactive_tp_req.pid_flag = UDMA_DEFAULT_PID; + else + deactive_tp_req.pid_flag = (uint32_t)current->tgid & UDMA_PID_MASK; + + udma_ctrlq_set_tp_msg(&msg, (void *)&deactive_tp_req, sizeof(deactive_tp_req), NULL, 0); + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret != -EAGAIN && ret) { + dev_err(udev->dev, "deactivate tp send msg failed, tp_id = %u, ret = %d.\n", + tp_id, ret); + if (tp_num) + udma_open_ue_rx(udev, true, false, false, tp_num * UDMA_RSP_TP_MUL); + return ret; + } + + udma_ctrlq_erase_one_tpid(&udev->ctrlq_tpid_table, tp_id); + + return (ret == -EAGAIN) ? 
0 : ret;
+}
+
 int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode)
 {
 	struct udma_req_msg *req_msg;
@@ -472,3 +515,14 @@ int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *activ
 
 	return ret;
 }
+
+int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle,
+		     struct ubcore_udata *udata)
+{
+	struct udma_dev *udma_dev = to_udma_dev(dev);
+
+	if (debug_switch)
+		dev_info(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", tp_handle.bs.tpid);
+
+	return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata);
+}
diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
index 248d20a272d1..5eb470a9e3d7 100644
--- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
+++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h
@@ -74,6 +74,15 @@ struct udma_ctrlq_active_tp_resp_data {
 	uint32_t rsv : 8;
 };
 
+struct udma_ctrlq_deactive_tp_req_data {
+	uint32_t tp_id : 24;
+	uint32_t tpn_cnt : 8;
+	uint32_t start_tpn : 24;
+	uint32_t rsv : 8;
+	uint32_t pid_flag : 24;
+	uint32_t rsv1 : 8;
+};
+
 struct udma_ctrlq_tp_flush_done_req_data {
 	uint32_t tpn : 24;
 	uint32_t rsv : 8;
@@ -157,5 +166,7 @@ int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host,
 		    uint8_t dst_ue_idx, uint16_t opcode);
 int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode);
 int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg);
+int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle,
+		     struct ubcore_udata *udata);
 
 #endif /* __UDMA_CTRLQ_TP_H__ */
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index f9bac04ba1a2..f7a5adcd281b 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -203,6 +203,7 @@ static struct ubcore_ops g_dev_ops = {
 	.delete_jetty_grp = udma_delete_jetty_grp,
 	.get_tp_list = udma_get_tp_list,
 	.active_tp = udma_active_tp,
+	.deactive_tp = udma_deactive_tp,
 	.post_jfs_wr = udma_post_jfs_wr,
 	.post_jfr_wr = udma_post_jfr_wr,
 	.post_jetty_send_wr = udma_post_jetty_send_wr,
-- 
Gitee

From 223050bfa779891aaf70827bcbd7dd70f8eb398b Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Fri, 22 Aug 2025 10:28:49 +0800
Subject: [PATCH 124/243] ub: udma: Support query NPU info.

commit 4b13fcb18fad1bea907631524a08c3b59d5a903f openEuler

This patch adds the ability to query NPU (Neural network Processing
Unit) info. Users can send a ctrlq message through the user-ctl
interface to perform the query.
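A sketch of how a kernel client (for example an NPU driver) would hook
the resource-ratio notification. The handler prototype is inferred from
how crq_handler is invoked in udma_dev_res_ratio_ctrlq_handler(); the
handler name and ub_dev are hypothetical:

	/* prototype inferred from crq_handler(&udev->ub_dev, data, len) */
	static int npu_res_ratio_handler(struct ubcore_device *dev,
					 void *data, uint16_t len)
	{
		/* parse the resource-ratio payload pushed by the MUE */
		return 0;
	}

	struct ubcore_user_ctl ctl = {};
	int ret;

	ctl.in.opcode = UDMA_USER_CTL_NPU_REGISTER_INFO_CB;
	ctl.in.addr = (uint64_t)(uintptr_t)npu_res_ratio_handler;
	ctl.in.len = sizeof(void *);

	ret = udma_user_ctl(ub_dev, &ctl);	/* uctx == NULL selects the
						 * kernel ops table */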
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 22 ++++ drivers/ub/urma/hw/udma/udma_common.h | 1 + drivers/ub/urma/hw/udma/udma_ctl.c | 129 ++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 151 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 ++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 6 files changed, 316 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 375ed4826f6a..017216169ea3 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -430,6 +430,28 @@ void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex) mutex_init(udma_mutex); } +void udma_destroy_npu_cb_table(struct udma_dev *dev) +{ + struct udma_ctrlq_event_nb *nb = NULL; + unsigned long index = 0; + + mutex_lock(&dev->npu_nb_mutex); + if (!xa_empty(&dev->npu_nb_table)) { + xa_for_each(&dev->npu_nb_table, index, nb) { + ubase_ctrlq_unregister_crq_event(dev->comdev.adev, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + nb->opcode); + __xa_erase(&dev->npu_nb_table, index); + kfree(nb); + nb = NULL; + } + } + + mutex_unlock(&dev->npu_nb_mutex); + xa_destroy(&dev->npu_nb_table); + mutex_destroy(&dev->npu_nb_mutex); +} + void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name) { diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index d7f7312d2b4c..300357af8895 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -72,6 +72,7 @@ struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); +void udma_destroy_npu_cb_table(struct udma_dev *dev); void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); void udma_destroy_eid_table(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index b06ff3ea61cf..ac2e573fa8db 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -371,3 +371,132 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc return ret; } + +static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { + [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, + [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, + [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, + [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, +}; + +static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { + [UDMA_USER_CTL_CREATE_JFS_EX] = NULL, + [UDMA_USER_CTL_DELETE_JFS_EX] = NULL, + [UDMA_USER_CTL_CREATE_JFC_EX] = NULL, + [UDMA_USER_CTL_DELETE_JFC_EX] = NULL, + [UDMA_USER_CTL_SET_CQE_ADDR] = NULL, + [UDMA_USER_CTL_QUERY_UE_INFO] = NULL, + [UDMA_USER_CTL_GET_DEV_RES_RATIO] = NULL, + [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, + [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_UBMEM_INFO] = NULL, + [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = NULL, +}; + +static int udma_user_data(struct ubcore_device *dev, + struct ubcore_user_ctl *k_user_ctl) +{ + struct 
udma_dev *udev = to_udma_dev(dev); + struct ubcore_user_ctl_out out = {}; + struct ubcore_user_ctl_in in = {}; + unsigned long byte; + int ret; + + if (k_user_ctl->in.len >= UDMA_HW_PAGE_SIZE || k_user_ctl->out.len >= UDMA_HW_PAGE_SIZE) { + dev_err(udev->dev, "The len exceeds the maximum value in user ctrl.\n"); + return -EINVAL; + } + + in.opcode = k_user_ctl->in.opcode; + if (!g_udma_user_ctl_u_ops[in.opcode]) { + dev_err(udev->dev, "invalid user opcode: 0x%x.\n", in.opcode); + return -EINVAL; + } + + if (k_user_ctl->in.len) { + in.addr = (uint64_t)kzalloc(k_user_ctl->in.len, GFP_KERNEL); + if (!in.addr) + return -ENOMEM; + + in.len = k_user_ctl->in.len; + byte = copy_from_user((void *)(uintptr_t)in.addr, + (void __user *)(uintptr_t)k_user_ctl->in.addr, + k_user_ctl->in.len); + if (byte) { + dev_err(udev->dev, + "failed to copy user data in user ctrl, byte = %lu.\n", byte); + kfree((void *)in.addr); + return -EFAULT; + } + } + + if (k_user_ctl->out.len) { + out.addr = (uint64_t)kzalloc(k_user_ctl->out.len, GFP_KERNEL); + if (!out.addr) { + kfree((void *)in.addr); + + return -ENOMEM; + } + out.len = k_user_ctl->out.len; + + if (k_user_ctl->out.addr) { + byte = copy_from_user((void *)(uintptr_t)out.addr, + (void __user *)(uintptr_t)k_user_ctl->out.addr, + k_user_ctl->out.len); + if (byte) { + dev_err(udev->dev, + "failed to copy user data out user ctrl, byte = %lu.\n", + byte); + kfree((void *)out.addr); + kfree((void *)in.addr); + + return -EFAULT; + } + } + } + + ret = g_udma_user_ctl_u_ops[in.opcode](dev, k_user_ctl->uctx, &in, &out); + kfree((void *)in.addr); + + if (out.addr) { + byte = copy_to_user((void __user *)(uintptr_t)k_user_ctl->out.addr, + (void *)(uintptr_t)out.addr, min(out.len, k_user_ctl->out.len)); + if (byte) { + dev_err(udev->dev, + "copy resp to user failed in user ctrl, byte = %lu.\n", byte); + ret = -EFAULT; + } + + kfree((void *)out.addr); + } + + return ret; +} + +int udma_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl) +{ + struct udma_dev *udev; + + if (dev == NULL || k_user_ctl == NULL) + return -EINVAL; + + udev = to_udma_dev(dev); + + if (k_user_ctl->in.opcode >= UDMA_USER_CTL_MAX) { + dev_err(udev->dev, "invalid opcode: 0x%x.\n", k_user_ctl->in.opcode); + return -EINVAL; + } + + if (k_user_ctl->uctx) + return udma_user_data(dev, k_user_ctl); + + if (!g_udma_user_ctl_k_ops[k_user_ctl->in.opcode]) { + dev_err(udev->dev, "invalid user opcode: 0x%x.\n", k_user_ctl->in.opcode); + return -EINVAL; + } + + return g_udma_user_ctl_k_ops[k_user_ctl->in.opcode](dev, k_user_ctl->uctx, &k_user_ctl->in, + &k_user_ctl->out); +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 396880ddb7d7..d28e206fb277 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -115,6 +115,157 @@ int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn) return ret; } +int udma_get_dev_resource_ratio(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev_resource_ratio dev_res = {}; + struct udma_dev_pair_info dev_res_out = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + int ret = 0; + + if (udma_check_base_param(in->addr, in->len, sizeof(dev_res.index))) { + dev_err(udev->dev, "parameter invalid in get dev res, len = %u.\n", in->len); + return -EINVAL; + } + + if (out->addr == 0 || out->len != sizeof(dev_res_out)) { + dev_err(udev->dev, 
"get dev resource ratio, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + memcpy(&dev_res.index, (void *)(uintptr_t)in->addr, sizeof(dev_res.index)); + + ret = ubase_get_bus_eid(udev->comdev.adev, &dev_res.eid); + if (ret) { + dev_err(udev->dev, "get dev bus eid failed, ret is %d.\n", ret); + return ret; + } + + ctrlq_msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(dev_res); + ctrlq_msg.in = (void *)&dev_res; + ctrlq_msg.out_size = sizeof(dev_res_out); + ctrlq_msg.out = &dev_res_out; + ctrlq_msg.opcode = UDMA_CTRLQ_GET_DEV_RESOURCE_RATIO; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + memcpy((void *)(uintptr_t)out->addr, &dev_res_out, sizeof(dev_res_out)); + + return ret; +} + +static int udma_dev_res_ratio_ctrlq_handler(struct auxiliary_device *adev, + uint8_t service_ver, void *data, + uint16_t len, uint16_t seq) +{ + struct udma_dev *udev = (struct udma_dev *)get_udma_dev(adev); + struct udma_ctrlq_event_nb *udma_cb; + int ret; + + mutex_lock(&udev->npu_nb_mutex); + udma_cb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + if (!udma_cb) { + dev_err(udev->dev, "failed to query npu info cb while xa_load.\n"); + mutex_unlock(&udev->npu_nb_mutex); + return -EINVAL; + } + + ret = udma_cb->crq_handler(&udev->ub_dev, data, len); + if (ret) + dev_err(udev->dev, "npu crq handler failed, ret = %d.\n", ret); + mutex_unlock(&udev->npu_nb_mutex); + + return ret; +} + +int udma_register_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct ubase_ctrlq_event_nb ubase_cb = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ctrlq_event_nb *udma_cb; + int ret; + + if (udma_check_base_param(in->addr, in->len, sizeof(udma_cb->crq_handler))) { + dev_err(udev->dev, "parameter invalid in register npu cb, len = %u.\n", in->len); + return -EINVAL; + } + + udma_cb = kzalloc(sizeof(*udma_cb), GFP_KERNEL); + if (!udma_cb) + return -ENOMEM; + + udma_cb->opcode = UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO; + udma_cb->crq_handler = (void *)(uintptr_t)in->addr; + + mutex_lock(&udev->npu_nb_mutex); + if (xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO)) { + dev_err(udev->dev, "query npu info callback exist.\n"); + ret = -EINVAL; + goto err_release_udma_cb; + } + ret = xa_err(__xa_store(&udev->npu_nb_table, udma_cb->opcode, udma_cb, GFP_KERNEL)); + if (ret) { + dev_err(udev->dev, + "save crq nb entry failed, opcode is %u, ret is %d.\n", + udma_cb->opcode, ret); + goto err_release_udma_cb; + } + + ubase_cb.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + ubase_cb.opcode = UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO; + ubase_cb.back = udev->comdev.adev; + ubase_cb.crq_handler = udma_dev_res_ratio_ctrlq_handler; + ret = ubase_ctrlq_register_crq_event(udev->comdev.adev, &ubase_cb); + if (ret) { + __xa_erase(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + dev_err(udev->dev, "ubase register npu crq event failed, ret is %d.\n", ret); + goto err_release_udma_cb; + } + mutex_unlock(&udev->npu_nb_mutex); + + return 0; + +err_release_udma_cb: + mutex_unlock(&udev->npu_nb_mutex); + kfree(udma_cb); + return ret; +} + +int udma_unregister_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct 
ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ctrlq_event_nb *nb; + + ubase_ctrlq_unregister_crq_event(udev->comdev.adev, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + + mutex_lock(&udev->npu_nb_mutex); + nb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + if (!nb) { + dev_warn(udev->dev, "query npu info cb not exist.\n"); + goto err_find_npu_nb; + } + + __xa_erase(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + kfree(nb); + nb = NULL; + +err_find_npu_nb: + mutex_unlock(&udev->npu_nb_mutex); + return 0; +} + static int udma_ctrlq_get_trans_type(struct udma_dev *dev, enum ubcore_transport_mode trans_mode, enum udma_ctrlq_trans_type *tp_type) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 5eb470a9e3d7..ba43f3590417 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -11,6 +11,7 @@ #define UDMA_PID_MASK 24 #define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 +#define UDMA_MAX_UE_IDX 256 #define UDMA_MAX_TPID_NUM 5 #define UDMA_TPN_CNT_MASK 0x1F @@ -154,6 +155,16 @@ struct udma_notify_flush_done { uint32_t tpn; }; +struct udma_dev_resource_ratio { + struct ubase_bus_eid eid; + uint32_t index; +}; + +int udma_register_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + +int udma_unregister_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn); int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status); int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index f7a5adcd281b..3111c535b549 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -204,6 +204,7 @@ static struct ubcore_ops g_dev_ops = { .get_tp_list = udma_get_tp_list, .active_tp = udma_active_tp, .deactive_tp = udma_deactive_tp, + .user_ctl = udma_user_ctl, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, @@ -254,6 +255,7 @@ void udma_destroy_tables(struct udma_dev *udma_dev) xa_destroy(&udma_dev->crq_nb_table); udma_destroy_tp_ue_idx_table(udma_dev); + udma_destroy_npu_cb_table(udma_dev); if (!xa_empty(&udma_dev->ksva_table)) dev_err(udma_dev->dev, "ksva table is not empty.\n"); -- Gitee From 3c5a8c562ce64489c0dab5b75271aa6c6e3963bb Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 11:10:44 +0800 Subject: [PATCH 125/243] ub: udma: Support dump ae aux info. commit 57fd1bc217e5c6026902e8c0f0a3b46946a09825 openEuler This patch adds the ability to dump ae aux info. 
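The selected registers are printed to the kernel log via dev_info() and,
when the caller supplies large enough arrays, copied back as well. A
sketch of the caller-provided out buffers, with the field roles inferred
from dump_fill_aux_info() (the array length 9 matches the entries of
udma_ae_aux_info_type_str and is illustrative only):

	enum udma_ae_aux_info_type types[9];
	uint32_t values[9];
	struct udma_ae_aux_info_out out_buf = {
		.aux_info_type = types,
		.aux_info_value = values,
		.aux_info_num = 9,	/* capacity on input; rewritten with
					 * the count actually filled */
	};

	/* which subset is dumped depends on the async event type: tp flush
	 * done, tp level error, or jetty level error (jfs check vs. jfc) */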
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 113 +++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index ac2e573fa8db..8147a784dd2f 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,117 @@ #include #include "udma_def.h" +const char *udma_ae_aux_info_type_str[] = { + "TP_RRP_FLUSH_TIMER_PKT_CNT", + "TPP_DFX5", + "TWP_AE_DFX", + "TP_RRP_ERR_FLG_0", + "TP_RRP_ERR_FLG_1", + "TP_RWP_INNER_ALM", + "TP_RCP_INNER_ALM", + "LQC_TA_TQEP_WQE_ERR", + "LQC_TA_CQM_CQE_INNER_ALARM", +}; + +static void dump_fill_aux_info(struct udma_dev *dev, struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info, + enum udma_ae_aux_info_type *type, uint32_t aux_info_num) +{ + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->ae_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", udma_ae_aux_info_type_str[type[i]], + info->ae_aux_info[type[i]]); +} + +static void dump_ae_tp_flush_done_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TP_RRP_FLUSH_TIMER_PKT_CNT, + TPP_DFX5, + }; + + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_tp_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TWP_AE_DFX_FOR_AE, + TP_RRP_ERR_FLG_0_FOR_AE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_jetty_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TP_RRP_ERR_FLG_0_FOR_AE, + TP_RRP_ERR_FLG_1, + TP_RWP_INNER_ALM_FOR_AE, + TP_RCP_INNER_ALM_FOR_AE, + LQC_TA_TQEP_WQE_ERR, + LQC_TA_CQM_CQE_INNER_ALARM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_jfc_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + LQC_TA_CQM_CQE_INNER_ALARM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + switch (info->event_type) { + case UBASE_EVENT_TYPE_TP_FLUSH_DONE: + dump_ae_tp_flush_done_aux_info(dev, aux_info_out, info); + break; + case UBASE_EVENT_TYPE_TP_LEVEL_ERROR: + dump_ae_tp_err_aux_info(dev, aux_info_out, info); + break; + case UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR: + if (info->sub_type == UBASE_SUBEVENT_TYPE_JFS_CHECK_ERROR) + dump_ae_jetty_err_aux_info(dev, aux_info_out, info); + else + dump_ae_jfc_err_aux_info(dev, aux_info_out, info); + break; + default: + 
break; + } +} + static int send_cmd_query_cqe_aux_info(struct udma_dev *udma_dev, struct udma_cmd_query_cqe_aux_info *info) { @@ -362,6 +473,8 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc return ret; } + dump_ae_aux_info(udma_dev, &aux_info_out, &info); + ret = copy_out_ae_data_to_user(udma_dev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udma_dev->dev, -- Gitee From 0406128deb212d6b150e854fa322837b7d63303b Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 11:40:04 +0800 Subject: [PATCH 126/243] ub: udma: Support dump ce aux info. commit ea37ae296d7c96d306197138e09a870bc3008c16 openEuler This patch adds the ability to dump ce aux info. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 360 ++++++++++++++++++++++++++++- 1 file changed, 356 insertions(+), 4 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 8147a784dd2f..49cb1ebb0895 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,51 @@ #include #include "udma_def.h" +const char *udma_cqe_aux_info_type_str[] = { + "TPP2TQEM_WR_CNT", + "DEVICE_RAS_STATUS_2", + "RXDMA_WR_PAYL_AXI_ERR", + "RXDMA_HEAD_SPLIT_ERR_FLAG0", + "RXDMA_HEAD_SPLIT_ERR_FLAG1", + "RXDMA_HEAD_SPLIT_ERR_FLAG2", + "RXDMA_HEAD_SPLIT_ERR_FLAG3", + "TP_RCP_INNER_ALM", + "TWP_AE_DFX", + "PA_OUT_PKT_ERR_CNT", + "TP_DAM_AXI_ALARM", + "TP_DAM_VFT_BT_ALARM", + "TP_EUM_AXI_ALARM", + "TP_EUM_VFT_BT_ALARM", + "TP_TPMM_AXI_ALARM", + "TP_TPMM_VFT_BT_ALARM", + "TP_TPGCM_AXI_ALARM", + "TP_TPGCM_VFT_BT_ALARM", + "TWP_ALM", + "TP_RWP_INNER_ALM", + "TWP_DFX21", + "LQC_TA_RNR_TANACK_CNT", + "FVT", + "RQMT0", + "RQMT1", + "RQMT2", + "RQMT3", + "RQMT4", + "RQMT5", + "RQMT6", + "RQMT7", + "RQMT8", + "RQMT9", + "RQMT10", + "RQMT11", + "RQMT12", + "RQMT13", + "RQMT14", + "RQMT15", + "PROC_ERROR_ALM", + "LQC_TA_TIMEOUT_TAACK_CNT", + "TP_RRP_ERR_FLG_0", +}; + const char *udma_ae_aux_info_type_str[] = { "TP_RRP_FLUSH_TIMER_PKT_CNT", "TPP_DFX5", @@ -30,6 +75,305 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static void dump_cqe_client_loc_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TPP2TQEM_WR_CNT, + DEVICE_RAS_STATUS_2, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_loc_access_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TWP_AE_DFX_FOR_CQE, + PA_OUT_PKT_ERR_CNT, + TP_DAM_AXI_ALARM, + TP_DAM_VFT_BT_ALARM, + TP_EUM_AXI_ALARM, + TP_EUM_VFT_BT_ALARM, + TP_TPMM_AXI_ALARM, + TP_TPMM_VFT_BT_ALARM, + 
TP_TPGCM_AXI_ALARM, + TP_TPGCM_VFT_BT_ALARM, + DEVICE_RAS_STATUS_2, + TWP_ALM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_rem_resp_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void +dump_cqe_client_rem_access_abort_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TP_RRP_ERR_FLG_0_FOR_CQE, + TPP2TQEM_WR_CNT, + TWP_DFX21 + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_ack_timeout_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + LQC_TA_TIMEOUT_TAACK_CNT, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void +dump_cqe_client_rnr_retry_cnt_exc_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + LQC_TA_RNR_TANACK_CNT, + FVT, + RQMT0, + RQMT1, + RQMT2, + RQMT3, + RQMT4, + RQMT5, + RQMT6, + RQMT7, + RQMT8, + RQMT9, + RQMT10, + RQMT11, + 
RQMT12, + RQMT13, + RQMT14, + RQMT15, + PROC_ERROR_ALM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_server_loc_access_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TP_RRP_ERR_FLG_0_FOR_CQE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_server_loc_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_all_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= MAX_CQE_AUX_INFO_TYPE_NUM) { + for (i = 0; i < MAX_CQE_AUX_INFO_TYPE_NUM; i++) { + aux_info_out->aux_info_type[i] = i; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[i]; + } + aux_info_out->aux_info_num = MAX_CQE_AUX_INFO_TYPE_NUM; + } + + for (i = 0; i < MAX_CQE_AUX_INFO_TYPE_NUM; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[i], info->cqe_aux_info[i]); +} + +static void (*udma_cqe_aux_info_dump[14][2])(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) = { + {NULL, NULL}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, + {dump_cqe_server_loc_len_err_aux_info, + dump_cqe_client_loc_len_err_aux_info}, + {NULL, NULL}, + {dump_cqe_server_loc_access_err_aux_info, + dump_cqe_client_loc_access_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_rem_resp_len_err_aux_info}, + {dump_cqe_all_aux_info, 
dump_cqe_all_aux_info}, + {NULL, NULL}, + {dump_cqe_all_aux_info, + dump_cqe_client_rem_access_abort_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_ack_timeout_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_rnr_retry_cnt_exc_err_aux_info}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, + {NULL, NULL}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, +}; + static void dump_fill_aux_info(struct udma_dev *dev, struct udma_ae_aux_info_out *aux_info_out, struct udma_cmd_query_ae_aux_info *info, enum udma_ae_aux_info_type *type, uint32_t aux_info_num) @@ -246,7 +590,7 @@ static int copy_out_cqe_data_to_user(struct udma_dev *udma_dev, } int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, - struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { struct udma_cqe_aux_info_out user_aux_info_out = {}; struct udma_cqe_aux_info_out aux_info_out = {}; @@ -263,6 +607,15 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u memcpy(&cqe_info_in, (void *)(uintptr_t)in->addr, sizeof(struct udma_cqe_info_in)); + info.status = cqe_info_in.status; + info.is_client = !(cqe_info_in.s_r & 1); + if (cqe_info_in.status >= ARRAY_SIZE(udma_cqe_aux_info_dump) || + udma_cqe_aux_info_dump[info.status][info.is_client] == NULL) { + dev_err(udev->dev, "status %u is invalid or does not need to be queried.\n", + cqe_info_in.status); + return -EINVAL; + } + ret = copy_out_cqe_data_from_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udev->dev, @@ -270,9 +623,6 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u return ret; } - info.status = cqe_info_in.status; - info.is_client = !(cqe_info_in.s_r & 1); - ret = send_cmd_query_cqe_aux_info(udev, &info); if (ret) { dev_err(udev->dev, @@ -282,6 +632,8 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u return ret; } + udma_cqe_aux_info_dump[info.status][info.is_client](udev, &aux_info_out, &info); + ret = copy_out_cqe_data_to_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udev->dev, -- Gitee From 3e604417cbbd90ae0fc714ea10a2e9c829a0af74 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 14:29:52 +0800 Subject: [PATCH 127/243] ub: udma: Support bind and unbind jetty. commit 03e3f930e3b9639951964a810b59ccfee3d4d73c openEuler This patch adds the ability to bind and unbind jetty. 
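Since the implementation only records the remote jetty on the send
queue (sq.rc_tjetty), bind/unbind is a thin bookkeeping layer around
import/unimport. A sketch of the expected pairing, with cfg/udata setup
omitted and IS_ERR_OR_NULL used as a conservative guess at the import
failure convention:

	struct ubcore_tjetty *tj;
	int ret;

	tj = udma_import_jetty_ex(ub_dev, &tjetty_cfg, &active_tp_cfg, udata);
	if (IS_ERR_OR_NULL(tj))
		return -EINVAL;

	ret = udma_bind_jetty_ex(jetty, tj, &active_tp_cfg, udata);	/* sets sq.rc_tjetty */

	/* ... RC traffic on the bound jetty ... */

	udma_unbind_jetty(jetty);	/* clears sq.rc_tjetty */
	udma_unimport_jetty(tj);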
Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/udma_jetty.c | 21 +++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_jetty.h |  5 +++++
 drivers/ub/urma/hw/udma/udma_main.c  |  2 ++
 3 files changed, 28 insertions(+)

diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c
index 87174be534ba..8dffd0c43721 100644
--- a/drivers/ub/urma/hw/udma/udma_jetty.c
+++ b/drivers/ub/urma/hw/udma/udma_jetty.c
@@ -1299,6 +1299,15 @@ int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr
 	return ret;
 }
 
+int udma_unbind_jetty(struct ubcore_jetty *jetty)
+{
+	struct udma_jetty *udma_jetty = to_udma_jetty(jetty);
+
+	udma_jetty->sq.rc_tjetty = NULL;
+
+	return 0;
+}
+
 struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev,
 					   struct ubcore_tjetty_cfg *cfg,
 					   struct ubcore_active_tp_cfg *active_tp_cfg,
@@ -1332,3 +1341,15 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev,
 
 	return &tjetty->ubcore_tjetty;
 }
+
+int udma_bind_jetty_ex(struct ubcore_jetty *jetty,
+		       struct ubcore_tjetty *tjetty,
+		       struct ubcore_active_tp_cfg *active_tp_cfg,
+		       struct ubcore_udata *udata)
+{
+	struct udma_jetty *udma_jetty = to_udma_jetty(jetty);
+
+	udma_jetty->sq.rc_tjetty = tjetty;
+
+	return 0;
+}
diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h
index f9b3b8f60885..7b5975dbcf14 100644
--- a/drivers/ub/urma/hw/udma/udma_jetty.h
+++ b/drivers/ub/urma/hw/udma/udma_jetty.h
@@ -254,6 +254,7 @@ int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr
 			    struct ubcore_jfs_wr **bad_wr);
 int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr,
 			    struct ubcore_jfr_wr **bad_wr);
+int udma_unbind_jetty(struct ubcore_jetty *jetty);
 void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq);
 int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id);
 void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout);
@@ -264,6 +265,10 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev,
 					   struct ubcore_tjetty_cfg *cfg,
 					   struct ubcore_active_tp_cfg *active_tp_cfg,
 					   struct ubcore_udata *udata);
+int udma_bind_jetty_ex(struct ubcore_jetty *jetty,
+		       struct ubcore_tjetty *tjetty,
+		       struct ubcore_active_tp_cfg *active_tp_cfg,
+		       struct ubcore_udata *udata);
 void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq,
 			      struct ubcore_jfc *send_jfc,
 			      struct ubcore_jfc *recv_jfc);
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index 3111c535b549..1a143f8ca3c3 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -199,6 +199,8 @@ static struct ubcore_ops g_dev_ops = {
 	.destroy_jetty = udma_destroy_jetty,
 	.import_jetty_ex = udma_import_jetty_ex,
 	.unimport_jetty = udma_unimport_jetty,
+	.bind_jetty_ex = udma_bind_jetty_ex,
+	.unbind_jetty = udma_unbind_jetty,
 	.create_jetty_grp = udma_create_jetty_grp,
 	.delete_jetty_grp = udma_delete_jetty_grp,
-- 
Gitee

From 78f8f4a2a12ddc73502dbee1ea2cc123bd36a493 Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Fri, 22 Aug 2025 14:58:52 +0800
Subject: [PATCH 128/243] ub: udma: Support destroy jfs and jetty batch.

commit 49aa505c529d656712b64d471c59503a3897e7ac openEuler

This patch adds the ability to destroy jfs and jetty in batches.
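A sketch of the batch call, with error handling trimmed and the batch
size chosen for illustration; note that the failure paths in this patch
currently report index 0 through bad_jetty_index rather than the offset
of the entry that actually failed:

	struct ubcore_jetty *batch[16];	/* illustrative batch size */
	int bad_index = 0;
	int ret;

	/* fill batch[] with jetties created on the same device */
	ret = udma_destroy_jetty_batch(batch, ARRAY_SIZE(batch), &bad_index);
	if (ret)
		pr_err("batch destroy failed at index %d, ret = %d\n",
		       bad_index, ret);

udma_destroy_jfs_batch() follows the same pattern for jfs objects; both
share udma_batch_modify_and_destroy_jetty() underneath.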
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 307 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 4 + drivers/ub/urma/hw/udma/udma_jfs.c | 42 ++++ drivers/ub/urma/hw/udma/udma_jfs.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 356 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 8dffd0c43721..a3e29776ad4e 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -963,6 +963,309 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +static int udma_batch_jetty_get_ack(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, bool *jetty_flag, + int *bad_jetty_index) +{ + struct udma_jetty_ctx ctx = {}; + struct udma_jetty_queue *sq; + uint16_t rcv_send_diff = 0; + uint32_t i; + int ret; + + for (i = 0; i < jetty_cnt; i++) { + sq = sq_list[i]; + if (sq->state != UBCORE_JETTY_STATE_READY && + sq->state != UBCORE_JETTY_STATE_SUSPENDED) + continue; + + if (jetty_flag[i]) + continue; + + ret = udma_query_jetty_ctx(dev, &ctx, sq->id); + if (ret) { + dev_err(dev->dev, + "query jetty ctx failed, id = %u, ret = %d.\n", + sq->id, ret); + *bad_jetty_index = 0; + return ret; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.PI == ctx.CI && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_READY) { + jetty_flag[i] = true; + continue; + } + + if (rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_ERROR) { + jetty_flag[i] = true; + continue; + } + + *bad_jetty_index = 0; + break; + } + + return (i == jetty_cnt) ? 0 : -EAGAIN; +} + +static uint32_t get_max_jetty_ta_timeout(struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt) +{ + uint32_t max_timeout = 0; + uint32_t i; + + for (i = 0; i < jetty_cnt; i++) { + if (sq_list[i]->ta_timeout > max_timeout) + max_timeout = sq_list[i]->ta_timeout; + } + + return max_timeout; +} + +static bool udma_batch_query_jetty_fd(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t ta_timeout = get_max_jetty_ta_timeout(sq_list, jetty_cnt); + struct udma_jetty_ctx ctx = {}; + struct udma_jetty_queue *sq; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t flush_cnt = 0; + bool all_query_done; + uint32_t times = 0; + bool *jetty_flag; + uint32_t i; + + jetty_flag = kcalloc(jetty_cnt, sizeof(bool), GFP_KERNEL); + if (!jetty_flag) { + *bad_jetty_index = 0; + return false; + } + + while (true) { + for (i = 0; i < jetty_cnt; i++) { + if (jetty_flag[i]) + continue; + + sq = sq_list[i]; + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) { + kfree(jetty_flag); + *bad_jetty_index = 0; + return false; + } + + if (!ctx.flush_cqe_done) + continue; + + flush_cnt++; + jetty_flag[i] = true; + } + + if (flush_cnt == jetty_cnt) { + kfree(jetty_flag); + return true; + } + + if (udma_wait_timeout(&sum_times, times, ta_timeout)) + break; + + times++; + } + + all_query_done = true; + + for (i = 0; i < jetty_cnt; i++) { + if (jetty_flag[i]) + continue; + + sq = sq_list[i]; + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) { + kfree(jetty_flag); + *bad_jetty_index = 0; + return false; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.flush_cqe_done || (ctx.flush_ssn_vld && + rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF)) + continue; + + *bad_jetty_index = 0; + all_query_done = false; + udma_dfx_ctx_print(dev, "Flush Failed Jetty", 
sq->id, + sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); + break; + } + + kfree(jetty_flag); + + return all_query_done; +} + +static int batch_modify_jetty_to_error(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + struct udma_jetty_queue *sq; + uint32_t i; + int ret; + + for (i = 0; i < jetty_cnt; i++) { + sq = sq_list[i]; + if (sq->state == UBCORE_JETTY_STATE_ERROR || + sq->state == UBCORE_JETTY_STATE_RESET) + continue; + + ret = udma_set_jetty_state(dev, sq->id, JETTY_ERROR); + if (ret) { + dev_err(dev->dev, "modify jetty to error failed, id: %u.\n", + sq->id); + *bad_jetty_index = 0; + return ret; + } + + sq->state = UBCORE_JETTY_STATE_ERROR; + } + + return 0; +} + +static int udma_batch_modify_jetty_precondition(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t ta_timeout = get_max_jetty_ta_timeout(sq_list, jetty_cnt); + uint32_t sum_times = 0; + uint32_t times = 0; + bool *jetty_flag; + int ret; + + jetty_flag = kcalloc(jetty_cnt, sizeof(bool), GFP_KERNEL); + if (!jetty_flag) { + *bad_jetty_index = 0; + return -ENOMEM; + } + + while (true) { + ret = udma_batch_jetty_get_ack(dev, sq_list, jetty_cnt, + jetty_flag, bad_jetty_index); + if (ret != -EAGAIN) { + kfree(jetty_flag); + return ret; + } + + if (udma_wait_timeout(&sum_times, times, ta_timeout)) { + dev_warn(dev->dev, + "timeout after %u ms, not all jetty get ack.\n", + sum_times); + break; + } + times++; + } + + kfree(jetty_flag); + + return 0; +} + +static bool udma_batch_destroy_jetty_precondition(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + if (!(dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE) && + udma_batch_modify_jetty_precondition(dev, sq_list, jetty_cnt, bad_jetty_index)) + return false; + + if (batch_modify_jetty_to_error(dev, sq_list, jetty_cnt, bad_jetty_index)) { + dev_err(dev->dev, "batch md jetty err failed.\n"); + return false; + } + + if (!udma_batch_query_jetty_fd(dev, sq_list, jetty_cnt, bad_jetty_index)) + return false; + + udelay(UDMA_DESTROY_JETTY_DELAY_TIME); + + return true; +} + +int udma_batch_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t i; + int ret; + + if (!udma_batch_destroy_jetty_precondition(dev, sq_list, jetty_cnt, bad_jetty_index)) + return -EFAULT; + + for (i = 0; i < jetty_cnt; i++) { + if (sq_list[i]->state != UBCORE_JETTY_STATE_RESET) { + ret = udma_destroy_hw_jetty_ctx(dev, sq_list[i]->id); + if (ret) { + dev_err(dev->dev, + "jetty destroyed failed, id: %u.\n", + sq_list[i]->id); + *bad_jetty_index = 0; + return ret; + } + + sq_list[i]->state = UBCORE_JETTY_STATE_RESET; + } + } + + return 0; +} + +int udma_destroy_jetty_batch(struct ubcore_jetty **jetty, int jetty_cnt, int *bad_jetty_index) +{ + struct udma_jetty_queue **sq_list; + struct udma_dev *udma_dev; + uint32_t i; + int ret; + + if (!jetty) { + pr_err("jetty array is null.\n"); + return -EINVAL; + } + + if (!jetty_cnt) { + pr_err("jetty cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jetty[0]->ub_dev); + + sq_list = kcalloc(1, sizeof(*sq_list) * jetty_cnt, GFP_KERNEL); + if (!sq_list) { + *bad_jetty_index = 0; + return -ENOMEM; + } + + for (i = 0; i < jetty_cnt; i++) + sq_list[i] = &(to_udma_jetty(jetty[i])->sq); + + ret = udma_batch_modify_and_destroy_jetty(udma_dev, sq_list, jetty_cnt, bad_jetty_index); + + kfree(sq_list); + + if 
(ret) { + dev_err(udma_dev->dev, + "udma batch modify error and destroy jetty failed.\n"); + return ret; + } + + for (i = 0; i < jetty_cnt; i++) + udma_free_jetty(jetty[i]); + + return 0; +} + static int udma_check_jetty_grp_info(struct ubcore_tjetty_cfg *cfg, struct udma_dev *dev) { if (cfg->type == UBCORE_JETTY_GROUP) { @@ -1353,3 +1656,7 @@ int udma_bind_jetty_ex(struct ubcore_jetty *jetty, return 0; } + +module_param(well_known_jetty_pgsz_check, bool, 0444); +MODULE_PARM_DESC(well_known_jetty_pgsz_check, + "Whether check the system page size. default: true(true:check; false: not check)"); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 7b5975dbcf14..5558e4ca68e1 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -241,6 +241,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_destroy_jetty_batch(struct ubcore_jetty **jetty_arr, int jetty_num, int *bad_jetty_index); int udma_unimport_jetty(struct ubcore_tjetty *tjetty); int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata); @@ -272,5 +273,8 @@ int udma_bind_jetty_ex(struct ubcore_jetty *jetty, void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, struct ubcore_jfc *send_jfc, struct ubcore_jfc *recv_jfc); +int udma_batch_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index a7b9576ea87e..c1a4999128cb 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -412,6 +412,48 @@ int udma_destroy_jfs(struct ubcore_jfs *jfs) return 0; } +int udma_destroy_jfs_batch(struct ubcore_jfs **jfs, int jfs_cnt, int *bad_jfs_index) +{ + struct udma_jetty_queue **sq_list; + struct udma_dev *udma_dev; + uint32_t i; + int ret; + + if (!jfs) { + pr_err("jfs array is null.\n"); + return -EINVAL; + } + + if (!jfs_cnt) { + pr_err("jfs cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jfs[0]->ub_dev); + + sq_list = kcalloc(jfs_cnt, sizeof(*sq_list), GFP_KERNEL); + if (!sq_list) + return -ENOMEM; + + for (i = 0; i < jfs_cnt; i++) + sq_list[i] = &(to_udma_jfs(jfs[i])->sq); + + ret = udma_batch_modify_and_destroy_jetty(udma_dev, sq_list, jfs_cnt, bad_jfs_index); + + kfree(sq_list); + + if (ret) { + dev_err(udma_dev->dev, + "udma batch modify error and destroy jfs failed.\n"); + return ret; + } + + for (i = 0; i < jfs_cnt; i++) + udma_free_jfs(jfs[i]); + + return 0; +} + static int udma_modify_jfs_state(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs, struct ubcore_jfs_attr *attr) { diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 65d8e2ac52f2..887c8e3b0127 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -134,6 +134,7 @@ struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jfs(struct ubcore_jfs *jfs); +int udma_destroy_jfs_batch(struct ubcore_jfs **jfs_arr, int jfs_num, int *bad_jfs_index); int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd); int udma_alloc_k_sq_buf(struct udma_dev 
*dev, struct udma_jetty_queue *sq, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 1a143f8ca3c3..b3d7c065ddda 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -186,6 +186,7 @@ static struct ubcore_ops g_dev_ops = { .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, + .destroy_jfs_batch = udma_destroy_jfs_batch, .create_jfr = udma_create_jfr, .modify_jfr = udma_modify_jfr, .query_jfr = udma_query_jfr, @@ -197,6 +198,7 @@ static struct ubcore_ops g_dev_ops = { .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .destroy_jetty_batch = udma_destroy_jetty_batch, .import_jetty_ex = udma_import_jetty_ex, .unimport_jetty = udma_unimport_jetty, .bind_jetty_ex = udma_bind_jetty_ex, -- Gitee From 470acf7b3574d74b6218fc51fb153f335e6ede25 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 15:42:20 +0800 Subject: [PATCH 129/243] ub: udma: Support flush jfs and jetty. commit c858b41548fec87c2c817bba979b80b9c2259074 openEuler This patch adds the ability to flush jfs and jetty. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 25 ++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 1 + drivers/ub/urma/hw/udma/udma_jfs.c | 129 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 5 ++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 162 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index a3e29776ad4e..2bc86bdb2421 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -1568,6 +1568,31 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } +int udma_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct udma_jetty_queue *sq = &udma_jetty->sq; + int n_flushed; + + if (!sq->flush_flag) + return 0; + + if (!sq->lock_free) + spin_lock(&sq->lock); + + for (n_flushed = 0; n_flushed < cr_cnt; n_flushed++) { + if (sq->ci == sq->pi) + break; + udma_flush_sq(udma_dev, sq, cr + n_flushed); + } + + if (!sq->lock_free) + spin_unlock(&sq->lock); + + return n_flushed; +} + int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) { diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5558e4ca68e1..64ec81bdc2b8 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -249,6 +249,7 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); +int udma_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr); int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, enum jetty_state state); int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index c1a4999128cb..d96f1caf016a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -531,6 +531,94 @@ int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, return 0; } +static void 
fill_imm_data_or_token_for_cr(struct udma_dev *udma_dev, + struct udma_sqe_ctl *sqe_ctl, + struct ubcore_cr *cr, + uint32_t opcode) +{ + switch (opcode) { + case UDMA_OPC_SEND: + case UDMA_OPC_WRITE: + case UDMA_OPC_READ: + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + break; + case UDMA_OPC_SEND_WITH_IMM: + memcpy(&cr->imm_data, (void *)sqe_ctl + SQE_SEND_IMM_FIELD, + sizeof(uint64_t)); + break; + case UDMA_OPC_SEND_WITH_INVALID: + cr->invalid_token.token_id = sqe_ctl->rmt_addr_l_or_token_id; + cr->invalid_token.token_value.token = sqe_ctl->rmt_addr_h_or_token_value; + break; + case UDMA_OPC_WRITE_WITH_IMM: + memcpy(&cr->imm_data, (void *)sqe_ctl + SQE_WRITE_IMM_FIELD, + sizeof(uint64_t)); + break; + default: + dev_err(udma_dev->dev, "Flush invalid opcode :%u.\n", opcode); + break; + } +} + +static void fill_cr_by_sqe_ctl(struct udma_dev *udma_dev, + struct udma_sqe_ctl *sqe_ctl, + struct ubcore_cr *cr) +{ + uint32_t opcode = sqe_ctl->opcode; + struct udma_normal_sge *sge; + uint32_t src_sge_num = 0; + uint64_t total_len = 0; + uint32_t ctrl_len; + uint32_t i; + + fill_imm_data_or_token_for_cr(udma_dev, sqe_ctl, cr, opcode); + + cr->tpn = sqe_ctl->tpn; + cr->remote_id.id = sqe_ctl->rmt_obj_id; + memcpy(cr->remote_id.eid.raw, sqe_ctl->rmt_eid, UBCORE_EID_SIZE); + + if (sqe_ctl->inline_en) { + cr->completion_len = sqe_ctl->inline_msg_len; + return; + } + + src_sge_num = sqe_ctl->sge_num; + ctrl_len = get_ctl_len(opcode); + sge = (struct udma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + for (i = 0; i < src_sge_num; i++) { + total_len += sge->length; + sge++; + } + + if (total_len > UINT32_MAX) { + cr->completion_len = UINT32_MAX; + dev_warn(udma_dev->dev, "total len %llu is overflow.\n", total_len); + } else { + cr->completion_len = total_len; + } +} + +static void udma_copy_from_sq(struct udma_jetty_queue *sq, uint32_t wqebb_cnt, + struct udma_jfs_wqebb *tmp_sq) +{ + uint32_t field_h; + uint32_t field_l; + uint32_t offset; + uint32_t remain; + + remain = sq->buf.entry_cnt - (sq->ci & (sq->buf.entry_cnt - 1)); + offset = (sq->ci & (sq->buf.entry_cnt - 1)) * UDMA_JFS_WQEBB_SIZE; + field_h = remain > wqebb_cnt ? wqebb_cnt : remain; + field_l = wqebb_cnt > field_h ? 
wqebb_cnt - field_h : 0; + + memcpy(tmp_sq, sq->buf.kva + offset, field_h * sizeof(*tmp_sq)); + + if (field_l) + memcpy(tmp_sq + field_h, sq->buf.kva, field_l * sizeof(*tmp_sq)); +} + static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) { uint32_t opcode = sqe_ctl->opcode; @@ -558,6 +646,47 @@ static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) return sq_cal_wqebb_num(sqe_ctl_len, sqe_ctl->sge_num); } +void udma_flush_sq(struct udma_dev *udma_dev, + struct udma_jetty_queue *sq, struct ubcore_cr *cr) +{ + struct udma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = {}; + + udma_copy_from_sq(sq, MAX_WQEBB_NUM, tmp_sq); + fill_cr_by_sqe_ctl(udma_dev, (struct udma_sqe_ctl *)tmp_sq, cr); + cr->status = UBCORE_CR_WR_UNHANDLED; + cr->user_ctx = sq->wrid[sq->ci & (sq->buf.entry_cnt - 1)]; + /* Fill in UINT8_MAX for send direction */ + cr->opcode = UINT8_MAX; + cr->local_id = sq->id; + + sq->ci += get_wqebb_num((struct udma_sqe_ctl *)tmp_sq); +} + +int udma_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + struct udma_jetty_queue *sq = &udma_jfs->sq; + int n_flushed; + + if (!sq->flush_flag) + return 0; + + if (!jfs->jfs_cfg.flag.bs.lock_free) + spin_lock(&sq->lock); + + for (n_flushed = 0; n_flushed < cr_cnt; n_flushed++) { + if (sq->ci == sq->pi) + break; + udma_flush_sq(udma_dev, sq, cr + n_flushed); + } + + if (!jfs->jfs_cfg.flag.bs.lock_free) + spin_unlock(&sq->lock); + + return n_flushed; +} + static uint8_t udma_get_jfs_opcode(enum ubcore_opcode opcode) { switch (opcode) { diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 887c8e3b0127..b030150af88f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -30,6 +30,8 @@ #define SQE_WRITE_NOTIFY_CTL_LEN 80 #define SQE_WRITE_IMM_INLINE_SIZE 192 +#define UINT8_MAX 0xff + enum udma_jfs_type { UDMA_NORMAL_JFS_TYPE, UDMA_KERNEL_STARS_JFS_TYPE, @@ -142,9 +144,12 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); +int udma_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); +void udma_flush_sq(struct udma_dev *udma_dev, + struct udma_jetty_queue *sq, struct ubcore_cr *cr); #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b3d7c065ddda..8845ac4cc661 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -185,6 +185,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, + .flush_jfs = udma_flush_jfs, .destroy_jfs = udma_destroy_jfs, .destroy_jfs_batch = udma_destroy_jfs_batch, .create_jfr = udma_create_jfr, @@ -197,6 +198,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, + .flush_jetty = udma_flush_jetty, .destroy_jetty = udma_destroy_jetty, .destroy_jetty_batch = udma_destroy_jetty_batch, .import_jetty_ex = 
udma_import_jetty_ex, -- Gitee From c7308c160ea125faa5e05a3e68312723bf99e819 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 19:15:30 +0800 Subject: [PATCH 130/243] ub: udma: Support device status inquiry. commit a857af1498bbb8b1602308d2880b49f6ef8b64ce openEuler This patch adds the ability to query device status. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_cmd.c | 21 ++++++++++++++++++++- drivers/ub/urma/hw/udma/udma_main.c | 26 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_cmd.c b/drivers/ub/urma/hw/udma/udma_cmd.c index 6e4c66af0537..0e3c18c81e25 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.c +++ b/drivers/ub/urma/hw/udma/udma_cmd.c @@ -6,7 +6,11 @@ #include #include #include +#include "udma_eid.h" #include "udma_cmd.h" +#include "udma_jfc.h" +#include "udma_jfr.h" +#include "udma_jetty.h" bool debug_switch = true; @@ -71,6 +75,19 @@ void udma_free_cmd_mailbox(struct udma_dev *dev, kfree(mailbox); } +static void udma_set_mb_flag_or_fd(uint8_t op, struct udma_mbx_op_match *match, + void *buf) +{ + struct udma_jetty_ctx *jfs_ctx; + + if (op == UDMA_CMD_QUERY_JFS_CONTEXT) { + jfs_ctx = (struct udma_jetty_ctx *)buf; + jfs_ctx->flush_cqe_done = 1; + jfs_ctx->state = 1; + jfs_ctx->flush_ssn_vld = 1; + } +} + static bool udma_op_ignore_eagain(uint8_t op, void *buf) { struct udma_mbx_op_match matches[] = { @@ -100,8 +117,10 @@ static bool udma_op_ignore_eagain(uint8_t op, void *buf) uint32_t i; for (i = 0; i < ARRAY_SIZE(matches); i++) { - if (op == matches[i].op) + if (op == matches[i].op) { + udma_set_mb_flag_or_fd(op, &matches[i], buf); return matches[i].ignore_ret; + } } return false; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 8845ac4cc661..5123cc0c071b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -162,6 +162,31 @@ static int udma_query_device_attr(struct ubcore_device *dev, return 0; } +static int udma_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val) +{ + struct ubcore_stats_com_val *com_val = (struct ubcore_stats_com_val *)val->addr; + struct udma_dev *udma_dev = to_udma_dev(dev); + struct ubase_ub_dl_stats dl_stats = {}; + int ret; + + ret = ubase_get_ub_port_stats(udma_dev->comdev.adev, + udma_dev->port_logic_id, &dl_stats); + if (ret) { + dev_err(udma_dev->dev, "failed to query port stats, ret = %d.\n", ret); + return ret; + } + + com_val->tx_pkt = dl_stats.dl_tx_busi_pkt_num; + com_val->rx_pkt = dl_stats.dl_rx_busi_pkt_num; + com_val->rx_pkt_err = 0; + com_val->tx_pkt_err = 0; + com_val->tx_bytes = 0; + com_val->rx_bytes = 0; + + return ret; +} + static struct ubcore_ops g_dev_ops = { .owner = THIS_MODULE, .abi_version = 0, @@ -216,6 +241,7 @@ static struct ubcore_ops g_dev_ops = { .post_jetty_send_wr = udma_post_jetty_send_wr, .post_jetty_recv_wr = udma_post_jetty_recv_wr, .poll_jfc = udma_poll_jfc, + .query_stats = udma_query_stats, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From d6ebd53d0dce208c0f463fc04af41bbf5eba1284 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 19:45:52 +0800 Subject: [PATCH 131/243] ub: udma: Support entity index inquiry. commit 1bbf139669f04e7b523b30b6258d851276820d3d openEuler This patch adds the ability to query entity index. 
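For reference, a minimal caller sketch of the new query path follows; the helper name and the pr_info() reporting are illustrative assumptions, not part of this patch. udma_query_ue_idx() itself is added in the diff below.

/*
 * Illustrative caller sketch: resolve the UE index for a device GUID
 * through udma_query_ue_idx(). Returns -EINVAL for a NULL devid, or
 * the ubase mailbox error code. Helper name is hypothetical.
 */
static int example_lookup_ue_idx(struct ubcore_device *dev,
                                 struct ubcore_devid *devid)
{
        uint16_t ue_idx = 0;
        int ret;

        ret = udma_query_ue_idx(dev, devid, &ue_idx);
        if (ret)
                return ret;

        pr_info("resolved ue index: %u\n", ue_idx);
        return 0;
}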
Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/udma_common.c | 29 +++++++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_common.h |  8 ++++++++
 drivers/ub/urma/hw/udma/udma_main.c   |  1 +
 3 files changed, 38 insertions(+)

diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c
index 017216169ea3..3ec53595bcf8 100644
--- a/drivers/ub/urma/hw/udma/udma_common.c
+++ b/drivers/ub/urma/hw/udma/udma_common.c
@@ -650,6 +650,35 @@ void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_
 	dma_free_iova(slot);
 }
 
+int udma_query_ue_idx(struct ubcore_device *ubcore_dev, struct ubcore_devid *devid,
+		      uint16_t *ue_idx)
+{
+	struct udma_dev *dev = to_udma_dev(ubcore_dev);
+	struct udma_ue_index_cmd cmd = {};
+	struct ubase_cmd_buf out;
+	struct ubase_cmd_buf in;
+	int ret;
+
+	if (!devid) {
+		dev_err(dev->dev, "failed to query ue idx, devid is NULL.\n");
+		return -EINVAL;
+	}
+
+	(void)memcpy(cmd.guid, devid->raw, sizeof(devid->raw));
+
+	udma_fill_buf(&in, UDMA_CMD_QUERY_UE_INDEX, true, sizeof(cmd), &cmd);
+	udma_fill_buf(&out, UDMA_CMD_QUERY_UE_INDEX, true, sizeof(cmd), &cmd);
+
+	ret = ubase_cmd_send_inout(dev->comdev.adev, &in, &out);
+	if (ret) {
+		dev_err(dev->dev, "failed to query ue idx, ret = %d.\n", ret);
+		return ret;
+	}
+	*ue_idx = cmd.ue_idx;
+
+	return 0;
+}
+
 void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id,
 			uint32_t len, uint32_t *ctx)
 {
diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h
index 300357af8895..f09fffc5d50c 100644
--- a/drivers/ub/urma/hw/udma/udma_common.h
+++ b/drivers/ub/urma/hw/udma/udma_common.h
@@ -68,6 +68,12 @@ struct udma_umem_param {
 	bool is_kernel;
 };
 
+struct udma_ue_index_cmd {
+	uint16_t ue_idx;
+	uint8_t rsv[2];
+	uint8_t guid[16];
+};
+
 struct ubcore_umem *udma_umem_get(struct udma_umem_param *param);
 void udma_umem_release(struct ubcore_umem *umem, bool is_kernel);
 void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min);
@@ -118,6 +124,8 @@ static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len)
 {
 	return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE;
 }
 
+int udma_query_ue_idx(struct ubcore_device *ub_dev, struct ubcore_devid *devid,
+		      uint16_t *ue_idx);
 void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id,
 			uint32_t len, uint32_t *ctx);
 void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size);
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index 5123cc0c071b..f33943d4c2e0 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -242,6 +242,7 @@ static struct ubcore_ops g_dev_ops = {
 	.post_jetty_recv_wr = udma_post_jetty_recv_wr,
 	.poll_jfc = udma_poll_jfc,
 	.query_stats = udma_query_stats,
+	.query_ue_idx = udma_query_ue_idx,
 };
 
 static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table)
--
Gitee

From fadd25bdc1f18e79176f785876bfd88433982acb Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Sat, 23 Aug 2025 09:25:15 +0800
Subject: [PATCH 132/243] ub: udma: Support tp context inquiry.

commit 1e63a34814fddce6d7f02744a9ab17acd8fcb84f openEuler

This patch adds the ability to query the TP context.
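The TP context layout introduced below splits several multi-byte fields across 32-bit words; the ACK UDP source port, for instance, keeps its low byte at Byte 80 and its high byte at Byte 84. A minimal sketch of the reassembly the query handler performs (helper name is hypothetical; the bitfields and TP_ACK_UDP_SPORT_H_OFFSET come from this patch):

/*
 * Sketch: rebuild the ACK UDP source port from the split bitfields of
 * struct udma_tp_ctx, mirroring udma_ctrlq_query_tp_sport() below.
 */
static inline uint16_t example_tp_ack_sport(const struct udma_tp_ctx *tpc)
{
        return (uint16_t)((tpc->ack_udp_srcport_h << TP_ACK_UDP_SPORT_H_OFFSET) |
                          tpc->ack_udp_srcport_l);
}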
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 238 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctl.c | 45 +++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 63 +++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 30 ++- drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 376 insertions(+), 2 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index f09fffc5d50c..2bfededa6203 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -9,6 +9,7 @@ #include "udma_ctx.h" #include "udma_dev.h" +#define TP_ACK_UDP_SPORT_H_OFFSET 8 #define UDMA_TPHANDLE_TPID_SHIFT 0xFFFFFF struct udma_jetty_grp { @@ -74,6 +75,243 @@ struct udma_ue_index_cmd { uint8_t guid[16]; }; +struct udma_tp_ctx { + /* Byte4 */ + uint32_t version : 1; + uint32_t tp_mode : 1; + uint32_t trt : 1; + uint32_t wqe_bb_shift : 4; + uint32_t oor_en : 1; + uint32_t tempid : 6; + uint32_t portn : 6; + uint32_t rsvd1 : 12; + /* Byte8 */ + uint32_t wqe_ba_l; + /* Byte12 */ + uint32_t wqe_ba_h : 20; + uint32_t udp_srcport_range : 4; + uint32_t cng_alg_sel : 3; + uint32_t lbi : 1; + uint32_t rsvd4 : 1; + uint32_t vlan_en : 1; + uint32_t mtu : 2; + /* Byte16 */ + uint32_t route_addr_idx : 20; + uint32_t rsvd6 : 12; + /* Byte20 */ + u32 tpn_vtpn : 24; + u32 rsvd7 : 8; + /* Byte24 to Byte28 */ + u32 rsvd8[2]; + /* Byte 32 */ + u32 seid_idx : 16; + u32 sjetty_l : 16; + /* Byte 36 */ + u32 sjetty_h : 4; + u32 tp_wqe_token_id : 20; + u32 tp_wqe_position : 1; + u32 rsv9_l : 7; + /* Byte 40 */ + u32 rsvd9_h : 6; + u32 taack_tpn : 24; + u32 rsvd10 : 2; + /* Byte 44 */ + u32 spray_en : 1; + u32 sr_en : 1; + u32 ack_freq_mode : 1; + u32 route_type : 2; + u32 vl : 4; + u32 dscp : 6; + u32 switch_mp_en : 1; + u32 at_times : 5; + u32 retry_num_init : 3; + u32 at : 5; + u32 rsvd13 : 3; + /* Byte 48 */ + u32 on_flight_size : 16; + u32 hpln : 8; + u32 fl_l : 8; + /* Byte 52 */ + u32 fl_h : 12; + u32 dtpn : 20; + /* Byte 56 */ + u32 rc_tpn : 24; + u32 rc_vl : 4; + u32 tpg_vld : 1; + u32 reorder_cap : 3; + /* Byte 60 */ + u32 reorder_q_shift : 4; + u32 reorder_q_addr_l : 28; + /* Byte 64 */ + u32 reorder_q_addr_h : 24; + u32 tpg_l : 8; + /* Byte 68 */ + u32 tpg_h : 12; + u32 jettyn : 20; + /* Byte 72 */ + u32 dyn_timeout_mode : 1; + u32 base_time : 23; + u32 rsvd15 : 8; + /* Byte 76 */ + u32 tpack_psn : 24; + u32 tpack_rspst : 3; + u32 tpack_rspinfo : 5; + /* Byte 80 */ + u32 tpack_msn : 24; + u32 ack_udp_srcport_l : 8; + /* Byte 84 */ + u32 ack_udp_srcport_h : 8; + u32 max_rcv_psn : 24; + /* Byte 88 */ + u32 scc_token : 19; + u32 poll_db_wait_do : 1; + u32 msg_rty_lp_flg : 1; + u32 retry_cnt : 3; + u32 sq_invld_flg : 1; + u32 wait_ack_timeout : 1; + u32 tx_rtt_caling : 1; + u32 cnp_tx_flag : 1; + u32 sq_db_doing : 1; + u32 tpack_doing : 1; + u32 sack_wait_do : 1; + u32 tpack_wait_do : 1; + /* Byte 92 */ + u16 post_max_idx; + u16 wqe_max_bb_idx; + /* Byte 96 */ + u16 wqe_bb_pi; + u16 wqe_bb_ci; + /* Byte 100 */ + u16 data_udp_srcport; + u16 wqe_msn; + /* Byte 104 */ + u32 cur_req_psn : 24; + u32 tx_ack_psn_err : 1; + u32 poll_db_type : 2; + u32 tx_ack_flg : 1; + u32 tx_sq_err_flg : 1; + u32 scc_retry_type : 2; + u32 flush_cqe_wait_do : 1; + /* Byte 108 */ + u32 wqe_max_psn : 24; + u32 ssc_token_l : 4; + u32 rsvd16 : 4; + /* Byte 112 */ + u32 tx_sq_timer; + /* Byte 116 */ + u32 rtt_timestamp_psn : 24; + u32 rsvd17 : 8; + /* Byte 120 */ + u32 rtt_timestamp : 24; + u32 cnp_timer_l : 8; + /* Byte 
124 */ + u32 cnp_timer_h : 16; + u32 max_reorder_id : 16; + /* Byte 128 */ + u16 cur_reorder_id; + u16 wqe_max_msn; + /* Byte 132 */ + u16 post_bb_pi; + u16 post_bb_ci; + /* Byte 136 */ + u32 lr_ae_ind : 1; + u32 rx_cqe_cnt : 16; + u32 reorder_q_si : 13; + u32 rq_err_type_l : 2; + /* Byte 140 */ + u32 rq_err_type_h : 3; + u32 rsvd18 : 2; + u32 rsvd19 : 27; + /* Byte 144 */ + u32 req_seq; + /* Byte 148 */ + uint32_t req_ce_seq; + /* Byte 152 */ + u32 req_cmp_lrb_indx : 12; + u32 req_lrb_indx : 12; + u32 req_lrb_indx_vld : 1; + u32 rx_req_psn_err : 1; + u32 rx_req_last_optype : 3; + u32 rx_req_fake_flg : 1; + u32 rsvd20 : 2; + /* Byte 156 */ + uint16_t jfr_wqe_idx; + uint16_t rx_req_epsn_l; + /* Byte 160 */ + uint32_t rx_req_epsn_h : 8; + uint32_t rx_req_reduce_code : 8; + uint32_t rx_req_msn_l : 16; + /* Byte 164 */ + uint32_t rx_req_msn_h : 8; + uint32_t jfr_wqe_rnr : 1; + uint32_t jfr_wqe_rnr_timer : 5; + uint32_t rsvd21 : 2; + uint32_t jfr_wqe_cnt : 16; + /* Byte 168 */ + uint32_t max_reorder_q_idx : 13; + uint32_t rsvd22 : 3; + uint32_t reorder_q_ei : 13; + uint32_t rx_req_last_elr_flg : 1; + uint32_t rx_req_last_elr_err_type_l : 2; + /* Byte172 */ + uint32_t rx_req_last_elr_err_type_h : 3; + uint32_t rx_req_last_op : 1; + uint32_t jfrx_jetty : 1; + uint32_t jfrx_jfcn_l : 16; + uint32_t jfrx_jfcn_h : 4; + uint32_t jfrx_jfrn_l : 7; + /* Byte176 */ + u32 jfrx_jfrn_h1 : 9; + u32 jfrx_jfrn_h2 : 4; + u32 rq_timer_l : 19; + /* Byte180 */ + u32 rq_timer_h : 13; + u32 rq_at : 5; + u32 wait_cqe_timeout : 1; + u32 rsvd23 : 13; + /* Byte184 */ + u32 rx_sq_timer; + /* Byte188 */ + u32 tp_st : 3; + u32 rsvd24 : 4; + u32 ls_ae_ind : 1; + u32 retry_msg_psn : 24; + /* Byte192 */ + u32 retry_msg_fpsn : 24; + u32 rsvd25 : 8; + /* Byte196 */ + u16 retry_wqebb_idx; + u16 retry_msg_msn; + /* Byte200 */ + u32 ack_rcv_seq; + /* Byte204 */ + u32 rtt : 24; + u32 dup_sack_cnt : 8; + /* Byte208 */ + u32 sack_max_rcv_psn : 24; + u32 rsvd26 : 7; + u32 rx_ack_flg : 1; + /* Byte212 */ + u32 rx_ack_msn : 16; + u32 sack_lrb_indx : 12; + u32 rx_fake_flg : 1; + u32 rx_rtt_caling : 1; + u32 rx_ack_psn_err : 1; + u32 sack_lrb_indx_vld : 1; + /* Byte216 */ + u32 rx_ack_epsn : 24; + u32 rsvd27 : 8; + /* Byte220 */ + u32 max_retry_psn : 24; + u32 retry_reorder_id_l : 8; + /* Byte224 */ + u32 retry_reorder_id_h : 8; + u32 rsvd28 : 8; + u32 rsvd29 : 16; + /* Byte228 to Byte256 */ + u32 scc_data[8]; +}; + struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 49cb1ebb0895..764a29b9b24b 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -75,6 +75,49 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_tp_sport_out tp_sport_out = {}; + struct udma_tp_sport_in tp_sport_in = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_cmd_mailbox *mailbox = NULL; + struct ubase_mbx_attr mbox_attr = {}; + struct udma_tp_ctx *tpc; + + if (udma_check_base_param(out->addr, out->len, sizeof(struct udma_tp_sport_out)) || + udma_check_base_param(in->addr, in->len, sizeof(struct udma_tp_sport_in))) { + dev_err(udev->dev, "parameter invalid in query tp 
sport, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + if (udev->is_ue) { + dev_err(udev->dev, "ue is not supported.\n"); + return -EINVAL; + } + + memcpy(&tp_sport_in, (void *)(uintptr_t)in->addr, sizeof(struct udma_tp_sport_in)); + + mbox_attr.tag = tp_sport_in.tpn; + mbox_attr.op = UDMA_CMD_QUERY_TP_CONTEXT; + mailbox = udma_mailbox_query_ctx(udev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + tpc = (struct udma_tp_ctx *)mailbox->buf; + + tp_sport_out.ack_udp_srcport = tpc->ack_udp_srcport_h << TP_ACK_UDP_SPORT_H_OFFSET | + tpc->ack_udp_srcport_l; + tp_sport_out.data_udp_srcport = tpc->data_udp_srcport; + + memcpy((void *)(uintptr_t)out->addr, &tp_sport_out, out->len); + + udma_free_cmd_mailbox(udev, mailbox); + + return 0; +} + static void dump_cqe_client_loc_len_err_aux_info(struct udma_dev *dev, struct udma_cqe_aux_info_out *aux_info_out, struct udma_cmd_query_cqe_aux_info *info) @@ -840,6 +883,7 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, + [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, }; @@ -854,6 +898,7 @@ static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { [UDMA_USER_CTL_GET_DEV_RES_RATIO] = NULL, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = NULL, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, [UDMA_USER_CTL_QUERY_UBMEM_INFO] = NULL, diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index d28e206fb277..fd499a89e131 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -592,6 +592,69 @@ static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handl return (ret == -EAGAIN) ? 
0 : ret; } +int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) +{ + struct udma_ctrlq_set_tp_attr_req tp_attr_req = {}; + struct udma_dev *udev = to_udma_dev(dev); + union ubcore_tp_handle tp_handle_val; + struct ubase_ctrlq_msg msg = {}; + int ret; + + tp_handle_val.value = tp_handle; + tp_attr_req.tpid = tp_handle_val.bs.tpid; + tp_attr_req.tpn_cnt = tp_handle_val.bs.tp_cnt; + tp_attr_req.tpn_start = tp_handle_val.bs.tpn_start; + tp_attr_req.tp_attr_cnt = tp_attr_cnt; + tp_attr_req.tp_attr.tp_attr_bitmap = tp_attr_bitmap; + memcpy(&tp_attr_req.tp_attr.tp_attr_value, (void *)tp_attr, sizeof(*tp_attr)); + + udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), NULL, 0); + msg.opcode = UDMA_CMD_CTRLQ_SET_TP_ATTR; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "set tp attr failed, tpid = %u, ret = %d.\n", + tp_attr_req.tpid, ret); + + return ret; +} + +int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) +{ + struct udma_ctrlq_get_tp_attr_resp tp_attr_resp = {}; + struct udma_ctrlq_get_tp_attr_req tp_attr_req = {}; + struct udma_dev *udev = to_udma_dev(dev); + union ubcore_tp_handle tp_handle_val; + struct ubase_ctrlq_msg msg = {}; + int ret; + + tp_handle_val.value = tp_handle; + tp_attr_req.tpid.tpid = tp_handle_val.bs.tpid; + tp_attr_req.tpid.tpn_cnt = tp_handle_val.bs.tp_cnt; + tp_attr_req.tpid.tpn_start = tp_handle_val.bs.tpn_start; + udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), &tp_attr_resp, + sizeof(tp_attr_resp)); + msg.opcode = UDMA_CMD_CTRLQ_GET_TP_ATTR; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) { + dev_err(udev->dev, "get tp attr failed, tpid = %u, ret = %d.\n", + tp_attr_req.tpid.tpid, ret); + return ret; + } + + *tp_attr_cnt = tp_attr_resp.tp_attr_cnt; + *tp_attr_bitmap = tp_attr_resp.tp_attr.tp_attr_bitmap; + memcpy((void *)tp_attr, &tp_attr_resp.tp_attr.tp_attr_value, + sizeof(tp_attr_resp.tp_attr.tp_attr_value)); + + return 0; +} + int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) { struct udma_req_msg *req_msg; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index ba43f3590417..e83cd3e94c56 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -151,8 +151,27 @@ struct udma_ue_idx_table { uint8_t ue_idx[UDMA_UE_NUM]; }; -struct udma_notify_flush_done { - uint32_t tpn; +struct udma_ctrlq_tp_attr { + uint32_t tp_attr_bitmap; + struct ubcore_tp_attr_value tp_attr_value; +}; + +struct udma_ctrlq_get_tp_attr_req { + struct udma_ctrlq_tpid tpid; +}; + +struct udma_ctrlq_set_tp_attr_req { + uint32_t tpid : 24; + uint32_t tpn_cnt : 8; + uint32_t tpn_start : 24; + uint32_t tp_attr_cnt : 8; + struct udma_ctrlq_tp_attr tp_attr; +}; + +struct udma_ctrlq_get_tp_attr_resp { + uint32_t tpid : 24; + uint32_t tp_attr_cnt : 8; + struct udma_ctrlq_tp_attr tp_attr; }; struct udma_dev_resource_ratio { @@ -173,6 +192,13 @@ int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_c void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, bool is_need_flush); + +int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t 
tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata); +int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata); int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode); int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index f33943d4c2e0..7726d371476b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -233,6 +233,8 @@ static struct ubcore_ops g_dev_ops = { .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, .get_tp_list = udma_get_tp_list, + .set_tp_attr = udma_set_tp_attr, + .get_tp_attr = udma_get_tp_attr, .active_tp = udma_active_tp, .deactive_tp = udma_deactive_tp, .user_ctl = udma_user_ctl, -- Gitee From 86ae76154e8596b6d45db0fc9befceaba59b0ed5 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 10:11:49 +0800 Subject: [PATCH 133/243] ub: udma: Support create and destroy stars jfs. commit 18f72c331dd8e53d2863c7875de149fce6417efb openEuler This patch adds the ability to create and destroy stars jfs. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 203 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_jfs.c | 34 +++++ drivers/ub/urma/hw/udma/udma_jfs.h | 5 + 4 files changed, 245 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 764a29b9b24b..7851a262e50a 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -11,6 +11,7 @@ #include #include "udma_cmd.h" #include "udma_jetty.h" +#include "udma_segment.h" #include "udma_jfs.h" #include "udma_jfc.h" #include "udma_db.h" @@ -75,6 +76,206 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static int udma_get_sq_buf_ex(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct udma_jfs_cfg_ex *cfg_ex) +{ + struct ubcore_jfs_cfg *jfs_cfg; + uint32_t wqe_bb_depth; + uint32_t sqe_bb_cnt; + uint32_t size; + + jfs_cfg = &cfg_ex->base_cfg; + + if (!jfs_cfg->flag.bs.lock_free) + spin_lock_init(&sq->lock); + sq->max_inline_size = jfs_cfg->max_inline_data; + sq->max_sge_num = jfs_cfg->max_sge; + sq->tid = dev->tid; + sq->lock_free = jfs_cfg->flag.bs.lock_free; + + sqe_bb_cnt = sq_cal_wqebb_num(SQE_WRITE_NOTIFY_CTL_LEN, jfs_cfg->max_sge); + if (sqe_bb_cnt > MAX_WQEBB_NUM) + sqe_bb_cnt = MAX_WQEBB_NUM; + sq->sqe_bb_cnt = sqe_bb_cnt; + + wqe_bb_depth = roundup_pow_of_two(sqe_bb_cnt * jfs_cfg->depth); + sq->buf.entry_size = UDMA_JFS_WQEBB_SIZE; + size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); + sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; + + if (size != cfg_ex->cstm_cfg.sq.buff_size) { + dev_err(dev->dev, "buff size is wrong, buf size = %u.\n", size); + return -EINVAL; + } + + if (cfg_ex->cstm_cfg.sq.buff == 0) { + dev_err(dev->dev, "cstm_cfg sq buff is wrong.\n"); + return -EINVAL; + } + + sq->buf.addr = (dma_addr_t)(uintptr_t)phys_to_virt((uint64_t) + (uintptr_t)cfg_ex->cstm_cfg.sq.buff); + if (sq->buf.addr == 0) { + dev_err(dev->dev, "sq buff addr is wrong.\n"); + return -EINVAL; + } + + sq->buf.kva = (void 
*)(uintptr_t)sq->buf.addr; + + sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!sq->wrid) { + sq->buf.kva = NULL; + sq->buf.addr = 0; + dev_err(dev->dev, + "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", + sq->id, sq->buf.entry_cnt); + return -ENOMEM; + } + + udma_alloc_kernel_db(dev, sq); + sq->kva_curr = sq->buf.kva; + + sq->trans_mode = jfs_cfg->trans_mode; + + return 0; +} + +static int udma_get_jfs_buf_ex(struct udma_dev *dev, struct udma_jfs *jfs, + struct udma_jfs_cfg_ex *cfg_ex) +{ + int ret; + + jfs->jfs_addr = (uintptr_t)&jfs->sq; + + ret = udma_get_sq_buf_ex(dev, &jfs->sq, cfg_ex); + if (ret) + dev_err(dev->dev, + "failed to get sq buf in jfs process, ret = %d.\n", ret); + + return ret; +} + +static struct ubcore_jfs *udma_create_jfs_ex(struct ubcore_device *ub_dev, + struct udma_jfs_cfg_ex *cfg_ex) +{ + struct ubcore_jfs_cfg *cfg = &cfg_ex->base_cfg; + struct udma_dev *dev = to_udma_dev(ub_dev); + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + struct udma_jfs *jfs; + int ret; + + ret = udma_verify_jfs_param(dev, cfg, true); + if (ret) + return NULL; + + jfs = kcalloc(1, sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return NULL; + + dev_info(dev->dev, "start alloc id!\n"); + ret = udma_alloc_jetty_id(dev, &jfs->sq.id, &dev->caps.jetty); + if (ret) { + dev_err(dev->dev, "alloc JFS id failed, ret = %d.\n", ret); + goto err_alloc_jfsn; + } + jfs->ubcore_jfs.jfs_id.id = jfs->sq.id; + jfs->ubcore_jfs.jfs_cfg = *cfg; + jfs->ubcore_jfs.ub_dev = ub_dev; + jfs->ubcore_jfs.uctx = NULL; + jfs->ubcore_jfs.jfae_handler = cfg_ex->jfae_handler; + jfs->mode = UDMA_KERNEL_STARS_JFS_TYPE; + + ret = xa_err(xa_store(&dev->jetty_table.xa, jfs->sq.id, &jfs->sq, GFP_KERNEL)); + if (ret) { + dev_err(dev->dev, "store jfs sq(%u) failed, ret = %d.\n", + jfs->sq.id, ret); + goto err_store_jfs_sq; + } + + dev_info(dev->dev, "start get stars jfs buf!\n"); + ret = udma_get_jfs_buf_ex(dev, jfs, cfg_ex); + if (ret) + goto err_alloc_jfs_id; + + udma_set_query_flush_time(&jfs->sq, cfg->err_timeout); + jfs->sq.state = UBCORE_JETTY_STATE_READY; + udma_init_jfsc(dev, cfg, jfs, &ctx); + attr.tag = jfs->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr); + if (ret) { + dev_err(dev->dev, "failed to upgrade JFSC, ret = %d.\n", ret); + goto err_update_ctx; + } + + refcount_set(&jfs->ae_refcount, 1); + init_completion(&jfs->ae_comp); + + if (dfx_switch) + udma_dfx_store_jfs_id(dev, jfs); + + dev_info(dev->dev, "create stars jfs success!\n"); + + return &jfs->ubcore_jfs; + +err_update_ctx: + kfree(jfs->sq.wrid); +err_alloc_jfs_id: + xa_erase(&dev->jetty_table.xa, jfs->sq.id); +err_store_jfs_sq: + udma_adv_id_free(&dev->jetty_table.bitmap_table, jfs->sq.id, false); +err_alloc_jfsn: + kfree(jfs); + return NULL; +} + +static int udma_create_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_jfs_cfg_ex cfg_ex; + struct ubcore_jfs *jfs; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_jfs_cfg_ex)) || + udma_check_base_param(out->addr, out->len, sizeof(struct ubcore_jfs *))) { + dev_err(udev->dev, "param invalid in create jfs, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + memcpy(&cfg_ex, (void *)(uintptr_t)in->addr, sizeof(struct udma_jfs_cfg_ex)); + + jfs = udma_create_jfs_ex(dev, &cfg_ex); + if (jfs == NULL) + 
return -EFAULT; + + memcpy((void *)(uintptr_t)out->addr, &jfs, sizeof(struct ubcore_jfs *)); + + return 0; +} + +static int udma_delete_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubcore_jfs *jfs; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct ubcore_jfs *))) { + dev_err(udev->dev, "parameter invalid in delete jfs, len = %u.\n", + in->len); + return -EFAULT; + } + memcpy(&jfs, (void *)(uintptr_t)in->addr, sizeof(struct ubcore_jfs *)); + if (jfs == NULL) + return -EINVAL; + + if (udma_destroy_jfs(jfs)) + return -EFAULT; + + return 0; +} + static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { @@ -881,6 +1082,8 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc } static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { + [UDMA_USER_CTL_CREATE_JFS_EX] = udma_create_jfs_ops_ex, + [UDMA_USER_CTL_DELETE_JFS_EX] = udma_delete_jfs_ops_ex, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 64ec81bdc2b8..011711dc1926 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -262,7 +262,10 @@ int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); +int udma_alloc_jetty_id(struct udma_dev *udma_dev, uint32_t *idx, + struct udma_res *jetty_res); int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); + struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index d96f1caf016a..a1a36a0a136f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -155,6 +155,40 @@ void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, ctx->next_rcv_ssn = ctx->next_send_ssn; } +int udma_verify_jfs_param(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + bool enable_stars) +{ + if (!cfg->depth || cfg->depth > dev->caps.jfs.depth || + cfg->max_sge > dev->caps.jfs_sge || cfg->trans_mode == UBCORE_TP_RC) { + dev_err(dev->dev, + "jfs param is invalid, depth = %u, seg = %u, max_depth = %u, max_jfs_seg = %u, trans_mode = %u.\n", + cfg->depth, cfg->max_sge, dev->caps.jfs.depth, + dev->caps.jfs_sge, cfg->trans_mode); + return -EINVAL; + } + + if (enable_stars && cfg->max_inline_data != 0 && + cfg->max_inline_data > dev->caps.jfs_inline_sz) { + dev_err(dev->dev, "jfs param is invalid, inline_data:%u, max_inline_len:%u.\n", + cfg->max_inline_data, dev->caps.jfs_inline_sz); + return -EINVAL; + } + + if (enable_stars && cfg->max_rsge > dev->caps.jfs_rsge) { + dev_err(dev->dev, "jfs param is invalid, rsge:%u, max_rsge:%u.\n", + cfg->max_rsge, dev->caps.jfs_rsge); + return -EINVAL; + } + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jfs priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; 
+ } + + return 0; +} + void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs) { struct udma_dfx_jfs *jfs; diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index b030150af88f..d3a29f2a68a0 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -151,5 +151,10 @@ int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); void udma_flush_sq(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_cr *cr); +void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs); +void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, void *mb_buf); +int udma_verify_jfs_param(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + bool enable_stars); #endif /* __UDMA_JFS_H__ */ -- Gitee From 62aee3662fbe9128fa0d4810a3bce33ae767a324 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 11:31:26 +0800 Subject: [PATCH 134/243] ub: udma: Support create and destroy stars jfc. commit a388e048a2bdf74c3a699be40e12ca7bb317d707 openEuler This patch adds the ability to create and destroy stars jfc. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 205 +++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 4 + 2 files changed, 209 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 7851a262e50a..9b4c306d4887 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -276,6 +276,208 @@ static int udma_delete_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucont return 0; } +static int udma_get_jfc_buf_ex(struct udma_dev *dev, + struct udma_jfc *jfc, + struct udma_jfc_cfg_ex *cfg_ex) +{ + uint32_t size; + int ret = 0; + + if (!jfc->lock_free) + spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; + jfc->tid = dev->tid; + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + + if (size != cfg_ex->cstm_cfg.cq.buff_size) { + dev_err(dev->dev, "cqe buff size is wrong, buf size = %u.\n", size); + return -EINVAL; + } + + jfc->buf.addr = (dma_addr_t)(uintptr_t)cfg_ex->cstm_cfg.cq.buff; + + if (jfc->buf.addr == 0) { + dev_err(dev->dev, "cq buff addr is wrong.\n"); + return -EINVAL; + } + + jfc->buf.kva = (void *)(uintptr_t)jfc->buf.addr; + + ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + return -ENOMEM; + } + + return ret; +} + +static struct ubcore_jfc *udma_create_jfc_ex(struct ubcore_device *ubcore_dev, + struct udma_jfc_cfg_ex *cfg_ex) +{ + struct udma_dev *dev = to_udma_dev(ubcore_dev); + struct ubcore_jfc_cfg *cfg = &cfg_ex->base_cfg; + unsigned long flags_store; + unsigned long flags_erase; + struct udma_jfc *jfc; + int ret; + + jfc = kzalloc(sizeof(struct udma_jfc), GFP_KERNEL); + if (!jfc) + return NULL; + + jfc->arm_sn = 1; + jfc->buf.entry_cnt = cfg->depth ? 
roundup_pow_of_two(cfg->depth) : cfg->depth; + + ret = udma_check_jfc_cfg(dev, jfc, &cfg_ex->base_cfg); + if (ret) + goto err_check_cfg; + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, &jfc->jfcn); + if (ret) + goto err_alloc_jfc_id; + + udma_init_jfc_param(cfg, jfc); + jfc->base.ub_dev = ubcore_dev; + jfc->base.uctx = NULL; + jfc->base.jfae_handler = cfg_ex->jfae_handler; + jfc->base.jfce_handler = cfg_ex->jfce_handler; + jfc->mode = UDMA_KERNEL_STARS_JFC_TYPE; + + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_jfc_buf_ex(dev, jfc, cfg_ex); + if (ret) + goto err_get_jfc_buf; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_alloc_cqc; + + refcount_set(&jfc->event_refcount, 1); + + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return &jfc->base; + +err_alloc_cqc: + udma_free_sw_db(dev, &jfc->db); +err_get_jfc_buf: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); +err_alloc_jfc_id: +err_check_cfg: + kfree(jfc); + return NULL; +} + +static int udma_create_jfc_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_jfc_cfg_ex cfg_ex; + struct ubcore_jfc *jfc; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_jfc_cfg_ex)) || + udma_check_base_param(out->addr, out->len, sizeof(struct ubcore_jfc *))) { + dev_err(udev->dev, "input parameter invalid in create jfc, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + memcpy(&cfg_ex, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct udma_jfc_cfg_ex))); + + jfc = udma_create_jfc_ex(dev, &cfg_ex); + if (jfc == NULL) + return -EFAULT; + + memcpy((void *)(uintptr_t)out->addr, &jfc, sizeof(struct ubcore_jfc *)); + + return 0; +} + +static int udma_delete_jfc_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubcore_jfc *jfc; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct ubcore_jfc *))) { + dev_err(udev->dev, "parameter invalid in delete jfc, len = %u.\n", + in->len); + return -EINVAL; + } + + memcpy(&jfc, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct ubcore_jfc *))); + if (jfc == NULL) + return -EINVAL; + + if (udma_destroy_jfc(jfc)) + return -EFAULT; + + return 0; +} + +static int udma_set_cqe_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ex_jfc_addr *jfc_addr; + struct udma_set_cqe_ex cqe_ex; + uint32_t cq_depth; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_set_cqe_ex))) { + dev_err(udev->dev, "parameter invalid in set cqe, len = %u.\n", + in->len); + return -EINVAL; + } + + memcpy(&cqe_ex, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct udma_set_cqe_ex))); + + if 
(cqe_ex.jfc_type != UDMA_STARS_JFC_TYPE && + cqe_ex.jfc_type != UDMA_CCU_JFC_TYPE) { + dev_err(udev->dev, "invalid jfc type, mode = %u.\n", cqe_ex.jfc_type); + return -EINVAL; + } + + if (cqe_ex.addr == 0) { + dev_err(udev->dev, "cq addr is wrong in set cqe.\n"); + return -EINVAL; + } + + cq_depth = cqe_ex.len / udev->caps.cqe_size; + if (cq_depth < UDMA_JFC_DEPTH_MIN || cq_depth > udev->caps.jfc.depth || + (cqe_ex.len % udev->caps.cqe_size) != 0 || + cq_depth != roundup_pow_of_two(cq_depth)) { + dev_err(udev->dev, "cq buff size is wrong in set cqe, size = %u.\n", + cqe_ex.len); + return -EINVAL; + } + + jfc_addr = &udev->cq_addr_array[cqe_ex.jfc_type]; + jfc_addr->cq_addr = cqe_ex.addr; + jfc_addr->cq_len = cqe_ex.len; + + return 0; +} + static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { @@ -1084,6 +1286,9 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_CREATE_JFS_EX] = udma_create_jfs_ops_ex, [UDMA_USER_CTL_DELETE_JFS_EX] = udma_delete_jfs_ops_ex, + [UDMA_USER_CTL_CREATE_JFC_EX] = udma_create_jfc_ops_ex, + [UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex, + [UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 02b17b6011d2..6f62f33eccdf 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -189,6 +189,10 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); +int udma_check_jfc_cfg(struct udma_dev *dev, struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg); +void udma_init_jfc_param(struct ubcore_jfc_cfg *cfg, struct udma_jfc *jfc); +int udma_post_create_jfc_mbox(struct udma_dev *dev, struct udma_jfc *jfc); void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev); #endif /* __UDMA_JFC_H__ */ -- Gitee From 20667426200a5d0fc58f753701a8b71578d93299 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 12:00:22 +0800 Subject: [PATCH 135/243] ub: udma: Support query entity information. commit 097d0f314ccb55b25ceef209efd174cafc590864 openEuler This patch adds the ability to query entity information. 
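The handler added below copies a snapshot of the device's addressing information into a caller-supplied buffer, which must be exactly sizeof(struct udma_ue_info_ex) or the call fails with -EINVAL. A same-file caller sketch (the handler is static); the helper name and the integer addr/len layout of struct ubcore_user_ctl_out are assumptions inferred from the handler's own casts:

/*
 * Caller-side sketch for the UE info query. Hypothetical helper;
 * field types of the user_ctl structs are assumed.
 */
static int example_query_ue_info(struct ubcore_device *dev,
                                 struct ubcore_ucontext *uctx)
{
        struct udma_ue_info_ex info = {};
        struct ubcore_user_ctl_in in = {};
        struct ubcore_user_ctl_out out = {};
        int ret;

        out.addr = (uint64_t)(uintptr_t)&info;
        out.len = sizeof(info);        /* must match exactly */

        ret = udma_query_ue_info_ex(dev, uctx, &in, &out);
        if (ret)
                return ret;

        pr_info("ue %u: chip %u, die %u\n",
                info.ue_id, info.chip_id, info.die_id);
        return 0;
}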
Signed-off-by: Wei Qin
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/udma_ctl.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c
index 9b4c306d4887..1a5c8926f5bc 100644
--- a/drivers/ub/urma/hw/udma/udma_ctl.c
+++ b/drivers/ub/urma/hw/udma/udma_ctl.c
@@ -478,6 +478,31 @@ static int udma_set_cqe_ex(struct ubcore_device *dev, struct ubcore_ucontext *uc
 	return 0;
 }
 
+static int udma_query_ue_info_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx,
+				 struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out)
+{
+	struct udma_dev *udev = to_udma_dev(dev);
+	struct udma_ue_info_ex info = {};
+
+	if (udma_check_base_param(out->addr, out->len, sizeof(struct udma_ue_info_ex))) {
+		dev_err(udev->dev, "parameter invalid in query ue, len = %u.\n",
+			out->len);
+		return -EINVAL;
+	}
+
+	info.chip_id = udev->chip_id;
+	info.die_id = udev->die_id;
+	info.dwqe_addr = udev->db_base + JETTY_DSQE_OFFSET;
+	info.db_base_addr = info.dwqe_addr + UDMA_DOORBELL_OFFSET;
+	info.ue_id = udev->ue_id;
+	info.register_base_addr = udev->db_base;
+	info.offset_len = PAGE_SIZE;
+
+	memcpy((void *)(uintptr_t)out->addr, &info, sizeof(struct udma_ue_info_ex));
+
+	return 0;
+}
+
 static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx,
 				     struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out)
 {
@@ -1289,6 +1314,7 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = {
 	[UDMA_USER_CTL_CREATE_JFC_EX] = udma_create_jfc_ops_ex,
 	[UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex,
 	[UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex,
+	[UDMA_USER_CTL_QUERY_UE_INFO] = udma_query_ue_info_ex,
 	[UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb,
 	[UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb,
 	[UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport,
--
Gitee

From a599dd52232b40f9ff6b1e5bc253d330421d53d5 Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Sat, 23 Aug 2025 14:31:04 +0800
Subject: [PATCH 136/243] ub: udma: Support query resource ratio.

commit 8eaec1c8a063361668fffcd1a7d9ae6345a51ae5 openEuler

This patch adds the ability to query the device resource ratio and the
pair device count.
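Both queries follow the same out-buffer contract; udma_query_pair_dev_count(), added below, additionally requires out->len to equal sizeof(uint32_t) before it builds the ctrlq message. A caller sketch (helper name is hypothetical; the addr/len layout of the user_ctl structs is assumed from the handler's checks):

/*
 * Caller sketch for the pair-device-count query. Hypothetical helper;
 * on success the count is written back through out->addr.
 */
static int example_query_pair_devnum(struct ubcore_device *dev,
                                     struct ubcore_ucontext *uctx)
{
        uint32_t pair_device_num = 0;
        struct ubcore_user_ctl_in in = {};
        struct ubcore_user_ctl_out out = {};
        int ret;

        out.addr = (uint64_t)(uintptr_t)&pair_device_num;
        out.len = sizeof(pair_device_num);        /* must match exactly */

        ret = udma_query_pair_dev_count(dev, uctx, &in, &out);
        if (!ret)
                pr_info("pair device num: %u\n", pair_device_num);

        return ret;
}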
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 2 ++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 41 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 8 +++++ 3 files changed, 51 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 1a5c8926f5bc..2b6f24c80a6a 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1315,11 +1315,13 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex, [UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex, [UDMA_USER_CTL_QUERY_UE_INFO] = udma_query_ue_info_ex, + [UDMA_USER_CTL_GET_DEV_RES_RATIO] = udma_get_dev_resource_ratio, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = udma_query_pair_dev_count, }; static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index fd499a89e131..043786f39b3d 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -740,3 +740,44 @@ int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata); } + +int udma_query_pair_dev_count(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + struct ubase_bus_eid eid = {}; + uint32_t pair_device_num = 0; + int ret; + + if (out->addr == 0 || out->len != sizeof(pair_device_num)) { + dev_err(udev->dev, "query pair dev count, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + ret = ubase_get_bus_eid(udev->comdev.adev, &eid); + if (ret) { + dev_err(udev->dev, "get dev bus eid failed, ret is %d.\n", ret); + return ret; + } + + ctrlq_msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(eid); + ctrlq_msg.in = (void *)&eid; + ctrlq_msg.out_size = sizeof(pair_device_num); + ctrlq_msg.out = &pair_device_num; + ctrlq_msg.opcode = UDMA_CTRLQ_GET_DEV_RESOURCE_COUNT; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + + memcpy((void *)(uintptr_t)out->addr, &pair_device_num, sizeof(pair_device_num)); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index e83cd3e94c56..7c7d1ad39b92 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -179,6 +179,12 @@ struct udma_dev_resource_ratio { uint32_t index; }; +int udma_query_pair_dev_count(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + +int udma_get_dev_resource_ratio(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + int udma_register_npu_cb(struct ubcore_device *dev, 
struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); @@ -192,6 +198,8 @@ int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_c void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, bool is_need_flush); +int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, + struct ubcore_active_tp_cfg *active_cfg); int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, -- Gitee From 4b7dde6eea4f88334d688751ca4814da39fa4b1b Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 14:51:50 +0800 Subject: [PATCH 137/243] ub: udma: Support query ub memory info. commit d0c38b53548dcda5ed5fbf1dcc28065b3482e227 openEuler This patch adds the ability to query ub memory info. In addition, this patch add disassociate ucontext ops for urma. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 1 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 36 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 ++++++++ drivers/ub/urma/hw/udma/udma_main.c | 5 ++++ 4 files changed, 53 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 2b6f24c80a6a..af0568f3ce74 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1321,6 +1321,7 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_UBMEM_INFO] = udma_ctrlq_query_ubmem_info, [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = udma_query_pair_dev_count, }; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 043786f39b3d..966dc7a41d94 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -592,6 +592,42 @@ static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handl return (ret == -EAGAIN) ? 
0 : ret; } +int udma_ctrlq_query_ubmem_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ +#define UDMA_CTRLQ_SER_TYPE_UBMEM 0x5 + struct udma_ctrlq_ubmem_out_query ubmem_info_out = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + uint32_t input_buf = 0; + int ret; + + if (out->addr == 0 || out->len != sizeof(struct udma_ctrlq_ubmem_out_query)) { + dev_err(udev->dev, "query ubmem info failed, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + ctrlq_msg.service_type = UDMA_CTRLQ_SER_TYPE_UBMEM; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(input_buf); + ctrlq_msg.in = (void *)&input_buf; + ctrlq_msg.out_size = sizeof(ubmem_info_out); + ctrlq_msg.out = &ubmem_info_out; + ctrlq_msg.opcode = UDMA_CTRLQ_QUERY_UBMEM_INFO; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + + memcpy((void *)(uintptr_t)out->addr, &ubmem_info_out, sizeof(ubmem_info_out)); + + return ret; +} + int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 7c7d1ad39b92..bfa3ed44c381 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -14,6 +14,7 @@ #define UDMA_MAX_UE_IDX 256 #define UDMA_MAX_TPID_NUM 5 +#define UDMA_CTRLQ_UBMEM_INFO_NUM (96) #define UDMA_TPN_CNT_MASK 0x1F enum udma_ctrlq_cmd_code_type { @@ -28,6 +29,10 @@ enum udma_ctrlq_cmd_code_type { UDMA_CMD_CTRLQ_MAX }; +enum udma_ctrlq_ubmem_opcode { + UDMA_CTRLQ_QUERY_UBMEM_INFO = 0x1, +}; + enum udma_ctrlq_trans_type { UDMA_CTRLQ_TRANS_TYPE_TP_RM = 0, UDMA_CTRLQ_TRANS_TYPE_CTP, @@ -151,6 +156,10 @@ struct udma_ue_idx_table { uint8_t ue_idx[UDMA_UE_NUM]; }; +struct udma_ctrlq_ubmem_out_query { + uint32_t data[UDMA_CTRLQ_UBMEM_INFO_NUM]; +}; + struct udma_ctrlq_tp_attr { uint32_t tp_attr_bitmap; struct ubcore_tp_attr_value tp_attr_value; @@ -200,6 +209,8 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi bool is_need_flush); int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, struct ubcore_active_tp_cfg *active_cfg); +int udma_ctrlq_query_ubmem_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 7726d371476b..a6964f3ab878 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -187,6 +187,10 @@ static int udma_query_stats(struct ubcore_device *dev, struct ubcore_stats_key * return ret; } +static void udma_disassociate_ucontext(struct ubcore_ucontext *uctx) +{ +} + static struct ubcore_ops g_dev_ops = { .owner = THIS_MODULE, .abi_version = 0, @@ -245,6 +249,7 @@ static struct ubcore_ops g_dev_ops = { .poll_jfc = udma_poll_jfc, .query_stats = udma_query_stats, .query_ue_idx = udma_query_ue_idx, + .disassociate_ucontext = udma_disassociate_ucontext, }; static void 
udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table)
--
Gitee

From 1fd7761b5c7e03ccf05b158805564fbc9858ff2e Mon Sep 17 00:00:00 2001
From: JiaWei Kang
Date: Tue, 9 Sep 2025 19:32:21 +0800
Subject: [PATCH 138/243] ub: udma: Support 2M hugepage function.

commit 1ae22d037be802e750a85d5de87836795de2f37f openEuler

This patch adds the ability to allocate and destroy 2M huge pages.

Signed-off-by: JiaWei Kang
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/udma_common.c |  26 ++++-
 drivers/ub/urma/hw/udma/udma_common.h |   8 +-
 drivers/ub/urma/hw/udma/udma_ctx.c    | 150 ++++++++++++++++++++++
 drivers/ub/urma/hw/udma/udma_ctx.h    |   6 ++
 drivers/ub/urma/hw/udma/udma_db.c     |   4 +-
 drivers/ub/urma/hw/udma/udma_def.h    |  20 ++++
 drivers/ub/urma/hw/udma/udma_dev.h    |   5 +
 drivers/ub/urma/hw/udma/udma_jfc.c    |   6 +-
 drivers/ub/urma/hw/udma/udma_jfr.c    |  12 +--
 drivers/ub/urma/hw/udma/udma_jfs.c    |   6 +-
 10 files changed, 225 insertions(+), 18 deletions(-)

diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c
index 3ec53595bcf8..31b4d504c6e4 100644
--- a/drivers/ub/urma/hw/udma/udma_common.c
+++ b/drivers/ub/urma/hw/udma/udma_common.c
@@ -561,7 +561,7 @@ static void udma_unpin_k_addr(struct ubcore_umem *umem)
	udma_umem_release(umem, true);
 }

-int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size,
+int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size,
		     struct udma_buf *buf)
 {
	size_t aligned_memory_size;
@@ -593,7 +593,7 @@ int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size,
	return 0;
 }

-void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size,
+void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size,
		     struct udma_buf *buf)
 {
	udma_unpin_k_addr(buf->umem);
@@ -701,3 +701,25 @@ void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size)
	for (i = 0; i < res_size; i++)
		res[i] = arr[res_size - i - 1];
 }
+
+void udma_init_hugepage(struct udma_dev *dev)
+{
+	INIT_LIST_HEAD(&dev->hugepage_list);
+	mutex_init(&dev->hugepage_lock);
+}
+
+void udma_destroy_hugepage(struct udma_dev *dev)
+{
+	struct udma_hugepage_priv *priv, *tmp;
+
+	mutex_lock(&dev->hugepage_lock);
+	/* entries are freed while walking, so the _safe iterator is required */
+	list_for_each_entry_safe(priv, tmp, &dev->hugepage_list, list) {
+		dev_info(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n",
+			 priv->va_len >> UDMA_HUGEPAGE_SHIFT);
+		udma_unpin_k_addr(priv->umem);
+		vfree(priv->va_base);
+		kfree(priv);
+	}
+	mutex_unlock(&dev->hugepage_lock);
+	mutex_destroy(&dev->hugepage_lock);
+}
diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h
index 2bfededa6203..11497248de57 100644
--- a/drivers/ub/urma/hw/udma/udma_common.h
+++ b/drivers/ub/urma/hw/udma/udma_common.h
@@ -5,6 +5,7 @@
 #define __UDMA_COMM_H__

 #include
+#include
 #include
 #include "udma_ctx.h"
 #include "udma_dev.h"
@@ -324,8 +325,8 @@ void udma_dfx_store_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity
		       uint32_t id, const char *name);
 void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity,
			uint32_t id);
-int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf);
-void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf);
+int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf);
+void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf);
 void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr);
void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); @@ -368,4 +369,7 @@ void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, ui uint32_t *ctx); void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size); +void udma_init_hugepage(struct udma_dev *dev); +void udma_destroy_hugepage(struct udma_dev *dev); + #endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 985abb19929a..71ad304bfa80 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -176,3 +176,153 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) return 0; } + +int udma_alloc_u_hugepage(struct udma_context *ctx, struct vm_area_struct *vma) +{ + uint32_t page_num = (vma->vm_end - vma->vm_start) >> UDMA_HUGEPAGE_SHIFT; + struct udma_hugepage_priv *priv; + int ret = -ENOMEM; + int i; + + mutex_lock(&ctx->dev->hugepage_lock); + if (page_num > ctx->dev->total_hugepage_num) { + dev_err(ctx->dev->dev, "insufficient resources for mmap.\n"); + mutex_unlock(&ctx->dev->hugepage_lock); + return -EINVAL; + } + ctx->dev->total_hugepage_num -= page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto err_alloc_priv; + + priv->page_num = page_num; + priv->pages = kcalloc(priv->page_num, sizeof(*priv->pages), GFP_KERNEL); + if (!priv->pages) + goto err_alloc_arr; + + for (i = 0; i < priv->page_num; i++) { + priv->pages[i] = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(UDMA_HUGEPAGE_SIZE)); + if (!priv->pages[i]) { + dev_err(ctx->dev->dev, "failed to alloc 2M pages.\n"); + goto err_alloc_pages; + } + ret = remap_pfn_range(vma, vma->vm_start + i * UDMA_HUGEPAGE_SIZE, + page_to_pfn(priv->pages[i]), UDMA_HUGEPAGE_SIZE, + vma->vm_page_prot); + if (ret) { + dev_err(ctx->dev->dev, "failed to remap_pfn_range, ret=%d.\n", ret); + goto err_remap_pfn_range; + } + } + + priv->va_base = (void *)vma->vm_start; + priv->va_len = priv->page_num << UDMA_HUGEPAGE_SHIFT; + priv->left_va_len = priv->va_len; + refcount_set(&priv->refcnt, 1); + + mutex_lock(&ctx->hugepage_lock); + list_add(&priv->list, &ctx->hugepage_list); + mutex_unlock(&ctx->hugepage_lock); + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "map_hugepage, 2m_page_num=%u.\n", + priv->page_num); + return 0; + +err_remap_pfn_range: +err_alloc_pages: + for (i = 0; i < priv->page_num; i++) { + if (priv->pages[i]) + __free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE)); + else + break; + } + kfree(priv->pages); +err_alloc_arr: + kfree(priv); +err_alloc_priv: + mutex_lock(&ctx->dev->hugepage_lock); + ctx->dev->total_hugepage_num += page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + + return ret; +} + +static struct udma_hugepage_priv *udma_list_find_before(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + + list_for_each_entry(priv, &ctx->hugepage_list, list) { + if (va >= priv->va_base && va < priv->va_base + priv->va_len) + return priv; + } + + return NULL; +} + +int udma_occupy_u_hugepage(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + + mutex_lock(&ctx->hugepage_lock); + priv = udma_list_find_before(ctx, va); + if (priv) { + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "occupy_hugepage.\n"); + refcount_inc(&priv->refcnt); + } + mutex_unlock(&ctx->hugepage_lock); + + return priv ? 
0 : -EFAULT; +} + +void udma_return_u_hugepage(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + struct vm_area_struct *vma; + uint32_t i; + + mutex_lock(&ctx->hugepage_lock); + priv = udma_list_find_before(ctx, va); + if (!priv) { + mutex_unlock(&ctx->hugepage_lock); + dev_warn(ctx->dev->dev, "va is invalid addr.\n"); + return; + } + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "return_hugepage.\n"); + refcount_dec(&priv->refcnt); + if (!refcount_dec_if_one(&priv->refcnt)) { + mutex_unlock(&ctx->hugepage_lock); + return; + } + + list_del(&priv->list); + mutex_unlock(&ctx->hugepage_lock); + + if (current->mm) { + mmap_write_lock(current->mm); + vma = find_vma(current->mm, (unsigned long)priv->va_base); + if (vma != NULL && vma->vm_start <= (unsigned long)priv->va_base && + vma->vm_end >= (unsigned long)(priv->va_base + priv->va_len)) + zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len); + mmap_write_unlock(current->mm); + } else { + dev_warn(ctx->dev->dev, "current mm released.\n"); + } + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", + priv->page_num); + mutex_lock(&ctx->dev->hugepage_lock); + for (i = 0; i < priv->page_num; i++) + __free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE)); + ctx->dev->total_hugepage_num += priv->page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + kfree(priv->pages); + kfree(priv); +} diff --git a/drivers/ub/urma/hw/udma/udma_ctx.h b/drivers/ub/urma/hw/udma/udma_ctx.h index a93aab94c1e9..2521d2de3108 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.h +++ b/drivers/ub/urma/hw/udma/udma_ctx.h @@ -16,6 +16,8 @@ struct udma_context { struct mutex pgdir_mutex; struct iommu_sva *sva; uint32_t tid; + struct mutex hugepage_lock; + struct list_head hugepage_list; }; static inline struct udma_context *to_udma_context(struct ubcore_ucontext *uctx) @@ -39,4 +41,8 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, int udma_free_ucontext(struct ubcore_ucontext *ucontext); int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma); +int udma_alloc_u_hugepage(struct udma_context *ctx, struct vm_area_struct *vma); +int udma_occupy_u_hugepage(struct udma_context *ctx, void *va); +void udma_return_u_hugepage(struct udma_context *ctx, void *va); + #endif /* __UDMA_CTX_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_db.c b/drivers/ub/urma/hw/udma/udma_db.c index ea7b5d98ee6b..c66d6b23b2e8 100644 --- a/drivers/ub/urma/hw/udma/udma_db.c +++ b/drivers/ub/urma/hw/udma/udma_db.c @@ -115,7 +115,7 @@ static struct udma_k_sw_db_page *udma_alloc_db_page(struct udma_dev *dev, bitmap_fill(page->bitmap, page->num_db); - ret = udma_k_alloc_buf(dev, PAGE_SIZE, &page->db_buf); + ret = udma_alloc_normal_buf(dev, PAGE_SIZE, &page->db_buf); if (ret) { dev_err(dev->dev, "Failed alloc db page buf, ret is %d.\n", ret); goto err_kva; @@ -165,7 +165,7 @@ void udma_free_sw_db(struct udma_dev *dev, struct udma_sw_db *db) set_bit(db->index, db->kpage->bitmap); if (bitmap_full(db->kpage->bitmap, db->kpage->num_db)) { - udma_k_free_buf(dev, PAGE_SIZE, &db->kpage->db_buf); + udma_free_normal_buf(dev, PAGE_SIZE, &db->kpage->db_buf); bitmap_free(db->kpage->bitmap); list_del(&db->kpage->list); kfree(db->kpage); diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h index ca107e34a37c..b8d80fa7a98d 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -109,6 +109,24 @@ struct udma_sw_db_page { refcount_t 
refcount; }; +struct udma_hugepage_priv { + struct list_head list; + struct page **pages; + uint32_t page_num; + struct ubcore_umem *umem; + void *va_base; + uint32_t va_len; + uint32_t left_va_offset; + uint32_t left_va_len; + refcount_t refcnt; +}; + +struct udma_hugepage { + void *va_start; + uint32_t va_len; + struct udma_hugepage_priv *priv; +}; + struct udma_buf { dma_addr_t addr; union { @@ -123,6 +141,8 @@ struct udma_buf { uint32_t cnt_per_page_shift; struct xarray id_table_xa; struct mutex id_table_mutex; + bool is_hugepage; + struct udma_hugepage *hugepage; }; struct udma_k_sw_db_page { diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index f4ddf294b769..1f76ccb84c30 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -35,6 +35,8 @@ extern bool dump_aux_info; #define UDMA_HW_PAGE_SHIFT 12 #define UDMA_HW_PAGE_SIZE (1 << UDMA_HW_PAGE_SHIFT) +#define UDMA_HUGEPAGE_SHIFT 21 +#define UDMA_HUGEPAGE_SIZE (1 << UDMA_HUGEPAGE_SHIFT) #define UDMA_DEV_UE_NUM 47 @@ -147,6 +149,9 @@ struct udma_dev { u8 udma_sl[UDMA_MAX_SL_NUM]; int disable_ue_rx_count; struct mutex disable_ue_rx_mutex; + struct mutex hugepage_lock; + struct list_head hugepage_list; + uint32_t total_hugepage_num; }; #define UDMA_ERR_MSG_LEN 128 diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 12c2f143a376..2f3cb33af300 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -150,7 +150,7 @@ static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *u jfc->tid = dev->tid; size = jfc->buf.entry_size * jfc->buf.entry_cnt; - ret = udma_k_alloc_buf(dev, size, &jfc->buf); + ret = udma_alloc_normal_buf(dev, size, &jfc->buf); if (ret) { dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); return ret; @@ -159,7 +159,7 @@ static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *u ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); if (ret) { dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); - udma_k_free_buf(dev, size, &jfc->buf); + udma_free_normal_buf(dev, size, &jfc->buf); return -ENOMEM; } @@ -173,7 +173,7 @@ static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) if (jfc->buf.kva) { size = jfc->buf.entry_size * jfc->buf.entry_cnt; - udma_k_free_buf(dev, size, &jfc->buf); + udma_free_normal_buf(dev, size, &jfc->buf); } else if (jfc->buf.umem) { uctx = to_udma_context(jfc->base.uctx); unpin_queue_addr(jfc->buf.umem); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 5de0fc62c6e7..2790ab87982c 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -58,7 +58,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) jfr->rq.buf.entry_cnt = jfr->wqe_cnt; rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; - ret = udma_k_alloc_buf(dev, rqe_buf_size, &jfr->rq.buf); + ret = udma_alloc_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); if (ret) { dev_err(dev->dev, "failed to alloc rq buffer for jfr when buffer size = %u.\n", @@ -70,7 +70,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; - ret = udma_k_alloc_buf(dev, idx_buf_size, &jfr->idx_que.buf); + ret = udma_alloc_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); if (ret) { dev_err(dev->dev, "failed to 
alloc idx que buffer for jfr when buffer size = %u.\n",
@@ -98,9 +98,9 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr)
 err_alloc_db:
	kfree(jfr->rq.wrid);
 err_wrid:
-	udma_k_free_buf(dev, idx_buf_size, &jfr->idx_que.buf);
+	udma_free_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf);
 err_idx_que:
-	udma_k_free_buf(dev, rqe_buf_size, &jfr->rq.buf);
+	udma_free_normal_buf(dev, rqe_buf_size, &jfr->rq.buf);

	return -ENOMEM;
 }
@@ -211,13 +211,13 @@ static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr)

	if (jfr->rq.buf.kva) {
		size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size;
-		udma_k_free_buf(dev, size, &jfr->rq.buf);
+		udma_free_normal_buf(dev, size, &jfr->rq.buf);
		udma_free_sw_db(dev, &jfr->sw_db);
	}

	if (jfr->idx_que.buf.kva) {
		size = jfr->idx_que.buf.entry_cnt * jfr->idx_que.buf.entry_size;
-		udma_k_free_buf(dev, size, &jfr->idx_que.buf);
+		udma_free_normal_buf(dev, size, &jfr->idx_que.buf);
		udma_destroy_udma_table(dev, &jfr->idx_que.jfr_idx_table, "JFR_IDX");
	}

diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c
index a1a36a0a136f..fd4e6e025077 100644
--- a/drivers/ub/urma/hw/udma/udma_jfs.c
+++ b/drivers/ub/urma/hw/udma/udma_jfs.c
@@ -71,7 +71,7 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq,
	size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE);
	sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT;

-	ret = udma_k_alloc_buf(dev, size, &sq->buf);
+	ret = udma_alloc_normal_buf(dev, size, &sq->buf);
	if (ret) {
		dev_err(dev->dev,
			"failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size);
@@ -80,7 +80,7 @@
	sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL);
	if (!sq->wrid) {
-		udma_k_free_buf(dev, size, &sq->buf);
+		udma_free_normal_buf(dev, size, &sq->buf);
		dev_err(dev->dev,
			"failed to alloc wrid for jfs id = %u when entry cnt = %u.\n",
			sq->id, sq->buf.entry_cnt);
@@ -99,7 +99,7 @@ void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq)

	if (sq->buf.kva) {
		size = sq->buf.entry_cnt * sq->buf.entry_size;
-		udma_k_free_buf(dev, size, &sq->buf);
+		udma_free_normal_buf(dev, size, &sq->buf);
		kfree(sq->wrid);
		return;
	}
--
Gitee

From d2402753e70ebf070c82d6d9486ef0dcc8641abb Mon Sep 17 00:00:00 2001
From: JiaWei Kang
Date: Tue, 9 Sep 2025 20:37:38 +0800
Subject: [PATCH 139/243] ub: udma: jetty and rct support the hugepage buffer.

commit 551b2ed34422a838904274d72789a2a99e29434a openEuler

This patch adds the ability for jetty and rct to use the hugepage buffer.
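For review convenience, here is a minimal sketch of how a kernel-side
caller uses the reworked allocator. The surrounding function is
hypothetical; udma_k_alloc_buf() and udma_k_free_buf() are the helpers
added below, which try to carve the queue out of a pinned 2M hugepage
region and fall back to udma_alloc_normal_buf() (with a warning) when
no hugepage slice is available:

	/* Hypothetical call site, shown for illustration only. */
	static int example_alloc_queue(struct udma_dev *dev, struct udma_buf *buf,
				       uint32_t entry_size, uint32_t entry_cnt)
	{
		int ret;

		/* udma_k_alloc_buf() sizes the buffer from these two fields. */
		buf->entry_size = entry_size;
		buf->entry_cnt = entry_cnt;

		ret = udma_k_alloc_buf(dev, buf);
		if (ret)
			return ret;

		/* ... buf->kva / buf->addr now back the queue ... */

		udma_k_free_buf(dev, buf);
		return 0;
	}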
Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 147 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_common.h | 3 + drivers/ub/urma/hw/udma/udma_ctx.c | 138 ++++++++++++++++++------ drivers/ub/urma/hw/udma/udma_def.h | 2 + drivers/ub/urma/hw/udma/udma_jetty.c | 1 + drivers/ub/urma/hw/udma/udma_jfc.c | 87 ++++++++------- drivers/ub/urma/hw/udma/udma_jfr.c | 68 +++++++----- drivers/ub/urma/hw/udma/udma_jfs.c | 37 ++++--- drivers/ub/urma/hw/udma/udma_main.c | 31 +++++- drivers/ub/urma/hw/udma/udma_rct.c | 66 ++++++++---- include/uapi/ub/urma/udma/udma_abi.h | 10 +- 11 files changed, 451 insertions(+), 139 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 31b4d504c6e4..07d57a5ce96b 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -603,6 +603,153 @@ void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, buf->addr = 0; } +static struct udma_hugepage_priv * +udma_alloc_hugepage_priv(struct udma_dev *dev, uint32_t len) +{ + struct udma_hugepage_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + priv->va_len = ALIGN(len, UDMA_HUGEPAGE_SIZE); + if (priv->va_len >> UDMA_HUGEPAGE_SHIFT > dev->total_hugepage_num) { + dev_err(dev->dev, "insufficient resources for mmap.\n"); + goto err_vmalloc_huge; + } + + priv->left_va_len = priv->va_len; + priv->va_base = vmalloc_huge(priv->va_len, GFP_KERNEL); + if (!priv->va_base) { + dev_err(dev->dev, "failed to vmalloc_huge, size=%u.", priv->va_len); + goto err_vmalloc_huge; + } + memset(priv->va_base, 0, priv->va_len); + + priv->umem = udma_pin_k_addr(&dev->ub_dev, (uint64_t)priv->va_base, priv->va_len); + if (IS_ERR(priv->umem)) { + dev_err(dev->dev, "pin kernel buf failed.\n"); + goto err_pin; + } + + refcount_set(&priv->refcnt, 1); + list_add(&priv->list, &dev->hugepage_list); + dev->total_hugepage_num -= priv->va_len >> UDMA_HUGEPAGE_SHIFT; + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "map_hugepage, 2m_page_num=%u.\n", + priv->va_len >> UDMA_HUGEPAGE_SHIFT); + return priv; + +err_pin: + vfree(priv->va_base); +err_vmalloc_huge: + kfree(priv); + + return NULL; +} + +static struct udma_hugepage * +udma_alloc_hugepage(struct udma_dev *dev, uint32_t len) +{ + struct udma_hugepage_priv *priv = NULL; + struct udma_hugepage *hugepage; + bool b_reuse = false; + + hugepage = kzalloc(sizeof(*hugepage), GFP_KERNEL); + if (!hugepage) + return NULL; + + mutex_lock(&dev->hugepage_lock); + if (!list_empty(&dev->hugepage_list)) { + priv = list_first_entry(&dev->hugepage_list, struct udma_hugepage_priv, list); + b_reuse = len <= priv->left_va_len; + } + + if (b_reuse) { + refcount_inc(&priv->refcnt); + } else { + priv = udma_alloc_hugepage_priv(dev, len); + if (!priv) { + mutex_unlock(&dev->hugepage_lock); + kfree(hugepage); + return NULL; + } + } + + hugepage->va_start = priv->va_base + priv->left_va_offset; + hugepage->va_len = len; + hugepage->priv = priv; + priv->left_va_offset += len; + priv->left_va_len -= len; + mutex_unlock(&dev->hugepage_lock); + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "occupy_hugepage, 4k_page_num=%u.\n", + hugepage->va_len >> UDMA_HW_PAGE_SHIFT); + return hugepage; +} + +static void udma_free_hugepage(struct udma_dev *dev, struct udma_hugepage *hugepage) +{ + struct udma_hugepage_priv *priv = hugepage->priv; + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "return_hugepage, 
4k_page_num=%u.\n", + hugepage->va_len >> UDMA_HW_PAGE_SHIFT); + mutex_lock(&dev->hugepage_lock); + if (refcount_dec_and_test(&priv->refcnt)) { + if (dfx_switch) + dev_info_ratelimited(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", + priv->va_len >> UDMA_HUGEPAGE_SHIFT); + list_del(&priv->list); + dev->total_hugepage_num += priv->va_len >> UDMA_HUGEPAGE_SHIFT; + + udma_unpin_k_addr(priv->umem); + vfree(priv->va_base); + kfree(priv); + } else { + memset(hugepage->va_start, 0, hugepage->va_len); + } + mutex_unlock(&dev->hugepage_lock); + kfree(hugepage); +} + +int udma_k_alloc_buf(struct udma_dev *dev, struct udma_buf *buf) +{ + uint32_t size = buf->entry_size * buf->entry_cnt; + uint32_t hugepage_size; + int ret = 0; + + if (ubase_adev_prealloc_supported(dev->comdev.adev)) { + hugepage_size = ALIGN(size, UDMA_HW_PAGE_SIZE); + buf->hugepage = udma_alloc_hugepage(dev, hugepage_size); + if (buf->hugepage) { + buf->kva = buf->hugepage->va_start; + buf->addr = (uint64_t)buf->kva; + buf->is_hugepage = true; + } else { + dev_warn(dev->dev, + "failed to alloc hugepage buf, switch to alloc normal buf."); + ret = udma_alloc_normal_buf(dev, size, buf); + } + } else { + ret = udma_alloc_normal_buf(dev, size, buf); + } + + return ret; +} + +void udma_k_free_buf(struct udma_dev *dev, struct udma_buf *buf) +{ + uint32_t size = buf->entry_cnt * buf->entry_size; + + if (buf->is_hugepage) + udma_free_hugepage(dev, buf->hugepage); + else + udma_free_normal_buf(dev, size, buf); +} + void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr) { struct iova_slot *slot; diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 11497248de57..dee92a4186d3 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -46,6 +46,7 @@ struct udma_jetty_queue { uint32_t lock_free; /* Support kernel mode lock-free mode */ uint32_t ta_timeout; /* ms */ enum ubcore_jetty_state state; + struct udma_context *udma_ctx; bool non_pin; struct udma_jetty_grp *jetty_grp; enum udma_jetty_type jetty_type; @@ -327,6 +328,8 @@ void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entit uint32_t id); int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); +int udma_k_alloc_buf(struct udma_dev *dev, struct udma_buf *buf); +void udma_k_free_buf(struct udma_dev *dev, struct udma_buf *buf); void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 71ad304bfa80..5f60fca10d86 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -31,6 +31,7 @@ static int udma_init_ctx_resp(struct udma_dev *dev, struct ubcore_udrv_priv *udr resp.die_id = dev->die_id; resp.dump_aux_info = dump_aux_info; resp.jfr_sge = dev->caps.jfr_sge; + resp.hugepage_enable = ubase_adev_prealloc_supported(dev->comdev.adev); byte = copy_to_user((void *)(uintptr_t)udrv_data->out_addr, &resp, (uint32_t)sizeof(resp)); @@ -70,6 +71,8 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, ctx->dev = dev; INIT_LIST_HEAD(&ctx->pgdir_list); mutex_init(&ctx->pgdir_mutex); + INIT_LIST_HEAD(&ctx->hugepage_list); + mutex_init(&ctx->hugepage_lock); ret = 
udma_init_ctx_resp(dev, udrv_data);
	if (ret) {
@@ -91,8 +94,11 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev,
 int udma_free_ucontext(struct ubcore_ucontext *ucontext)
 {
	struct udma_dev *udma_dev = to_udma_dev(ucontext->ub_dev);
+	struct udma_hugepage_priv *priv, *tmp;
+	struct vm_area_struct *vma;
	struct udma_context *ctx;
	int ret;
+	int i;

	ctx = to_udma_context(ucontext);
@@ -103,20 +109,109 @@ int udma_free_ucontext(struct ubcore_ucontext *ucontext)
	mutex_destroy(&ctx->pgdir_mutex);
	ummu_sva_unbind_device(ctx->sva);

+	mutex_lock(&ctx->hugepage_lock);
+	/* entries are freed while walking, so the _safe iterator is required */
+	list_for_each_entry_safe(priv, tmp, &ctx->hugepage_list, list) {
+		if (current->mm) {
+			mmap_write_lock(current->mm);
+			vma = find_vma(current->mm, (unsigned long)priv->va_base);
+			if (vma != NULL && vma->vm_start <= (unsigned long)priv->va_base &&
+			    vma->vm_end >= (unsigned long)(priv->va_base + priv->va_len))
+				zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len);
+			mmap_write_unlock(current->mm);
+		}
+
+		dev_info(udma_dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", priv->page_num);
+		for (i = 0; i < priv->page_num; i++)
+			__free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE));
+		kfree(priv->pages);
+		kfree(priv);
+	}
+	mutex_unlock(&ctx->hugepage_lock);
+	mutex_destroy(&ctx->hugepage_lock);
+
	kfree(ctx);

	return 0;
 }

-int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma)
+static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct ubcore_ucontext *uctx,
+				struct vm_area_struct *vma)
 {
-#define JFC_DB_UNMAP_BOUND 1
-	struct udma_dev *udma_dev = to_udma_dev(uctx->ub_dev);
	struct ubcore_ucontext *jetty_uctx;
	struct udma_jetty_queue *sq;
-	resource_size_t db_addr;
	uint64_t address;
	uint64_t j_id;
+
+	j_id = get_mmap_idx(vma);
+
+	xa_lock(&dev->jetty_table.xa);
+	sq = xa_load(&dev->jetty_table.xa, j_id);
+	if (!sq) {
+		dev_err(dev->dev,
+			"mmap failed, j_id: %llu not exist\n", j_id);
+		xa_unlock(&dev->jetty_table.xa);
+		return -EINVAL;
+	}
+
+	if (sq->is_jetty)
+		jetty_uctx = to_udma_jetty_from_queue(sq)->ubcore_jetty.uctx;
+	else
+		jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx;
+
+	if (jetty_uctx != uctx) {
+		dev_err(dev->dev,
+			"mmap failed, j_id: %llu, uctx invalid\n", j_id);
+		xa_unlock(&dev->jetty_table.xa);
+		return -EINVAL;
+	}
+	xa_unlock(&dev->jetty_table.xa);
+
+	address = (uint64_t)dev->db_base + JETTY_DSQE_OFFSET + j_id * UDMA_HW_PAGE_SIZE;
+
+	if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
+			       PAGE_SIZE, vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int udma_mmap_hugepage(struct udma_dev *dev, struct ubcore_ucontext *uctx,
+			      struct vm_area_struct *vma)
+{
+	uint32_t max_map_size = dev->caps.cqe_size * dev->caps.jfc.depth;
+	uint32_t map_size = vma->vm_end - vma->vm_start;
+
+	if (!IS_ALIGNED(map_size, UDMA_HUGEPAGE_SIZE)) {
+		dev_err(dev->dev, "mmap size is not 2m alignment.\n");
+		return -EINVAL;
+	}
+
+	if (map_size == 0) {
+		dev_err(dev->dev, "mmap size is zero.\n");
+		return -EINVAL;
+	}
+
+	if (map_size > max_map_size) {
+		dev_err(dev->dev, "mmap size(%u) is greater than the max_size.\n",
+			map_size);
+		return -EINVAL;
+	}
+
+	vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY);
+	vma->vm_page_prot = __pgprot(((~PTE_ATTRINDX_MASK) & vma->vm_page_prot.pgprot) |
+				     PTE_ATTRINDX(MT_NORMAL));
+	if (udma_alloc_u_hugepage(to_udma_context(uctx), vma)) {
+		dev_err(dev->dev, "failed to alloc hugepage.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct
*vma) +{ +#define JFC_DB_UNMAP_BOUND 1 + struct udma_dev *udma_dev = to_udma_dev(uctx->ub_dev); uint32_t cmd; if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { @@ -125,7 +220,6 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) return -EINVAL; } - db_addr = udma_dev->db_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); cmd = get_mmap_cmd(vma); @@ -133,41 +227,15 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) case UDMA_MMAP_JFC_PAGE: if (io_remap_pfn_range(vma, vma->vm_start, jfc_arm_mode > JFC_DB_UNMAP_BOUND ? - (uint64_t)db_addr >> PAGE_SHIFT : + (uint64_t)udma_dev->db_base >> PAGE_SHIFT : page_to_pfn(udma_dev->db_page), PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; break; case UDMA_MMAP_JETTY_DSQE: - j_id = get_mmap_idx(vma); - xa_lock(&udma_dev->jetty_table.xa); - sq = xa_load(&udma_dev->jetty_table.xa, j_id); - if (!sq) { - dev_err(udma_dev->dev, - "mmap failed, j_id: %llu not exist\n", j_id); - xa_unlock(&udma_dev->jetty_table.xa); - return -EINVAL; - } - - if (sq->is_jetty) - jetty_uctx = to_udma_jetty_from_queue(sq)->ubcore_jetty.uctx; - else - jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; - - if (jetty_uctx != uctx) { - dev_err(udma_dev->dev, - "mmap failed, j_id: %llu, uctx invalid\n", j_id); - xa_unlock(&udma_dev->jetty_table.xa); - return -EINVAL; - } - xa_unlock(&udma_dev->jetty_table.xa); - - address = (uint64_t)db_addr + JETTY_DSQE_OFFSET + j_id * UDMA_HW_PAGE_SIZE; - - if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; - break; + return udma_mmap_jetty_dsqe(udma_dev, uctx, vma); + case UDMA_MMAP_HUGEPAGE: + return udma_mmap_hugepage(udma_dev, uctx, vma); default: dev_err(udma_dev->dev, "mmap failed, cmd(%u) not support\n", cmd); diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h index b8d80fa7a98d..0681f6dd950d 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -63,6 +63,8 @@ struct udma_caps { uint16_t rc_queue_num; uint16_t rc_queue_depth; uint8_t rc_entry_size; + uint64_t rc_dma_len; + dma_addr_t rc_dma_addr; uint8_t ack_queue_num; uint8_t port_num; uint8_t cqe_size; diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 2bc86bdb2421..c3f3f9a90fb3 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -66,6 +66,7 @@ static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jett } uctx = to_udma_context(udata->uctx); + jetty->sq.udma_ctx = uctx; jetty->sq.tid = uctx->tid; jetty->jetty_addr = ucmd->jetty_addr; jetty->pi_type = ucmd->pi_type; diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 2f3cb33af300..92c9fcbaae9f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -120,70 +120,83 @@ static int udma_get_cmd_from_user(struct udma_create_jfc_ucmd *ucmd, return 0; } -static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, - struct ubcore_udata *udata, struct udma_jfc *jfc) +static int udma_alloc_u_cq(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, + struct udma_jfc *jfc) { - struct udma_context *uctx; - uint32_t size; - int ret = 0; + int ret; - if (udata) { + if (ucmd->is_hugepage) { + jfc->buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(jfc->ctx, (void *)jfc->buf.addr)) { + dev_err(dev->dev, "failed to create cq, va not map.\n"); 
+ return -EINVAL; + } + jfc->buf.is_hugepage = true; + } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfc->buf); if (ret) { dev_err(dev->dev, "failed to pin queue for jfc, ret = %d.\n", ret); return ret; } - uctx = to_udma_context(udata->uctx); - jfc->tid = uctx->tid; - ret = udma_pin_sw_db(uctx, &jfc->db); - if (ret) { - dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); - unpin_queue_addr(jfc->buf.umem); - } + } + jfc->tid = jfc->ctx->tid; - return ret; + ret = udma_pin_sw_db(jfc->ctx, &jfc->db); + if (ret) { + dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); + goto err_pin_db; } + return 0; +err_pin_db: + if (ucmd->is_hugepage) + udma_return_u_hugepage(jfc->ctx, (void *)jfc->buf.addr); + else + unpin_queue_addr(jfc->buf.umem); + + return ret; +} + +static int udma_alloc_k_cq(struct udma_dev *dev, struct udma_jfc *jfc) +{ + int ret; + if (!jfc->lock_free) spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; jfc->tid = dev->tid; - size = jfc->buf.entry_size * jfc->buf.entry_cnt; - - ret = udma_alloc_normal_buf(dev, size, &jfc->buf); + ret = udma_k_alloc_buf(dev, &jfc->buf); if (ret) { - dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); + dev_err(dev->dev, "failed to alloc cq buffer, id=%u.\n", jfc->jfcn); return ret; } ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); if (ret) { dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); - udma_free_normal_buf(dev, size, &jfc->buf); - return -ENOMEM; + udma_k_free_buf(dev, &jfc->buf); } return ret; } -static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +static void udma_free_cq(struct udma_dev *dev, struct udma_jfc *jfc) { - struct udma_context *uctx; - uint32_t size; - - if (jfc->buf.kva) { - size = jfc->buf.entry_size * jfc->buf.entry_cnt; - udma_free_normal_buf(dev, size, &jfc->buf); - } else if (jfc->buf.umem) { - uctx = to_udma_context(jfc->base.uctx); - unpin_queue_addr(jfc->buf.umem); + if (jfc->mode != UDMA_NORMAL_JFC_TYPE) { + udma_free_sw_db(dev, &jfc->db); + return; } - if (jfc->db.page) { - uctx = to_udma_context(jfc->base.uctx); - udma_unpin_sw_db(uctx, &jfc->db); - } else if (jfc->db.kpage) { + if (jfc->buf.kva) { + udma_k_free_buf(dev, &jfc->buf); udma_free_sw_db(dev, &jfc->db); + } else { + if (jfc->buf.is_hugepage) + udma_return_u_hugepage(jfc->ctx, (void *)jfc->buf.addr); + else + unpin_queue_addr(jfc->buf.umem); + udma_unpin_sw_db(jfc->ctx, &jfc->db); } } @@ -369,7 +382,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, goto err_store_jfcn; } - ret = udma_get_jfc_buf(dev, &ucmd, udata, jfc); + ret = udata ? udma_alloc_u_cq(dev, &ucmd, jfc) : udma_alloc_k_cq(dev, jfc); if (ret) goto err_get_jfc_buf; @@ -387,7 +400,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, err_alloc_cqc: jfc->base.uctx = (udata == NULL ? 
NULL : udata->uctx); - udma_free_jfc_buf(dev, jfc); + udma_free_cq(dev, jfc); err_get_jfc_buf: xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); __xa_erase(&dev->jfc_table.xa, jfc->jfcn); @@ -497,7 +510,7 @@ int udma_destroy_jfc(struct ubcore_jfc *jfc) if (dfx_switch) udma_dfx_delete_id(dev, &dev->dfx_info->jfc, jfc->id); - udma_free_jfc_buf(dev, ujfc); + udma_free_cq(dev, ujfc); udma_id_free(&dev->jfc_table.ida_table, ujfc->jfcn); kfree(ujfc); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 2790ab87982c..6bfc135fa846 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -48,28 +48,20 @@ static int udma_verify_jfr_param(struct udma_dev *dev, static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) { - uint32_t rqe_buf_size; uint32_t idx_buf_size; - uint32_t sge_per_wqe; int ret; - sge_per_wqe = min(jfr->max_sge, dev->caps.jfr_sge); - jfr->rq.buf.entry_size = UDMA_SGE_SIZE * sge_per_wqe; + jfr->rq.buf.entry_size = UDMA_SGE_SIZE * min(jfr->max_sge, dev->caps.jfr_sge); jfr->rq.buf.entry_cnt = jfr->wqe_cnt; - rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; - - ret = udma_alloc_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); + ret = udma_k_alloc_buf(dev, &jfr->rq.buf); if (ret) { - dev_err(dev->dev, - "failed to alloc rq buffer for jfr when buffer size = %u.\n", - rqe_buf_size); + dev_err(dev->dev, "failed to alloc rq buffer, id=%u.\n", jfr->rq.id); return ret; } jfr->idx_que.buf.entry_size = UDMA_IDX_QUE_ENTRY_SZ; jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; - ret = udma_alloc_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); if (ret) { dev_err(dev->dev, @@ -100,24 +92,22 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) err_wrid: udma_free_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); err_idx_que: - udma_free_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); + udma_k_free_buf(dev, &jfr->rq.buf); return -ENOMEM; } -static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, - struct ubcore_udata *udata, +static int udma_jfr_get_u_cmd(struct udma_dev *dev, struct ubcore_udata *udata, struct udma_create_jetty_ucmd *ucmd) { unsigned long byte; - int ret; if (!udata->udrv_data) { dev_err(dev->dev, "jfr udata udrv_data is null.\n"); return -EINVAL; } - if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len != sizeof(*ucmd)) { dev_err(dev->dev, "jfr in_len %u or addr is invalid.\n", udata->udrv_data->in_len); return -EINVAL; @@ -131,14 +121,41 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, return -EFAULT; } - if (!ucmd->non_pin) { + return 0; +} + +static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + int ret; + + ret = udma_jfr_get_u_cmd(dev, udata, ucmd); + if (ret) + return ret; + + jfr->udma_ctx = to_udma_context(udata->uctx); + if (ucmd->non_pin) { + jfr->rq.buf.addr = ucmd->buf_addr; + } else if (ucmd->is_hugepage) { + jfr->rq.buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr)) { + dev_err(dev->dev, "failed to create rq, va not map.\n"); + return -EINVAL; + } + jfr->rq.buf.is_hugepage = true; + } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfr->rq.buf); if (ret) { dev_err(dev->dev, "failed to pin jfr rqe 
buf addr, ret = %d.\n", ret); return ret; } + } + if (ucmd->non_pin) { + jfr->idx_que.buf.addr = ucmd->idx_addr; + } else { ret = pin_queue_addr(dev, ucmd->idx_addr, ucmd->idx_len, &jfr->idx_que.buf); if (ret) { @@ -146,12 +163,8 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, "failed to pin jfr idx que addr, ret = %d.\n", ret); goto err_pin_idx_buf; } - } else { - jfr->rq.buf.addr = ucmd->buf_addr; - jfr->idx_que.buf.addr = ucmd->idx_addr; } - jfr->udma_ctx = to_udma_context(udata->uctx); jfr->sw_db.db_addr = ucmd->db_addr; jfr->jfr_sleep_buf.db_addr = ucmd->jfr_sleep_buf; @@ -181,7 +194,10 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, err_pin_sw_db: unpin_queue_addr(jfr->idx_que.buf.umem); err_pin_idx_buf: - unpin_queue_addr(jfr->rq.buf.umem); + if (ucmd->is_hugepage) + udma_return_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr); + else + unpin_queue_addr(jfr->rq.buf.umem); return ret; } @@ -205,13 +221,15 @@ static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) udma_unpin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); unpin_queue_addr(jfr->idx_que.buf.umem); - unpin_queue_addr(jfr->rq.buf.umem); + if (jfr->rq.buf.is_hugepage) + udma_return_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr); + else + unpin_queue_addr(jfr->rq.buf.umem); return; } if (jfr->rq.buf.kva) { - size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size; - udma_free_normal_buf(dev, size, &jfr->rq.buf); + udma_k_free_buf(dev, &jfr->rq.buf); udma_free_sw_db(dev, &jfr->sw_db); } diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index fd4e6e025077..7277db44da12 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -21,7 +21,7 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd) { - int ret; + int ret = 0; if (ucmd->sqe_bb_cnt == 0 || ucmd->buf_len == 0) { dev_err(dev->dev, "invalid param, sqe_bb_cnt=%u, buf_len=%u.\n", @@ -33,17 +33,22 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, sq->buf.entry_cnt = ucmd->buf_len >> WQE_BB_SIZE_SHIFT; if (sq->non_pin) { sq->buf.addr = ucmd->buf_addr; + } else if (ucmd->is_hugepage) { + sq->buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(sq->udma_ctx, (void *)sq->buf.addr)) { + dev_err(dev->dev, "failed to create sq, va not map.\n"); + return -EINVAL; + } + sq->buf.is_hugepage = true; } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &sq->buf); if (ret) { - dev_err(dev->dev, - "failed to pin jetty/jfs queue addr, ret = %d.\n", - ret); + dev_err(dev->dev, "failed to pin sq, ret = %d.\n", ret); return ret; } } - return 0; + return ret; } int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, @@ -71,19 +76,18 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; - ret = udma_alloc_normal_buf(dev, size, &sq->buf); + ret = udma_k_alloc_buf(dev, &sq->buf); if (ret) { - dev_err(dev->dev, - "failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size); + dev_err(dev->dev, "failed to alloc sq buffer, id=%u.\n", sq->id); return ret; } sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); if (!sq->wrid) { - udma_free_normal_buf(dev, size, &sq->buf); dev_err(dev->dev, "failed to alloc wrid for jfs id = %u 
when entry cnt = %u.\n", sq->id, sq->buf.entry_cnt); + udma_k_free_buf(dev, &sq->buf); return -ENOMEM; } @@ -95,18 +99,20 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) { - uint32_t size; - if (sq->buf.kva) { - size = sq->buf.entry_cnt * sq->buf.entry_size; - udma_free_normal_buf(dev, size, &sq->buf); + udma_k_free_buf(dev, &sq->buf); kfree(sq->wrid); return; } + if (sq->non_pin) return; - unpin_queue_addr(sq->buf.umem); + if (sq->buf.is_hugepage) { + udma_return_u_hugepage(sq->udma_ctx, (void *)sq->buf.addr); + } else { + unpin_queue_addr(sq->buf.umem); + } } void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, @@ -277,6 +283,7 @@ static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, } uctx = to_udma_context(udata->uctx); + jfs->sq.udma_ctx = uctx; jfs->sq.tid = uctx->tid; jfs->jfs_addr = ucmd->jetty_addr; jfs->pi_type = ucmd->pi_type; @@ -292,7 +299,7 @@ static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, } static int udma_alloc_jfs_sq(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, - struct udma_jfs *jfs, struct ubcore_udata *udata) + struct udma_jfs *jfs, struct ubcore_udata *udata) { struct udma_create_jetty_ucmd ucmd = {}; int ret; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index a6964f3ab878..cbf773d01c48 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -487,7 +487,6 @@ static void udma_get_jetty_id_range(struct udma_dev *udma_dev, static int query_caps_from_firmware(struct udma_dev *udma_dev) { -#define RC_QUEUE_ENTRY_SIZE 128 struct udma_cmd_ue_resource cmd = {}; int ret; @@ -514,10 +513,6 @@ static int query_caps_from_firmware(struct udma_dev *udma_dev) udma_get_jetty_id_range(udma_dev, &cmd); - udma_dev->caps.rc_queue_num = cmd.rc_queue_num; - udma_dev->caps.rc_queue_depth = cmd.rc_depth; - udma_dev->caps.rc_entry_size = RC_QUEUE_ENTRY_SIZE; - udma_dev->caps.feature = cmd.cap_info; udma_dev->caps.ue_cnt = cmd.ue_cnt >= UDMA_DEV_UE_NUM ? 
UDMA_DEV_UE_NUM - 1 : cmd.ue_cnt; @@ -581,9 +576,24 @@ static int udma_construct_qos_param(struct udma_dev *dev) return 0; } +static void cal_max_2m_num(struct udma_dev *dev) +{ + uint32_t jfs_pg = ALIGN(dev->caps.jfs.depth * MAX_WQEBB_IN_SQE * + UDMA_JFS_WQEBB_SIZE, UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + uint32_t jfr_pg = ALIGN(dev->caps.jfr.depth * dev->caps.jfr_sge * + UDMA_SGE_SIZE, UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + uint32_t jfc_pg = ALIGN(dev->caps.jfc.depth * dev->caps.cqe_size, + UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + + dev->total_hugepage_num = + (dev->caps.jetty.start_idx + dev->caps.jetty.max_cnt) * jfs_pg + + dev->caps.jfr.max_cnt * jfr_pg + dev->caps.jfc.max_cnt * jfc_pg; +} + static int udma_set_hw_caps(struct udma_dev *udma_dev) { #define MAX_MSG_LEN 0x10000 +#define RC_QUEUE_ENTRY_SIZE 64 struct ubase_adev_caps *a_caps; uint32_t jetty_grp_cnt; int ret; @@ -609,6 +619,14 @@ static int udma_set_hw_caps(struct udma_dev *udma_dev) udma_dev->caps.jetty.start_idx = a_caps->jfs.start_idx; udma_dev->caps.jetty.next_idx = udma_dev->caps.jetty.start_idx; udma_dev->caps.cqe_size = UDMA_CQE_SIZE; + udma_dev->caps.rc_queue_num = a_caps->rc_max_cnt; + udma_dev->caps.rc_queue_depth = a_caps->rc_que_depth; + udma_dev->caps.rc_entry_size = RC_QUEUE_ENTRY_SIZE; + udma_dev->caps.rc_dma_len = a_caps->pmem.dma_len; + udma_dev->caps.rc_dma_addr = a_caps->pmem.dma_addr; + + cal_max_2m_num(udma_dev); + ret = udma_construct_qos_param(udma_dev); if (ret) return ret; @@ -657,11 +675,14 @@ static int udma_init_dev_param(struct udma_dev *udma_dev) for (i = 0; i < UDMA_DB_TYPE_NUM; i++) INIT_LIST_HEAD(&udma_dev->db_list[i]); + udma_init_hugepage(udma_dev); + return 0; } static void udma_uninit_dev_param(struct udma_dev *udma_dev) { + udma_destroy_hugepage(udma_dev); mutex_destroy(&udma_dev->db_mutex); dev_set_drvdata(&udma_dev->comdev.adev->dev, NULL); udma_destroy_tables(udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_rct.c b/drivers/ub/urma/hw/udma/udma_rct.c index 599c80c118fd..ee11d3ef3ee9 100644 --- a/drivers/ub/urma/hw/udma/udma_rct.c +++ b/drivers/ub/urma/hw/udma/udma_rct.c @@ -51,13 +51,50 @@ static int udma_destroy_rc_queue_ctx(struct udma_dev *dev, struct udma_rc_queue return ret; } +static int udma_alloc_rct_buffer(struct udma_dev *dev, struct ubcore_device_cfg *cfg, + struct udma_rc_queue *rcq) +{ + uint32_t rct_buffer_size = dev->caps.rc_entry_size * cfg->rc_cfg.depth; + uint32_t buf_num_per_hugepage; + + rcq->buf.entry_size = dev->caps.rc_entry_size; + rcq->buf.entry_cnt = cfg->rc_cfg.depth; + if (ubase_adev_prealloc_supported(dev->comdev.adev)) { + rct_buffer_size = ALIGN(rct_buffer_size, PAGE_SIZE); + if (rct_buffer_size > UDMA_HUGEPAGE_SIZE) { + rcq->buf.addr = dev->caps.rc_dma_addr + rcq->id * rct_buffer_size; + } else { + buf_num_per_hugepage = UDMA_HUGEPAGE_SIZE / rct_buffer_size; + rcq->buf.addr = dev->caps.rc_dma_addr + + rcq->id / buf_num_per_hugepage * UDMA_HUGEPAGE_SIZE + + rcq->id % buf_num_per_hugepage * rct_buffer_size; + } + } else { + rcq->buf.kva_or_slot = udma_alloc_iova(dev, rct_buffer_size, &rcq->buf.addr); + if (!rcq->buf.kva_or_slot) { + dev_err(dev->dev, "failed to alloc rct buffer.\n"); + return -ENOMEM; + } + } + + return 0; +} + +static void udma_free_rct_buffer(struct udma_dev *dev, struct udma_rc_queue *rcq) +{ + uint32_t rct_buffer_size = rcq->buf.entry_size * rcq->buf.entry_cnt; + + if (!ubase_adev_prealloc_supported(dev->comdev.adev)) { + udma_free_iova(dev, rct_buffer_size, rcq->buf.kva_or_slot, rcq->buf.addr); + 
rcq->buf.kva_or_slot = NULL; + rcq->buf.addr = 0; + } +} + static int udma_alloc_rc_queue(struct udma_dev *dev, struct ubcore_device_cfg *cfg, int rc_queue_id) { - uint32_t rcq_entry_size = dev->caps.rc_entry_size; - uint32_t rcq_entry_num = cfg->rc_cfg.depth; struct udma_rc_queue *rcq; - uint32_t size; int ret; rcq = kzalloc(sizeof(struct udma_rc_queue), GFP_KERNEL); @@ -65,15 +102,9 @@ static int udma_alloc_rc_queue(struct udma_dev *dev, return -ENOMEM; rcq->id = rc_queue_id; - size = rcq_entry_size * rcq_entry_num; - rcq->buf.kva_or_slot = udma_alloc_iova(dev, size, &rcq->buf.addr); - if (!rcq->buf.kva_or_slot) { - ret = -ENOMEM; - dev_err(dev->dev, "failed to alloc rc queue buffer.\n"); - goto err_alloc_rcq; - } - rcq->buf.entry_size = rcq_entry_size; - rcq->buf.entry_cnt = rcq_entry_num; + ret = udma_alloc_rct_buffer(dev, cfg, rcq); + if (ret) + goto err_alloc_rct_buffer; ret = udma_create_rc_queue_ctx(dev, rcq); if (ret) { @@ -101,10 +132,8 @@ static int udma_alloc_rc_queue(struct udma_dev *dev, dev_err(dev->dev, "udma destroy rc queue ctx failed when alloc rc queue.\n"); err_create_rcq_ctx: - udma_free_iova(dev, size, rcq->buf.kva_or_slot, rcq->buf.addr); - rcq->buf.kva_or_slot = NULL; - rcq->buf.addr = 0; -err_alloc_rcq: + udma_free_rct_buffer(dev, rcq); +err_alloc_rct_buffer: kfree(rcq); return ret; @@ -131,10 +160,7 @@ void udma_free_rc_queue(struct udma_dev *dev, int rc_queue_id) if (dfx_switch) udma_dfx_delete_id(dev, &dev->dfx_info->rc, rc_queue_id); - udma_free_iova(dev, rcq->buf.entry_size * rcq->buf.entry_cnt, - rcq->buf.kva_or_slot, rcq->buf.addr); - rcq->buf.kva_or_slot = NULL; - rcq->buf.addr = 0; + udma_free_rct_buffer(dev, rcq); kfree(rcq); } diff --git a/include/uapi/ub/urma/udma/udma_abi.h b/include/uapi/ub/urma/udma/udma_abi.h index 02440d162c8d..5859f5254b5e 100644 --- a/include/uapi/ub/urma/udma/udma_abi.h +++ b/include/uapi/ub/urma/udma/udma_abi.h @@ -74,7 +74,8 @@ struct udma_create_jetty_ucmd { __aligned_u64 jetty_addr; __u32 pi_type : 1; __u32 non_pin : 1; - __u32 rsv : 30; + __u32 is_hugepage : 1; + __u32 rsv : 29; __u32 jetty_type; __aligned_u64 jfr_sleep_buf; __u32 jfs_id; @@ -86,6 +87,9 @@ struct udma_create_jfc_ucmd { __u32 buf_len; __u32 mode; /* 0: normal, 1: user stars, 2: kernel stars */ __aligned_u64 db_addr; + __u32 is_hugepage : 1; + __u32 rsv : 31; + __u32 rsv1; }; struct udma_create_ctx_resp { @@ -93,7 +97,8 @@ struct udma_create_ctx_resp { __u32 dwqe_enable : 1; __u32 reduce_enable : 1; __u32 dump_aux_info : 1; - __u32 rsv : 21; + __u32 hugepage_enable : 1; + __u32 rsv : 20; __u32 ue_id; __u32 chip_id; __u32 die_id; @@ -109,6 +114,7 @@ struct udma_create_jfr_resp { enum db_mmap_type { UDMA_MMAP_JFC_PAGE, UDMA_MMAP_JETTY_DSQE, + UDMA_MMAP_HUGEPAGE, }; enum { -- Gitee From ae4c6bec332616b85ed2a31475552936277150b2 Mon Sep 17 00:00:00 2001 From: Liming An Date: Thu, 27 Nov 2025 19:12:05 +0800 Subject: [PATCH 140/243] iommu/ummu: Add UMMU documentation description commit 3a2ebcdd4d6c72fcd64521ffcfe496b8950be8d0 openEuler This patch add ummu documentation description Signed-off-by: Sihui Jiang Signed-off-by: Jingbin Wu Signed-off-by: Yanlong Zhu Signed-off-by: Liming An --- .../sysfs-class-iommu-ummu-bypass-mpam | 31 +++ .../ABI/testing/sysfs-class-iommu-ummu-iommu | 113 ++++++++++ .../testing/sysfs-class-iommu-ummu-uotr-mpam | 31 +++ .../testing/sysfs-devices-platform-ummu_vdev | 19 ++ Documentation/admin-guide/perf/ummu-pmu.rst | 112 ++++++++++ Documentation/driver-api/ub/index.rst | 1 + Documentation/driver-api/ub/ummu-core.rst | 7 + 
Documentation/ub/index.rst | 1 + Documentation/ub/ummu/index.rst | 12 + Documentation/ub/ummu/ummu-core.rst | 128 +++++++++++ Documentation/ub/ummu/ummu.rst | 134 ++++++++++++ Documentation/userspace-api/ummu_core.rst | 103 +++++++++ include/linux/ummu_core.h | 206 +++++++++++++++--- 13 files changed, 863 insertions(+), 35 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam create mode 100644 Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev create mode 100644 Documentation/admin-guide/perf/ummu-pmu.rst create mode 100644 Documentation/driver-api/ub/ummu-core.rst create mode 100644 Documentation/ub/ummu/index.rst create mode 100644 Documentation/ub/ummu/ummu-core.rst create mode 100644 Documentation/ub/ummu/ummu.rst create mode 100644 Documentation/userspace-api/ummu_core.rst diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam b/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam new file mode 100644 index 000000000000..c28753fb8b7d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam @@ -0,0 +1,31 @@ +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_partid +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The partID value used by the MPAM function in the bypass UMMU + scenario. Format: %x. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_pmg +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PMG value used by the MPAM function in the bypass UMMU scenario. + Format: %x. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_run +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + After setting the bp_partid and bp_pmg values, write 1 to bp_run to + apply these values to the UMMU device. These values define the IO + regions that bypass the UMMU device in the bypass UMMU scenario. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_mpam_info +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Retrieve the currently active MPAM configuration from the UMMU device. diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu new file mode 100644 index 000000000000..48ba4d6d4c60 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu @@ -0,0 +1,113 @@ +What: /sys/class/iommu/ummu./ummu-iommu/eid_list +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + List of all EIDs registered to UMMU. + +What: /sys/class/iommu/ummu./ummu-iommu/evtq_log2num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of Event Queues in the non-secure state of the + UMMU device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/evtq_log2size +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of each Event Queue in the non-secure state of the + UMMU device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/features +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + UMMU device capabilities. + +What: /sys/class/iommu/ummu./ummu-iommu/mcmdq_log2num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of Command Queues in the non-secure state of the + UMMU device in kernel mode. 
+ +What: /sys/class/iommu/ummu./ummu-iommu/mcmdq_log2size +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of each command queue in the non-secure state of the + UMMU device in kernel mode. + +What: /sys/class/iommu/ummu./ummu-iommu/permq_ent_num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of the permission queue in user mode of the UMMU + device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/permq_num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of permission queues in user mode of the UMMU + device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/ias +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The bit width of the input address supported by the UMMU + device. + +What: /sys/class/iommu/ummu./ummu-iommu/oas +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The bit width of the output address of the UMMU device. + +What: /sys/class/iommu/ummu./ummu-iommu/options +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Configurable features of the UMMU device. + +What: /sys/class/iommu/ummu./ummu-iommu/pgsize_bitmap +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Supported page size bitmap of the UMMU translation table. + +What: /sys/class/iommu/ummu./ummu-iommu/ptsize_bitmap +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Supported page size bitmap of the UMMU MAPT table + (permission check). + +What: /sys/class/iommu/ummu./ummu-iommu/tid_bits +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Maximum TokenID bit width supported in non-secure state. + +What: /sys/class/iommu/ummu./ummu-iommu/tid_type +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The domain_type corresponding to the TokenID, which requires + the TokenID value as input. Format: %x. diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam b/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam new file mode 100644 index 000000000000..8bbe9d65c7d9 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam @@ -0,0 +1,31 @@ +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_partid +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PartID value used by the MPAM function to tag UMMU-initiated + traffic. Format: %x. + +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_pmg +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PMG value used by the MPAM function to tag UMMU-initiated traffic. + Format: %x. + +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_run +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + After setting the uotr_partid and uotr_pmg values, write 1 to uotr_run + to apply them to the UMMU device. These values tag I/O traffic initiated + by the UMMU itself. + +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_mpam_info +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Retrieve the MPAM configuration last applied to the UMMU device. 
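+
+		An illustrative sequence (a sketch: "<N>" stands in for the
+		instance suffix not shown in the paths above, and the
+		PartID/PMG values are arbitrary)::
+
+		    echo 0x2 > /sys/class/iommu/ummu.<N>/ummu_uotr_mpam/uotr_partid
+		    echo 0x1 > /sys/class/iommu/ummu.<N>/ummu_uotr_mpam/uotr_pmg
+		    echo 1 > /sys/class/iommu/ummu.<N>/ummu_uotr_mpam/uotr_run
+		    cat /sys/class/iommu/ummu.<N>/ummu_uotr_mpam/uotr_mpam_info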
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev b/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev new file mode 100644 index 000000000000..2812b512c20e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev @@ -0,0 +1,19 @@ +What: /sys/devices/platform/ummu_tid_root/logic_ummu/ummu_vdev..auto/ummu-vdev-attr/tid_mode +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Yanlong Zhu +Description: + (RO) Displays the MAPT mode of Token ID. Format: %d. Allowed values: + + == =========================================== + 0 Token ID is table mode. + 1 Token ID is entry mode. + 2 Fatal error occurred. + == =========================================== + +What: /sys/devices/platform/ummu_tid_root/logic_ummu/ummu_vdev..auto/ummu-vdev-attr/tid_val +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Yanlong Zhu +Description: + (RO) Displays the value of Token ID. Format: %u. diff --git a/Documentation/admin-guide/perf/ummu-pmu.rst b/Documentation/admin-guide/perf/ummu-pmu.rst new file mode 100644 index 000000000000..5447f8c61ac6 --- /dev/null +++ b/Documentation/admin-guide/perf/ummu-pmu.rst @@ -0,0 +1,112 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +====================================== +UMMU Performance Monitoring Unit (PMU) +====================================== + +The UMMU includes a Performance Monitor Unit (PMU) to track and collect +statistics on key hardware events, such as TLB/PLB cache hit rates and +lookup latencies. By leveraging the Linux kernel's perf subsystem, the +collected event data can be efficiently processed, analyzed, and +visualized to drive targeted optimizations for UMMU performance. + +Usage +===== + +Basic usage follows the standard Linux kernel perf interface. The UMMU +device supports the following PMU events, which are exposed under the perf +event directory: + +.. code-block:: bash + + ls -l /sys/bus/event_source/devices/ummu_pmcg_0/events + +Constraints +=========== + +- No more than 8 events can be monitored at the same time. + +UMMU PMU Events +=============== + +.. table:: PMU Events in UMMU and Their Meanings + + +---------------------------------+-------------------------------------------------------------+ + | Event | Meaning | + +=================================+=============================================================+ + | kv_table_rd_average_latency | Average bus latency for reading key-value (KV) and | + | | content-addressable memory (CAM) tables during the | + | | conversion from DstEID to tecte_tag. | + +---------------------------------+-------------------------------------------------------------+ + | swif_cmd_send_num | Command count generated by SWIF. | + +---------------------------------+-------------------------------------------------------------+ + | swif_dvm_sync_latency | Average latency during execution of Sync commands issued | + | | by SWIF DVM. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_ns_sync_latency | Average latency during execution of Sync commands issued | + | | by the SWIF KCMD non-secure queue. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_s_sync_latency | Average latency during execution of Sync commands issued | + | | by the SWIF KCMD secure queue. 
| + +---------------------------------+-------------------------------------------------------------+ + | swif_ucmd_sync_latency | Average latency during execution of Sync commands issued | + | | by SWIF UCMD. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_plb_cache_hit_rate | The hit rate observed during table lookups in the TBU PLB | + | | table. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_tlb_cache_hit_rate | The hit rate observed during table lookups in the TBU TLB | + | | table. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_cntx_cache_miss_num | The number of cache misses observed in the TCU context | + | | cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_gpc_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU GPC cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_gpc_req_latency | The latency observed during GPC lookup operations in the | + | | TCU GPC module. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU PPTW cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_req_latency | The latency observed during permission-based PTW lookup | + | | operations in the TCU PPTW module. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_req_num | PPTW Request Count. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_tptw_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU TPTW cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_tptw_req_latency | The latency observed during PTW lookup operations in the | + | | TCU TPTW module. | + +---------------------------------+-------------------------------------------------------------+ + | ubif_kv_cache_hit_rate | The hit rate observed during access operations in the | + | | UBIF KV cache. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_req_average_latency | The average latency of table lookup requests during system | + | | operation. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_req_rate | The rate of table lookup requests during system operation. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_rsp_rate | The rate of table lookup results during system operation. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_ptw_pack_rate | The rate of address translation table lookup requests sent | + | | by TBU RAB to TCU. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_pptw_pack_rate | The rate of permission table lookup requests sent by TBU | + | | RAB to TCU. 
| + +---------------------------------+-------------------------------------------------------------+ + | tbu_ptw_latency | Average end-to-end latency of PTW requests from TBU RAB. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_pptw_latency | Average end-to-end latency of PPTW requests from TBU RAB. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_rab_buf_use_rate | Buffer utilization rate of TBU RAB. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_gpc_sync_latency | Average execution latency of Sync commands issued by the | + | | SWIF KCMD GPC queue, excluding those via the DVM interface. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_realm_sync_latency | Average execution latency of Sync commands issued by the | + | | SWIF KCMD REALM queue, excluding those via the DVM | + | | interface. | + +---------------------------------+-------------------------------------------------------------+ diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst index 5738694649be..0f9472ba6451 100644 --- a/Documentation/driver-api/ub/index.rst +++ b/Documentation/driver-api/ub/index.rst @@ -15,5 +15,6 @@ The Linux UnifiedBus implementer's API guide ubfi ubus + ummu-core ubase cdma diff --git a/Documentation/driver-api/ub/ummu-core.rst b/Documentation/driver-api/ub/ummu-core.rst new file mode 100644 index 000000000000..7bd07e0e1aff --- /dev/null +++ b/Documentation/driver-api/ub/ummu-core.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +UMMU Core Support Library +--------------------------- + +.. kernel-doc:: include/linux/ummu_core.h + :functions: diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 0a3973b98512..c9366b0608dc 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -14,4 +14,5 @@ UnifiedBus Subsystem ubase/index ubfi/index ubus/index + ummu-core cdma/index diff --git a/Documentation/ub/ummu/index.rst b/Documentation/ub/ummu/index.rst new file mode 100644 index 000000000000..21360586e1f4 --- /dev/null +++ b/Documentation/ub/ummu/index.rst @@ -0,0 +1,12 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +======================= +UB UMMU +======================= + +.. toctree:: + :maxdepth: 2 + :numbered: + + ummu + ummu-core diff --git a/Documentation/ub/ummu/ummu-core.rst b/Documentation/ub/ummu/ummu-core.rst new file mode 100644 index 000000000000..6a16bbaa641f --- /dev/null +++ b/Documentation/ub/ummu/ummu-core.rst @@ -0,0 +1,128 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +====================================== +UMMU-CORE +====================================== + +:Authors: - Yanlong Zhu + +Introduction +============ +The Unified Bus Memory Management Unit (abbreviated as UMMU) is a component +that provides memory address mapping and access permission verification +during memory access processes. +It supports the sharing of memory resources between UBPU (UB Processing Units) +and ensures legitimate access to memory. + +The UMMU-Core is designed to work with the Linux IOMMU framework, as an +extension, providing the necessary interfaces to integrate with the system. +To maintain flexibility in deployment, the UMMU-Core can be compiled as a +loadable kernel module or built-in kernel image. 
+
+EID Management
+--------------
+
+UMMU uses the DstEID, the TokenID, and the UBA (Unified Bus Address) as
+inputs to determine whether the entity is valid and which address domain it
+should access.
+
+Every UB entity must register its EID (Entity ID) with the UB domain to
+communicate with other entities. UMMU-Core provides the
+:c:func:`ummu_core_add_eid()` and :c:func:`ummu_core_del_eid()` functions to
+manage EIDs.
+
+In some cases, UB devices may register before all UMMU devices. To handle
+this, an EID cache list is used to temporarily save EIDs. When an UMMU
+device registers as the global core device, UMMU-Core flushes the cached
+EIDs to it. Thread safety is guaranteed by UMMU-Core. For detailed
+information, refer to the `UB-Base-Specification-2.0`_.
+
+.. _UB-Base-Specification-2.0: https://www.unifiedbus.com/
+
+TokenID Management
+------------------
+
+Each UB entity has multiple address spaces, such as DMA space, SVA space,
+and others. The TokenID identifies the address space associated with each
+entity.
+
+UMMU-Core introduces the tdev (TID Device), a pseudo-device used to abstract
+the concept of a TID. It also supports UMMU driver functionality, enabling
+driver management. The tdev can be used to allocate and grant memory address
+spaces. When a tdev is released, all associated resources are freed.
+
+UMMU-Core acts as the TID manager in the UB system, offering TID allocation
+strategies and TID allocation APIs to the UMMU driver.
+
+UMMU-Core supports multiple TID allocation strategies:
+
+- TRANSPARENT:
+    The TID is compatible with the global PASID (Process Address Space ID),
+    enabling seamless integration with system-wide address space management.
+- ASSIGNED:
+    A pre-allocated TID, assigned from an external framework or management
+    system.
+- NORMAL:
+    The default TID allocation strategy, suitable for the majority of use
+    cases.
+
+UMMU Device Registration
+------------------------
+
+The UMMU device registration is performed in two steps. An UMMU device must
+implement the `ummu_core_device` interface and initialize it using the
+:c:func:`ummu_core_device_init()` function. This function initializes the
+core device and allocates a dedicated TID manager to handle TID operations.
+
+Multiple UMMU devices can register with UMMU-Core through the
+:c:func:`ummu_core_device_register()` function. However, only the global
+core device takes charge of all UB device requests, such as the
+:c:func:`add_eid()` and :c:func:`del_eid()` operations. A registration
+sketch is shown after the diagram below.
+
+.. code-block:: none
+
+                            +-------------------+
+                            |  IOMMU Framework  |
+                            +---------+---------+
+                                      |
+                           +----------+---------+
+                           | Global Core Device |
+                           +----------+---------+
+                                      |
+   +---------------------+-----------+-----------+---------------------+
+   |                     |                       |                     |
+ +-------------------+ +-------------------+ +-------------------+     +-------------------+
+ |   Core Device 0   | |   Core Device 1   | |   Core Device 2   | ... |   Core Device x   |
+ +-------------------+ +-------------------+ +-------------------+     +-------------------+
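+
+For illustration, a minimal registration sketch is shown below. The
+``my_``-prefixed names are hypothetical; only the
+:c:func:`ummu_core_device_init()` and :c:func:`ummu_core_device_register()`
+calls, the ``ummu_core_init_args`` fields, and ``REGISTER_TYPE_GLOBAL`` come
+from the UMMU-Core API in include/linux/ummu_core.h.
+
+.. code-block:: c
+
+    #include <linux/ummu_core.h>
+
+    /* Hypothetical ops tables; a real driver fills these in. */
+    static const struct iommu_ops my_iommu_ops;
+    static const struct ummu_core_ops my_core_ops;
+    static struct ummu_core_device my_ummu;
+
+    static int my_ummu_setup(void)
+    {
+        struct ummu_core_init_args args = {
+            .iommu_ops = &my_iommu_ops, /* mandatory */
+            .core_ops  = &my_core_ops,  /* optional: ummu core ops capability */
+        };
+        int ret;
+
+        ret = ummu_core_device_init(&my_ummu, &args);
+        if (ret)
+            return ret;
+
+        /* The instance serving all UB device requests registers as global. */
+        ret = ummu_core_device_register(&my_ummu, REGISTER_TYPE_GLOBAL);
+        if (ret)
+            ummu_core_device_deinit(&my_ummu);
+
+        return ret;
+    }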
+
+Support KSVA mode
+-----------------
+
+KSVA (Kernel-space Shared Virtual Addressing) is not supported in the
+current IOMMU framework, as it maps the entire kernel address space to
+devices, which may cause critical errors.
+
+By leveraging isolated address space IDs and fine-grained permission
+controls, KSVA mode can restrict each device to accessing only the
+authorized address space.
+
+To manage the access permissions of each PASID, the IOMMU can implement a
+permission checking mechanism. We abstract the permission management
+operations into four fundamental types:
+
+- grant:
+    Grant access to a specified memory address range with defined
+    permissions (e.g., read, write, execute).
+- ungrant:
+    Revoke previously granted access to a memory address range, invalidating
+    the device's permissions for that region.
+- plb_sync_all:
+    Synchronize the PLB (Permission Lookaside Buffer) for all registered
+    PASIDs, ensuring global consistency of permission state across the IOMMU.
+- plb_sync:
+    Synchronize the PLB for a specific PASID and memory range, minimizing
+    latency while maintaining access control integrity.
+
+These operations are integrated into the `iommu_domain` as part of the
+`iommu_perm_ops` interface.
+
+UMMU SVA maintains a set of permission tables and page tables for each TID.
+These resources can be allocated via the :c:func:`alloc_tid()` operation.
+Once a TID is assigned, read and write permissions for specific virtual
+memory address ranges can be granted or ungranted.
+
+To access granted memory address ranges, permission verification is
+required.
diff --git a/Documentation/ub/ummu/ummu.rst b/Documentation/ub/ummu/ummu.rst
new file mode 100644
index 000000000000..b4f39749ff79
--- /dev/null
+++ b/Documentation/ub/ummu/ummu.rst
@@ -0,0 +1,134 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========
+UMMU Driver
+===========
+
+UMMU Functionality
+==================
+
+The UMMU driver implements IOMMU functionality, enabling address
+translation and access control for DMA transactions initiated by
+peripheral devices.
+
+UMMU plays a critical role in system virtualization, device isolation,
+and secure DMA address translation.
+
+In Shared Virtual Addressing (SVA) scenarios, UMMU enforces permission
+checks to protect data within the shared address space, ensuring access
+integrity and confidentiality.
+
+UMMU performs address translation and permission checking using input
+parameters derived from the UB Memory Descriptor (EID + TokenID + UBA).
+
+For detailed information on the UB Memory Descriptor format and semantics,
+refer to the `UB-Base-Specification-2.0`_.
+
+.. _UB-Base-Specification-2.0: https://www.unifiedbus.com/
+
+The functionality of UMMU is primarily organized into the following three
+core components:
+
+Configuration Table Lookup
+--------------------------
+
+The configuration data for address translation and permission checking in
+UMMU is stored in memory and organized into two levels of configuration
+tables: TECT (Target Entity Configuration Table) and TCT (Target Context
+Table).
+
+- **TECT (Target Entity Configuration Table)**:
+  UMMU uses the DstEID to locate the corresponding TECT entry. This entry
+  primarily contains local entity information and serves as a storage
+  location for the entry points of the TCT and the Stage 2 address
+  translation tables.
+
+- **TCT (Target Context Table)**:
+  UMMU uses the TokenID to locate the corresponding TCT entry. This entry
+  describes the address space-level information, which may have a
+  granularity equal to or finer than that of the process level. The TCT
+  entry primarily stores the base addresses of the Stage 1 address
+  translation table and the MAPT (Memory Address Permission Table) used for
+  SVA mode permission checking.
+
+Address Translation
+-------------------
+
+UMMU uses the EID and TokenID to locate the corresponding entries in the
+TECT (Target Entity Configuration Table) and TCT (Target Context Table).
+Based on the configuration table entries, it determines the base address +of the page table. It then uses the UBA and the page table base address to +perform the page table entry lookup and complete the address translation. + +In DMA scenarios, UMMU uses separate Stage 1 and Stage 2 translation +tables to support multiple-stage address translation. + +In user-space SVA scenarios, UMMU enables the device to directly access +the process's virtual address space. Similarly, kernel-space SVA allows +the device to access kernel-level virtual memory, enabling efficient data +sharing between the device and the kernel. + +Permission Checking +------------------- + +In SVA scenarios, UMMU performs permission checks to ensure the security +of the address space. + +UMMU performs permission checking in parallel with address translation. +After retrieving the TECT and TCT entries, if permission checking is +enabled for the currently accessed TECT entity, UMMU can obtain the MAPT +(Memory Address Permission Table) entry from the TCT entry. UMMU then +retrieves the permission information for the target memory from the MAPT, +compares it with the permissions specified in the memory access request, +and determines whether the access passes the permission check. + +The permission checking feature enables fine-grained control over memory +segment access, allowing the system to authorize or deauthorize specific +memory regions. It is recommended to enable the permission checking +feature to enforce security policies and protect the SVA address space +from unauthorized access. + +UMMU Driver Initialization +========================== + +When the UMMU driver detects an UMMU-capable platform device, it invokes +the probe function `ummu_device_probe()`. This function identifies the +device's hardware capabilities, allocates queues, configuration tables, +and interrupt handlers, and initializes the associated resources. + +UMMU Device Registration +======================== + +After the UMMU device completes its initialization, it is registered with +the UMMU framework. The UB system supports multiple UMMU devices within a +single chip. The UMMU framework abstracts a Logic UMMU device to uniformly +manage multiple physical UMMU devices. Once wrapped by the framework, the +Logic UMMU is ultimately registered with the IOMMU framework. + +In addition to calling the `struct iommu_ops` registered by individual UMMU +devices, the Logic UMMU leverages the extended operation set `struct +ummu_core_ops` provided by the UMMU framework to uniformly manage all +underlying UMMU device instances. This includes sharing configuration and +page table information across devices, and synchronizing invalidation +operations to ensure consistent table lookup results across the entire +device set. + +.. code-block:: none + + +-------------------+ + | IOMMU Framework | + +-------------------+ + ^ + | + Register + | + +--------------------+ + | UMMU-CORE Framework| + +--------------------+ + ^ + | + Register + | + +----------------+ +----------------+ +----------------+ + | ummu device 0 | | ummu device 1 | ... | ummu device x | + +----------------+ +----------------+ +----------------+ diff --git a/Documentation/userspace-api/ummu_core.rst b/Documentation/userspace-api/ummu_core.rst new file mode 100644 index 000000000000..79d9dd7a5740 --- /dev/null +++ b/Documentation/userspace-api/ummu_core.rst @@ -0,0 +1,103 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. 
_ummu_core:
+
+=======================
+UMMU_CORE Userspace API
+=======================
+
+The UMMU UAPI provides APIs that enable communication between user-space
+components and kernel-space components. The primary use case is Shared
+Virtual Addressing (SVA).
+
+.. contents:: :local:
+
+Functionalities
+===============
+Only the kernel-mode driver exposes the APIs. The supported user-kernel APIs
+are as follows:
+
+1. Allocate/Free a TID
+2. Send one or more PLBI commands
+3. Map or unmap resources, including MAPT blocks and command queues
+
+Interfaces
+==========
+Although the data structures defined in the UMMU_CORE UAPI are self-contained,
+no user-facing API functions are provided. Instead, the UMMU_CORE UAPI is
+designed to work with the UMMU_CORE driver.
+
+Upon loading, the UMMU_CORE driver registers a TID device and sets up its
+operation function table. The supported operations include open, release,
+map, and ioctl.
+
+Datastructures and Definitions
+------------------------------
+1. struct ummu_token_info: stores token information for a shared-memory segment.
+
+   - input: specifies the token generation mode. If input is 0, the tokenVal
+     field is used as the token value. If input is 1, the UMMU library
+     generates a random token value, and tokenVal is ignored.
+   - tokenVal: the token value to use when input is 0.
+
+2. enum ummu_mapt_perm: access permissions for a shared-memory segment
+
+   - MAPT_PERM_W: write only
+   - MAPT_PERM_R: read only
+   - MAPT_PERM_RW: read and write
+   - MAPT_PERM_ATOMIC_W: atomic write only
+   - MAPT_PERM_ATOMIC_R: atomic read only
+   - MAPT_PERM_ATOMIC_RW: atomic read and write
+
+3. enum ummu_mapt_mode: Memory Address Permission Table mode
+
+   - MAPT_MODE_ENTRY: only one memory address segment can be managed per TID.
+   - MAPT_MODE_TABLE: multiple memory address segments can be managed per TID.
+
+4. enum ummu_ebit_state:
+
+   - UMMU_EBIT_OFF: disable ebit check
+   - UMMU_EBIT_ON: enable ebit check
+
+5. definitions:
+
+   - TID_DEVICE_NAME: a character device that enables user-mode processes to
+     interact with hardware or software through system calls.
+   - UMMU_IOCALLOC_TID: operation code for allocating a TID.
+   - UMMU_IOCFREE_TID: operation code for freeing a TID.
+   - UMMU_IOCPLBI_VA: operation code to flush the PLB cache for a specific
+     virtual address.
+   - UMMU_IOCPLBI_ALL: operation code to flush the PLB cache for all virtual
+     addresses.
+
+Descriptions and Examples
+-------------------------
+1. allocate/free tid
+
+The input parameter is *struct ummu_tid_info*. Below is an example:
+::
+
+    struct ummu_tid_info info = {};
+
+    int fd = open("/dev/ummu/tid", O_RDWR | O_CLOEXEC);
+    if (fd < 0)
+        return -errno;
+    ioctl(fd, UMMU_IOCALLOC_TID, &info);
+    ioctl(fd, UMMU_IOCFREE_TID, &info);
+    close(fd);
+
+The PLBI command operation is performed via the ioctl interface,
+using the operation codes UMMU_IOCPLBI_VA or UMMU_IOCPLBI_ALL.
+
+2. map resources
+
+This interface is used in two scenarios:
+(1) Creating a new MAPT block
+(2) Initializing user-mode queues
+
+For example:
+::
+
+    mmap(NULL, size, prot, flags, fd, PA);
+
+On success, this returns a virtual address.
+
+3. unmap resources
+
+This interface is used in two scenarios:
+(1) Clearing MAPT blocks
+(2) When a user-mode process exits, all associated MAPT blocks and user-mode
+queue resources are cleared.
+
+For example:
+::
+
+    munmap(buf, BLOCK_SIZE_4K);
diff --git a/include/linux/ummu_core.h b/include/linux/ummu_core.h
index eda283f7b524..29d0952e35e7 100644
--- a/include/linux/ummu_core.h
+++ b/include/linux/ummu_core.h
@@ -25,18 +25,41 @@
 #define UMMU_DEV_READ 2
 #define UMMU_DEV_ATOMIC 4
 
+/**
+ * enum eid_type - the eid type
+ *
+ * @EID_NONE: normal EID type
+ * @EID_BYPASS: ummu address translations are bypassed
+ * @EID_TYPE_MAX: max of eid type
+ */
 enum eid_type {
 	EID_NONE = 0,
 	EID_BYPASS,
 	EID_TYPE_MAX,
};
 
+/**
+ * enum tid_alloc_mode - tid allocation modes
+ *
+ * @TID_ALLOC_TRANSPARENT: use pasid as tid, no need to assign again
+ * @TID_ALLOC_ASSIGNED: pre-allocated tid, no need to assign again
+ * @TID_ALLOC_NORMAL: allocate a tid normally
+ */
 enum tid_alloc_mode {
 	TID_ALLOC_TRANSPARENT = 0,
 	TID_ALLOC_ASSIGNED = 1,
 	TID_ALLOC_NORMAL = 2,
};
 
+/**
+ * enum ummu_resource_type - SVA resource type
+ *
+ * @UMMU_BLOCK: mapt block
+ * @UMMU_QUEUE: permission queue
+ * @UMMU_QUEUE_LIST: permission queue for multi ummu
+ * @UMMU_CNT: ummu count
+ * @UMMU_TID_RES: tid resource
+ */
 enum ummu_resource_type {
 	UMMU_BLOCK,
 	UMMU_QUEUE,
@@ -51,6 +74,13 @@ enum default_tid_ops_types {
 	TID_OPS_MAX,
};
 
+/**
+ * enum ummu_register_type - ummu device register type
+ *
+ * @REGISTER_TYPE_GLOBAL: register as the global iommu device
+ * @REGISTER_TYPE_NORMAL: register to the iommu framework
+ * @REGISTER_TYPE_MAX: max of ummu device register type
+ */
 enum ummu_register_type {
 	REGISTER_TYPE_GLOBAL,
 	REGISTER_TYPE_NORMAL,
@@ -62,6 +92,12 @@ struct ummu_tid_manager;
 struct ummu_base_domain;
 struct ummu_core_device;
 
+/**
+ * struct block_args - param related to mapt block
+ * @index: mapt block index
+ * @block_size_order: block size order, in units of PAGE_SIZE
+ * @out_addr: allocated physical address
+ */
 struct block_args {
 	u32 index;
 	int block_size_order;
@@ -75,6 +111,12 @@ struct block_args {
 	KABI_RESERVE(6)
};
 
+/**
+ * struct queue_args - param related to queue
+ * @pcmdq_base: base address of command queue
+ * @pcplq_base: base address of completion queue
+ * @ctrl_page: base address of permission queue
+ */
 struct queue_args {
 	phys_addr_t pcmdq_base;
 	phys_addr_t pcplq_base;
@@ -87,6 +129,13 @@ struct queue_args {
 	KABI_RESERVE(5)
};
 
+/**
+ * struct tid_args - param related to tid
+ * @pcmdq_order: size order of the command queue
+ * @pcplq_order: size order of the completion queue
+ * @blk_exp_size: block size exponent, in units of PAGE_SIZE
+ * @hw_cap: hardware capabilities
+ */
 struct tid_args {
 	u8 pcmdq_order;
 	u8 pcplq_order;
@@ -100,6 +149,16 @@ struct tid_args {
 	KABI_RESERVE(5)
};
 
+/**
+ * struct resource_args - SVA resource related args
+ * @type: SVA resource type
+ * @block: arg related to mapt block
+ * @queue: arg related to mapt queue for UMMU_QUEUE
+ * @queues: arg related to mapt queue for UMMU_QUEUE_LIST in multi ummu mode
+ * @tid_res: tid resource
+ * @ummu_cnt: returned number of ummu devices
+ * @block_index: block index for release
+ */
 struct resource_args {
 	enum ummu_resource_type type;
 	union {
@@ -117,6 +176,10 @@ struct resource_args {
 	KABI_RESERVE(3)
};
 
+/**
+ * struct ummu_param - param related to tid
+ * @mode: mapt mode: table mode or entry mode
+ */
 struct ummu_param {
 	enum ummu_mapt_mode mode;
 
@@ -129,6 +192,14 @@ struct ummu_param {
 	KABI_RESERVE(7)
};
 
+/**
+ * struct ummu_tid_param - param related to alloc tid
+ * @device: device pointer
+ * @mode: mapt mode: table mode or entry mode
+ * @alloc_mode: tid alloc mode
+ * @assign_tid: assigned tid, for TID_ALLOC_TRANSPARENT or TID_ALLOC_ASSIGNED
+ * @domain_type: more
about domain-types in iommu.h
+ */
 struct ummu_tid_param {
 	struct device *device;
 	enum ummu_mapt_mode mode;
@@ -143,6 +214,13 @@ struct ummu_tid_param {
 	KABI_RESERVE(5)
};
 
+/**
+ * struct tdev_attr - attr for tdev
+ * @name: tdev name
+ * @dma_attr: dma mode
+ * @priv: private data pointer
+ * @priv_len: private data length
+ */
 struct tdev_attr {
 	const char *name;
 	enum dev_dma_attr dma_attr;
@@ -189,7 +267,7 @@ struct ummu_core_ops {
};
 
 /**
- * ummu-core defined iommu device type
+ * struct ummu_core_device - ummu-core defined iommu device type
  * @list: used to link all ummu-core devices
  * @tid_manager: tid domain manager.
  * @iommu: iommu prototype
@@ -212,6 +290,14 @@ struct ummu_core_device {
 	KABI_RESERVE(8)
};
 
+/**
+ * struct ummu_base_domain - domain info
+ * @domain: iommu domain
+ * @core_dev: ummu device
+ * @parent: pointer to the parent domain
+ * @list: node in the domain list
+ * @tid: token id
+ */
 struct ummu_base_domain {
 	struct iommu_domain domain;
 	struct ummu_core_device *core_dev;
@@ -224,6 +310,14 @@ struct ummu_base_domain {
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
};
+
+/**
+ * struct tid_ops - ummu ops for normal use, expanded from iommu_ops
+ * @alloc_tid_manager: alloc manager for tid
+ * @free_tid_manager: free all tids and the tid manager
+ * @alloc_tid: alloc tid func
+ * @free_tid: free tid func
+ */
 struct tid_ops {
 	struct ummu_tid_manager *(*alloc_tid_manager)(
 		struct ummu_core_device *core_device, u32 min_tid,
@@ -239,6 +333,13 @@ struct tid_ops {
 	KABI_RESERVE(4)
};
 
+/**
+ * struct ummu_tid_manager - assigned tid manager
+ * @ops: ummu tid ops for normal use, expanded from iommu_ops
+ * @token_ids: xarray of assigned tids
+ * @min_tid: min tid range for alloc
+ * @max_tid: max tid range for alloc
+ */
 struct ummu_tid_manager {
 	const struct tid_ops *ops;
 	struct xarray token_ids;
@@ -252,6 +353,12 @@ struct ummu_tid_manager {
 	KABI_RESERVE(4)
};
 
+/**
+ * struct ummu_core_tid_args - tid related args
+ * @tid_ops: ummu tid ops for normal use, expanded from iommu_ops
+ * @max_tid: max tid range for alloc
+ * @min_tid: min tid range for alloc
+ */
 struct ummu_core_tid_args {
 	const struct tid_ops *tid_ops;
 	u32 max_tid;
@@ -265,6 +372,13 @@ struct ummu_core_tid_args {
 	KABI_RESERVE(6)
};
 
+/**
+ * struct ummu_core_init_args - ummu core init args
+ * @core_ops: set when the ummu device needs ummu core ops capability
+ * @tid_args: parameters related to tid
+ * @iommu_ops: iommu_ops is mandatory
+ * @hwdev: related hwdev
+ */
 struct ummu_core_init_args {
 	const struct ummu_core_ops *core_ops;
 	struct ummu_core_tid_args tid_args;
@@ -276,7 +390,16 @@ struct ummu_core_init_args {
 	KABI_RESERVE(3)
};
 
-/* Memory traffic monitoring of the UB device */
+/**
+ * struct ummu_mpam - Memory traffic monitoring of the UB device
+ * @flags: flags, see constants above
+ * @eid: entity id
+ * @tid: tid
+ * @partid: mpam partition id
+ * @pmg: mpam pmg
+ * @s1mpam: 0 for ste mpam, 1 for cd mpam
+ * @user_mpam_en: 0 for ummu mpam, 1 for user mpam
+ */
 struct ummu_mpam {
#define UMMU_DEV_SET_MPAM	(1 << 0)
#define UMMU_DEV_GET_MPAM	(1 << 1)
@@ -326,7 +449,7 @@ static inline void tdev_attr_init(struct tdev_attr *attr)
 #ifdef CONFIG_UB_UMMU_CORE
 /* EID API */
 /**
- * Add a new EID to the UMMU.
+ * ummu_core_add_eid() - Add a new EID to the UMMU.
  * @guid: entity/device identity.
  * @eid: entity id to be added.
  * @type: eid type.
@@ -335,7 +458,7 @@ static inline void tdev_attr_init(struct tdev_attr *attr)
  */
 int ummu_core_add_eid(guid_t *guid, eid_t eid, enum eid_type type);
 /**
- * Delete an EID from the UMMU.
+ * ummu_core_del_eid() - Delete an EID from the UMMU. * @guid: entity/device identity. * @eid: entity id to be deleted. * @type: eid type. @@ -344,7 +467,7 @@ void ummu_core_del_eid(guid_t *guid, eid_t eid, enum eid_type type); /* UMMU IOVA API */ /** - * Allocate a range of IOVA. The input iova size might be aligned. + * dma_alloc_iova() - Allocate a range of IOVA. The input iova size might be aligned. * @dev: related device. * @size: iova size. * @attrs: dma attributes. @@ -358,14 +481,14 @@ struct iova_slot *dma_alloc_iova(struct device *dev, size_t size, size_t *sizep); /** - * Free a range of IOVA. + * dma_free_iova() - Free a range of IOVA. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. */ void dma_free_iova(struct iova_slot *slot); /** - * Fill a range of IOVA. It allocates pages and maps pages to the iova. + * ummu_fill_pages() - Fill a range of IOVA. It allocates pages and maps pages to the iova. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. * @iova: iova start. @@ -376,7 +499,7 @@ void dma_free_iova(struct iova_slot *slot); int ummu_fill_pages(struct iova_slot *slot, dma_addr_t iova, unsigned long nr_pages); /** - * Drain a range of IOVA. It unmaps iova and releases pages. + * ummu_drain_pages() - Drain a range of IOVA. It unmaps iova and releases pages. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. * @iova: iova start. @@ -422,12 +545,15 @@ static inline int ummu_drain_pages(struct iova_slot *slot, dma_addr_t iova, #if IS_ENABLED(CONFIG_UB_UMMU_CORE_DRIVER) /* UMMU SVA API */ /** - * Grant va range permission to sva. + * ummu_sva_grant_range() - Grant va range permission to sva. * @sva: related sva handle. * @va: va start * @size: va size * @perm: permission * @cookie: struct ummu_token_info* + * + * .. code-block:: c + * * if (!cookie) { * do not use cookie check. * } else if (cookie->input == 0) { @@ -437,18 +563,20 @@ static inline int ummu_drain_pages(struct iova_slot *slot, dma_addr_t iova, * } else { * invalid para * } - * * Return: 0 on success, or an error. */ int ummu_sva_grant_range(struct iommu_sva *sva, void *va, size_t size, int perm, void *cookie); /** - * Ungrant va range permission from sva. + * ummu_sva_ungrant_range() - Ungrant va range permission from sva. * @sva: related sva handle. * @va: va start * @size: va size * @cookie: va related cookie,struct ummu_token_info* + * + * .. code-block:: c + * * if (!cookie) { * do not use cookie check. * } else { @@ -461,7 +589,7 @@ int ummu_sva_ungrant_range(struct iommu_sva *sva, void *va, size_t size, void *cookie); /** - * Get tid from dev or sva. + * ummu_get_tid() - Get tid from dev or sva. * @dev: related device. * @sva: if sva is set, return sva mode related tid; otherwise * return the dma mode tid. @@ -472,7 +600,7 @@ int ummu_sva_ungrant_range(struct iommu_sva *sva, void *va, size_t size, int ummu_get_tid(struct device *dev, struct iommu_sva *sva, u32 *tidp); /** - * Get iommu_domain by tid and dev. + * ummu_core_get_domain_by_tid() - Get iommu_domain by tid and dev. * @dev: related device. * @tid: tid * @@ -482,7 +610,7 @@ struct iommu_domain *ummu_core_get_domain_by_tid(struct device *dev, u32 tid); /** - * Check whether the UMMU works in ksva mode. + * ummu_is_ksva() - Check whether the UMMU works in ksva mode. * @domain: related iommu domain * * Return: true or false. 
@@ -490,7 +618,7 @@ struct iommu_domain *ummu_core_get_domain_by_tid(struct device *dev,
 bool ummu_is_ksva(struct iommu_domain *domain);
 
 /**
- * Check whether the UMMU works in sva mode.
+ * ummu_is_sva() - Check whether the UMMU works in sva mode.
  * @domain: related iommu domain
  *
  * Return: true or false.
@@ -498,10 +626,13 @@ bool ummu_is_ksva(struct iommu_domain *domain);
 bool ummu_is_sva(struct iommu_domain *domain);
 
 /**
- * Bind device to a process mm.
+ * ummu_sva_bind_device() - Bind device to a process mm.
  * @dev: related device.
  * @mm: process memory management.
  * @drvdata: ummu_param related to tid.
+ *
+ * .. code-block:: c
+ *
  * if (!drvdata) {
  *	sva is in the bypass mapt mode.
  * } else {
@@ -514,7 +645,7 @@ struct iommu_sva *ummu_sva_bind_device(struct device *dev,
 				       struct mm_struct *mm,
 				       struct ummu_param *drvdata);
 /**
- * Bind device to kernel mm.
+ * ummu_ksva_bind_device() - Bind device to kernel mm.
  * @dev: related device.
  * @drvdata: ummu_param related to tid. ksva doesn't support bypass mapt.
  *
@@ -527,50 +658,55 @@ void ummu_ksva_unbind_device(struct iommu_sva *handle);
 
 /* UMMU CORE API */
 /**
- * Initialiase ummu core device.
+ * ummu_core_device_init() - Initialize ummu core device.
  * @ummu_core: ummu core device.
  * @args: ummu core init args.
+ *
  * UMMU driver should carefully choose the args based on its requirement.
  * iommu_ops is mandatory.
  * a. the ummu device need tid allocation capability.
+ *
  *    a.1 default tid strategies satisfy the ummu device
  *	  -> set tid_ops form ummu_core_tid_ops[TID_OPS_MAX]
  *    a.2 default tid strategies do not satisfy the ummu device
  *	  -> implement a new tid_ops in the driver.
+ *
  * b. the ummu device need ummu core ops capability.
  *    -> set core_ops.
+ *
 * c. the ummu device has related hwdev.
 *    -> set hwdev.
 */
 int ummu_core_device_init(struct ummu_core_device *ummu_core,
			   struct ummu_core_init_args *args);
/**
- * Deinitialiase ummu core device.
+ * ummu_core_device_deinit() - Deinitialize ummu core device.
 * @ummu_core: ummu core device.
 */
void ummu_core_device_deinit(struct ummu_core_device *ummu_core);

/**
- * Register ummu core device to the ummu framework.
+ * ummu_core_device_register() - Register ummu core device to the ummu framework.
 * @ummu_core: ummu core device.
 * @type: register type.
-	  REGISTER_TYPE_GLOBAL: register the ummu device as the global device,
-	  The ummu device will be the device handle all request.
-	  e.g. 1. add_eid/del_eid 2. provide ubus iommu ops. etc.
-
-	  REGISTER_TYPE_NORMAL: follow the iommu_device register. will not be
-	  related to the global device. it work as a normal iommu device.
+ *
+ * REGISTER_TYPE_GLOBAL: register the ummu device as the global device.
+ * The ummu device will be the device that handles all requests,
+ * e.g. add_eid/del_eid, providing the ubus iommu ops, etc.
+ *
+ * REGISTER_TYPE_NORMAL: follows the iommu_device registration; it will not
+ * be related to the global device and works as a normal iommu device.
 */
 int ummu_core_device_register(struct ummu_core_device *ummu_core,
			      enum ummu_register_type type);
/**
- * Unregister ummu core device from the ummu framework.
+ * ummu_core_device_unregister() - Unregister ummu core device from the ummu framework.
 * @dev: the ummu_core device tid belongs to.
 */
void ummu_core_device_unregister(struct ummu_core_device *dev);

/**
- * Invalidate ummu global configuration by tid.
+ * ummu_core_invalidate_cfg_table() - Invalidate ummu global configuration by tid.
 * @tid: tid
 * Return: 0 on success, or an error.
*/ @@ -578,7 +714,7 @@ int ummu_core_invalidate_cfg_table(u32 tid); /* UMMU TID API */ /** - * Alloc a tid from ummu framework, and alloc related pasid. + * ummu_core_alloc_tid() - Alloc a tid from ummu framework, and alloc related pasid. * @dev: the allocated tid will be attached to. * @drvdata: ummu_tid_param related to tid * @tidp: the allocated tid returned here. @@ -589,14 +725,14 @@ int ummu_core_alloc_tid(struct ummu_core_device *dev, struct ummu_tid_param *drvdata, u32 *tidp); /** - * Free a tid to ummu framework. + * ummu_core_free_tid() - Free a tid to ummu framework. * @dev: the ummu_core device tid belongs to. * @tid: token id. */ void ummu_core_free_tid(struct ummu_core_device *dev, u32 tid); /** - * Get mapt_mode related to the tid. + * ummu_core_get_mapt_mode() - Get mapt_mode related to the tid. * @dev: the ummu_core device tid belongs to. * @tid: token id. * @@ -606,7 +742,7 @@ enum ummu_mapt_mode ummu_core_get_mapt_mode(struct ummu_core_device *dev, u32 tid); /** - * Get device related to the tid. + * ummu_core_get_device() - Get device related to the tid. * It will increase the ref count of the device. * @dev: the ummu_core device tid belongs to. * @tid: token id. @@ -617,7 +753,7 @@ struct device *ummu_core_get_device(struct ummu_core_device *dev, u32 tid); void ummu_core_put_device(struct device *dev); /** - * Allocate a virtual device to hold a tid. + * ummu_core_alloc_tdev() - Allocate a virtual device to hold a tid. * @attr: attributes of tdev * @ptid: tid pointer * Return: device on success or NULL error. @@ -625,7 +761,7 @@ void ummu_core_put_device(struct device *dev); struct device *ummu_core_alloc_tdev(struct tdev_attr *attr, u32 *ptid); /** - * Free the virtual device + * ummu_core_free_tdev() - Free the virtual device * @dev: Return value allocated by ummu_core_alloc_tdev * * Return: 0 on success or an error. @@ -633,7 +769,7 @@ struct device *ummu_core_alloc_tdev(struct tdev_attr *attr, u32 *ptid); int ummu_core_free_tdev(struct device *dev); /** - * Get ummu_tid_type related to the tid. + * ummu_core_get_tid_type() - Get ummu_tid_type related to the tid. * @dev: the ummu_core device tid belongs to. * @tid: token id. * @tid_type: out param, ummu_tid_type -- Gitee From 7714eeb9b0aaac080079126aed2f9a674e20bd00 Mon Sep 17 00:00:00 2001 From: Liming An Date: Thu, 27 Nov 2025 19:13:07 +0800 Subject: [PATCH 141/243] iommu/ummu: Add UMMU devicetree file commit 04493490f0224dd57538943f92ccc108ad3af38c openEuler This patch add ummu devicetree file Signed-off-by: Jingbin Wu Signed-off-by: Liming An --- .../devicetree/bindings/iommu/hisi,ummu.yaml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 Documentation/devicetree/bindings/iommu/hisi,ummu.yaml diff --git a/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml b/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml new file mode 100644 index 000000000000..61d0074f7c4c --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iommu/hisi,ummu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon UMMU Architecture Implementation + +maintainers: + - Jingbin Wu + +description: |+ + UMMU is an IOMMU device that performs address translation and permission checking + using DstEID, TokenID, and UBA as input parameters. 
+
+properties:
+  $nodename:
+    pattern: "^ummu@[0-9a-f]*"
+  compatible:
+    const: ub,ummu
+  index:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      UMMU device index. Used to identify a specific UMMU instance in systems
+      with multiple UMMU devices.
+  msi-parent:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      MSI parent device phandle. Required for MSI interrupt handling.
+
+required:
+  - compatible
+  - index
+  - msi-parent
+
+additionalProperties: false
+
+examples:
+  - |+
+    ummu@0 {
+      compatible = "ub,ummu";
+      index = <0x0>;
+      msi-parent = <&its>;
+    };
-- 
Gitee

From 7714eeb9b0aaac080079126aed2f9a674e20bd00 Mon Sep 17 00:00:00 2001
From: Liming An
Date: Thu, 27 Nov 2025 19:16:22 +0800
Subject: [PATCH 142/243] iommu/ummu: Add UMMU-PMU devicetree file

commit 48ebec01b515a3e8f665bbbd415e721c658d30d8 openEuler

This patch adds the UMMU-PMU devicetree file.

Signed-off-by: Lizhi He Wu
Signed-off-by: Liming An
---
 .../bindings/perf/hisi,ummu-pmu.yaml          | 45 +++++++++++++++++++
 1 file changed, 45 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml

diff --git a/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml b/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml
new file mode 100644
index 000000000000..c16fad1c35fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/hisi,ummu-pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon UMMU Performance Monitor Unit (PMU)
+
+maintainers:
+  - Jingbin Wu
+
+description: |
+  The UMMU includes a PMU (Performance Monitor Unit) to monitor and collect
+  statistics on key hardware events, such as TLB/PLB cache hit rates and
+  lookup latencies.
+
+properties:
+  $nodename:
+    pattern: "^ummu-pmu@[0-9a-f]*"
+  compatible:
+    const: ub,ummu_pmu
+  index:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      PMU device index. Identifies a specific UMMU-PMU instance in
+      multi-UMMU-PMU systems.
+  msi-parent:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      MSI parent device phandle. Required for MSI interrupt handling.
+
+required:
+  - compatible
+  - index
+  - msi-parent
+
+additionalProperties: false
+
+examples:
+  - |
+    ummu-pmu@0 {
+      compatible = "ub,ummu_pmu";
+      index = <0x0>;
+      msi-parent = <&its>;
+    };
-- 
Gitee

From 18a0187f1a08643c72dc6ecfb39daf0cc69c9bca Mon Sep 17 00:00:00 2001
From: JiaWei Kang
Date: Wed, 12 Nov 2025 15:29:26 +0800
Subject: [PATCH 143/243] ub: udma: reset and segment permission issues are
 resolved.

commit 524bc5788973aee8196c2cda37a63a2b38f2a863 openEuler

This patch adds bugfixes to
1. fix the ubase and udma lock circular dependency.
2. fix a segment permission issue.

Signed-off-by: JiaWei Kang
Signed-off-by: zhaolichang <943677312@qq.com>
---
 drivers/ub/urma/hw/udma/Kconfig        |  2 +-
 drivers/ub/urma/hw/udma/udma_ctx.c     |  1 +
 drivers/ub/urma/hw/udma/udma_main.c    |  2 +-
 drivers/ub/urma/hw/udma/udma_segment.c | 26 ++++++++++++++++++++------
 4 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/drivers/ub/urma/hw/udma/Kconfig b/drivers/ub/urma/hw/udma/Kconfig
index fd5d27ef9813..c6d5ca89e7ef 100644
--- a/drivers/ub/urma/hw/udma/Kconfig
+++ b/drivers/ub/urma/hw/udma/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0+
 # Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved.
-menuconfig UB_UDMA
+config UB_UDMA
 	default n
 	tristate "UB UDMA Driver"
 	depends on UB_UBASE && UB_URMA && UB_UMMU_CORE
diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c
index 5f60fca10d86..6f7f1ecef0c5 100644
--- a/drivers/ub/urma/hw/udma/udma_ctx.c
+++ b/drivers/ub/urma/hw/udma/udma_ctx.c
@@ -83,6 +83,7 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev,
 	return &ctx->base;
 
 err_init_ctx_resp:
+	mutex_destroy(&ctx->hugepage_lock);
 	mutex_destroy(&ctx->pgdir_mutex);
 err_unbind_dev:
 	ummu_sva_unbind_device(ctx->sva);
diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c
index cbf773d01c48..be76c20c1ff0 100644
--- a/drivers/ub/urma/hw/udma/udma_main.c
+++ b/drivers/ub/urma/hw/udma/udma_main.c
@@ -1124,8 +1124,8 @@ void udma_remove(struct auxiliary_device *adev)
 {
 	struct udma_dev *udma_dev;
 
-	mutex_lock(&udma_reset_mutex);
 	ubase_reset_unregister(adev);
+	mutex_lock(&udma_reset_mutex);
 	udma_dev = get_udma_dev(adev);
 	if (!udma_dev) {
 		mutex_unlock(&udma_reset_mutex);
diff --git a/drivers/ub/urma/hw/udma/udma_segment.c b/drivers/ub/urma/hw/udma/udma_segment.c
index 90615d1ae2b4..7bb52ed4d5a1 100644
--- a/drivers/ub/urma/hw/udma/udma_segment.c
+++ b/drivers/ub/urma/hw/udma/udma_segment.c
@@ -70,14 +70,28 @@ static void udma_init_seg_cfg(struct udma_segment *seg, struct ubcore_seg_cfg *c
 
 static int udma_u_get_seg_perm(struct ubcore_seg_cfg *cfg)
 {
-	if (cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_ONLY ||
-	    cfg->flag.bs.access & UBCORE_ACCESS_ATOMIC)
+	bool local_only_flag = cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_ONLY;
+	bool atomic_flag = cfg->flag.bs.access & UBCORE_ACCESS_ATOMIC;
+	bool write_flag = cfg->flag.bs.access & UBCORE_ACCESS_WRITE;
+	bool read_flag = cfg->flag.bs.access & UBCORE_ACCESS_READ;
+
+	/* Once ACCESS_LOCAL_ONLY is set, no other access flag may be configured. */
+	if (local_only_flag && !atomic_flag && !write_flag && !read_flag)
+		return UMMU_DEV_ATOMIC | UMMU_DEV_WRITE | UMMU_DEV_READ;
+
+	/* Atomic requires write and read to be configured as well. */
+	if (!local_only_flag && atomic_flag && write_flag && read_flag)
 		return UMMU_DEV_ATOMIC | UMMU_DEV_WRITE | UMMU_DEV_READ;
 
-	if (cfg->flag.bs.access & UBCORE_ACCESS_WRITE)
+	/* Write requires read to be configured as well. */
+	if (!local_only_flag && !atomic_flag && write_flag && read_flag)
 		return UMMU_DEV_WRITE | UMMU_DEV_READ;
 
-	return UMMU_DEV_READ;
+	if (!local_only_flag && !atomic_flag && !write_flag && read_flag)
+		return UMMU_DEV_READ;
+
+	/* All other combinations are illegal. */
+	return 0;
 }
 
 static int udma_sva_grant(struct ubcore_seg_cfg *cfg, struct iommu_sva *ksva)
@@ -245,8 +259,8 @@ struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev,
 	ret = udma_sva_grant(cfg, ksva);
 	if (ret) {
 		dev_err(udma_dev->dev,
-			"ksva grant failed with token policy %d, ret = %d.\n",
-			cfg->flag.bs.token_policy, ret);
+			"ksva grant failed with token policy %d, access %d, ret = %d.\n",
+			cfg->flag.bs.token_policy, cfg->flag.bs.access, ret);
 		goto err_load_ksva;
 	}
 	mutex_unlock(&udma_dev->ksva_mutex);
-- 
Gitee

From 67e673ccb537097eaf22d28c19392367dc29a4f4 Mon Sep 17 00:00:00 2001
From: JiaWei Kang
Date: Wed, 12 Nov 2025 15:57:52 +0800
Subject: [PATCH 144/243] ub: udma: Resolve issues related to stream logout and
 interrupt lock.

commit cfc1a43a32ef15ec77372b0695759cb6e17bc96d openEuler

This patch adds bugfixes to
1. address some review comments.
2. resolve the problem of deregistration with an active stream.
3. switch to
disabling interrupts when jetty is locked. Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 2 +- drivers/ub/urma/hw/udma/udma_ctx.c | 2 +- drivers/ub/urma/hw/udma/udma_eq.c | 81 +++++++++++++------------ drivers/ub/urma/hw/udma/udma_jfc.c | 7 ++- drivers/ub/urma/hw/udma/udma_jfr.c | 5 +- drivers/ub/urma/hw/udma/udma_jfs.c | 5 +- drivers/ub/urma/hw/udma/udma_main.c | 19 +++--- 7 files changed, 65 insertions(+), 56 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index bfa3ed44c381..bdd4617cb4c4 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -8,7 +8,7 @@ #define UDMA_EID_SIZE 16 #define UDMA_CNA_SIZE 16 -#define UDMA_PID_MASK 24 +#define UDMA_PID_MASK 0xFFFFFF #define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 #define UDMA_MAX_UE_IDX 256 diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 6f7f1ecef0c5..ccc3b4905af9 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -381,7 +381,7 @@ void udma_return_u_hugepage(struct udma_context *ctx, void *va) zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len); mmap_write_unlock(current->mm); } else { - dev_warn(ctx->dev->dev, "current mm released.\n"); + dev_warn_ratelimited(ctx->dev->dev, "current mm released.\n"); } if (dfx_switch) diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 53dfb2bdebd5..655714b872a5 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -629,6 +629,9 @@ static int udma_ctrlq_eid_update(struct auxiliary_device *adev, uint8_t service_ } udma_dev = get_udma_dev(adev); + if (udma_dev->status != UDMA_NORMAL) + return udma_ctrlq_send_eid_update_response(udma_dev, seq, 0); + if (len < sizeof(struct udma_ctrlq_eid_out_update)) { dev_err(udma_dev->dev, "msg len(%u) is invalid.\n", len); return udma_ctrlq_send_eid_update_response(udma_dev, seq, -EINVAL); @@ -656,15 +659,18 @@ static int udma_ctrlq_eid_update(struct auxiliary_device *adev, uint8_t service_ return udma_ctrlq_send_eid_update_response(udma_dev, seq, ret); } -static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, - uint16_t len, uint32_t tp_num, - struct udma_ctrlq_check_tp_active_rsp_info *rsp_info) +static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, uint16_t len, + struct udma_ctrlq_check_tp_active_rsp_info **rsp_info, + uint32_t *rsp_info_len) { +#define UDMA_CTRLQ_CHECK_TP_OFFSET 0xFF struct udma_ctrlq_check_tp_active_req_info *req_info = NULL; - uint32_t req_info_len = 0; + uint32_t req_info_len; + uint32_t tp_num; int i; - req_info_len = sizeof(uint32_t) + + tp_num = *((uint32_t *)data) & UDMA_CTRLQ_CHECK_TP_OFFSET; + req_info_len = sizeof(struct udma_ctrlq_check_tp_active_req_info) + sizeof(struct udma_ctrlq_check_tp_active_req_data) * tp_num; if (len < req_info_len) { dev_err(udev->dev, "msg param num(%u) is invalid.\n", tp_num); @@ -675,35 +681,45 @@ static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, return -ENOMEM; memcpy(req_info, data, req_info_len); + *rsp_info_len = sizeof(struct udma_ctrlq_check_tp_active_rsp_info) + + sizeof(struct udma_ctrlq_check_tp_active_rsp_data) * tp_num; + *rsp_info = kzalloc(*rsp_info_len, GFP_KERNEL); + if (!(*rsp_info)) { + *rsp_info_len = 0; + kfree(req_info); + req_info = NULL; + return -ENOMEM; + } + rcu_read_lock(); for (i = 0; i 
< req_info->num; i++) { if (find_vpid(req_info->data[i].pid_flag)) - rsp_info->data[i].result = UDMA_CTRLQ_TPID_IN_USE; + (*rsp_info)->data[i].result = UDMA_CTRLQ_TPID_IN_USE; else - rsp_info->data[i].result = UDMA_CTRLQ_TPID_EXITED; + (*rsp_info)->data[i].result = UDMA_CTRLQ_TPID_EXITED; - rsp_info->data[i].tp_id = req_info->data[i].tp_id; + (*rsp_info)->data[i].tp_id = req_info->data[i].tp_id; } - rsp_info->num = tp_num; + (*rsp_info)->num = tp_num; rcu_read_unlock(); + + if (debug_switch) + udma_dfx_ctx_print(udev, "udma check tp active", (*rsp_info)->data[0].tp_id, + *rsp_info_len / sizeof(uint32_t), (uint32_t *)(*rsp_info)); kfree(req_info); + req_info = NULL; return 0; } -static int udma_ctrlq_check_param(struct udma_dev *udev, void *data, uint16_t len) +static int udma_ctrlq_check_tp_active_param(struct udma_dev *udev, void *data, uint16_t len) { -#define UDMA_CTRLQ_HDR_LEN 12 -#define UDMA_CTRLQ_MAX_BB 32 -#define UDMA_CTRLQ_BB_LEN 32 - if (data == NULL) { dev_err(udev->dev, "data is NULL.\n"); return -EINVAL; } - if ((len < UDMA_CTRLQ_BB_LEN - UDMA_CTRLQ_HDR_LEN) || - len > (UDMA_CTRLQ_BB_LEN * UDMA_CTRLQ_MAX_BB - UDMA_CTRLQ_HDR_LEN)) { + if (len < sizeof(struct udma_ctrlq_check_tp_active_req_info)) { dev_err(udev->dev, "msg data len(%u) is invalid.\n", len); return -EINVAL; } @@ -715,29 +731,17 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, uint8_t service_ver, void *data, uint16_t len, uint16_t seq) { -#define UDMA_CTRLQ_CHECK_TP_OFFSET 0xFF struct udma_ctrlq_check_tp_active_rsp_info *rsp_info = NULL; struct udma_dev *udev = get_udma_dev(adev); struct ubase_ctrlq_msg msg = {}; uint32_t rsp_info_len = 0; - uint32_t tp_num = 0; - int ret_val; int ret; - ret_val = udma_ctrlq_check_param(udev, data, len); - if (ret_val == 0) { - tp_num = *((uint32_t *)data) & UDMA_CTRLQ_CHECK_TP_OFFSET; - rsp_info_len = sizeof(uint32_t) + - sizeof(struct udma_ctrlq_check_tp_active_rsp_data) * tp_num; - rsp_info = kzalloc(rsp_info_len, GFP_KERNEL); - if (!rsp_info) { - dev_err(udev->dev, "check tp mag malloc failed.\n"); - return -ENOMEM; - } - - ret_val = udma_ctrlq_check_tp_status(udev, data, len, tp_num, rsp_info); - if (ret_val) - dev_err(udev->dev, "check tp status failed, ret_val(%d).\n", ret_val); + ret = udma_ctrlq_check_tp_active_param(udev, data, len); + if (ret == 0) { + ret = udma_ctrlq_check_tp_status(udev, data, len, &rsp_info, &rsp_info_len); + if (ret) + dev_err(udev->dev, "check tp status failed, ret(%d).\n", ret); } msg.service_ver = UBASE_CTRLQ_SER_VER_01; @@ -748,17 +752,16 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, msg.in_size = (uint16_t)rsp_info_len; msg.in = (void *)rsp_info; msg.resp_seq = seq; - msg.resp_ret = (uint8_t)(-ret_val); + msg.resp_ret = (uint8_t)(-ret); ret = ubase_ctrlq_send_msg(adev, &msg); - if (ret) { - kfree(rsp_info); + if (ret) dev_err(udev->dev, "send check tp active ctrlq msg failed, ret(%d).\n", ret); - return ret; - } + kfree(rsp_info); + rsp_info = NULL; - return (ret_val) ? 
ret_val : 0; + return ret; } static struct ubase_ctrlq_event_nb udma_ctrlq_opts[] = { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 92c9fcbaae9f..50ef624629df 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -528,9 +528,9 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, xa_lock(&udma_dev->jfc_table.xa); udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, jfcn); if (!udma_jfc) { + xa_unlock(&udma_dev->jfc_table.xa); dev_warn(udma_dev->dev, "Completion event for bogus jfcn %lu.\n", jfcn); - xa_unlock(&udma_dev->jfc_table.xa); return -EINVAL; } @@ -1034,13 +1034,14 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) struct udma_jfc *udma_jfc = to_udma_jfc(jfc); enum jfc_poll_state err = JFC_OK; struct list_head tid_list; + unsigned long flags; uint32_t ci; int npolled; INIT_LIST_HEAD(&tid_list); if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_lock(&udma_jfc->lock); + spin_lock_irqsave(&udma_jfc->lock, flags); for (npolled = 0; npolled < cr_cnt; ++npolled) { err = udma_poll_one(dev, udma_jfc, cr + npolled, &tid_list); @@ -1054,7 +1055,7 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) } if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_unlock(&udma_jfc->lock); + spin_unlock_irqrestore(&udma_jfc->lock, flags); if (!list_empty(&tid_list)) udma_inv_tid(dev, &tid_list); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 6bfc135fa846..8e98319715e0 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -894,11 +894,12 @@ int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, { struct udma_dev *dev = to_udma_dev(ubcore_jfr->ub_dev); struct udma_jfr *jfr = to_udma_jfr(ubcore_jfr); + unsigned long flags; uint32_t nreq; int ret = 0; if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) - spin_lock(&jfr->lock); + spin_lock_irqsave(&jfr->lock, flags); for (nreq = 0; wr; ++nreq, wr = wr->next) { ret = post_recv_one(dev, jfr, wr); @@ -919,7 +920,7 @@ int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, } if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) - spin_unlock(&jfr->lock); + spin_unlock_irqrestore(&jfr->lock, flags); return ret; } diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 7277db44da12..5d520a0cea00 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -1240,11 +1240,12 @@ int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct udma_sqe_ctl *wqe_addr; bool dwqe_enable = false; struct ubcore_jfs_wr *it; + unsigned long flags; int wr_cnt = 0; int ret = 0; if (!sq->lock_free) - spin_lock(&sq->lock); + spin_lock_irqsave(&sq->lock, flags); for (it = wr; it != NULL; it = (struct ubcore_jfs_wr *)(void *)it->next) { ret = udma_post_one_wr(sq, it, udma_dev, &wqe_addr, &dwqe_enable); @@ -1265,7 +1266,7 @@ int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, } if (!sq->lock_free) - spin_unlock(&sq->lock); + spin_unlock_irqrestore(&sq->lock, flags); return ret; } diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index be76c20c1ff0..44a93fd000b0 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1067,12 +1067,6 @@ void udma_reset_down(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - if 
(udma_close_ue_rx(udma_dev, false, false, true, 0)) { - mutex_unlock(&udma_reset_mutex); - dev_err(&adev->dev, "udma close ue rx failed in reset down process.\n"); - return; - } - udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); udma_dev->status = UDMA_SUSPEND; mutex_unlock(&udma_reset_mutex); @@ -1096,11 +1090,18 @@ void udma_reset_uninit(struct auxiliary_device *adev) return; } + if (udma_close_ue_rx(udma_dev, false, false, true, 0)) { + mutex_unlock(&udma_reset_mutex); + dev_err(&adev->dev, "udma close ue rx failed in reset process.\n"); + return; + } + + /* Event should unregister before unset ubcore dev. */ + udma_unregister_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); udma_open_ue_rx(udma_dev, false, false, true, 0); - udma_unregister_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); } @@ -1143,14 +1144,16 @@ void udma_remove(struct auxiliary_device *adev) udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); + /* Event should unregister before unset ubcore dev. */ + udma_unregister_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); (void)ubase_activate_dev(adev); - udma_unregister_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); + dev_info(&adev->dev, "udma device remove success.\n"); } static struct auxiliary_driver udma_drv = { -- Gitee From b4316aba9334b86c6ef072e1a9d21dad816a81ca Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 14:42:06 +0800 Subject: [PATCH 145/243] ub: udma: add udma driver module doc. commit 51fa1dfca96022eaee4f3795557ebe11b74051cd openEuler This patch adds the document related to udma. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 1 + Documentation/ub/urma/udma/index.rst | 14 ++ Documentation/ub/urma/udma/udma.rst | 296 +++++++++++++++++++++++++++ 3 files changed, 311 insertions(+) create mode 100644 Documentation/ub/urma/udma/index.rst create mode 100644 Documentation/ub/urma/udma/udma.rst diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index c9366b0608dc..22276b791363 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -16,3 +16,4 @@ UnifiedBus Subsystem ubus/index ummu-core cdma/index + urma/udma/index diff --git a/Documentation/ub/urma/udma/index.rst b/Documentation/ub/urma/udma/index.rst new file mode 100644 index 000000000000..3a721ff1efcc --- /dev/null +++ b/Documentation/ub/urma/udma/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============ +UDMA Driver +============ + +.. toctree:: + :maxdepth: 2 + :numbered: + + udma diff --git a/Documentation/ub/urma/udma/udma.rst b/Documentation/ub/urma/udma/udma.rst new file mode 100644 index 000000000000..8e17734fa580 --- /dev/null +++ b/Documentation/ub/urma/udma/udma.rst @@ -0,0 +1,296 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============================================== +UnifiedBus DIRECT MEMORY ACCESS DRIVER (UDMA) +=============================================== + +Overview +========= + +This document describes the context and capabilities of the UDMA driver. 
+
+**UnifiedBus** is an interconnect protocol for SuperPoD. It unifies I/O,
+memory access, and communication between various processing units under
+a single interconnect technology framework.
+The UnifiedBus specifications are open source and available on the official
+website: `UB Specification Documents `_.
+
+**UDMA** (UnifiedBus Direct Memory Access) is a hardware I/O device that
+provides direct memory access capabilities.
+
+**URMA** (Unified Remote Memory Access) is a component within the UnifiedBus
+protocol stack, designed to abstract and facilitate communication between
+different hardware and software entities.
+
+The UDMA driver integrates with the UnifiedBus protocol by implementing
+the **URMA programming API**. Through this API, the driver exposes the
+UnifiedBus remote memory access programming model to application developers.
+
+
+Device Driver Model
+=====================
+
+The UDMA device is a UnifiedBus auxiliary device attached to the auxiliary bus.
+The UDMA driver is developed based on the UBASE driver framework and uses
+the auxiliary bus to perform device-driver binding.
+
+.. code-block:: none
+
+    +---------------+    +-------+
+    |  UDMA Driver  |    |  ...  |
+    +-------+-------+    +-------+
+            |
+           \|/
+    +-------+-------------------+
+    |       auxiliary bus       |
+    +-------+-------------------+
+           /|\
+            |
+    +-------+-------+
+    |  UDMA Device  |
+    +----+----------+
+        /|\
+         | UBASE driver creates UDMA device
+    +----+------------------+
+    |     UBASE Driver      |
+    +-----------+-----------+
+                |
+               \|/
+    +-----------+---------------+
+    |           ubus            |
+    +---------------------------+
+
+The figure above illustrates the hierarchy between the UDMA driver and the
+UBASE driver. The UBASE driver is responsible for creating the UDMA auxiliary
+device and registering it with the auxiliary bus.
+
+
+Context & Submodules
+=======================
+
+The UDMA driver depends on the ``Hardware programming interface``,
+``UBASE driver``, and ``UMMU driver``.
+It implements the URMA API and provides direct memory access capabilities.
+
+The figure below describes the UDMA driver's context and submodules.
+
+.. code-block:: none
+
+    +-------------+
+    | 5. URMA API |
+    +-----+-------+
+          ^
+          |
+          |
+    +-----------------+-----------------------+
+    |               UDMA Driver               |
+    |                                         |
+    |                                         |
+    | +--------------------+ +-----------+    |
+    | |      udma_main     | |udma_common|    |
+    | +--------------------+ +-----------+    |
+    | +----------------++--------++--------+  |
+    | | udma_context   ||udma_eid||udma_tid|  |
+    | +----------------++--------++--------+  |
+    | +-----------------+ +----------------+  |    +---------------+
+    | |   udma_jetty    | |  udma_segment  |  +--->| 4. UMMU Driver|
+    | +-----------------+ +----------------+  |    +---------------+
+    | +------------++---------++-----------+  |
+    | |  udma_jfs  ||udma_jfr || udma_jfc  |  |
+    | +------------++---------++-----------+  |
+    | +---------++---------------++--------+  |
+    | | udma_db || udma_ctrlq_tp ||udma_dfx|  |
+    | +---------++---------------++--------+  |
+    | +-----------+ +---------+ +----------+  |
+    | | udma_cmd  | |udma_ctl | | udma_eq  |  |
+    | +-----------+ +---------+ +----------+  |
+    +-----------------------------+-----------+
+                  |               +---------------------+
+                  |               | 3. Management Module|
+                 \|/              +----------+----------+
+    +--------+----------+                    |
+    |  2. UBASE Driver  +<-------------------+
+    +---------+---------+
+                                      Software
+    -----------------------------+-----------------------------------------
+                                \|/           Hardware
+    +----------------------------+-----------+
+    |  1. Hardware programming interface     |
+    +----------------------------------------+
+
+Context
+---------
+
+1. Hardware programming interface: The UDMA driver encapsulates the
+   hardware programming interface, abstracting the hardware specifics.
+
+2. UBASE: The UBASE driver is responsible for managing UDMA auxiliary devices.
+   It also provides common management capabilities for auxiliary bus devices
+   and interacts with the Management module.
+   The UDMA device driver is built upon the UBASE driver and reuses its
+   common utility functions.
+
+3. Management module: Responsible for device management and configuration.
+
+4. UMMU: UnifiedBus Memory Management Unit, providing memory management
+   functionality (address translation, access permissions, etc.) for
+   UnifiedBus devices.
+
+5. URMA API: The URMA programming interface. The URMA API abstracts memory
+   operations, and the UDMA driver implements it, so application developers
+   do not need to be aware of the driver's internal details.
+
+
+Submodules
+------------
+
+The UDMA driver submodules can be divided into four categories:
+common utilities and main functions, UDMA communication, device management
+and configuration, and UDMA device debugging.
+
+**Common Utility and Main Functions**
+
+* udma_main: Implements module_init/module_exit and registers the UDMA driver
+  with the auxiliary bus (a registration sketch follows this section).
+
+* udma_common: Provides common utility functions for the UDMA driver.
+
+**UDMA Communication**
+
+These submodules handle UDMA communication setup and data-path processing
+(e.g., read/write or send/recv operations).
+
+* udma_context: Manages the UDMA communication context (allocates and frees
+  contexts, etc.).
+* udma_eid: Manages UDMA Entity IDs (adds, removes, and queries UDMA entities).
+* udma_tid: Manages TIDs (Token IDs): allocates and frees token IDs.
+* udma_segment: Manages memory segments, including local memory segment
+  registration and remote memory segment import.
+* udma_jetty, udma_jfs, udma_jfr, udma_jfc: Manage UnifiedBus communication
+  jetty-related resources, including jetty, jfs, jfr, and jfc.
+
+**UDMA Device Management and Configuration**
+
+These submodules handle UDMA device management and UDMA communication
+configuration.
+
+* udma_cmd: Encapsulates hardware configuration commands for UDMA
+  communication, e.g., create jfs, create jfc, etc.
+* udma_db: Encapsulates UDMA hardware doorbell operations.
+* udma_ctrlq_tp: Encapsulates control queue (CtrlQ) operations for UDMA
+  hardware configuration, e.g., getting the transport channels.
+* udma_ctl: Encapsulates UDMA hardware-specific configuration operations that
+  are not defined in the URMA API. Application developers should include the
+  header file ``include/ub/urma/udma/udma_ctl.h`` separately.
+* udma_eq: Encapsulates hardware event queue operations, e.g., registering a
+  CtrlQ event handler to receive CtrlQ events.
+
+**UDMA Device Debugging**
+
+* udma_dfx: Queries the UDMA hardware runtime configuration, e.g.,
+  jetty state, transport mode, etc.
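+
+As a reading aid only, the auxiliary-bus binding performed by udma_main
+follows the generic kernel pattern sketched below. The device name, callback
+bodies, and ID table entries here are illustrative placeholders, not the
+actual UDMA symbols.
+
+.. code-block:: c
+
+    #include <linux/auxiliary_bus.h>
+    #include <linux/module.h>
+
+    /* Hypothetical probe: called when a matching auxiliary device binds. */
+    static int example_probe(struct auxiliary_device *adev,
+                             const struct auxiliary_device_id *id)
+    {
+            /* Allocate driver state, then register with the URMA core. */
+            return 0;
+    }
+
+    static void example_remove(struct auxiliary_device *adev)
+    {
+            /* Tear down in the reverse order of probe. */
+    }
+
+    /* Auxiliary IDs use the "<parent module>.<device>" naming convention. */
+    static const struct auxiliary_device_id example_id_table[] = {
+            { .name = "ubase.udma_example" },  /* placeholder name */
+            {}
+    };
+    MODULE_DEVICE_TABLE(auxiliary, example_id_table);
+
+    static struct auxiliary_driver example_driver = {
+            .probe    = example_probe,
+            .remove   = example_remove,
+            .id_table = example_id_table,
+    };
+    module_auxiliary_driver(example_driver);
+
+    MODULE_LICENSE("GPL");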
+
+
+Supported Hardware
+====================
+
+The UDMA driver supports the following hardware:
+
+=========== =============
+Vendor ID    Device ID
+=========== =============
+0xCC08      0xA001
+0xCC08      0xA002
+0xCC08      0xD802
+0xCC08      0xD803
+0xCC08      0xD80B
+0xCC08      0xD80C
+=========== =============
+
+You can use the ``lsub`` command on your host OS to query UB devices. Below is
+an example output:
+
+.. code-block:: shell
+
+    UB network controller <0002>: Huawei Technologies Co., Ltd. URMA management ub entity :
+    UB network controller <0082>: Huawei Technologies Co., Ltd. URMA management ub entity :
+    UB network controller <0002>: Huawei Technologies Co., Ltd. URMA management ub entity :
+
+The ``Vendor ID`` and ``Device ID`` are located at the end of each output
+line, in the format ``<Vendor ID>:<Device ID>``.
+
+Note that the ``lsub`` command comes from ubutils; make sure it is installed
+on your host.
+
+
+Module Parameters
+===================
+
+The UDMA driver supports four module parameters: **cqe_mode**, **jfc_arm_mode**,
+**jfr_sleep_time**, and **dump_aux_info**.
+The default values represent best practice; however, you may need to change
+them in certain cases.
+
+cqe_mode
+-----------
+
+``cqe_mode`` controls the method of **Completion Queue Entry (CQE)** event
+generation.
+
+In interrupt mode, UDMA provides two mechanisms for generating CQE events:
+**Producer Index (PI)/Consumer Index (CI) difference comparison**
+and **counter-based threshold**.
+
+* PI/CI difference comparison: PI (Producer Index) and CI (Consumer Index)
+  respectively point to the next CQE to be written and read in the Completion
+  Queue. The device generates an interrupt to notify the upper layer when the
+  difference (the number of pending CQEs) exceeds a certain threshold.
+* Counter-based threshold: An interrupt is generated when the total number of
+  CQEs written to the Completion Queue reaches a programmed threshold.
+
+**Parameter values:**
+
+* 0: Counter-based threshold
+* 1: PI/CI difference comparison
+
+**Default value:** 1
+
+
+jfc_arm_mode
+--------------
+
+``jfc_arm_mode`` controls the completion event interrupt mode.
+
+**Parameter values:**
+
+* 0: Always ARM, interrupt always enabled
+* 1: No ARM, interrupt is disabled and cannot be modified
+* Any other value (e.g., 2): Interrupt is disabled, but can be modified
+
+**Default value:** 0
+
+
+jfr_sleep_time
+----------------
+
+``jfr_sleep_time`` configures the maximum blocking wait time (in microseconds)
+when deregistering a JFR (jetty-related resource). The default value is
+1000 us. You can adjust this parameter value as needed.
+
+The allowed range is ``[0, UINT32_MAX]``.
+
+dump_aux_info
+---------------
+
+``dump_aux_info`` controls whether to dump auxiliary information
+(the hardware register values) into the event body when reporting asynchronous
+or completion events.
+
+
+**Parameter values:**
+
+* false: Disables the dumping of auxiliary information.
+* true: Enables the dumping of auxiliary information.
+
+**Default value:** false
+
+
+Support
+=======
+
+If there is any issue or question, please email the specific information
+related to the issue or question to the driver maintainers or the vendor's
+support channel.
-- 
Gitee

From ca864385b7df04018b9482f002fc9120395c4e9c Mon Sep 17 00:00:00 2001
From: Wei Qin
Date: Thu, 27 Nov 2025 15:00:52 +0800
Subject: [PATCH 146/243] ub: udma: mask jetty context addr info.

commit 5b8885c72d25a4694b448a6085c1fd156a45ac29 openEuler

For security reasons, the jetty context address will not be printed.
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index c3f3f9a90fb3..e41b85e71054 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -796,6 +796,14 @@ static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_t return false; } +static void udma_mask_jetty_ctx(struct udma_jetty_ctx *ctx) +{ + ctx->sqe_base_addr_l = 0; + ctx->sqe_base_addr_h = 0; + ctx->user_data_l = 0; + ctx->user_data_h = 0; +} + static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *sq) { struct udma_jetty_ctx ctx = {}; @@ -821,6 +829,7 @@ static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *s if (ctx.flush_ssn_vld && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF) return true; + udma_mask_jetty_ctx(&ctx); udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); @@ -1098,6 +1107,8 @@ static bool udma_batch_query_jetty_fd(struct udma_dev *dev, *bad_jetty_index = 0; all_query_done = false; + + udma_mask_jetty_ctx(&ctx); udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); break; -- Gitee From dfb25d16aa666ab1af06cb3d7d9d7fa0d36ef982 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 15:35:43 +0800 Subject: [PATCH 147/243] ub: udma: bugfix for set and get tp attr. commit 7b406a1a737767a4aec23c778f6f631453e77b6d openEuler This patch fix a bug about set and get tp attr. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 966dc7a41d94..86d68ace7000 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -170,6 +170,11 @@ static int udma_dev_res_ratio_ctrlq_handler(struct auxiliary_device *adev, struct udma_ctrlq_event_nb *udma_cb; int ret; + if (service_ver != UBASE_CTRLQ_SER_VER_01) { + dev_err(udev->dev, "Unsupported server version (%u).\n", service_ver); + return -EOPNOTSUPP; + } + mutex_lock(&udev->npu_nb_mutex); udma_cb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); if (!udma_cb) { @@ -646,6 +651,14 @@ int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, tp_attr_req.tp_attr.tp_attr_bitmap = tp_attr_bitmap; memcpy(&tp_attr_req.tp_attr.tp_attr_value, (void *)tp_attr, sizeof(*tp_attr)); + udma_swap_endian((uint8_t *)tp_attr->sip, tp_attr_req.tp_attr.tp_attr_value.sip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr->dip, tp_attr_req.tp_attr.tp_attr_value.dip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr->sma, tp_attr_req.tp_attr.tp_attr_value.sma, + UBCORE_MAC_BYTES); + udma_swap_endian((uint8_t *)tp_attr->dma, tp_attr_req.tp_attr.tp_attr_value.dma, + UBCORE_MAC_BYTES); udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), NULL, 0); msg.opcode = UDMA_CMD_CTRLQ_SET_TP_ATTR; @@ -687,6 +700,14 @@ int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, *tp_attr_bitmap = tp_attr_resp.tp_attr.tp_attr_bitmap; memcpy((void *)tp_attr, &tp_attr_resp.tp_attr.tp_attr_value, sizeof(tp_attr_resp.tp_attr.tp_attr_value)); + udma_swap_endian((uint8_t 
*)tp_attr_resp.tp_attr.tp_attr_value.sip, tp_attr->sip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.dip, tp_attr->dip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.sma, tp_attr->sma, + UBCORE_MAC_BYTES); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.dma, tp_attr->dma, + UBCORE_MAC_BYTES); return 0; } -- Gitee From d7a3d407c5d42c7b2b2d2b71629514c2883a22e0 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 16:55:54 +0800 Subject: [PATCH 148/243] ub: udma: Support eid and guid updates. commit 53d00d68d89395191c185ae7fa29a6a1d8ef7cd5 openEuler This patch support update eid and guid for udma. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 1 + drivers/ub/urma/hw/udma/udma_cmd.h | 14 ++++++ drivers/ub/urma/hw/udma/udma_eq.c | 71 +++++++++++++++++++++++++++++ include/ub/ubase/ubase_comm_ctrlq.h | 1 + 4 files changed, 87 insertions(+) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 5a0332667885..5dcb25012d61 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -20,6 +20,7 @@ static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_unic[] = { static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_udma[] = { {UBASE_CTRLQ_SER_TYPE_TP_ACL, UBASE_CTRLQ_OPC_CHECK_TP_ACTIVE, NULL, NULL}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_SEID, NULL, NULL}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_UE_SEID_GUID, NULL, NULL}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO, NULL, NULL}, }; diff --git a/drivers/ub/urma/hw/udma/udma_cmd.h b/drivers/ub/urma/hw/udma/udma_cmd.h index 6ec531913033..24f6d65bf1ad 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.h +++ b/drivers/ub/urma/hw/udma/udma_cmd.h @@ -49,9 +49,23 @@ enum udma_ctrlq_eid_update_op { UDMA_CTRLQ_EID_DEL, }; +enum udma_ctrlq_eid_guid_update_op { + UDMA_CTRLQ_EID_GUID_ADD = 0, + UDMA_CTRLQ_EID_GUID_DEL, +}; + +struct udma_ctrlq_ue_eid_guid_out { + struct udma_ctrlq_eid_info eid_info; + uint32_t op_type : 4; + uint32_t rsv : 28; + uint32_t ue_id; + guid_t ue_guid; +} __packed; + enum udma_ctrlq_dev_mgmt_opcode { UDMA_CTRLQ_GET_SEID_INFO = 0x1, UDMA_CTRLQ_UPDATE_SEID_INFO = 0x2, + UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID = 0x3, UDMA_CTRLQ_GET_DEV_RESOURCE_COUNT = 0x11, UDMA_CTRLQ_GET_DEV_RESOURCE_RATIO = 0x12, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO = 0x13, diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 655714b872a5..d3b6813b1d55 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -764,11 +764,82 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, return ret; } +static int udma_ctrlq_send_eid_guid_response(struct udma_dev *udma_dev, + uint16_t seq, + int ret_val) +{ + struct ubase_ctrlq_msg msg = {}; + int in_buf = 0; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.opcode = UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID; + msg.need_resp = 0; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + msg.in = (void *)&in_buf; + msg.in_size = sizeof(in_buf); + + ret = ubase_ctrlq_send_msg(udma_dev->comdev.adev, &msg); + if (ret) + dev_err(udma_dev->dev, "send eid-guid rsp failed, ret = %d.\n", + ret); + + return ret; +} + +static int udma_ctrlq_notify_mue_eid_guid(struct auxiliary_device 
*adev, + uint8_t service_ver, + void *data, + uint16_t len, + uint16_t seq) +{ + struct udma_ctrlq_ue_eid_guid_out eid_guid_entry = {}; + struct udma_dev *udma_dev; + + if (adev == NULL || data == NULL) { + pr_err("adev is null : %d, data is null : %d.\n", + adev == NULL, data == NULL); + return -EINVAL; + } + + udma_dev = get_udma_dev(adev); + if (udma_dev->is_ue) + return 0; + + if (udma_dev->status != UDMA_NORMAL) + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, 0); + if (len < sizeof(struct udma_ctrlq_ue_eid_guid_out)) { + dev_err(udma_dev->dev, "eid-guid len(%u) is invalid.\n", len); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, -EINVAL); + } + memcpy(&eid_guid_entry, data, sizeof(eid_guid_entry)); + if (eid_guid_entry.op_type != UDMA_CTRLQ_EID_GUID_ADD && + eid_guid_entry.op_type != UDMA_CTRLQ_EID_GUID_DEL) { + dev_err(udma_dev->dev, "eid-guid type(%u) is invalid.\n", + eid_guid_entry.op_type); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, + -EINVAL); + } + if (eid_guid_entry.eid_info.eid_idx >= SEID_TABLE_SIZE) { + dev_err(udma_dev->dev, "invalid ue eid_idx = %u.\n", + eid_guid_entry.eid_info.eid_idx); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, + -EINVAL); + } + + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, 0); +} + static struct ubase_ctrlq_event_nb udma_ctrlq_opts[] = { {UBASE_CTRLQ_SER_TYPE_TP_ACL, UDMA_CMD_CTRLQ_CHECK_TP_ACTIVE, NULL, udma_ctrlq_check_tp_active}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UDMA_CTRLQ_UPDATE_SEID_INFO, NULL, udma_ctrlq_eid_update}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID, NULL, + udma_ctrlq_notify_mue_eid_guid}, }; static int udma_register_one_ctrlq_event(struct auxiliary_device *adev, diff --git a/include/ub/ubase/ubase_comm_ctrlq.h b/include/ub/ubase/ubase_comm_ctrlq.h index 3e08a5ab5a4f..c50bfd60047f 100644 --- a/include/ub/ubase/ubase_comm_ctrlq.h +++ b/include/ub/ubase/ubase_comm_ctrlq.h @@ -53,6 +53,7 @@ enum ubase_ctrlq_opc_type_ip { enum ubase_ctrlq_opc_type_dev_register { UBASE_CTRLQ_OPC_UPDATE_SEID = 0x02, + UBASE_CTRLQ_OPC_UPDATE_UE_SEID_GUID = 0x03, UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO = 0x13, UBASE_CTRLQ_OPC_CTRLQ_CTRL = 0x14, UBASE_CTRLQ_OPC_UE_RESET_CTRL = 0x15, -- Gitee From d7e7a7599b224deb3ce76840800a3f898d066786 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Fri, 14 Nov 2025 11:54:54 +0800 Subject: [PATCH 149/243] ub:ubus: Bugfix of ubus and ubfi commit 7e992348e2c3a9064e165fbf5a075a2945790756 openEuler driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID34DG CVE: NA ----------------------------------------------------------- 1. Add the CONFIG_UB option to the ub directory in Makefile; 2. Add static for inner functions; 3. fix ubfi's ubc_list init bug. 
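
For reference, the ubc_list change (item 3) replaces a list head that was
only initialized at runtime, inside handle_ubc_table(), with one that is
valid from link time, so lookups that run earlier no longer see an
uninitialized list. A minimal sketch of the pattern (illustrative, not the
exact driver code):

	/* before: usable only after INIT_LIST_HEAD() has run */
	struct list_head ubc_list;
	...
	INIT_LIST_HEAD(&ubc_list);	/* done late, in handle_ubc_table() */

	/* after: statically initialized at its definition */
	LIST_HEAD(ubc_list);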
Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/Makefile | 2 +- drivers/irqchip/irq-gic-v3-its-ub-msi.c | 2 +- drivers/ub/ubfi/ubc.c | 4 +--- drivers/ub/ubus/ioctl.c | 1 + drivers/ub/ubus/reset.c | 3 ++- drivers/ub/ubus/resource.c | 6 +++--- drivers/ub/ubus/route.c | 1 + drivers/ub/ubus/services/ras.c | 4 ++-- drivers/ub/ubus/ubus_config.c | 12 ++++++------ drivers/ub/ubus/ubus_driver.c | 6 +++--- drivers/ub/ubus/ubus_entity.c | 2 +- 11 files changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/Makefile b/drivers/Makefile index 269267ac3b4f..f36e00dfd1bd 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_GPIOLIB) += gpio/ obj-y += pwm/ obj-y += pci/ -obj-y += ub/ +obj-$(CONFIG_UB) += ub/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ diff --git a/drivers/irqchip/irq-gic-v3-its-ub-msi.c b/drivers/irqchip/irq-gic-v3-its-ub-msi.c index 4caccd12fdc4..08274a57c5d5 100644 --- a/drivers/irqchip/irq-gic-v3-its-ub-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-ub-msi.c @@ -146,7 +146,7 @@ static int its_ub_of_msi_init(void) return 0; } -int __init its_ub_msi_init(void) +static int __init its_ub_msi_init(void) { its_ub_of_msi_init(); its_ub_acpi_msi_init(); diff --git a/drivers/ub/ubfi/ubc.c b/drivers/ub/ubfi/ubc.c index e89aeeafb913..a3f7bab8863f 100644 --- a/drivers/ub/ubfi/ubc.c +++ b/drivers/ub/ubfi/ubc.c @@ -28,7 +28,7 @@ #define to_ub_ubc(n) container_of(n, struct ub_bus_controller, dev) -struct list_head ubc_list; +LIST_HEAD(ubc_list); EXPORT_SYMBOL_GPL(ubc_list); u32 ubc_eid_start; @@ -593,8 +593,6 @@ int handle_ubc_table(u64 pointer) if (!info_node) return -EINVAL; - INIT_LIST_HEAD(&ubc_list); - ret = parse_ubc_table(info_node); if (ret) goto err_handle; diff --git a/drivers/ub/ubus/ioctl.c b/drivers/ub/ubus/ioctl.c index abcd4e878755..0825a87d8b81 100644 --- a/drivers/ub/ubus/ioctl.c +++ b/drivers/ub/ubus/ioctl.c @@ -10,6 +10,7 @@ #include "ubus.h" #include "instance.h" +#include "ioctl.h" #define UBUS_MAX_DEVICES 1 #define UBUS_DEVICE_NAME "unified_bus" diff --git a/drivers/ub/ubus/reset.c b/drivers/ub/ubus/reset.c index 4b7d86624e24..596d848c2c11 100644 --- a/drivers/ub/ubus/reset.c +++ b/drivers/ub/ubus/reset.c @@ -12,6 +12,7 @@ #include "route.h" #include "ubus_controller.h" #include "ubus_config.h" +#include "reset.h" enum elr_type { ELR_PREPARE = 0, @@ -30,7 +31,7 @@ static u32 saved_cfg_offset[] = { * ub_elr - Initiate an UB entity level reset * @dev: UB entity to reset */ -int ub_elr(struct ub_entity *dev) +static int ub_elr(struct ub_entity *dev) { u8 command; u8 val = 0; diff --git a/drivers/ub/ubus/resource.c b/drivers/ub/ubus/resource.c index b57117e02415..d4516d672bb5 100644 --- a/drivers/ub/ubus/resource.c +++ b/drivers/ub/ubus/resource.c @@ -97,7 +97,7 @@ static int _ub_assign_resource(struct ub_entity *uent, int idx, return -ENOMEM; } -int ub_assign_resource(struct ub_entity *uent, int idx) +static int ub_assign_resource(struct ub_entity *uent, int idx) { struct resource *res = &uent->zone[idx].res; resource_size_t size; @@ -123,7 +123,7 @@ int ub_assign_resource(struct ub_entity *uent, int idx) return 0; } -void ub_release_resource(struct ub_entity *uent, int idx) +static void ub_release_resource(struct ub_entity *uent, int idx) { int ret; @@ -292,7 +292,7 @@ int ub_insert_resource(struct ub_entity *dev, int idx) return 0; } -int ub_entity_alloc_mmio_idx(struct ub_entity *dev, int idx) +static int ub_entity_alloc_mmio_idx(struct ub_entity *dev, int idx) { if (is_ibus_controller(dev) || is_idev(dev)) 
return ub_insert_resource(dev, idx); diff --git a/drivers/ub/ubus/route.c b/drivers/ub/ubus/route.c index 73e0df5436dc..364bf78d93c3 100644 --- a/drivers/ub/ubus/route.c +++ b/drivers/ub/ubus/route.c @@ -12,6 +12,7 @@ #include "enum.h" #include "port.h" #include "ubus_driver.h" +#include "route.h" #define UB_ROUTE_TABLE_ENTRY_START (UB_ROUTE_TABLE_SLICE_START + (0x10 << 2)) #define EBW(port_nums) ((((port_nums) - 1) >> 5) + 1) /* Entry Bit Width */ diff --git a/drivers/ub/ubus/services/ras.c b/drivers/ub/ubus/services/ras.c index c38b300e1646..fc4365f819a3 100644 --- a/drivers/ub/ubus/services/ras.c +++ b/drivers/ub/ubus/services/ras.c @@ -29,7 +29,7 @@ enum ras_err_level { RAS_ERR_DEVICE_LEVEL, }; -int cper_severity_to_ub_ras(int cper_severity) +static int cper_severity_to_ub_ras(int cper_severity) { switch (cper_severity) { case CPER_SEV_FATAL: @@ -269,7 +269,7 @@ static inline void ras_recover_entry_init(struct ras_recover_entry *entry, static DEFINE_SPINLOCK(ub_ras_recover_ring_lock); static DECLARE_WORK(ub_ras_recover_work, ub_ras_recover_work_func); -void ub_ras_recover_queue(struct cper_sec_ubus *ubus_err, int severity) +static void ub_ras_recover_queue(struct cper_sec_ubus *ubus_err, int severity) { #define PORT_VALID_BIT 0b100ULL #define OVERFLOW_FLAG_BIT 0b10000ULL diff --git a/drivers/ub/ubus/ubus_config.c b/drivers/ub/ubus/ubus_config.c index a2d285da998a..2c1f068dc11a 100644 --- a/drivers/ub/ubus/ubus_config.c +++ b/drivers/ub/ubus/ubus_config.c @@ -308,7 +308,7 @@ int ub_send_cfg(struct ub_entity *uent, u8 size, u64 pos, u32 *val) req_pkt.header.msgetah.code); } -int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) +static int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -318,7 +318,7 @@ int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) return ub_sync_cfg(uent, (u8)sizeof(u8), pos, false, (u32 *)val); } -int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 *val) +static int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 *val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -328,7 +328,7 @@ int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 *val) return ub_sync_cfg(uent, (u8)sizeof(u16), pos, false, (u32 *)val); } -int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) +static int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -338,7 +338,7 @@ int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) return ub_sync_cfg(uent, (u8)sizeof(u32), pos, false, val); } -int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) +static int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); @@ -348,7 +348,7 @@ int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) return ub_sync_cfg(uent, (u8)sizeof(u8), pos, true, (u32 *)&val); } -int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) +static int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); @@ -358,7 +358,7 @@ int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) return 
ub_sync_cfg(uent, (u8)sizeof(u16), pos, true, (u32 *)&val); } -int __ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val) +static int __ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index 9431bbccd3b0..974020bf3c38 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -134,7 +134,7 @@ ub_match_one_device(const struct ub_device_id *id, const struct ub_entity *dev) return NULL; } -const struct ub_device_id *ub_match_id(const struct ub_device_id *ids, +static const struct ub_device_id *ub_match_id(const struct ub_device_id *ids, struct ub_entity *dev) { if (ids && dev) { @@ -567,7 +567,7 @@ static int ub_bus_num_ue(struct device *dev) return ub_num_ue(to_ub_entity(dev)); } -void ub_bus_type_init(void) +static void ub_bus_type_init(void) { ub_bus_type.match = ub_bus_match; ub_bus_type.uevent = ub_uevent; @@ -580,7 +580,7 @@ void ub_bus_type_init(void) ub_bus_type.num_vf = ub_bus_num_ue; } -void ub_bus_type_uninit(void) +static void ub_bus_type_uninit(void) { ub_bus_type.match = NULL; ub_bus_type.uevent = NULL; diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index fcea27373ccb..6031469a27b9 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -63,7 +63,7 @@ struct ub_entity *ub_alloc_ent(void) EXPORT_SYMBOL_GPL(ub_alloc_ent); static DEFINE_IDA(uent_num_ida); -void ub_entity_num_free(struct ub_entity *uent) +static void ub_entity_num_free(struct ub_entity *uent) { ida_free(&uent_num_ida, uent->uent_num); } -- Gitee From c2f684b092c92c26b799b20b0563e09853ff34af Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Mon, 17 Nov 2025 14:25:58 +0800 Subject: [PATCH 150/243] ub:ubus: Add ummu_map attribute in sysfs commit d9875bf05b18a022f352201fec2e517e72236ca4 openEuler driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID77OO CVE: NA ----------------------------------------------------------- Add ummu_map attribute in sysfs Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/sysfs.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/ub/ubus/sysfs.c b/drivers/ub/ubus/sysfs.c index a583cf7efa97..2e997c5c5f1a 100644 --- a/drivers/ub/ubus/sysfs.c +++ b/drivers/ub/ubus/sysfs.c @@ -325,6 +325,15 @@ static ssize_t primary_cna_show(struct device *dev, } DEVICE_ATTR_RO(primary_cna); +static ssize_t ummu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ub_entity *uent = to_ub_entity(dev); + + return sysfs_emit(buf, "%#04x\n", uent->ubc->attr.ummu_map); +} +DEVICE_ATTR_RO(ummu_map); + static ssize_t instance_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -393,6 +402,7 @@ static struct attribute *ub_entity_attrs[] = { &dev_attr_tid.attr, &dev_attr_primary_entity.attr, &dev_attr_kref.attr, + &dev_attr_ummu_map.attr, NULL }; -- Gitee From 579b287a607a64d5e56bb28f7c58ea15d55a7ac5 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Sat, 29 Nov 2025 10:43:16 +0800 Subject: [PATCH 151/243] ub: hisi-ubus: Fix ub memory decoder create commit 2f37e06abbc56da4affaa2620d07148052395b05 openEuler drivers: inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Fix ub memory decoder create cannot get data Signed-off-by: 
Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/vendor/hisilicon/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ub/ubus/vendor/hisilicon/memory.c b/drivers/ub/ubus/vendor/hisilicon/memory.c index fa9747171eea..e49a4a82322f 100644 --- a/drivers/ub/ubus/vendor/hisilicon/memory.c +++ b/drivers/ub/ubus/vendor/hisilicon/memory.c @@ -301,8 +301,8 @@ static u8 get_mem_decoder_number(struct hi_ubc_private_data *data) int hi_mem_decoder_create(struct ub_bus_controller *ubc) { + struct hi_ubc_private_data *data = ubc->data; struct ub_mem_device *mem_device; - struct hi_ubc_private_data *data; void *priv_data; int ret; -- Gitee From d3c1d7af73b8802a3ab47afc7db427c327e728d7 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 26 Nov 2025 09:30:46 +0800 Subject: [PATCH 152/243] ub:hisi-ubus ub:hisi-ubus: Move the decoder's page table operations to hisi-ubus commit bd9e1c9e31192a1eabadca924fab339f91eec5ae openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ Move the decoder's page table operations to hisi-ubus Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubfi/irq.c | 4 +- drivers/ub/ubus/Makefile | 2 +- drivers/ub/ubus/decoder.c | 115 +++++++--------- drivers/ub/ubus/decoder.h | 43 +++--- drivers/ub/ubus/omm.h | 32 ----- drivers/ub/ubus/resource.c | 1 - drivers/ub/ubus/ubus_controller.h | 9 ++ drivers/ub/ubus/vendor/hisilicon/Makefile | 2 +- drivers/ub/ubus/vendor/hisilicon/controller.c | 14 +- .../hisilicon/hisi-decoder.c} | 128 +++++++++++++----- .../ub/ubus/vendor/hisilicon/hisi-decoder.h | 50 +++++++ drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 2 - 12 files changed, 232 insertions(+), 170 deletions(-) delete mode 100644 drivers/ub/ubus/omm.h rename drivers/ub/ubus/{omm.c => vendor/hisilicon/hisi-decoder.c} (86%) create mode 100644 drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index d47e4ce67bd6..846af8d6c5f1 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -61,8 +61,10 @@ int ubrt_register_gsi(u32 hwirq, int trigger, int polarity, const char *name, res->start = irq; res->end = irq; res->flags = IORESOURCE_IRQ; -#endif return 0; +#else + return -EINVAL; +#endif } EXPORT_SYMBOL_GPL(ubrt_register_gsi); diff --git a/drivers/ub/ubus/Makefile b/drivers/ub/ubus/Makefile index 59505977dd2f..96efd80d502f 100644 --- a/drivers/ub/ubus/Makefile +++ b/drivers/ub/ubus/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_UB_UBUS) += ub-driver.o controller.o config.o entity.o ras.o obj-$(CONFIG_UB_UBUS) += msi/ ubus-y := ubus_driver.o sysfs.o ubus_controller.o msg.o ubus_config.o port.o cc.o eid.o cna.o route.o -ubus-y += enum.o resource.o ubus_entity.o reset.o cap.o interrupt.o decoder.o omm.o ioctl.o eu.o link.o +ubus-y += enum.o resource.o ubus_entity.o reset.o cap.o interrupt.o decoder.o ioctl.o eu.o link.o ubus-y += instance.o pool.o memory.o ubus-y += services/ras.o services/service.o services/gucd.o diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index f5a8e69fd1cf..288e33a96038 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -13,7 +13,6 @@ #include "ubus.h" #include "ubus_controller.h" -#include "omm.h" #include "decoder.h" #define MMIO_SIZE_MASK GENMASK_ULL(18, 16) @@ -33,8 +32,6 @@ #define EVTQ_ENABLE 0x1 #define EVT_ENTRY_SIZE 16 -#define DECODER_PAGE_TABLE_ENTRY_SIZE 8 - #define DECODER_QUEUE_TIMEOUT_US 
1000000 /* 1s */ static void ub_decoder_uninit_queue(struct ub_decoder *decoder) @@ -170,68 +167,24 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) return ret; } -static int ub_decoder_create_page_table(struct ub_decoder *decoder) +static int ub_decoder_create_page_table(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - struct page_table_desc *invalid_desc = &decoder->invalid_desc; - struct ub_entity *uent = decoder->uent; - struct page_table *pgtlb; - void *pgtlb_base; - size_t size; - - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - pgtlb = &decoder->pgtlb; - pgtlb_base = dmam_alloc_coherent(decoder->dev, size, - &pgtlb->pgtlb_dma, GFP_KERNEL); - if (!pgtlb_base) { - ub_err(uent, "allocate ub decoder page table fail\n"); - return -ENOMEM; - } - pgtlb->pgtlb_base = pgtlb_base; - - size = sizeof(*pgtlb->desc_base) * DECODER_PAGE_TABLE_SIZE; - pgtlb->desc_base = kzalloc(size, GFP_KERNEL); - if (!pgtlb->desc_base) { - ub_err(uent, "allocate ub decoder page table desc fail\n"); - goto release_pgtlb; - } - - invalid_desc->page_base = dmam_alloc_coherent(decoder->dev, - RANGE_TABLE_PAGE_SIZE, - &invalid_desc->page_dma, - GFP_KERNEL); - if (!invalid_desc->page_base) { - ub_err(uent, "decoder alloc free page fail\n"); - goto release_desc; - } - decoder->invalid_page_dma = (invalid_desc->page_dma & - DECODER_PGTBL_PGPRT_MASK) >> - DECODER_DMA_PAGE_ADDR_OFFSET; - - ub_decoder_init_page_table(decoder, pgtlb_base); + if (ubc->ops->create_decoder_table) + return ubc->ops->create_decoder_table(decoder); - return 0; - -release_desc: - kfree(pgtlb->desc_base); - pgtlb->desc_base = NULL; -release_pgtlb: - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - dmam_free_coherent(decoder->dev, size, pgtlb_base, pgtlb->pgtlb_dma); - return -ENOMEM; + ub_err(decoder->uent, "ub bus controller can't create decoder table\n"); + return -EPERM; } -static void ub_decoder_free_page_table(struct ub_decoder *decoder) +static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - struct page_table_desc *invalid_desc = &decoder->invalid_desc; - size_t size; - - dmam_free_coherent(decoder->dev, RANGE_TABLE_PAGE_SIZE, - invalid_desc->page_base, invalid_desc->page_dma); - kfree(decoder->pgtlb.desc_base); - - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - dmam_free_coherent(decoder->dev, size, decoder->pgtlb.pgtlb_base, - decoder->pgtlb.pgtlb_dma); + if (ubc->ops->free_decoder_table) + ubc->ops->free_decoder_table(decoder); + else + ub_err(decoder->uent, + "ub bus controller can't free decoder table\n"); } static void ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, @@ -302,7 +255,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) if (ret) goto release_decoder; - ret = ub_decoder_create_page_table(decoder); + ret = ub_decoder_create_page_table(ubc, decoder); if (ret) { ub_err(uent, "decoder create page table failed\n"); goto release_queue; @@ -321,7 +274,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) return ret; release_page_table: - ub_decoder_free_page_table(decoder); + ub_decoder_free_page_table(ubc, decoder); release_queue: ub_decoder_uninit_queue(decoder); release_decoder: @@ -397,7 +350,7 @@ static void ub_remove_decoder(struct ub_bus_controller *ubc) ub_decoder_device_unset(decoder); - ub_decoder_free_page_table(decoder); + ub_decoder_free_page_table(ubc, decoder); ub_decoder_uninit_queue(decoder); @@ -635,6 +588,7 @@ int ub_decoder_cmd_request(struct 
ub_decoder *decoder, phys_addr_t addr, ret = wait_for_cmdq_notify(decoder); return ret; } +EXPORT_SYMBOL_GPL(ub_decoder_cmd_request); static bool queue_empty(struct ub_decoder_queue *q) { @@ -839,3 +793,38 @@ void ub_decoder_uninit(struct ub_entity *uent) ub_remove_decoder(uent->ubc); } + +int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) +{ + struct ub_bus_controller *ubc; + + if (!decoder) { + pr_err("unmap mmio decoder ptr is null\n"); + return -EINVAL; + } + + ubc = decoder->uent->ubc; + if (!ubc->ops->decoder_unmap) { + pr_err("decoder_unmap ops not exist\n"); + return -EINVAL; + } + return ubc->ops->decoder_unmap(ubc->decoder, addr, size); +} + +int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) +{ + struct ub_bus_controller *ubc; + + if (!decoder || !info) { + pr_err("decoder or map info is null\n"); + return -EINVAL; + } + + ubc = decoder->uent->ubc; + if (!ubc->ops->decoder_map) { + pr_err("decoder_map ops not exist\n"); + return -EINVAL; + } + + return ubc->ops->decoder_map(ubc->decoder, info); +} diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 47710ead71db..37d628dc45e2 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -87,33 +87,20 @@ struct ub_decoder { struct mutex table_lock; }; -#define DECODER_PGTBL_PGPRT_MASK GENMASK_ULL(47, 12) -#define DECODER_DMA_PAGE_ADDR_OFFSET 12 - -#define PGTLB_CACHE_IR_NC 0b00 -#define PGTLB_CACHE_IR_WBRA 0b01 -#define PGTLB_CACHE_IR_WT 0b10 -#define PGTLB_CACHE_IR_WB 0b11 -#define PGTLB_CACHE_OR_NC 0b0000 -#define PGTLB_CACHE_OR_WBRA 0b0100 -#define PGTLB_CACHE_OR_WT 0b1000 -#define PGTLB_CACHE_OR_WB 0b1100 -#define PGTLB_CACHE_SH_NSH 0b000000 -#define PGTLB_CACHE_SH_OSH 0b100000 -#define PGTLB_CACHE_SH_ISH 0b110000 - -#define PGTLB_ATTR_DEFAULT (PGTLB_CACHE_IR_WBRA | \ - PGTLB_CACHE_OR_WBRA | \ - PGTLB_CACHE_SH_ISH) - -#define RGTLB_TO_PGTLB 8 -#define DECODER_PAGE_ENTRY_SIZE 64 -#define DECODER_PAGE_SIZE (1 << 12) -#define DECODER_PAGE_TABLE_SIZE (1 << 12) -#define PAGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * DECODER_PAGE_SIZE) -#define RANGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * \ - DECODER_PAGE_SIZE * \ - RGTLB_TO_PGTLB) +struct decoder_map_info { + phys_addr_t pa; + phys_addr_t uba; + u64 size; + u32 tpg_num; + u8 order_id; + u8 order_type; + u64 eid_low; + u64 eid_high; + u32 token_id; + u32 token_value; + u32 upi; + u32 src_eid; +}; void ub_decoder_init(struct ub_entity *uent); void ub_decoder_uninit(struct ub_entity *uent); @@ -121,4 +108,6 @@ void ub_init_decoder_usi(struct ub_entity *uent); void ub_uninit_decoder_usi(struct ub_entity *uent); int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, u64 size, enum ub_cmd_op_type op); +int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); +int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); #endif /* __DECODER_H__ */ diff --git a/drivers/ub/ubus/omm.h b/drivers/ub/ubus/omm.h deleted file mode 100644 index e90ce9cb1f7f..000000000000 --- a/drivers/ub/ubus/omm.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. 
- */ - -#ifndef __OMM_H__ -#define __OMM_H__ - -#include - -extern u8 ubc_feature; - -struct decoder_map_info { - phys_addr_t pa; - phys_addr_t uba; - u64 size; - u32 tpg_num; - u8 order_id; - u8 order_type; - u64 eid_low; - u64 eid_high; - u32 token_id; - u32 token_value; - u32 upi; - u32 src_eid; -}; - -void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base); -int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); -int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); - -#endif /* __OMM_H__ */ diff --git a/drivers/ub/ubus/resource.c b/drivers/ub/ubus/resource.c index d4516d672bb5..6e8ceeb9fa93 100644 --- a/drivers/ub/ubus/resource.c +++ b/drivers/ub/ubus/resource.c @@ -12,7 +12,6 @@ #include "ubus.h" #include "msg.h" #include "decoder.h" -#include "omm.h" #include "resource.h" struct query_token_msg_pld_req { diff --git a/drivers/ub/ubus/ubus_controller.h b/drivers/ub/ubus/ubus_controller.h index 4b3c7a74a414..04eb4a3d7648 100644 --- a/drivers/ub/ubus/ubus_controller.h +++ b/drivers/ub/ubus/ubus_controller.h @@ -6,6 +6,9 @@ #ifndef __UBUS_CONTROLLER_H__ #define __UBUS_CONTROLLER_H__ +#include +#include "decoder.h" + struct ub_bus_controller; struct ub_bus_controller_ops { int (*eu_table_init)(struct ub_bus_controller *ubc); @@ -18,6 +21,12 @@ struct ub_bus_controller_ops { void (*register_decoder_base_addr)(struct ub_bus_controller *ubc, u64 *cmd_queue, u64 *event_queue); int (*entity_enable)(struct ub_entity *uent, u8 enable); + int (*create_decoder_table)(struct ub_decoder *decoder); + void (*free_decoder_table)(struct ub_decoder *decoder); + int (*decoder_map)(struct ub_decoder *decoder, + struct decoder_map_info *info); + int (*decoder_unmap)(struct ub_decoder *decoder, phys_addr_t addr, + u64 size); KABI_RESERVE(1) KABI_RESERVE(2) diff --git a/drivers/ub/ubus/vendor/hisilicon/Makefile b/drivers/ub/ubus/vendor/hisilicon/Makefile index 998c0e09aeef..fec1dbe15796 100644 --- a/drivers/ub/ubus/vendor/hisilicon/Makefile +++ b/drivers/ub/ubus/vendor/hisilicon/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ hisi_ubus-objs := hisi-ubus.o controller.o vdm.o local-ras.o msg.o msg-core.o -hisi_ubus-objs += msg-debugfs.o eu-table.o memory.o +hisi_ubus-objs += msg-debugfs.o eu-table.o memory.o hisi-decoder.o obj-$(CONFIG_UB_HISI_UBUS) += hisi_ubus.o diff --git a/drivers/ub/ubus/vendor/hisilicon/controller.c b/drivers/ub/ubus/vendor/hisilicon/controller.c index d7ea5c118d32..6c9c8e320479 100644 --- a/drivers/ub/ubus/vendor/hisilicon/controller.c +++ b/drivers/ub/ubus/vendor/hisilicon/controller.c @@ -10,6 +10,7 @@ #include #include "../../ubus_controller.h" +#include "hisi-decoder.h" #include "hisi-ubus.h" #include "hisi-msg.h" @@ -23,17 +24,12 @@ static struct ub_bus_controller_ops hi_ubc_ops = { .unregister_ubmem_irq = hi_unregister_ubmem_irq, .register_decoder_base_addr = hi_register_decoder_base_addr, .entity_enable = hi_send_entity_enable_msg, + .create_decoder_table = hi_create_decoder_table, + .free_decoder_table = hi_free_decoder_table, + .decoder_map = hi_decoder_map, + .decoder_unmap = hi_decoder_unmap, }; -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, u64 *cmd_queue, - u64 *event_queue) -{ - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; - - *cmd_queue = data->io_decoder_cmdq; - *event_queue = data->io_decoder_evtq; -} - static void ub_bus_controller_debugfs_init(struct ub_bus_controller *ubc) { if (!debugfs_initialized()) diff --git a/drivers/ub/ubus/omm.c 
b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c similarity index 86% rename from drivers/ub/ubus/omm.c rename to drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c index b8b59a9da4f1..ac1fa0498ffc 100644 --- a/drivers/ub/ubus/omm.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c @@ -3,11 +3,13 @@ * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. */ -#define pr_fmt(fmt) "ubus omm: " fmt +#define pr_fmt(fmt) "ubus hisi decoder: " fmt #include -#include "decoder.h" -#include "omm.h" +#include +#include "../../ubus.h" +#include "hisi-ubus.h" +#include "hisi-decoder.h" enum entry_type { INVALID_ENTRY = 0x0, @@ -95,6 +97,7 @@ struct range_table_entry { u64 reserve11 : 12; }; +#define DECODER_PAGE_TABLE_ENTRY_SIZE 8 #define UBA_ADDR_OFFSET 12 #define DECODER_PAGE_INDEX_LOC 20 @@ -107,6 +110,11 @@ struct range_table_entry { #define DECODER_RGTLB_ADDRESS_MASK GENMASK_ULL(34, 20) #define DECODER_RGTLB_ADDRESS_OFFSET 20 #define TOKEN_VALID_MASK GENMASK(0, 0) +#define MEM_LMT_MAX 0x7FFF +#define RANGE_UBA_LOW_MASK GENMASK_ULL(34, 20) +#define RANGE_UBA_HIGH_MASK GENMASK_ULL(63, 35) +#define UBA_CARRY 0x800000000 +#define UBA_NOCARRY 0x0 #define get_pgtlb_idx(decoder, pa) ((((pa) - (decoder)->mmio_base_addr) & \ DECODER_PAGE_TABLE_MASK) >> \ @@ -485,12 +493,6 @@ static int handle_page_table(struct ub_decoder *decoder, u64 *offset, return ret; } -#define MEM_LMT_MAX 0x7FFF -#define RANGE_UBA_LOW_MASK GENMASK_ULL(34, 20) -#define RANGE_UBA_HIGH_MASK GENMASK_ULL(63, 35) -#define UBA_CARRY 0x800000000 -#define UBA_NOCARRY 0x0 - static void fill_range_table(struct ub_decoder *decoder, struct range_table_entry *rg_entry, struct decoder_map_info *info, u64 *offset) @@ -593,7 +595,7 @@ static int handle_table(struct ub_decoder *decoder, ret); /* if it is map operation, revert it. 
unmap operation can't revert */ if (is_map) - (void)ub_decoder_unmap(decoder, info->pa, + (void)hi_decoder_unmap(decoder, info->pa, rollback_size); break; } @@ -601,7 +603,88 @@ static int handle_table(struct ub_decoder *decoder, return ret; } -int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) +static void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base) +{ + struct page_table_entry *pgtlb_entry; + int i; + + for (i = 0; i < DECODER_PAGE_TABLE_SIZE; i++) { + pgtlb_entry = (struct page_table_entry *)pgtlb_base + i; + pgtlb_entry->entry_type = PAGE_TABLE; + pgtlb_entry->next_lv_addr = decoder->invalid_page_dma; + pgtlb_entry->pgtlb_attr = PGTLB_ATTR_DEFAULT; + } +} + +void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, + u64 *cmd_queue, u64 *event_queue) +{ + struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; + + *cmd_queue = data->io_decoder_cmdq; + *event_queue = data->io_decoder_evtq; +} + +int hi_create_decoder_table(struct ub_decoder *decoder) +{ + struct page_table_desc *invalid_desc = &decoder->invalid_desc; + struct page_table *pgtlb; + void *pgtlb_base; + size_t size; + + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + pgtlb = &decoder->pgtlb; + pgtlb_base = dmam_alloc_coherent(decoder->dev, size, + &pgtlb->pgtlb_dma, GFP_KERNEL); + if (!pgtlb_base) + return -ENOMEM; + + pgtlb->pgtlb_base = pgtlb_base; + + size = sizeof(*pgtlb->desc_base) * DECODER_PAGE_TABLE_SIZE; + pgtlb->desc_base = kzalloc(size, GFP_KERNEL); + if (!pgtlb->desc_base) + goto release_pgtlb; + + invalid_desc->page_base = dmam_alloc_coherent(decoder->dev, + RANGE_TABLE_PAGE_SIZE, + &invalid_desc->page_dma, + GFP_KERNEL); + if (!invalid_desc->page_base) + goto release_desc; + + decoder->invalid_page_dma = (invalid_desc->page_dma & + DECODER_PGTBL_PGPRT_MASK) >> + DECODER_DMA_PAGE_ADDR_OFFSET; + + ub_decoder_init_page_table(decoder, pgtlb_base); + + return 0; + +release_desc: + kfree(pgtlb->desc_base); + pgtlb->desc_base = NULL; +release_pgtlb: + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + dmam_free_coherent(decoder->dev, size, pgtlb_base, pgtlb->pgtlb_dma); + return -ENOMEM; +} + +void hi_free_decoder_table(struct ub_decoder *decoder) +{ + struct page_table_desc *invalid_desc = &decoder->invalid_desc; + size_t size; + + dmam_free_coherent(decoder->dev, RANGE_TABLE_PAGE_SIZE, + invalid_desc->page_base, invalid_desc->page_dma); + kfree(decoder->pgtlb.desc_base); + + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + dmam_free_coherent(decoder->dev, size, decoder->pgtlb.pgtlb_base, + decoder->pgtlb.pgtlb_dma); +} + +int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) { int ret; struct decoder_map_info info = { @@ -609,10 +692,6 @@ int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) .size = size, }; - if (!decoder) { - pr_err("unmap mmio decoder ptr is null\n"); - return -EINVAL; - } if (size < SZ_1M) size = SZ_1M; ret = handle_table(decoder, &info, false); @@ -621,12 +700,8 @@ int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) return ub_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); } -int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) +int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) { - if (!decoder || !info) { - pr_err("decoder or map info is null\n"); - return -EINVAL; - } if (info->size < SZ_1M) info->size = SZ_1M; 
ub_info(decoder->uent, @@ -637,16 +712,3 @@ int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) return handle_table(decoder, info, true); } - -void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base) -{ - struct page_table_entry *pgtlb_entry; - int i; - - for (i = 0; i < DECODER_PAGE_TABLE_SIZE; i++) { - pgtlb_entry = (struct page_table_entry *)pgtlb_base + i; - pgtlb_entry->entry_type = PAGE_TABLE; - pgtlb_entry->next_lv_addr = decoder->invalid_page_dma; - pgtlb_entry->pgtlb_attr = PGTLB_ATTR_DEFAULT; - } -} diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h new file mode 100644 index 000000000000..50658ef7b9cb --- /dev/null +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. + */ + +#ifndef __HISI_DECODER_H__ +#define __HISI_DECODER_H__ + +#include +#include +#include "../../decoder.h" + +#define DECODER_PGTBL_PGPRT_MASK GENMASK_ULL(47, 12) +#define DECODER_DMA_PAGE_ADDR_OFFSET 12 + +#define PGTLB_CACHE_IR_NC 0b00 +#define PGTLB_CACHE_IR_WBRA 0b01 +#define PGTLB_CACHE_IR_WT 0b10 +#define PGTLB_CACHE_IR_WB 0b11 +#define PGTLB_CACHE_OR_NC 0b0000 +#define PGTLB_CACHE_OR_WBRA 0b0100 +#define PGTLB_CACHE_OR_WT 0b1000 +#define PGTLB_CACHE_OR_WB 0b1100 +#define PGTLB_CACHE_SH_NSH 0b000000 +#define PGTLB_CACHE_SH_OSH 0b100000 +#define PGTLB_CACHE_SH_ISH 0b110000 + +#define PGTLB_ATTR_DEFAULT (PGTLB_CACHE_IR_WBRA | \ + PGTLB_CACHE_OR_WBRA | \ + PGTLB_CACHE_SH_ISH) + +#define RGTLB_TO_PGTLB 8 +#define DECODER_PAGE_ENTRY_SIZE 64 +#define DECODER_PAGE_SIZE (1 << 12) +#define DECODER_PAGE_TABLE_SIZE (1 << 12) +#define PAGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * DECODER_PAGE_SIZE) +#define RANGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * \ + DECODER_PAGE_SIZE * \ + RGTLB_TO_PGTLB) + +void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, + u64 *cmd_queue, u64 *event_queue); + +int hi_create_decoder_table(struct ub_decoder *decoder); +void hi_free_decoder_table(struct ub_decoder *decoder); + +int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); +int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); + +#endif /* __HISI_DECODER_H__ */ diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 092695e9d43c..92e97c257302 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -43,8 +43,6 @@ int hi_mem_decoder_create(struct ub_bus_controller *ubc); void hi_mem_decoder_remove(struct ub_bus_controller *ubc); void hi_register_ubmem_irq(struct ub_bus_controller *ubc); void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc); -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, u64 *cmd_queue, - u64 *event_queue); int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable); int ub_bus_controller_probe(struct ub_bus_controller *ubc); -- Gitee From 1fda81ee2738379993f1fbe6592c4fee6cd39821 Mon Sep 17 00:00:00 2001 From: Junlong Zheng Date: Mon, 1 Dec 2025 15:37:47 +0800 Subject: [PATCH 153/243] ub:ubus: fix bug of msg workqueue null commit 08ec74f94b034b0face4f8f0509bb43e1a1cd3be openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ 1.fix bug of msg 
workqueue null 2.don't cfg ubc0 route table during cluster mode Signed-off-by: Junlong Zheng Signed-off-by: Shi Yang --- drivers/ub/ubus/msg.c | 12 +++++++----- drivers/ub/ubus/route.c | 4 ++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/msg.c b/drivers/ub/ubus/msg.c index 1d1893a8f54d..54f77128ad2f 100644 --- a/drivers/ub/ubus/msg.c +++ b/drivers/ub/ubus/msg.c @@ -162,7 +162,7 @@ struct workqueue_struct *get_rx_msg_wq(u8 msg_code) return rx_msg_wq[msg_code]; } -static bool msg_rx_flag; +static atomic_t msg_rx_flag; int message_rx_init(void) { @@ -186,18 +186,20 @@ int message_rx_init(void) rx_msg_wq[i] = q; } - msg_rx_flag = true; + wmb(); /* Ensure the register is written correctly. */ + atomic_set(&msg_rx_flag, 1); return 0; } void message_rx_uninit(void) { -#define MSG_RX_WAIT_US 1000 +#define MSG_RX_WAIT_US 15000 struct workqueue_struct *q; int i; - msg_rx_flag = false; + atomic_set(&msg_rx_flag, 0); + wmb(); /* Ensure the register is written correctly. */ /* For cpus still handle rx msg in interrupt context */ udelay(MSG_RX_WAIT_US); @@ -297,7 +299,7 @@ int message_rx_handler(struct ub_bus_controller *ubc, void *pkt, u16 len) struct msg_extended_header *msgetah = &header->msgetah; struct ub_rx_msg_task *task; - if (!msg_rx_flag) + if (!atomic_read(&msg_rx_flag)) return -EBUSY; if (len < MSG_PKT_HEADER_SIZE) { diff --git a/drivers/ub/ubus/route.c b/drivers/ub/ubus/route.c index 364bf78d93c3..ef3d462a90ee 100644 --- a/drivers/ub/ubus/route.c +++ b/drivers/ub/ubus/route.c @@ -505,6 +505,10 @@ static void ub_set_route_table_entry(struct ub_entity *uent, u32 dst_cna, if (uent->port_nums == 1) return; + /* In a cluster scenario, do not configure the UBC routing table. */ + if (is_ibus_controller(uent) && uent->ubc->cluster) + return; + pr_info("cna %#x uent set dstcna %#x route\n", uent->cna, dst_cna); for (i = 0; i < EBW(uent->port_nums); i++) { -- Gitee From a01397c05c8ab5dbd376eaa79da24681399f5a5d Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Fri, 14 Nov 2025 15:59:01 +0800 Subject: [PATCH 154/243] vfio:ubus vfio-ub support ub entity enable commit 49e93aebab5dc6a0f1901977aafdf4fca30939b9 openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ vfio-ub support ub entity enable. Signed-off-by: Yahui Liu Signed-off-by: Shi Yang --- drivers/vfio/ubus/vfio_ub_config.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/vfio/ubus/vfio_ub_config.c b/drivers/vfio/ubus/vfio_ub_config.c index aaf398281d9e..0addb2526c16 100644 --- a/drivers/vfio/ubus/vfio_ub_config.c +++ b/drivers/vfio/ubus/vfio_ub_config.c @@ -588,6 +588,9 @@ static int vfio_ub_cfg1_basic_write(struct vfio_ub_core_device *vdev, u64 pos, if (count < 0) return count; + if (pos == UB_ENTITY_RS_ACCESS_EN) + ub_entity_enable(vdev->uent, val & 0x1); + buf = vfio_ub_find_cfg_buf(vdev, UB_CFG1_BASIC_CAP); if (!buf) return -EFAULT; -- Gitee From 1f25b16845bfe8c751b1bddb18876fd18965d1df Mon Sep 17 00:00:00 2001 From: Anzhe Li Date: Sat, 29 Nov 2025 11:53:40 +0800 Subject: [PATCH 155/243] ub:ubus adapt port local ras commit 3764805d000a62144f97c87c2ecee5c918a0801e openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ Adapt port local ras, and add port reset function in cluster mode. 
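With this change, share-port users register a single event callback instead of separate reset hooks. A minimal sketch of the consumer side (the callback and ops-variable names here are illustrative; the event_notify field and the UB_PORT_EVENT_* constants come from this series):

	static void my_port_event_notify(struct ub_entity *ue, u16 port_id, int event)
	{
		/* Quiesce traffic on UB_PORT_EVENT_RESET_PREPARE and resume it
		 * on UB_PORT_EVENT_RESET_DONE; link up/down notifications now
		 * arrive through the same callback.
		 */
	}

	static struct ub_share_port_ops my_share_port_ops = {
		.event_notify = my_port_event_notify,
	};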
Signed-off-by: Anzhe Li Signed-off-by: Shi Yang --- drivers/ub/ubus/link.c | 14 ++-- drivers/ub/ubus/pool.c | 4 +- drivers/ub/ubus/port.c | 79 ++++++++------------ drivers/ub/ubus/port.h | 7 +- drivers/ub/ubus/reset.c | 6 +- drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 2 +- drivers/ub/ubus/vendor/hisilicon/local-ras.c | 40 +++++++++- drivers/ub/ubus/vendor/hisilicon/vdm.c | 45 +++++++++++ drivers/ub/ubus/vendor/hisilicon/vdm.h | 11 +++ 9 files changed, 142 insertions(+), 66 deletions(-) diff --git a/drivers/ub/ubus/link.c b/drivers/ub/ubus/link.c index 266d8e828143..001139f3ad63 100644 --- a/drivers/ub/ubus/link.c +++ b/drivers/ub/ubus/link.c @@ -285,13 +285,13 @@ static void port_link_state_change(struct ub_port *port, struct ub_port *r_port) void ublc_link_up_handle(struct ub_port *port) { struct ub_entity *uent = port->uent; - struct ub_port *r_port; struct ub_entity *r_uent; + struct ub_port *r_port; int ret; if (port->r_uent) { - ub_err(uent, "port%u is already up\n", port->index); - return; + ub_warn(uent, "port%u is already up\n", port->index); + goto link_up_notify; } device_lock(&uent->dev); @@ -324,6 +324,8 @@ void ublc_link_up_handle(struct ub_port *port) ub_info(uent, "port%u link up\n", port->index); out: device_unlock(&uent->dev); +link_up_notify: + ub_notify_share_port(port, UB_PORT_EVENT_LINK_UP); } void ublc_link_down_handle(struct ub_port *port) @@ -332,8 +334,8 @@ void ublc_link_down_handle(struct ub_port *port) struct ub_port *r_port; if (!port->r_uent) { - ub_err(uent, "port%u is already down\n", port->index); - return; + ub_warn(uent, "port%u is already down\n", port->index); + goto link_down_notify; } device_lock(&uent->dev); @@ -355,6 +357,8 @@ void ublc_link_down_handle(struct ub_port *port) device_unlock(&uent->dev); ub_info(uent, "port%u link down\n", port->index); +link_down_notify: + ub_notify_share_port(port, UB_PORT_EVENT_LINK_DOWN); } void ub_link_change_handler(struct work_struct *work) diff --git a/drivers/ub/ubus/pool.c b/drivers/ub/ubus/pool.c index e86b19b58f63..414e9dba0c20 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -613,9 +613,9 @@ static void ub_port_reset_notify_handler(struct ub_bus_controller *ubc, void *ms port = ubc->uent->ports + pld->port_index; if (port->shareable && port->domain_boundary) { if (pld->type == RESET_PREPARE) - ub_notify_share_port(port, RESET_PREPARE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); else if (pld->type == RESET_DONE) - ub_notify_share_port(port, RESET_DONE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); } rsp: diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index 36950c24e344..a8a238df8cc4 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ -577,49 +577,46 @@ static DECLARE_RWSEM(ub_share_port_notify_list_rwsem); struct ub_share_port_notify_node { struct ub_entity *parent; - struct ub_entity *idev; + struct ub_entity *entity; u16 port_id; struct ub_share_port_ops *ops; struct list_head node; }; -int ub_register_share_port(struct ub_entity *idev, u16 port_id, +int ub_register_share_port(struct ub_entity *entity, u16 port_id, struct ub_share_port_ops *ops) { struct ub_share_port_notify_node *notify_node; struct ub_entity *parent; struct ub_port *port; - if (unlikely(!idev || !ops)) + if (unlikely(!entity || !ops)) return -EINVAL; - if (!is_idev(idev)) { - ub_err(idev, "don't support non-idev device with type %u register share port\n", - uent_type(idev)); + if (!is_idev(entity) && !is_ibus_controller(entity)) { + ub_err(entity, + "don't 
support device with type %u register share port\n", + uent_type(entity)); return -EINVAL; } /* get primary entity first */ - parent = idev; - while (!is_primary(parent)) - parent = parent->pue; - - /* check parent is controller */ - parent = to_ub_entity(parent->dev.parent); - if (!is_ibus_controller(parent)) { - ub_err(idev, "don't support register share port at non-controller device with type %u\n", - uent_type(parent)); - return -EINVAL; - } - - if (port_id >= parent->port_nums) { - ub_err(parent, "port id %u exceeds port num %u\n", port_id, - parent->port_nums); - return -EINVAL; + parent = entity; + if (is_idev(parent)) { + while (!is_primary(parent)) + parent = parent->pue; + + /* check parent is controller */ + parent = to_ub_entity(parent->dev.parent); + if (!is_ibus_controller(parent)) { + ub_err(entity, "don't support register share port at non-controller device with type %u\n", + uent_type(parent)); + return -EINVAL; + } } port = parent->ports + port_id; - if (!port->shareable) { + if (is_idev(entity) && !port->shareable) { ub_err(parent, "port%u isn't shareable\n", port_id); return -EINVAL; } @@ -629,7 +626,7 @@ int ub_register_share_port(struct ub_entity *idev, u16 port_id, return -ENOMEM; notify_node->parent = parent; - notify_node->idev = idev; + notify_node->entity = entity; notify_node->port_id = port_id; notify_node->ops = ops; INIT_LIST_HEAD(¬ify_node->node); @@ -638,33 +635,33 @@ int ub_register_share_port(struct ub_entity *idev, u16 port_id, list_add_tail(¬ify_node->node, &ub_share_port_notify_list); up_write(&ub_share_port_notify_list_rwsem); - ub_info(idev, "register share port at %u success\n", port_id); + ub_info(entity, "register share port at %u success\n", port_id); return 0; } EXPORT_SYMBOL_GPL(ub_register_share_port); -void ub_unregister_share_port(struct ub_entity *idev, u16 port_id, +void ub_unregister_share_port(struct ub_entity *entity, u16 port_id, struct ub_share_port_ops *ops) { struct ub_share_port_notify_node *notify_node; - if (unlikely(!idev)) + if (unlikely(!entity)) return; down_write(&ub_share_port_notify_list_rwsem); list_for_each_entry(notify_node, &ub_share_port_notify_list, node) { - if (notify_node->idev != idev || + if (notify_node->entity != entity || notify_node->port_id != port_id || notify_node->ops != ops) continue; list_del(¬ify_node->node); kfree(notify_node); - ub_info(idev, "unregister share port at %u success\n", port_id); + ub_info(entity, "unregister share port at %u success\n", port_id); goto unlock; } - ub_err(idev, "share port %u isn't registered, unregister failed\n", + ub_err(entity, "share port %u isn't registered, unregister failed\n", port_id); unlock: up_write(&ub_share_port_notify_list_rwsem); @@ -672,13 +669,13 @@ void ub_unregister_share_port(struct ub_entity *idev, u16 port_id, EXPORT_SYMBOL_GPL(ub_unregister_share_port); void ub_notify_share_port(struct ub_port *port, - enum ub_share_port_notify_type type) + enum ub_port_event type) { struct ub_share_port_notify_node *notify_node; struct ub_share_port_ops *ops; struct ub_entity *uent; - if (!port || type >= NOTIFY_TYPE_MAX) + if (!port || type > UB_PORT_EVENT_RESET_DONE) return; uent = port->uent; @@ -689,23 +686,13 @@ void ub_notify_share_port(struct ub_port *port, continue; ops = notify_node->ops; - switch (type) { - case RESET_PREPARE: - if (ops->reset_prepare) - ops->reset_prepare(notify_node->idev, - notify_node->port_id); - break; - case RESET_DONE: - if (ops->reset_done) - ops->reset_done(notify_node->idev, - notify_node->port_id); - break; - default: - 
break; - } + if (ops->event_notify) + ops->event_notify(notify_node->entity, + notify_node->port_id, type); } up_read(&ub_share_port_notify_list_rwsem); } +EXPORT_SYMBOL_GPL(ub_notify_share_port); bool ub_port_check_link_up(struct ub_port *port) { diff --git a/drivers/ub/ubus/port.h b/drivers/ub/ubus/port.h index 0350908c531a..21a2c7d33299 100644 --- a/drivers/ub/ubus/port.h +++ b/drivers/ub/ubus/port.h @@ -8,10 +8,9 @@ #define for_each_uent_port(p, d) \ for ((p) = (d)->ports; ((p) - (d)->ports) < (d)->port_nums; (p)++) -enum ub_share_port_notify_type { +enum ub_port_reset_notify_type { RESET_PREPARE, - RESET_DONE, - NOTIFY_TYPE_MAX + RESET_DONE }; struct ub_port; @@ -24,7 +23,7 @@ void ub_ports_del(struct ub_entity *uent); int ub_ports_setup(struct ub_entity *uent); void ub_ports_unset(struct ub_entity *uent); void ub_notify_share_port(struct ub_port *port, - enum ub_share_port_notify_type type); + enum ub_port_event type); int ub_port_write_dword(struct ub_port *port, u32 pos, u32 val); bool ub_port_check_link_up(struct ub_port *port); diff --git a/drivers/ub/ubus/reset.c b/drivers/ub/ubus/reset.c index 596d848c2c11..51d984d25b8f 100644 --- a/drivers/ub/ubus/reset.c +++ b/drivers/ub/ubus/reset.c @@ -293,8 +293,7 @@ int ub_port_reset(struct ub_entity *dev, int port_id) return -EINVAL; } - if (port->shareable) - ub_notify_share_port(port, RESET_PREPARE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); /* enable port reset */ ret = ub_port_write_dword(port, UB_PORT_RST, 0x01); @@ -310,8 +309,7 @@ int ub_port_reset(struct ub_entity *dev, int port_id) device_unlock(&dev->dev); if (ub_wait_port_complete(port)) { - if (port->shareable) - ub_notify_share_port(port, RESET_DONE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); port->link_state = LINK_STATE_NORMAL; ub_info(dev, "port(%d) reset success!\n", port_id); return ret; diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 92e97c257302..44e5fc8165ba 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -44,7 +44,7 @@ void hi_mem_decoder_remove(struct ub_bus_controller *ubc); void hi_register_ubmem_irq(struct ub_bus_controller *ubc); void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc); int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable); - +int hi_send_port_reset_msg(struct ub_entity *uent, u16 port_idx); int ub_bus_controller_probe(struct ub_bus_controller *ubc); void ub_bus_controller_remove(struct ub_bus_controller *ubc); diff --git a/drivers/ub/ubus/vendor/hisilicon/local-ras.c b/drivers/ub/ubus/vendor/hisilicon/local-ras.c index 9dc016555732..cf65f06da93f 100644 --- a/drivers/ub/ubus/vendor/hisilicon/local-ras.c +++ b/drivers/ub/ubus/vendor/hisilicon/local-ras.c @@ -9,7 +9,9 @@ #include "../../ubus.h" #include "../../ubus_driver.h" #include "../../reset.h" +#include "../../port.h" #include "local-ras.h" +#include "hisi-ubus.h" struct sub_module_info { u32 sub_module_id; @@ -197,7 +199,31 @@ static int ubus_port_recover(struct ub_entity *uent, u16 port_id) return 0; } -static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id) +static int ubus_port_recover_cluster(struct ub_entity *uent, u16 port_id) +{ + struct ub_port *port; + int ret; + + if (port_id >= uent->port_nums || uent->ports[port_id].type != PHYSICAL) { + pr_err("port id is over port nums or port type is not physical\n"); + return -EINVAL; + } + + port = uent->ports + port_id; + 
ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); + + ret = hi_send_port_reset_msg(uent, port_id); + if (ret) { + pr_err("ub vdm port reset failed, ret:%d\n", ret); + return ret; + } + + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); + + return 0; +} + +static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id, bool cluster) { #define NL_PORTS 2 /* @@ -210,7 +236,10 @@ static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id) for (i = 0; i < NL_PORTS; i++) { port_id += i; - ret = ubus_port_recover(uent, port_id); + if (!cluster) + ret = ubus_port_recover(uent, port_id); + else + ret = ubus_port_recover_cluster(uent, port_id); if (ret) { ub_err(uent, "port[%u] recover failed, ret=%d.\n", port_id, ret); return ret; @@ -230,11 +259,14 @@ static int ubus_recover(struct ub_entity *uent, if (is_nl_local_ras(edata->sub_module_id) && is_nl_ssu_link_credi_overtime_err(edata)) { nl_id = edata->core_id; - return nl_ssu_link_credi_overtime_recover(uent, nl_id); + return nl_ssu_link_credi_overtime_recover(uent, nl_id, uent->ubc->cluster); } port_id = (int)edata->port_id; - return ubus_port_recover(uent, port_id); + if (uent->ubc->cluster) + return ubus_port_recover_cluster(uent, port_id); + else + return ubus_port_recover(uent, port_id); } static void hisi_ubus_handle_error(struct ub_entity *uent, diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index 2d0444a585c9..f95da0843e26 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -537,3 +537,48 @@ int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable) return 0; } + +int hi_send_port_reset_msg(struct ub_entity *uent, u16 port_idx) +{ + struct port_reset_pld *rst_pld; + struct vdm_msg_pkt pkt = {}; + struct msg_info info = {}; + struct msg_pkt_dw0 *pld_dw0; + u8 status; + int ret; + + if (!uent->ubc->cluster) + return 0; + + ub_msg_pkt_header_init(&pkt.header, uent, VDM_PORT_RESET_PLD_SIZE, + code_gen(UB_MSG_CODE_VDM, UB_VENDOR_MSG, + MSG_REQ), true); + + pkt.guid_high = *(u64 *)(&uent->ubc->uent->guid.dw[SZ_2]); + pld_dw0 = &pkt.pld_dw0; + pld_dw0->opcode = VDM_OPCODE_UB2FM_COMM_MSG; + pld_dw0->sub_opcode = VDM_SUB_OPCODE_PORT_RESET; + rst_pld = &pkt.reset_pld; + rst_pld->port_idx = port_idx; + + message_info_init(&info, uent->ubc->uent, &pkt, &pkt, + (VDM_PORT_RESET_SIZE << MSG_REQ_SIZE_OFFSET) | + VDM_PORT_RESET_SIZE); + + ub_info(uent, "Sync request port reset msg\n"); + + ret = hi_message_sync_request(uent->message->mdev, &info, + pkt.header.msgetah.code); + if (ret) { + ub_err(uent, "msg sync request ret=%d\n", ret); + return ret; + } + + status = pkt.header.msgetah.rsp_status; + if (status != UB_MSG_RSP_SUCCESS) { + ub_err(uent, "msg rsp status=%#02x\n", status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.h b/drivers/ub/ubus/vendor/hisilicon/vdm.h index 183449aa082e..725288d6abac 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.h +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.h @@ -24,6 +24,7 @@ enum vdm_fm2ub_sub_opcode { enum vdm_ub2fm_sub_opcode { VDM_SUB_OPCODE_ENTITY_ENABLE = 0x1, + VDM_SUB_OPCODE_PORT_RESET = 0x2, }; struct msg_pkt_dw0 { @@ -103,6 +104,13 @@ struct idev_ue_rls_pld { }; #define IDEV_UE_RLS_PLD_TOTAL_SIZE 36 +struct port_reset_pld { + /* DW1 */ + u16 rsvd; + u16 port_idx; +}; +#define VDM_PORT_RESET_PLD_SIZE 16 + #define MSG_IDEV_MUE_REG_SIZE \ (MSG_PKT_HEADER_SIZE + IDEV_MUE_REG_PLD_TOTAL_SIZE) #define MSG_IDEV_MUE_RLS_SIZE \ @@ 
-111,6 +119,8 @@ struct idev_ue_rls_pld { (MSG_PKT_HEADER_SIZE + IDEV_UE_REG_PLD_TOTAL_SIZE) #define MSG_IDEV_UE_RLS_SIZE \ (MSG_PKT_HEADER_SIZE + IDEV_UE_RLS_PLD_TOTAL_SIZE) +#define VDM_PORT_RESET_SIZE \ + (MSG_PKT_HEADER_SIZE + VDM_PORT_RESET_PLD_SIZE) #define VENDOR_GUID_PLD_SIZE 8 @@ -119,6 +129,7 @@ struct vdm_msg_pkt { u64 guid_high; struct msg_pkt_dw0 pld_dw0; union { + struct port_reset_pld reset_pld; struct entity_enable_pld enable_pld; struct idev_pue_reg_pld pd_reg_pld; struct idev_pue_rls_pld pd_rls_pld; -- Gitee From b6a9f279c0fe719d8a7214b4778c856b0797bb17 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Thu, 4 Dec 2025 19:23:42 +0800 Subject: [PATCH 156/243] ub:unic Adaptation of the port reset interface commit 3c85899aa7a42bf9b40a4432bd0eb57e5b92ebab openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID7LHB CVE: NA ---------------------------------------------------------------- The ubus port reset interface is changed. Therefore, the port reset interface is adapted. Signed-off-by: Yixi Shen Signed-off-by: Xiongchuan Zhou Signed-off-by: Shi Yang --- drivers/ub/ubase/ubase_ubus.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/ub/ubase/ubase_ubus.c b/drivers/ub/ubase/ubase_ubus.c index a589915cd686..a0351b283890 100644 --- a/drivers/ub/ubase/ubase_ubus.c +++ b/drivers/ub/ubase/ubase_ubus.c @@ -114,26 +114,22 @@ static void ubase_ubus_uninit(struct ub_entity *ue) ub_entity_enable(ue, 0); } -static void ubase_port_reset_prepare(struct ub_entity *ue, u16 port_id) +static void ubase_port_event_notify(struct ub_entity *ue, u16 port_id, int event) { struct ubase_dev *udev = dev_get_drvdata(&ue->dev); - ubase_info(udev, "port %u reset prepare.\n", port_id); - ubase_port_down(udev); -} - -static void ubase_port_reset_done(struct ub_entity *ue, u16 port_id) -{ - struct ubase_dev *udev = dev_get_drvdata(&ue->dev); - - ubase_port_up(udev); - ubase_info(udev, "port %u reset done.\n", port_id); - udev->reset_stat.port_reset_cnt++; + if (event == UB_PORT_EVENT_RESET_PREPARE) { + ubase_info(udev, "port %u reset prepare.\n", port_id); + ubase_port_down(udev); + } else if (event == UB_PORT_EVENT_RESET_DONE) { + ubase_port_up(udev); + ubase_info(udev, "port %u reset done.\n", port_id); + udev->reset_stat.port_reset_cnt++; + } } static struct ub_share_port_ops ubase_share_port_ops = { - .reset_prepare = ubase_port_reset_prepare, - .reset_done = ubase_port_reset_done + .event_notify = ubase_port_event_notify }; static int ubase_ubus_reg_share_port(struct ubase_dev *udev) -- Gitee From 4f1e99063938fffc3f450df8ce086688fb4580bf Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 25 Nov 2025 19:27:05 +0800 Subject: [PATCH 157/243] ub:ubus: add active_mutex in ub_entity commit f21f7d173065acedec14639e7522420e62baefde openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Add mutex to the entity structure. 
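The activate/deactivate paths then serialize on the new mutex instead of device_trylock(); the pattern, abbreviated from the diff below:

	mutex_lock(&target_dev->active_mutex);
	if (ub_entity_test_priv_flag(target_dev, UB_ENTITY_ACTIVE)) {
		mutex_unlock(&target_dev->active_mutex);
		return 0;
	}
	/* ... perform the state change ... */
	mutex_unlock(&target_dev->active_mutex);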
Signed-off-by: Yiyu Liu Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/ubus_entity.c | 25 ++++++++++++++++--------- include/ub/ubus/ubus.h | 12 +++++++----- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 6031469a27b9..4fa1a8533e71 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -53,6 +53,8 @@ struct ub_entity *ub_alloc_ent(void) INIT_LIST_HEAD(&uent->slot_list); INIT_LIST_HEAD(&uent->instance_node); + mutex_init(&uent->active_mutex); + uent->dev.type = &ub_dev_type; uent->cna = 0; uent->tid = 0; /* default tid according to ummu */ @@ -971,12 +973,17 @@ void ub_entity_enable(struct ub_entity *uent, u8 enable) ub_cfg_write_byte(uent, UB_BUS_ACCESS_EN, enable); ub_cfg_write_byte(uent, UB_ENTITY_RS_ACCESS_EN, enable); - if (!enable && !ub_entity_test_priv_flag(uent, UB_ENTITY_ACTIVE)) + mutex_lock(&uent->active_mutex); + + if (!enable && !ub_entity_test_priv_flag(uent, UB_ENTITY_ACTIVE)) { + mutex_unlock(&uent->active_mutex); return; + } if (uent->ubc && uent->ubc->ops && uent->ubc->ops->entity_enable) { ret = uent->ubc->ops->entity_enable(uent, enable); if (ret) { + mutex_unlock(&uent->active_mutex); ub_err(uent, "entity enable, ret=%d, enable=%u\n", ret, enable); return; @@ -989,6 +996,8 @@ void ub_entity_enable(struct ub_entity *uent, u8 enable) ub_entity_assign_priv_flag(uent, UB_ENTITY_ACTIVE, true); else ub_entity_assign_priv_flag(uent, UB_ENTITY_ACTIVE, false); + + mutex_unlock(&uent->active_mutex); } EXPORT_SYMBOL_GPL(ub_entity_enable); @@ -1074,12 +1083,11 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx) return -EINVAL; } - if (!device_trylock(&target_dev->dev)) - return -EBUSY; + mutex_lock(&target_dev->active_mutex); if (ub_entity_test_priv_flag(target_dev, UB_ENTITY_ACTIVE)) { ub_warn(uent, "entity_idx[%u] is already in normal state\n", entity_idx); - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return 0; } @@ -1091,7 +1099,7 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx) ub_info(uent, "udrv activate entity_idx[%u] success\n", entity_idx); } - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return ret; } EXPORT_SYMBOL_GPL(ub_activate_entity); @@ -1115,12 +1123,11 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx) return -EINVAL; } - if (!device_trylock(&target_dev->dev)) - return -EBUSY; + mutex_lock(&target_dev->active_mutex); if (!ub_entity_test_priv_flag(target_dev, UB_ENTITY_ACTIVE)) { ub_warn(uent, "entity_idx[%u] is already in disable state\n", entity_idx); - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return 0; } @@ -1132,7 +1139,7 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx) ub_info(uent, "udrv deactivate entity_idx[%u] success\n", entity_idx); } - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return ret; } EXPORT_SYMBOL_GPL(ub_deactivate_entity); diff --git a/include/ub/ubus/ubus.h b/include/ub/ubus/ubus.h index ca3ba63c226a..6752ead41e64 100644 --- a/include/ub/ubus/ubus.h +++ b/include/ub/ubus/ubus.h @@ -263,6 +263,8 @@ struct ub_entity { u32 user_eid; struct ub_eu_table *eu_table; + struct mutex active_mutex; + u32 support_feature; u16 upi; @@ -568,7 +570,7 @@ struct ub_entity *ub_get_entity(unsigned int vendor, unsigned int entity, * * Enable or disable the communication channel between entity and user host. 
* - * Context: Any context. + * Context: Any context, it will take mutex_lock()/mutex_unlock(). */ void ub_entity_enable(struct ub_entity *uent, u8 enable); @@ -883,9 +885,9 @@ const struct cpumask *ub_irq_get_affinity(struct ub_entity *uent, int nr); * @uent: UB entity. * @entity_idx: Number of the entity to be activated. * - * Context: Any context, it will take device_trylock()/device_unlock() + * Context: Any context, it will take mutex_lock()/mutex_unlock() * Return: 0 if success, or %-EINVAL if the device doesn't match the driver, - * or %-EBUSY if can't get device_trylock(), or other failed negative values. + * or other failed negative values. */ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); @@ -894,9 +896,9 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); * @uent: UB entity. * @entity_idx: Number of the entity to be deactivated. * - * Context: Any context, it will take device_trylock()/device_unlock() + * Context: Any context, it will take mutex_lock()/mutex_unlock() * Return: 0 if success, or %-EINVAL if the entity doesn't match the driver, - * or %-EBUSY if can't get device_trylock(), or other failed negative values. + * or other failed negative values. */ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx); -- Gitee From c9594963e8c15fb4b7166927c44cee416590902b Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 9 Dec 2025 09:56:40 +0800 Subject: [PATCH 158/243] ub:hisi-ubus: Fix ue reg/unreg without lock bug commit d6926f5c7ee4c3f9dda78762a3f59a5afa867963 openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Currently, UE registration and deregistration are not performed under the protection of the MUE device lock. When the MUE performs a reset, there is a risk of concurrent resource conflicts. Fixes: 86fec00cb73a ("ub:hisi-ubus: Support UBUS vdm entity enable message") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/vendor/hisilicon/vdm.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index f95da0843e26..4a19e0fb8d57 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -14,8 +14,6 @@ #include "hisi-ubus.h" #include "vdm.h" -static DEFINE_SPINLOCK(ub_vdm_lock); - struct opcode_func_map { u16 sub_opcode; u16 idev_pkt_size; @@ -274,6 +272,7 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms struct ub_entity *pue, *alloc_dev = NULL; u16 ue_entity_idx = pld->ue_entity_idx; int start_idx, end_idx, ret; + int lock = 0; u8 status; /* check whether pue is registered. 
*/ @@ -306,10 +305,13 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms goto ue_reg_rsp; } - spin_lock(&ub_vdm_lock); - ret = ub_idevice_enable_handle(pue, ue_entity_idx, 0, NULL, &alloc_dev); - spin_unlock(&ub_vdm_lock); + lock = device_trylock(&pue->dev); + if (!lock) { + status = UB_MSG_RSP_EXEC_EBUSY; + goto ue_reg_rsp; + } + ret = ub_idevice_enable_handle(pue, ue_entity_idx, 0, NULL, &alloc_dev); if (ret == 0) { alloc_dev->user_eid = pld->user_eid[0]; ub_info(pue, "enable idev ue succeeded, user_eid=0x%x\n", @@ -331,6 +333,9 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms pue->num_ues += 1; } + if (lock) + device_unlock(&pue->dev); + return status; } @@ -346,6 +351,7 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms struct ub_entity *pue, *vd_dev, *tmp; u16 ue_entity_idx = pld->ue_entity_idx; u16 start_idx, end_idx; + int lock = 0; u8 status; /* search for pue with guid. Return an error if pue does not exist */ @@ -372,6 +378,12 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms "The pue of this vdm ue to be disabled is normal\n"); } + lock = device_trylock(&pue->dev); + if (!lock) { + status = UB_MSG_RSP_EXEC_EBUSY; + goto ue_rls_rsp; + } + status = UB_MSG_RSP_EXEC_ENODEV; /* otherwise, delete this ue with ue idx in message payload */ list_for_each_entry_safe(vd_dev, tmp, &pue->ue_list, node) @@ -389,6 +401,9 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms pue->num_ues -= 1; } + if (lock) + device_unlock(&pue->dev); + return status; } -- Gitee From a9bce4fbf7a5b655964b1f2721b0f7cf4a77a321 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 9 Dec 2025 10:29:25 +0800 Subject: [PATCH 159/243] ub:ubus: Delete ubc cfg0 config during cluster mode commit 3ee6b2906e902998d3bcb476bd812e9d4db8c738 openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- In cluster mode, the management authority of UBC is primarily held by UBFM. Ubus cannot configure the configuration space of cfg0 except for the vport configuration space. 
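The affected configuration paths short-circuit with a cluster-mode guard; the recurring pattern, abbreviated from the diff below (comment added for clarity):

	if (is_ibus_controller(uent) && uent->ubc->cluster)
		return 0; /* cfg0 of the UBC is managed by UBFM in cluster mode */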
Fixes: 280895301d3b ("ub:ubus: Support Ubus read/write configuration functions") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/enum.c | 4 +++ drivers/ub/ubus/eu.c | 5 +++- drivers/ub/ubus/instance.c | 12 +++++---- drivers/ub/ubus/pool.c | 7 +++++ drivers/ub/ubus/port.c | 4 +++ drivers/ub/ubus/services/gucd.c | 29 ++++----------------- drivers/ub/ubus/ubus_entity.c | 22 +++++++++------- drivers/ub/ubus/vendor/hisilicon/eu-table.c | 6 +++++ include/uapi/ub/ubus/ubus_regs.h | 5 ---- 9 files changed, 49 insertions(+), 45 deletions(-) diff --git a/drivers/ub/ubus/enum.c b/drivers/ub/ubus/enum.c index 48c37e30ca31..67a87f9f23aa 100644 --- a/drivers/ub/ubus/enum.c +++ b/drivers/ub/ubus/enum.c @@ -1430,6 +1430,10 @@ int ub_enum_entities_active(struct list_head *dev_list) list_del(&uent->node); ub_entity_add(uent, uent->ubc); + + if (is_ibus_controller(uent) && uent->ubc->cluster) + continue; + ub_start_ent(uent); } diff --git a/drivers/ub/ubus/eu.c b/drivers/ub/ubus/eu.c index 97e040eadd5e..5c99d44918e5 100644 --- a/drivers/ub/ubus/eu.c +++ b/drivers/ub/ubus/eu.c @@ -93,7 +93,7 @@ void ub_eu_table_init(struct ub_entity *uent) struct ub_bus_controller *ubc = uent->ubc; int ret; - if (!is_ibus_controller(uent)) + if (!is_ibus_controller(uent) || ubc->cluster) return; ret = ub_eu_table_init_common(uent); @@ -137,6 +137,9 @@ int ub_cfg_eu_table(struct ub_bus_controller *ubc, bool flag, u32 eid, u16 upi) struct ub_bus_controller_ops *ops = ubc->ops; int ret; + if (ubc->cluster) + return 0; + if (!ops || !ops->eu_cfg) return -ENODEV; diff --git a/drivers/ub/ubus/instance.c b/drivers/ub/ubus/instance.c index 8c49c04388e3..342fc9960ef2 100644 --- a/drivers/ub/ubus/instance.c +++ b/drivers/ub/ubus/instance.c @@ -915,15 +915,17 @@ int ub_ioctl_bus_instance_unbind(void __user *uptr) int ub_default_bus_instance_init(struct ub_entity *uent) { - bool m_idev = is_p_idevice(uent); - bool fad = is_p_device(uent); struct ub_bus_instance *bi; + bool use_cluster; int ret; if (is_switch(uent)) return 0; - if (fad || m_idev) { + use_cluster = is_p_device(uent) || is_p_idevice(uent) || + (is_ibus_controller(uent) && uent->ubc->cluster); + + if (use_cluster) { mutex_lock(&dynamic_mutex); bi = ub_find_bus_instance(eid_match, &uent->user_eid); } else { @@ -931,7 +933,7 @@ int ub_default_bus_instance_init(struct ub_entity *uent) } if (!bi) { - if (fad || m_idev) + if (use_cluster) mutex_unlock(&dynamic_mutex); ub_err(uent, "get default bi NULL\n"); return -EINVAL; @@ -941,7 +943,7 @@ int ub_default_bus_instance_init(struct ub_entity *uent) ret = ub_bind_bus_instance(uent, bi); mutex_unlock(&uent->instance_lock); - if (fad || m_idev) { + if (use_cluster) { ub_bus_instance_put(bi); mutex_unlock(&dynamic_mutex); } diff --git a/drivers/ub/ubus/pool.c b/drivers/ub/ubus/pool.c index 414e9dba0c20..4fdd9aca922d 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -533,7 +533,14 @@ static void ub_cfg_cpl_notify_handler(struct ub_bus_controller *ubc, void *msg, if (ret) { dev_err(&ubc->dev, "handle notify bi failed, ret=%d\n", ret); rsp_status = err_to_msg_rsp(ret); + goto rsp; + } + + if (!ub_entity_test_priv_flag(ubc->uent, UB_ENTITY_START)) { + ubc->uent->user_eid = notify->eid[0]; + ub_start_ent(ubc->uent); } + rsp: header->msgetah.rsp_status = rsp_status; ub_cfg_cpl_notify_msg_rsp(ubc, header); diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index a8a238df8cc4..f2ec6e8b9f47 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ 
-353,10 +353,14 @@ static umode_t ub_port_qdlws_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct ub_port *port = to_ub_port(kobj); + struct ub_entity *uent = port->uent; if (port->type == VIRTUAL) return 0; + if (is_ibus_controller(uent) && uent->ubc->cluster) + return 0; + if (test_bit(UB_PORT_CAP15_QDLWS, port->cap_map)) return a->mode; diff --git a/drivers/ub/ubus/services/gucd.c b/drivers/ub/ubus/services/gucd.c index ca5f0a3578e8..a7796bcce6aa 100644 --- a/drivers/ub/ubus/services/gucd.c +++ b/drivers/ub/ubus/services/gucd.c @@ -71,6 +71,9 @@ static int ub_component_service_register(struct ub_entity *uent) int capabilities; int i; + if (is_ibus_controller(uent) && uent->ubc->cluster) + return 0; + /* Get and check component services */ capabilities = get_component_service_capability(uent); if (!capabilities) @@ -91,36 +94,15 @@ static int ub_component_service_register(struct ub_entity *uent) return 0; } -static void ub_enable_err_msq_ctrl(struct ub_entity *uent) -{ - int ret; - - ret = ub_cfg_write_dword(uent, EMQ_CAP_START + UB_CAP_ERR_MSG_QUE_CTL, - UB_CAP_INTERRUPT_GEN_ENA); - if (ret) - ub_err(uent, "enable error msq controller failed\n"); -} - -static void ub_disable_err_msq_ctrl(struct ub_entity *uent) -{ - int ret; - - ret = ub_cfg_write_dword(uent, EMQ_CAP_START + UB_CAP_ERR_MSG_QUE_CTL, - 0x0); - if (ret) - ub_err(uent, "disable error msq controller failed\n"); -} - static void ub_setup_bus_controller(struct ub_entity *uent) { u32 vec_num_max; int usi_count; - if (ub_cc_supported(uent)) + if (ub_cc_supported(uent) && !uent->ubc->cluster) ub_cc_enable(uent); ub_set_user_info(uent); - ub_enable_err_msq_ctrl(uent); vec_num_max = ub_int_type1_vec_count(uent); usi_count = ub_alloc_irq_vectors(uent, vec_num_max, vec_num_max); if (usi_count < 0) { @@ -143,10 +125,9 @@ static void ub_unset_bus_controller(struct ub_entity *uent) ub_mem_uninit_usi(uent); ub_uninit_decoder_usi(uent); ub_disable_intr(uent); - ub_disable_err_msq_ctrl(uent); ub_unset_user_info(uent); - if (ub_cc_supported(uent)) + if (ub_cc_supported(uent) && !uent->ubc->cluster) ub_cc_disable(uent); } diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 4fa1a8533e71..b43d682ba3d8 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -421,6 +421,11 @@ void ub_entity_add(struct ub_entity *uent, void *ctx) ret = ub_ports_add(uent); WARN_ON(ret); } + + if (is_ibus_controller(uent)) { + ret = ub_static_bus_instance_init(uent->ubc); + WARN_ON(ret); + } } EXPORT_SYMBOL_GPL(ub_entity_add); @@ -432,11 +437,6 @@ void ub_start_ent(struct ub_entity *uent) if (!uent) return; - if (is_ibus_controller(uent)) { - ret = ub_static_bus_instance_init(uent->ubc); - WARN_ON(ret); - } - ret = ub_default_bus_instance_init(uent); WARN_ON(ret); @@ -507,9 +507,6 @@ void ub_stop_ent(struct ub_entity *uent) ub_remove_sysfs_ent_files(uent); ub_default_bus_instance_uninit(uent); - - if (is_ibus_controller(uent)) - ub_static_bus_instance_uninit(uent->ubc); } EXPORT_SYMBOL_GPL(ub_stop_ent); @@ -527,6 +524,9 @@ void ub_remove_ent(struct ub_entity *uent) list_for_each_entry_safe_reverse(ent, tmp, &uent->mue_list, node) ub_remove_ent(ent); + if (is_ibus_controller(uent)) + ub_static_bus_instance_uninit(uent->ubc); + if (is_primary(uent)) ub_ports_del(uent); @@ -1008,7 +1008,8 @@ int ub_set_user_info(struct ub_entity *uent) u32 eid = uent->ubc->uent->eid; - if (is_p_device(uent)) + if (is_p_device(uent) || + (uent->ubc->cluster && is_ibus_controller(uent))) goto cfg1; /* set dsteid 
to device */ @@ -1033,7 +1034,8 @@ void ub_unset_user_info(struct ub_entity *uent) if (!uent) return; - if (is_p_device(uent)) + if (is_p_device(uent) || + (uent->ubc->cluster && is_ibus_controller(uent))) goto cfg1; ub_cfg_write_dword(uent, UB_UCNA, 0); diff --git a/drivers/ub/ubus/vendor/hisilicon/eu-table.c b/drivers/ub/ubus/vendor/hisilicon/eu-table.c index 6bbdfb0e0bf7..3004093b52d8 100644 --- a/drivers/ub/ubus/vendor/hisilicon/eu-table.c +++ b/drivers/ub/ubus/vendor/hisilicon/eu-table.c @@ -165,12 +165,18 @@ static const struct file_operations hi_eu_table_info_ops = { static void hi_eu_table_debugfs_init(struct ub_bus_controller *ubc) { + if (ubc->cluster) + return; + debugfs_create_file("eu_table", 0600, ubc->debug_root, ubc, &hi_eu_table_info_ops); } static void hi_eu_table_debugfs_uninit(struct ub_bus_controller *ubc) { + if (ubc->cluster) + return; + debugfs_lookup_and_remove("eu_table", ubc->debug_root); } diff --git a/include/uapi/ub/ubus/ubus_regs.h b/include/uapi/ub/ubus/ubus_regs.h index 9eed901fd205..a4fe600f5459 100644 --- a/include/uapi/ub/ubus/ubus_regs.h +++ b/include/uapi/ub/ubus/ubus_regs.h @@ -271,9 +271,4 @@ enum ub_port_cap_id { #define QDLWS_EXEC_STATUS_MASK GENMASK(2, 0) #define QDLWS_EXEC_STATUS_MAX 4 -/* Error Message Queue Capability */ -#define EMQ_CAP_START 0x00001400 -#define UB_CAP_ERR_MSG_QUE_CTL 0x8 -#define UB_CAP_INTERRUPT_GEN_ENA 0x100 - #endif /* _UAPI_UB_UBUS_UBUS_REGS_H_ */ -- Gitee From 834a4f2beb535dd5c527d4d50cbce4ea79718c86 Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 14:31:20 +0800 Subject: [PATCH 160/243] ub:ubus: Matt and MMIO judgments are not performed in cluster commit 68ce3e1d69c339382cf0b8aa95a7e0957c9ce22d openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- Rollback after setting the decoder register fails. 
MATT and MMIO checks are not performed when cluster mode is true. Fixes: abc591c50df5 ("ub:ubus: Supports decoder event processing") Signed-off-by: Yuhao Xiang Signed-off-by: Shi Yang --- drivers/ub/ubus/decoder.c | 152 +++++++++++++++++++++++++++----------- drivers/ub/ubus/decoder.h | 1 + 2 files changed, 109 insertions(+), 44 deletions(-) diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index 288e33a96038..56d20dbbf0aa 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -75,28 +75,68 @@ static int ub_decoder_init_queue(struct ub_bus_controller *ubc, static u32 set_mmio_base_reg(struct ub_decoder *decoder) { - u32 ret; + u32 mmio_high = upper_32_bits(decoder->mmio_base_addr); + u32 mmio_low = lower_32_bits(decoder->mmio_base_addr); + struct ub_entity *ent = decoder->uent; + u32 low_bit, high_bit, ret; + + if (!ent->ubc->cluster) { + ret = (u32)ub_cfg_write_dword(ent, DECODER_MMIO_BA0, + 0xffffffff); + ret |= (u32)ub_cfg_write_dword(ent, DECODER_MMIO_BA1, + 0xffffffff); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MMIO_BA0, &low_bit); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MMIO_BA1, &high_bit); + if (ret) { + ub_err(ent, "Failed to access decoder MMIO BA\n"); + return ret; + } + + if ((low_bit | mmio_low) != low_bit || + (high_bit | mmio_high) != high_bit) { + ub_err(ent, "decoder MMIO address does not match HW reg\n"); + return -EINVAL; + } + } ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_MMIO_BA0, lower_32_bits(decoder->mmio_base_addr)); ret |= (u32)ub_cfg_write_dword(decoder->uent, DECODER_MMIO_BA1, upper_32_bits(decoder->mmio_base_addr)); - if (ret) - ub_err(decoder->uent, "set decoder mmio base failed\n"); return ret; } static u32 set_page_table_reg(struct ub_decoder *decoder) { - u32 ret; + u32 matt_high = upper_32_bits(decoder->pgtlb.pgtlb_dma); + u32 matt_low = lower_32_bits(decoder->pgtlb.pgtlb_dma); + struct ub_entity *ent = decoder->uent; + u32 low_bit, high_bit, ret; + + if (!ent->ubc->cluster) { + ret = (u32)ub_cfg_write_dword(ent, DECODER_MATT_BA0, + 0xffffffff); + ret |= (u32)ub_cfg_write_dword(ent, DECODER_MATT_BA1, + 0xffffffff); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MATT_BA0, &low_bit); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MATT_BA1, &high_bit); + if (ret) { + ub_err(ent, "Failed to access decoder MATT BA\n"); + return ret; + } + + if ((low_bit | matt_low) != low_bit || + (high_bit | matt_high) != high_bit) { + ub_err(ent, "decoder MATT address does not match HW reg\n"); + return -EINVAL; + } + } ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_MATT_BA0, lower_32_bits(decoder->pgtlb.pgtlb_dma)); ret |= (u32)ub_cfg_write_dword(decoder->uent, DECODER_MATT_BA1, upper_32_bits(decoder->pgtlb.pgtlb_dma)); - if (ret) - ub_err(decoder->uent, "set decoder page table reg failed\n"); return ret; } @@ -145,6 +185,25 @@ static u32 set_queue_reg(struct ub_decoder *decoder) return ret; } +static void unset_queue_reg(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + u32 ret; + + ret = (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_CFG, + decoder->vals.cmdq_cfg_val); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_CFG, + decoder->vals.evtq_cfg_val); + + ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR0, 0); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR1, 0); + + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR0, 0); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR1, 0); + if (ret) + ub_err(uent, "unset queue reg fail\n"); +} + static u32 
set_decoder_enable(struct ub_decoder *decoder) { u32 ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_CTRL, 1); @@ -155,6 +214,14 @@ static u32 set_decoder_enable(struct ub_decoder *decoder) return ret; } +static void unset_decoder_enable(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + + if (ub_cfg_write_dword(uent, DECODER_CTRL, 0)) + ub_err(uent, "unset decoder enable fail\n"); +} + static u32 ub_decoder_device_set(struct ub_decoder *decoder) { u32 ret; @@ -164,6 +231,11 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) ret |= set_queue_reg(decoder); ret |= set_decoder_enable(decoder); + if (ret) { + unset_decoder_enable(decoder); + unset_queue_reg(decoder); + } + return ret; } @@ -187,21 +259,26 @@ static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, "ub bus controller can't free decoder table\n"); } -static void ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, +static int ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { struct resource_entry *entry; - decoder->mmio_base_addr = -1; resource_list_for_each_entry(entry, &ubc->resources) { if (entry->res->flags == IORESOURCE_MEM && - strstr(entry->res->name, "UB_BUS_CTL") && - entry->res->start < decoder->mmio_base_addr) + strstr(entry->res->name, "UB_BUS_CTL")) { decoder->mmio_base_addr = entry->res->start; + decoder->mmio_end_addr = entry->res->end; + break; + } + } + + if (decoder->mmio_base_addr == 0) { + ub_err(decoder->uent, "get decoder mmio base failed\n"); + return -EINVAL; } - ub_info(decoder->uent, "decoder mmio base is %#llx\n", - decoder->mmio_base_addr); + return 0; } static const char * const mmio_size_desc[] = { @@ -209,15 +286,21 @@ static const char * const mmio_size_desc[] = { "2Tbyte", "4Tbyte", "8Tbyte", "16Tbyte" }; +static const u64 mmio_size[] = { + 128ULL * SZ_1G, 256ULL * SZ_1G, 512ULL * SZ_1G, SZ_1T, + 2 * SZ_1T, 4 * SZ_1T, 8 * SZ_1T, 16 * SZ_1T +}; + static int ub_get_decoder_cap(struct ub_decoder *decoder) { struct ub_entity *uent = decoder->uent; + u64 size; u32 val; int ret; ret = ub_cfg_read_dword(uent, DECODER_CAP, &val); if (ret) { - ub_err(uent, "read decoder cap failed\n"); + ub_err(uent, "read decoder cap fail\n"); return ret; } @@ -225,9 +308,15 @@ static int ub_get_decoder_cap(struct ub_decoder *decoder) decoder->cmdq.qs = (val & CMDQ_SIZE_MASK) >> CMDQ_SIZE_OFFSET; decoder->evtq.qs = (val & EVTQ_SIZE_MASK) >> EVTQ_SIZE_OFFSET; - ub_dbg(uent, "cmdq_queue_size=%u, evtq_queue_size=%u, mmio_size=%s\n", - decoder->cmdq.qs, decoder->evtq.qs, - mmio_size_desc[decoder->mmio_size_sup]); + size = decoder->mmio_end_addr - decoder->mmio_base_addr + 1; + if (size > mmio_size[decoder->mmio_size_sup]) + decoder->mmio_end_addr = decoder->mmio_base_addr + + mmio_size[decoder->mmio_size_sup] - 1; + + ub_info(uent, "decoder mmio_addr[%#llx-%#llx], cmdq_queue_size=%u, evtq_queue_size=%u, mmio_size_sup=%s\n", + decoder->mmio_base_addr, decoder->mmio_end_addr, + decoder->cmdq.qs, decoder->evtq.qs, + mmio_size_desc[decoder->mmio_size_sup]); return 0; } @@ -245,7 +334,9 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) decoder->uent = uent; mutex_init(&decoder->table_lock); - ub_get_decoder_mmio_base(ubc, decoder); + ret = ub_get_decoder_mmio_base(ubc, decoder); + if (ret) + goto release_decoder; ret = ub_get_decoder_cap(decoder); if (ret) @@ -293,25 +384,6 @@ static void unset_mmio_base_reg(struct ub_decoder *decoder) ub_err(uent, "unset mmio base reg failed\n"); } -static void unset_queue_reg(struct 
ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - u32 ret; - - ret = (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_CFG, - decoder->vals.cmdq_cfg_val); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_CFG, - decoder->vals.evtq_cfg_val); - - ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR0, 0); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR1, 0); - - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR0, 0); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR1, 0); - if (ret) - ub_err(uent, "unset queue reg failed\n"); -} - static void unset_page_table_reg(struct ub_decoder *decoder) { struct ub_entity *uent = decoder->uent; @@ -323,14 +395,6 @@ static void unset_page_table_reg(struct ub_decoder *decoder) ub_err(uent, "unset page table reg failed\n"); } -static void unset_decoder_enable(struct ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - - if (ub_cfg_write_dword(uent, DECODER_CTRL, 0)) - ub_err(uent, "unset decoder enable failed\n"); -} - static void ub_decoder_device_unset(struct ub_decoder *decoder) { unset_decoder_enable(decoder); diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 37d628dc45e2..6667d07e9219 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -72,6 +72,7 @@ struct ub_decoder { struct device *dev; struct ub_entity *uent; phys_addr_t mmio_base_addr; + phys_addr_t mmio_end_addr; u32 mmio_size_sup; u64 rg_size; struct ub_decoder_queue cmdq; -- Gitee From e70b87cb7724f753adbe497641055b2f48ead60e Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 20:44:11 +0800 Subject: [PATCH 161/243] ub:ubfi: Fix UBFI memory leak issue commit b41f79594e18b9628b1d8d4e50ac1687c9679ecf openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- Fix memory leaks in UBFI abnormal branches and remove redundant branches. Fixes: 312a6b7fabe9 ("ub:ubfi: ubfi driver parse ubc information from ubrt") Signed-off-by: Yuhao Xiang Signed-off-by: Shi Yang --- drivers/ub/ubfi/ub_fi.c | 42 ++++++++++++------------ drivers/ub/ubfi/ub_fi.h | 7 ++-- drivers/ub/ubfi/ubc.c | 35 ++++++++++---------- drivers/ub/ubfi/ubrt.c | 72 +++++++++++++++++++++++++++-------------- drivers/ub/ubfi/ubrt.h | 4 --- 5 files changed, 89 insertions(+), 71 deletions(-) diff --git a/drivers/ub/ubfi/ub_fi.c b/drivers/ub/ubfi/ub_fi.c index 50b359e52b7b..aa8b69f39dd8 100644 --- a/drivers/ub/ubfi/ub_fi.c +++ b/drivers/ub/ubfi/ub_fi.c @@ -15,16 +15,16 @@ #define ACPI_SIG_UBRT "UBRT" /* UB Root Table */ #define UBIOS_INFO_TABLE "linux,ubios-information-table" -enum bios_report_mode bios_mode = UNKNOWN; +enum firmware_report_mode firmware_mode = UNKNOWN; -static void ub_bios_mode_init(void) +static void ub_firmware_mode_init(void) { if (acpi_disabled) - bios_mode = DTS; + firmware_mode = DTS; else - bios_mode = ACPI; + firmware_mode = ACPI; - pr_info("Starting with mode: %d\n", bios_mode); + pr_info("Starting with mode: %d\n", firmware_mode); } static int ubfi_get_acpi_ubrt(void) @@ -34,14 +34,13 @@ static int ubfi_get_acpi_ubrt(void) status = acpi_get_table(ACPI_SIG_UBRT, 0, &header); if (ACPI_FAILURE(status)) { - pr_err("ACPI failed to get UBRT.\n"); if (status != AE_NOT_FOUND) pr_err("ACPI failed msg: %s\n", acpi_format_exception(status)); return -ENODEV; } acpi_table = (struct acpi_table_ubrt *)header; - pr_info("get ubrt by acpi success\n"); + pr_debug("get ubrt by acpi success\n"); return 
0; } @@ -65,35 +64,32 @@ static int ubfi_get_dts_ubrt(void) if (!ubios_table) return -ENOMEM; - pr_info("ubfi get ubrt by device tree success\n"); + pr_debug("ubfi get ubrt by device tree success\n"); return 0; } static int ubfi_get_ubrt(void) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) return ubfi_get_acpi_ubrt(); - else if (bios_mode == DTS) + else return ubfi_get_dts_ubrt(); - return -EINVAL; } static int handle_ubrt(void) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) return handle_acpi_ubrt(); - else if (bios_mode == DTS) + else return handle_dts_ubrt(); - - return -EINVAL; } static void ubfi_put_ubrt(void) { - if (bios_mode == ACPI) { + if (firmware_mode == ACPI) { acpi_put_table((struct acpi_table_header *)acpi_table); acpi_table = NULL; - } else if (bios_mode == DTS) { + } else { ub_table_put(ubios_table); ubios_table = NULL; } @@ -103,15 +99,21 @@ static int __init ubfi_init(void) { int ret; - ub_bios_mode_init(); + ub_firmware_mode_init(); ret = ubfi_get_ubrt(); if (ret) { - pr_warn("can't get ub information from bios, ret=%d\n", ret); + pr_warn("can't get ub information from firmware, ret=%d\n", ret); return 0; } - return handle_ubrt(); + ret = handle_ubrt(); + if (ret) { + pr_err("failed to handle ubrt, ret=%d\n", ret); + ubfi_put_ubrt(); + } + + return ret; } static void __exit ubfi_exit(void) diff --git a/drivers/ub/ubfi/ub_fi.h b/drivers/ub/ubfi/ub_fi.h index 3edf8dd6de4e..9d36c308cede 100644 --- a/drivers/ub/ubfi/ub_fi.h +++ b/drivers/ub/ubfi/ub_fi.h @@ -6,12 +6,11 @@ #ifndef __UB_FI_H__ #define __UB_FI_H__ -enum bios_report_mode { +enum firmware_report_mode { ACPI = 0, DTS = 1, - UBIOS = 3, - UNKNOWN = 4, + UNKNOWN = 2 }; -extern enum bios_report_mode bios_mode; +extern enum firmware_report_mode firmware_mode; #endif /* __UB_FI_H__ */ diff --git a/drivers/ub/ubfi/ubc.c b/drivers/ub/ubfi/ubc.c index a3f7bab8863f..3a160643f075 100644 --- a/drivers/ub/ubfi/ubc.c +++ b/drivers/ub/ubfi/ubc.c @@ -212,7 +212,7 @@ static int ubc_dev_new_resource_entry(struct resource *res, return 0; } -static int dts_register_irq(u32 ctl_no, int irq_type, const char *name, +static int dts_register_irq(u32 ctl_no, int irq_idx, const char *name, struct resource *res) { struct device_node *np; @@ -228,16 +228,16 @@ static int dts_register_irq(u32 ctl_no, int irq_type, const char *name, if (ctl_no != index) continue; - irq = irq_of_parse_and_map(np, irq_type); + irq = irq_of_parse_and_map(np, irq_idx); if (!irq) continue; } if (!irq) { - pr_err("irq_type %d parse and map fail\n", irq_type); + pr_err("irq_idx %d parse and map failed\n", irq_idx); return -EINVAL; } - pr_info("irq_type[%d] register success, irq=%u\n", irq_type, irq); + pr_info("irq_idx %d register successfully, irq=%u\n", irq_idx, irq); res->name = name; res->start = irq; @@ -258,9 +258,9 @@ static void remove_ubc_resource(struct ub_bus_controller *ubc) if ((res->flags & IORESOURCE_IRQ) && !strcmp(res->name, "UBUS") && !ubc->ctl_no) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ubrt_unregister_gsi(ubc->attr.msg_int); - else if (bios_mode == DTS) + else irq_dispose_mapping(ubc->queue_virq); } } @@ -296,12 +296,10 @@ static int add_ubc_irq_resource(struct ubc_node *node, trigger = !!(ubc->attr.msg_int_attr & UB_MSGQ_INT_TRIGGER_MASK); polarity = !!(ubc->attr.msg_int_attr & UB_MSGQ_INT_POLARITY_MASK); - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ret = ubrt_register_gsi(hwirq, trigger, polarity, "UBUS", &res); - else if (bios_mode == DTS) - ret = dts_register_irq(ubc->ctl_no, 0, "UBUS", &res); else - 
ret = -EINVAL; + ret = dts_register_irq(ubc->ctl_no, 0, "UBUS", &res); if (ret) { pr_err("register irq fail, ret=%d\n", ret); @@ -318,9 +316,9 @@ static int add_ubc_irq_resource(struct ubc_node *node, return 0; out: - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ubrt_unregister_gsi(hwirq); - else if (bios_mode == DTS) + else irq_dispose_mapping(res.start); return ret; @@ -333,7 +331,7 @@ static void ub_release_ubc_dev(struct device *dev) pr_info("%s release ub bus controller device.\n", ubc->name); - if (bios_mode == DTS) { + if (firmware_mode == DTS) { usi_np = irq_domain_get_of_node(dev->msi.domain); if (usi_np) of_node_put(usi_np); @@ -509,7 +507,7 @@ static int create_ubc(struct ubc_node *node, u32 ctl_no) if (ret) goto free_resource; - /* after init_ubc, ubc resources will be released in the dev->release */ + /* after init_ubc, if failed, ubc resources will be released in the dev->release */ ret = init_ubc(ubc); if (ret) return ret; @@ -551,9 +549,10 @@ static int parse_ubc_table(void *info_node) pr_info("cna_start=%u, cna_end=%u\n", ubc_cna_start, ubc_cna_end); pr_info("eid_start=%u, eid_end=%u\n", ubc_eid_start, ubc_eid_end); - pr_info("ubc_count=%u, bios_cluster_mode=%u, feature=%u\n", count, + pr_info("ubc_count=%u, firmware_cluster_mode=%u, feature=%u\n", count, cluster_mode, ubc_feature); - if (ubc_cna_start > ubc_cna_end || ubc_eid_start > ubc_eid_end) { + if (ubc_cna_start > ubc_cna_end || ubc_eid_start > ubc_eid_end || + ubc_cna_start == 0 || ubc_eid_start == 0) { pr_err("eid or cna range is incorrect\n"); return -EINVAL; } @@ -597,9 +596,9 @@ int handle_ubc_table(u64 pointer) if (ret) goto err_handle; - pr_info("Update msi domain for ub bus controller\n"); + pr_debug("Update msi domain for ub bus controller\n"); /* Update msi domain for ub bus controller */ - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ret = acpi_update_ubc_msi_domain(); else ret = dts_update_ubc_msi_domain(); diff --git a/drivers/ub/ubfi/ubrt.c b/drivers/ub/ubfi/ubrt.c index 8908b2ae8edd..ecf975526e72 100644 --- a/drivers/ub/ubfi/ubrt.c +++ b/drivers/ub/ubfi/ubrt.c @@ -21,10 +21,13 @@ struct acpi_table_ubrt *acpi_table; struct ubios_root_table *ubios_table; /* - * ummu max count is 32, max size is 40 + 32 * 128 = 4640 - * ubc max count is 32, max size is 40 + 88 + 32 * 256 + 32 * 4 = 8448 + * ubios max sub table count is 256, max size is 40 + 8 * 256 = 2088 + * ummu max count is 32, max size is 32 + 8 + 32 * 160 = 5160 + * ubc max count is 32, max size is 32 + 24 + 32 * 384 = 12344 + * Choose the largest one as the maximum value for the ubios table. 
*/ -#define UBIOS_TABLE_TOTLE_SIZE_MAX 8448 +#define UBIOS_TABLE_TOTAL_SIZE_MAX (sizeof(struct ubrt_ubc_table) + \ + 32 * sizeof(struct ubc_node)) /* remember to use ub_table_put to release memory alloced by ub_table_get */ void *ub_table_get(u64 pa) @@ -44,8 +47,9 @@ void *ub_table_get(u64 pa) total_size = readl(va + UB_TABLE_HEADER_NAME_LEN); pr_debug("ub table size is[0x%x]\n", total_size); - if (total_size == 0 || total_size > UBIOS_TABLE_TOTLE_SIZE_MAX) { - pr_err("ubios table size is invalid\n"); + if (total_size == 0 || total_size > UBIOS_TABLE_TOTAL_SIZE_MAX) { + pr_err("ubios table size is invalid, total_size=0x%x\n", + total_size); iounmap(va); return NULL; } @@ -81,6 +85,7 @@ void uninit_ub_nodes(void) int handle_acpi_ubrt(void) { + bool ubc_done = false, ummu_done = false; struct ubrt_sub_table *sub_table; int ret = 0; u32 i; @@ -89,16 +94,14 @@ int handle_acpi_ubrt(void) for (i = 0; i < acpi_table->count; i++) { sub_table = &acpi_table->sub_table[i]; - switch (sub_table->type) { - case UB_BUS_CONTROLLER_TABLE: + if (sub_table->type == UB_BUS_CONTROLLER_TABLE && !ubc_done) { ret = handle_ubc_table(sub_table->pointer); - break; - case UMMU_TABLE: + ubc_done = true; + } else if (sub_table->type == UMMU_TABLE && !ummu_done) { ret = handle_ummu_table(sub_table->pointer); - break; - default: + ummu_done = true; + } else { pr_warn("Ignore sub table: type %u\n", sub_table->type); - break; } if (ret) { pr_err("parse ubrt sub table type %u failed\n", @@ -112,10 +115,25 @@ int handle_acpi_ubrt(void) return ret; } +static int get_ubrt_table_name(char *name, u64 sub_table) +{ + void __iomem *va; + + va = ioremap(sub_table, sizeof(struct ub_table_header)); + if (!va) { + pr_err("ioremap ub table header failed\n"); + return -ENOMEM; + } + + memcpy_fromio(name, va, UB_TABLE_HEADER_NAME_LEN - 1); + iounmap(va); + return 0; +} + int handle_dts_ubrt(void) { - char name[UB_TABLE_HEADER_NAME_LEN] = {}; - struct ub_table_header *header; + bool ubc_done = false, ummu_done = false; + char name[UB_TABLE_HEADER_NAME_LEN]; int ret = 0, i; if (ubios_table->count == 0) { @@ -125,24 +143,28 @@ int handle_dts_ubrt(void) pr_info("ubios sub table count is %u\n", ubios_table->count); for (i = 0; i < ubios_table->count; i++) { - header = (struct ub_table_header *)ub_table_get( - ubios_table->sub_tables[i]); - if (!header) + memset(name, 0, UB_TABLE_HEADER_NAME_LEN); + ret = get_ubrt_table_name(name, ubios_table->sub_tables[i]); + if (ret) + goto out; + if (name[0] == '\0') continue; - - memcpy(name, header->name, UB_TABLE_HEADER_NAME_LEN - 1); pr_info("ubrt sub table name is %s\n", name); - ub_table_put(header); - if (!strncmp(name, UBIOS_SIG_UBC, strlen(UBIOS_SIG_UBC))) - ret = handle_ubc_table(ubios_table->sub_tables[i]); - else if (!strncmp(name, UBIOS_SIG_UMMU, strlen(UBIOS_SIG_UMMU))) + if (!strncmp(name, UBIOS_SIG_UMMU, strlen(UBIOS_SIG_UMMU)) && + !ummu_done) { ret = handle_ummu_table(ubios_table->sub_tables[i]); - else + ummu_done = true; + } else if (!strncmp(name, UBIOS_SIG_UBC, strlen(UBIOS_SIG_UBC)) && + !ubc_done) { + ret = handle_ubc_table(ubios_table->sub_tables[i]); + ubc_done = true; + } else { pr_warn("Ignore sub table: %s\n", name); + } if (ret) { - pr_err("Create %s device ret=%d\n", name, ret); + pr_err("Create %s failed, ret=%d\n", name, ret); goto out; } } diff --git a/drivers/ub/ubfi/ubrt.h b/drivers/ub/ubfi/ubrt.h index 0cbc5fe82368..1f15f0ce76c9 100644 --- a/drivers/ub/ubfi/ubrt.h +++ b/drivers/ub/ubfi/ubrt.h @@ -43,10 +43,6 @@ enum ubrt_sub_table_type { UB_BUS_CONTROLLER_TABLE = 0, 
UMMU_TABLE = 1, UB_RESERVED_MEMORY_TABLE = 2, - VIRTUAL_BUS_TABLE = 3, - CALL_ID_SERVICE_TABLE = 4, - UB_ENTITY_TABLE = 5, - UB_TOPOLOGY_TABLE = 6, }; extern struct acpi_table_ubrt *acpi_table; -- Gitee From a2a6d6701f3e58f7d2f8e8b9bf5093bd187e8d0f Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 10 Dec 2025 20:01:02 +0800 Subject: [PATCH 162/243] ub:ubus: bugfix calltrace of killing qemu when rmmod hisi_ubus commit 0e8de1af5c2d288c7e6eb2fc702049bf2c3792ab openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- bugfix calltrace of killing qemu when rmmod hisi_ubus Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/msg.c | 23 ++--------------------- drivers/ub/ubus/msg.h | 4 ---- drivers/ub/ubus/vendor/hisilicon/msg.c | 11 ----------- 3 files changed, 2 insertions(+), 36 deletions(-) diff --git a/drivers/ub/ubus/msg.c b/drivers/ub/ubus/msg.c index 54f77128ad2f..34b2831bd8bf 100644 --- a/drivers/ub/ubus/msg.c +++ b/drivers/ub/ubus/msg.c @@ -83,39 +83,20 @@ static void dev_message_put(struct ub_entity *uent) int message_probe_device(struct ub_entity *uent) { - const struct message_ops *ops = uent->ubc->mdev->ops; - int ret; - if (!dev_message_get(uent)) return -ENOMEM; - if (uent->message->mdev) - return 0; - - if (ops->probe_dev) { - ret = ops->probe_dev(uent); - if (ret) - goto err_probe; - } - - uent->message->mdev = uent->ubc->mdev; + if (!uent->message->mdev) + uent->message->mdev = uent->ubc->mdev; return 0; - -err_probe: - dev_message_put(uent); - return ret; } void message_remove_device(struct ub_entity *uent) { - const struct message_ops *ops = uent->ubc->mdev->ops; - if (!uent->message) return; - if (ops->remove_dev) - ops->remove_dev(uent); dev_message_put(uent); } diff --git a/drivers/ub/ubus/msg.h b/drivers/ub/ubus/msg.h index 92a126bdf471..f714295fa0e9 100644 --- a/drivers/ub/ubus/msg.h +++ b/drivers/ub/ubus/msg.h @@ -190,8 +190,6 @@ typedef void (*rx_msg_handler_t)(struct ub_bus_controller *ubc, void *pkt, u16 l /** * struct message_ops - message ops and capabilities - * @probe_dev: probe ub_entity to init message - * @remove_dev: remove ub_entity to uninit message * @sync_request: send message to target ub_entity and wait response * @send: send message to target ub_entity but not wait response * @response: send response message to target @@ -200,8 +198,6 @@ typedef void (*rx_msg_handler_t)(struct ub_bus_controller *ubc, void *pkt, u16 l * @owner: Driver module providing these ops */ struct message_ops { - int (*probe_dev)(struct ub_entity *uent); - void (*remove_dev)(struct ub_entity *uent); int (*sync_request)(struct message_device *mdev, struct msg_info *info, u8 code); int (*send)(struct message_device *mdev, struct msg_info *info, diff --git a/drivers/ub/ubus/vendor/hisilicon/msg.c b/drivers/ub/ubus/vendor/hisilicon/msg.c index 178682aa5cd6..5c4e672aa55e 100644 --- a/drivers/ub/ubus/vendor/hisilicon/msg.c +++ b/drivers/ub/ubus/vendor/hisilicon/msg.c @@ -463,15 +463,6 @@ static void hi_msg_queue_uninit(struct hi_message_device *hmd) hi_msg_core_uninit(&hmd->hmc); } -static int hi_message_probe_dev(struct ub_entity *uent) -{ - return 0; -} - -static void hi_message_remove_dev(struct ub_entity *uent) -{ -} - static bool pkt_plen_valid(void *pkt, u16 pkt_size, int task_type) { struct msg_pkt_header *header = (struct msg_pkt_header *)pkt; @@ -638,8 +629,6 @@ int hi_message_private(struct message_device *mdev, struct msg_info *info, } 
static struct message_ops hi_message_ops = { - .probe_dev = hi_message_probe_dev, - .remove_dev = hi_message_remove_dev, .sync_request = hi_message_sync_request, .response = hi_message_response, .sync_enum = hi_message_sync_enum, -- Gitee From 552fe81802e48a5ab67717d8e3509b892a777e56 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 10 Dec 2025 16:27:55 +0800 Subject: [PATCH 163/243] ub:ubus: add hotplug capability check commit b0aa44c0a884645c7a84a1ad65d01a7a3ab976a8 openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- HotPlug capability check added to determine whether power-on and power-off functions are supported. Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/services/hotplug/hotplug.h | 1 + .../ub/ubus/services/hotplug/hotplug_core.c | 121 +++++++++++++++++- .../ub/ubus/services/hotplug/hotplug_ctrl.c | 71 +++++----- include/uapi/ub/ubus/ubus_regs.h | 1 + 4 files changed, 154 insertions(+), 40 deletions(-) diff --git a/drivers/ub/ubus/services/hotplug/hotplug.h b/drivers/ub/ubus/services/hotplug/hotplug.h index 93c8e3c798b9..81f731eb29be 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug.h +++ b/drivers/ub/ubus/services/hotplug/hotplug.h @@ -53,6 +53,7 @@ struct ub_slot { #define WORK_LED(slot) ((slot)->slot_cap & UB_SLOT_WLPS) #define PWR_LED(slot) ((slot)->slot_cap & UB_SLOT_PLPS) #define PRESENT(slot) ((slot)->slot_cap & UB_SLOT_PDSS) +#define PWR(slot) ((slot)->slot_cap & UB_SLOT_PWCS) struct ubhp_msg_payload { u16 slot_id; diff --git a/drivers/ub/ubus/services/hotplug/hotplug_core.c b/drivers/ub/ubus/services/hotplug/hotplug_core.c index 0646a8d388d7..a419dbae0ea2 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug_core.c +++ b/drivers/ub/ubus/services/hotplug/hotplug_core.c @@ -397,6 +397,102 @@ static void ubhp_disconnect_slot(struct ub_slot *slot) slot->r_uent = NULL; } +static void ubhp_clear_port(struct ub_slot *slot) +{ + struct ub_port *port; + + for_each_slot_port(port, slot) { + port->r_index = 0; + guid_copy(&port->r_guid, &guid_null); + } +} + +/** + * ubhp_enum_at_slot() - enum at slot to find new devices + * @slot: the slot that has new device plugged in + * @dev_list: a list to store the new found devices + * + * this func use bfs to enum devices and put them into dev_list, + * which means the previous device in dev_list is enumerated previous + */ +static int ubhp_enum_at_slot(struct ub_slot *slot, struct list_head *dev_list) +{ + void *buf; + int ret; + +#define UB_TOPO_BUF_SZ SZ_4K + buf = kzalloc(UB_TOPO_BUF_SZ, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ub_enum_topo_scan_ports(slot->uent, slot->port_start, slot->port_num, + dev_list, buf); + if (ret) + ubhp_clear_port(slot); + + kfree(buf); + return ret; +} + +/** + * a simple example for link up + * for a given topo like + * +-------------+ +---------+ +---------+ +--------+ + * | controller0 |p0:---:p0| switch0 |p1:---slot0---:p0| switch1 |p1:---:p0| device0| + * +-------------+ +---------+ +---------+ +--------+ + * when slot0 is calling handle link up + * 1. enum at slot0 to create switch1 and device0, put them in dev_list + * 2. route dev_list to set up route between these two devices + * 3. handle route link up at slot0, add route of left(controller0 & switch0) + * into right(switch1 & device0) and route of right into left + * 4. 
start switch1 and device0 + */ +static int ubhp_handle_link_up(struct ub_slot *slot) +{ + struct list_head dev_list; + int ret; + + INIT_LIST_HEAD(&dev_list); + + ret = ubhp_enum_at_slot(slot, &dev_list); + if (ret) { + ub_err(slot->uent, "enum at slot%u failed, ret=%d\n", slot->slot_id, ret); + return ret; + } + + if (list_empty(&dev_list)) { + ub_warn(slot->uent, "link up without remote dev\n"); + return -ENXIO; + } + + ret = ub_route_entities(&dev_list); + if (ret) { + ub_err(slot->uent, "hotplug cal route failed, ret=%d\n", ret); + goto err_route; + } + + slot->r_uent = slot->ports->r_uent; + ret = ubhp_update_route_link_up(slot); + if (ret) { + ub_err(slot->uent, "hotplug update route failed, ret=%d\n", ret); + goto err_link_up; + } + + ret = ub_enum_entities_active(&dev_list); + if (ret) { + ub_err(slot->uent, "hotplug start devices failed, ret=%d\n", ret); + goto err_link_up; + } + + return 0; +err_link_up: + ubhp_update_route_link_down(slot); + slot->r_uent = NULL; +err_route: + ub_enum_clear_ent_list(&dev_list); + return ret; +} + /** * a simple example for link down * for a given topo like @@ -471,7 +567,7 @@ static void ubhp_button_handler(struct work_struct *work) void ubhp_handle_power(struct ub_slot *slot, bool power_on) { - if (!slot) + if (!slot || !PWR(slot)) return; mutex_lock(&slot->state_lock); @@ -531,11 +627,22 @@ static void ubhp_handle_present(struct ub_slot *slot) ubhp_set_slot_power(slot, POWER_ON); - mutex_unlock(&slot->state_lock); - ubhp_get_slot(slot); - queue_delayed_work(get_rx_msg_wq(UB_MSG_CODE_LINK), - &slot->power_work, HP_LINK_WAIT_DELAY * HZ); - return; + /* If support power ctrl, wait link up process */ + if (PWR(slot)) { + mutex_unlock(&slot->state_lock); + ubhp_get_slot(slot); + queue_delayed_work(get_rx_msg_wq(UB_MSG_CODE_LINK), + &slot->power_work, HP_LINK_WAIT_DELAY * HZ); + return; + } + + if (ubhp_handle_link_up(slot)) + goto poweroff; + + ubhp_set_indicators(slot, INDICATOR_ON, INDICATOR_NOOP); + slot->state = SLOT_ON; + ub_info(slot->uent, "slot%u on\n", slot->slot_id); + out: /** * why cancel button work here: @@ -559,6 +666,8 @@ static void ubhp_handle_present(struct ub_slot *slot) ub_info(slot->uent, "slot%u handle hotplug succeeded\n", slot->slot_id); return; +poweroff: + ubhp_set_slot_power(slot, POWER_OFF); clear_state: slot->state = SLOT_OFF; ubhp_set_indicators(slot, INDICATOR_OFF, INDICATOR_NOOP); diff --git a/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c b/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c index 28753cc2501c..73d32079bdd8 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c +++ b/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c @@ -95,13 +95,18 @@ void ubhp_set_indicators(struct ub_slot *slot, u8 power, u8 work) void ubhp_set_slot_power(struct ub_slot *slot, enum power_state power) { - ub_slot_write_byte(slot, UB_SLOT_PW_CTRL, power); + if (PWR(slot)) + ub_slot_write_byte(slot, UB_SLOT_PW_CTRL, power); } bool ubhp_card_present(struct ub_slot *slot) { u8 val; + /* always present if no present ctrl */ + if (!PRESENT(slot)) + return true; + ub_slot_read_byte(slot, UB_SLOT_PD_STA, &val); return !!(val & UB_SLOT_PD_STA_MASK); @@ -153,54 +158,52 @@ bool ubhp_confirm_event(struct ub_slot *slot, enum hotplug_event event) return true; } -static void ubhp_start_slot(struct ub_slot *slot) +static void ubhp_enable(struct ub_slot *slot, u32 pos, u32 mask, bool flag) { u8 val; - /* enable PP */ - ub_slot_read_byte(slot, UB_SLOT_PP_CTRL, &val); - val |= UB_SLOT_PP_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PP_CTRL, val); 
+ if (!flag)
+ return;

- /* enable PD */
- ub_slot_read_byte(slot, UB_SLOT_PD_CTRL, &val);
- val |= UB_SLOT_PD_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_PD_CTRL, val);
+ ub_slot_read_byte(slot, pos, &val);
+ val |= mask;
+ ub_slot_write_byte(slot, pos, val);
+}

- /* enable PDS */
- ub_slot_read_byte(slot, UB_SLOT_PDS_CTRL, &val);
- val |= UB_SLOT_PDS_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_PDS_CTRL, val);
+static void ubhp_disable(struct ub_slot *slot, u32 pos, u32 mask, bool flag)
+{
+ u8 val;
+
+ if (!flag)
+ return;
+
+ ub_slot_read_byte(slot, pos, &val);
+ val &= ~mask;
+ ub_slot_write_byte(slot, pos, val);
+}
+static void ubhp_start_slot(struct ub_slot *slot)
+{
+ /* enable PP */
+ ubhp_enable(slot, UB_SLOT_PP_CTRL, UB_SLOT_PP_CTRL_MASK, BUTTON(slot));
+ /* enable PD */
+ ubhp_enable(slot, UB_SLOT_PD_CTRL, UB_SLOT_PD_CTRL_MASK, PRESENT(slot));
+ /* enable PDS */
+ ubhp_enable(slot, UB_SLOT_PDS_CTRL, UB_SLOT_PDS_CTRL_MASK, PRESENT(slot));
 /* enable MS */
- ub_slot_read_byte(slot, UB_SLOT_MS_CTRL, &val);
- val |= UB_SLOT_MS_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_MS_CTRL, val);
+ ubhp_enable(slot, UB_SLOT_MS_CTRL, UB_SLOT_MS_CTRL_MASK, true);
}

static void ubhp_stop_slot(struct ub_slot *slot)
{
- u8 val;
-
 /* disable MS */
- ub_slot_read_byte(slot, UB_SLOT_MS_CTRL, &val);
- val &= ~UB_SLOT_MS_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_MS_CTRL, val);
-
+ ubhp_disable(slot, UB_SLOT_MS_CTRL, UB_SLOT_MS_CTRL_MASK, true);
 /* disable PDS */
- ub_slot_read_byte(slot, UB_SLOT_PDS_CTRL, &val);
- val &= ~UB_SLOT_PDS_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_PDS_CTRL, val);
-
+ ubhp_disable(slot, UB_SLOT_PDS_CTRL, UB_SLOT_PDS_CTRL_MASK, PRESENT(slot));
 /* disable PD */
- ub_slot_read_byte(slot, UB_SLOT_PD_CTRL, &val);
- val &= ~UB_SLOT_PD_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_PD_CTRL, val);
-
+ ubhp_disable(slot, UB_SLOT_PD_CTRL, UB_SLOT_PD_CTRL_MASK, PRESENT(slot));
 /* disable PP */
- ub_slot_read_byte(slot, UB_SLOT_PP_CTRL, &val);
- val &= ~UB_SLOT_PP_CTRL_MASK;
- ub_slot_write_byte(slot, UB_SLOT_PP_CTRL, val);
+ ubhp_disable(slot, UB_SLOT_PP_CTRL, UB_SLOT_PP_CTRL_MASK, BUTTON(slot));
}

void ubhp_start_slots(struct ub_entity *uent)
diff --git a/include/uapi/ub/ubus/ubus_regs.h b/include/uapi/ub/ubus/ubus_regs.h
index a4fe600f5459..47847be68e91 100644
--- a/include/uapi/ub/ubus/ubus_regs.h
+++ b/include/uapi/ub/ubus/ubus_regs.h
@@ -81,6 +81,7 @@ enum ub_port_cap_id {
 #define UB_SLOT_WLPS 0x2
 #define UB_SLOT_PLPS 0x4
 #define UB_SLOT_PDSS 0x8
+#define UB_SLOT_PWCS 0x10
 #define UB_SLOT_PORT UB_ADDR_TO_POS(0x3)
 #define UB_SLOT_START_PORT 0x0000ffff
 #define UB_SLOT_PP_CTRL UB_ADDR_TO_POS(0x4)
-- Gitee
From 38d53a63d2abe5b32ff06cf88528a2debc19eb8a Mon Sep 17 00:00:00 2001
From: Yahui Liu
Date: Wed, 10 Dec 2025 20:36:11 +0800
Subject: [PATCH 164/243] ub:ubus: fix port reset in cluster mode

commit 246a059f3238542f9f9ada5fda550e7f8fb4f78a openEuler

drivers inclusion
category: Bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V
CVE: NA

-----------------------------------------------------------

Fix port reset in cluster mode: resetting a port through sysfs is not
supported while the bus controller works in cluster mode, so reject such
requests with -EINVAL.
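Concretely, the store handler now fails fast before any reset work is
attempted; a minimal sketch condensed from the port.c diff below (all
names taken from that diff):

    /* sysfs-initiated reset is not supported in cluster mode */
    if (port->uent->ubc->cluster) {
        ub_err(port->uent, "Port reset is not supported by sysfs in cluster mode\n");
        return -EINVAL;
    }

    ret = ub_port_reset_function(port); /* reached only in non-cluster mode */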
Signed-off-by: Yahui Liu Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- Documentation/ub/ubus/hisi_ubus.rst | 2 -- drivers/ub/ubus/port.c | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Documentation/ub/ubus/hisi_ubus.rst b/Documentation/ub/ubus/hisi_ubus.rst index b384b058129f..90e76d2e587f 100644 --- a/Documentation/ub/ubus/hisi_ubus.rst +++ b/Documentation/ub/ubus/hisi_ubus.rst @@ -61,8 +61,6 @@ UB Message Core Driver Hisi UBUS implements a message device that provides a set of operations:: static struct message_ops hi_message_ops = { - .probe_dev = hi_message_probe_dev, - .remove_dev = hi_message_remove_dev, .sync_request = hi_message_sync_request, .response = hi_message_response, .sync_enum = hi_message_sync_enum, diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index f2ec6e8b9f47..f4d91d0e9d99 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ -162,6 +162,11 @@ static ssize_t port_reset_store(struct ub_port *port, const char *buf, return -EINVAL; } + if (port->uent->ubc->cluster) { + ub_err(port->uent, "Port reset is not supported by sysfs in cluster mode\n"); + return -EINVAL; + } + ret = ub_port_reset_function(port); if (ret < 0) return ret; -- Gitee From b14be35ea8a328c07fbe0cf11c1b881f11f8692d Mon Sep 17 00:00:00 2001 From: Junlong Zheng Date: Wed, 10 Dec 2025 20:24:13 +0800 Subject: [PATCH 165/243] ub:hisi-ubus: fix MUE unreg msg rsp time commit f0942c09a5cf0eb52de419aeaf1067d7798fd60e openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- 1. During MUE deregistration processing, first send a response to the control plane, then proceed with the device destruction process. 2. Add entity number to the uent release log. 
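Both points map directly onto the diffs below. A minimal sketch of the
reordered deregistration path, condensed from the vdm.c hunk (names taken
from that diff, surrounding handler context elided):

    /* respond to the control plane first, so the response is not held
     * up behind entity teardown */
    ub_vdm_msg_rsp(ubc, pkt, status);

    /* only then tear the entity down */
    if (status == UB_MSG_RSP_SUCCESS)
        ub_disable_ent(uent);

For the release log, ub_release_ent() now snapshots uent->uent_num into a
local variable before kfree(uent), so the pr_info() that prints the entity
number never reads freed memory.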
Fixes: 86fec00cb73a ("ub:hisi-ubus: Support UBUS vdm entity enable message") Signed-off-by: Junlong Zheng Signed-off-by: Shi Yang --- drivers/ub/ubus/ubus_entity.c | 4 +++- drivers/ub/ubus/vendor/hisilicon/vdm.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index b43d682ba3d8..105c6f396b44 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -463,6 +463,7 @@ EXPORT_SYMBOL_GPL(ub_start_ent); static void ub_release_ent(struct device *dev) { struct ub_entity *uent; + u32 uent_num; uent = to_ub_entity(dev); if (is_primary(uent) && !is_p_device(uent)) { @@ -480,8 +481,9 @@ static void ub_release_ent(struct device *dev) kfree(uent->driver_override); uent->token_value = 0; + uent_num = uent->uent_num; kfree(uent); - pr_info("uent release\n"); + pr_info("uent[%#x] release\n", uent_num); } void ub_stop_ent(struct ub_entity *uent) diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index 4a19e0fb8d57..329bc51b5a0e 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -256,10 +256,11 @@ static u8 ub_idevice_pue_rls_handler(struct ub_bus_controller *ubc, struct vdm_m status = UB_MSG_RSP_SUCCESS; } + ub_vdm_msg_rsp(ubc, pkt, status); + if (status == UB_MSG_RSP_SUCCESS) ub_disable_ent(uent); - ub_vdm_msg_rsp(ubc, pkt, status); return status; } -- Gitee From b870561ec2dd9fe1cb324bc65e8b6e32474e9f20 Mon Sep 17 00:00:00 2001 From: Jianquan-Lin Date: Tue, 16 Dec 2025 09:58:16 +0800 Subject: [PATCH 166/243] ub:ubus: Change create device irq domain debug info commit 9147e429cddb371664aeb08f8ed5af680cb6054a openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- Change create device irq domain debug info. 
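One note on the pattern in the irqdomain.c diff below: WARN_ON_ONCE()
prints its backtrace only on the first failure, but it returns the
condition on every call, so the new pr_err() fires on every failed
attempt, not just the first one:

    if (WARN_ON_ONCE(1)) /* backtrace once; condition returned each call */
        pr_err("Create device irq domain failed.\n");

    return false;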
Fixes: 81962d0ecc6e ("ub:ubus: Support UBUS Interrupt framework") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/msi/irqdomain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ub/ubus/msi/irqdomain.c b/drivers/ub/ubus/msi/irqdomain.c index 5a01c40368bc..83da28e03fc7 100644 --- a/drivers/ub/ubus/msi/irqdomain.c +++ b/drivers/ub/ubus/msi/irqdomain.c @@ -95,7 +95,7 @@ static bool ub_create_device_domain(struct ub_entity *uent, return true; if (WARN_ON_ONCE(1)) - pr_info("TODO: create device irq domain.\n"); + pr_err("Create device irq domain failed.\n"); return false; } -- Gitee From 126727ce7017f51dc518257442e66d4fa1907dba Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 15 Dec 2025 11:22:17 +0800 Subject: [PATCH 167/243] ub: cdma: add eid upi update response commit f7c5a9a48b65c9b80282e4776fcdfc46bb3e02e7 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ----------------------------------------------------------- add eid upi update response Fixes: ca1562136e14 ("ub: cdma: support querying sl information and updating eu") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma_dev.c | 62 ++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 2b69a44b346e..ae3d76cff5b2 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -298,30 +298,66 @@ static int cdma_ctrlq_eu_del(struct cdma_dev *cdev, struct eu_info *eu) return ret; } +static int cdma_ctrlq_eu_update_response(struct cdma_dev *cdev, u16 seq, int ret_val) +{ + struct ubase_ctrlq_msg msg = { 0 }; + int inbuf = 0; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.opcode = CDMA_CTRLQ_EU_UPDATE; + msg.need_resp = 0; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + msg.in = (void *)&inbuf; + msg.in_size = sizeof(inbuf); + + ret = ubase_ctrlq_send_msg(cdev->adev, &msg); + if (ret) + dev_err(cdev->dev, "send eu update response failed, ret = %d, ret_val = %d.\n", + ret, ret_val); + return ret; +} + static int cdma_ctrlq_eu_update(struct auxiliary_device *adev, u8 service_ver, - void *data, u16 len, u16 seq) + void *data, u16 len, u16 seq) { struct cdma_dev *cdev = dev_get_drvdata(&adev->dev); - struct cdma_ctrlq_eu_info *ctrlq_eu; + struct cdma_ctrlq_eu_info eu = { 0 }; int ret = -EINVAL; - if (len < sizeof(*ctrlq_eu)) { - dev_err(cdev->dev, "ctrlq data len is invalid.\n"); - return -EINVAL; + if (cdev->status != CDMA_NORMAL) { + dev_err(cdev->dev, "status is abnormal and don't update eu.\n"); + return cdma_ctrlq_eu_update_response(cdev, seq, 0); + } + + if (len < sizeof(eu)) { + dev_err(cdev->dev, "update eu msg len = %u is invalid.\n", len); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); } - ctrlq_eu = (struct cdma_ctrlq_eu_info *)data; + memcpy(&eu, data, sizeof(eu)); + if (eu.op != CDMA_CTRLQ_EU_ADD && eu.op != CDMA_CTRLQ_EU_DEL) { + dev_err(cdev->dev, "update eu op = %u is invalid.\n", eu.op); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); + } + + if (eu.eu.eid_idx >= CDMA_MAX_EU_NUM) { + dev_err(cdev->dev, "update eu invalid eid_idx = %u.\n", + eu.eu.eid_idx); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); + } mutex_lock(&cdev->eu_mutex); - if (ctrlq_eu->op == CDMA_CTRLQ_EU_ADD) - ret = cdma_ctrlq_eu_add(cdev, 
&ctrlq_eu->eu); - else if (ctrlq_eu->op == CDMA_CTRLQ_EU_DEL) - ret = cdma_ctrlq_eu_del(cdev, &ctrlq_eu->eu); - else - dev_err(cdev->dev, "ctrlq eu op is invalid.\n"); + if (eu.op == CDMA_CTRLQ_EU_ADD) + ret = cdma_ctrlq_eu_add(cdev, &eu.eu); + else if (eu.op == CDMA_CTRLQ_EU_DEL) + ret = cdma_ctrlq_eu_del(cdev, &eu.eu); mutex_unlock(&cdev->eu_mutex); - return ret; + return cdma_ctrlq_eu_update_response(cdev, seq, ret); } int cdma_create_arm_db_page(struct cdma_dev *cdev) -- Gitee From f2b5861fe767301f3b01f4bdba759384cf83b98b Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 11:20:51 +0800 Subject: [PATCH 168/243] ub: cdma: modify log level about en information commit 3a0deab724ae6b28be4b3ea36b4805aa02cd7c50 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- modify log level about en information Fixes: ca1562136e14 ("ub: cdma: support querying sl information and updating eu") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma.h | 2 +- drivers/ub/cdma/cdma_api.c | 2 +- drivers/ub/cdma/cdma_chardev.h | 2 +- drivers/ub/cdma/cdma_cmd.c | 12 +++++---- drivers/ub/cdma/cdma_cmd.h | 3 ++- drivers/ub/cdma/cdma_common.h | 2 +- drivers/ub/cdma/cdma_context.h | 2 +- drivers/ub/cdma/cdma_db.h | 2 +- drivers/ub/cdma/cdma_debugfs.h | 2 +- drivers/ub/cdma/cdma_dev.c | 11 ++++----- drivers/ub/cdma/cdma_dev.h | 2 +- drivers/ub/cdma/cdma_eq.h | 2 +- drivers/ub/cdma/cdma_event.h | 3 ++- drivers/ub/cdma/cdma_handle.h | 2 +- drivers/ub/cdma/cdma_ioctl.c | 6 ++--- drivers/ub/cdma/cdma_ioctl.h | 2 +- drivers/ub/cdma/cdma_jfc.h | 2 +- drivers/ub/cdma/cdma_jfs.h | 2 +- drivers/ub/cdma/cdma_main.c | 1 - drivers/ub/cdma/cdma_mbox.h | 2 +- drivers/ub/cdma/cdma_mmap.h | 2 +- drivers/ub/cdma/cdma_queue.c | 3 +-- drivers/ub/cdma/cdma_queue.h | 3 ++- drivers/ub/cdma/cdma_segment.h | 2 +- drivers/ub/cdma/cdma_tid.h | 2 +- drivers/ub/cdma/cdma_tp.c | 44 ++++++++++++++++++--------------- drivers/ub/cdma/cdma_tp.h | 3 ++- drivers/ub/cdma/cdma_types.h | 6 ++--- drivers/ub/cdma/cdma_uobj.h | 2 +- include/uapi/ub/cdma/cdma_abi.h | 2 +- include/ub/cdma/cdma_api.h | 2 +- 31 files changed, 70 insertions(+), 65 deletions(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b7d00bcf39ac..b77cf1350545 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -230,4 +230,4 @@ static inline struct cdma_dev *get_cdma_dev(struct auxiliary_device *adev) return (struct cdma_dev *)dev_get_drvdata(&adev->dev); } -#endif /* _CDMA_H_ */ +#endif /* __CDMA_H__ */ diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index ae84210c1f97..36f037b97d0b 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -367,7 +367,7 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) } ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; - queue = (struct cdma_queue *)xa_load(&ctx_res->queue_xa, queue_id); + queue = xa_load(&ctx_res->queue_xa, queue_id); if (!queue) { dev_err(cdev->dev, "no queue found in this device, id = %d\n", queue_id); diff --git a/drivers/ub/cdma/cdma_chardev.h b/drivers/ub/cdma/cdma_chardev.h index 0bd4fcc654ff..684c7a766d16 100644 --- a/drivers/ub/cdma/cdma_chardev.h +++ b/drivers/ub/cdma/cdma_chardev.h @@ -18,4 +18,4 @@ void cdma_destroy_chardev(struct cdma_dev *cdev); int cdma_create_chardev(struct cdma_dev *cdev); void cdma_release_file(struct kref *ref); -#endif /* _CDMA_CHARDEV_H_ */ 
+#endif /* __CDMA_CHARDEV_H__ */ diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c index c8bf01d930ad..239369363a25 100644 --- a/drivers/ub/cdma/cdma_cmd.c +++ b/drivers/ub/cdma/cdma_cmd.c @@ -32,8 +32,10 @@ static int cdma_query_caps_from_firmware(struct cdma_dev *cdev) int ret; ret = cdma_cmd_query_fw_resource(cdev, &cmd); - if (ret) - return dev_err_probe(cdev->dev, ret, "query fw resource failed\n"); + if (ret) { + dev_err(cdev->dev, "query fw resource failed, ret = %d\n", ret); + return ret; + } caps->jfs_sge = cmd.jfs_sge; caps->trans_mode = cmd.trans_mode; @@ -42,9 +44,9 @@ static int cdma_query_caps_from_firmware(struct cdma_dev *cdev) caps->ue_cnt = cmd.ue_cnt; caps->ue_id = cmd.ue_id; - dev_dbg(cdev->dev, "jfs_sge = 0x%x, trans_mode = 0x%x, seid.max_cnt = 0x%x\n", + dev_info(cdev->dev, "jfs_sge = 0x%x, trans_mode = 0x%x, seid.max_cnt = 0x%x\n", caps->jfs_sge, caps->trans_mode, caps->seid.max_cnt); - dev_dbg(cdev->dev, "feature = 0x%x, ue_cnt = 0x%x, ue_id = 0x%x\n", + dev_info(cdev->dev, "feature = 0x%x, ue_cnt = 0x%x, ue_id = 0x%x\n", caps->feature, caps->ue_cnt, caps->ue_id); return 0; @@ -207,7 +209,7 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev) attr->eu_num = out_query.seid_num; for (i = 0; i < attr->eu_num; i++) - dev_dbg(cdev->dev, + dev_info(cdev->dev, "cdma init eus[%u], upi = 0x%x, eid = 0x%x, eid_idx = 0x%x.\n", i, eus[i].upi, eus[i].eid.dw0, eus[i].eid_idx); mutex_unlock(&cdev->eu_mutex); diff --git a/drivers/ub/cdma/cdma_cmd.h b/drivers/ub/cdma/cdma_cmd.h index f85331c8c51b..0f676791b121 100644 --- a/drivers/ub/cdma/cdma_cmd.h +++ b/drivers/ub/cdma/cdma_cmd.h @@ -79,4 +79,5 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev); void cdma_cmd_inc(struct cdma_dev *cdev); void cdma_cmd_dec(struct cdma_dev *cdev); void cdma_cmd_flush(struct cdma_dev *cdev); -#endif + +#endif /* __CDMA_CMD_H__ */ diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 58855991647d..d6da50f5d0aa 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -108,4 +108,4 @@ int cdma_pin_queue_addr(struct cdma_dev *cdev, u64 addr, u32 len, struct cdma_buf *buf); void cdma_unpin_queue_addr(struct cdma_umem *umem); -#endif +#endif /* __CDMA_COMMON_H__ */ diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 0eb40763c29d..715b59b64a41 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -38,4 +38,4 @@ struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel); void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx); void cdma_cleanup_context_res(struct cdma_context *ctx); -#endif /* CDMA_CONTEXT_H */ +#endif /* __CDMA_CONTEXT_H__ */ diff --git a/drivers/ub/cdma/cdma_db.h b/drivers/ub/cdma/cdma_db.h index fa3ef8c0f570..f11780bf3e29 100644 --- a/drivers/ub/cdma/cdma_db.h +++ b/drivers/ub/cdma/cdma_db.h @@ -39,4 +39,4 @@ int cdma_alloc_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); void cdma_free_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); -#endif /* CDMA_DB_H */ +#endif /* __CDMA_DB_H__ */ diff --git a/drivers/ub/cdma/cdma_debugfs.h b/drivers/ub/cdma/cdma_debugfs.h index 1cd0f2ada9dc..fa8af1f1c1ba 100644 --- a/drivers/ub/cdma/cdma_debugfs.h +++ b/drivers/ub/cdma/cdma_debugfs.h @@ -55,4 +55,4 @@ struct cdma_dbgfs { int cdma_dbg_init(struct auxiliary_device *adev); void cdma_dbg_uninit(struct auxiliary_device *adev); -#endif /* CDMA_DEBUGFS_H */ +#endif /* __CDMA_DEBUGFS_H__ */ diff --git a/drivers/ub/cdma/cdma_dev.c 
b/drivers/ub/cdma/cdma_dev.c index ae3d76cff5b2..54e2e4c778f7 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -232,10 +232,10 @@ static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) if (eu->eid_idx != eus[i].eid_idx) continue; - dev_dbg(cdev->dev, - "cdma.%u: eid_idx[0x%x] eid[0x%x->0x%x] upi[0x%x->0x%x] update success.\n", - cdev->adev->id, eu->eid_idx, eus[i].eid.dw0, - eu->eid.dw0, eus[i].upi, eu->upi & CDMA_UPI_MASK); + dev_info(cdev->dev, + "cdma.%u: eid_idx[0x%x] eid[0x%x->0x%x] upi[0x%x->0x%x] update success.\n", + cdev->adev->id, eu->eid_idx, eus[i].eid.dw0, + eu->eid.dw0, eus[i].upi, eu->upi & CDMA_UPI_MASK); eus[i].eid = eu->eid; eus[i].upi = eu->upi & CDMA_UPI_MASK; @@ -254,7 +254,7 @@ static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) } eus[attr->eu_num++] = *eu; - dev_dbg(cdev->dev, + dev_info(cdev->dev, "cdma.%u: eid_idx[0x%x] eid[0x%x] upi[0x%x] add success.\n", cdev->adev->id, eu->eid_idx, eu->eid.dw0, eu->upi & CDMA_UPI_MASK); @@ -479,7 +479,6 @@ void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove) if (is_remove) { cdma_free_dev_tid(cdev); - cdma_del_device_from_list(cdev); cdma_uninit_dev_param(cdev); kfree(cdev); diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index d433218934f1..2737cd3a5c58 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -34,4 +34,4 @@ void cdma_unregister_crq_event(struct auxiliary_device *adev); int cdma_create_arm_db_page(struct cdma_dev *cdev); void cdma_destroy_arm_db_page(struct cdma_dev *cdev); -#endif /* _CDMA_DEV_H_ */ +#endif /* __CDMA_DEV_H__ */ diff --git a/drivers/ub/cdma/cdma_eq.h b/drivers/ub/cdma/cdma_eq.h index 70e9edcccad4..cac34b5a2ae0 100644 --- a/drivers/ub/cdma/cdma_eq.h +++ b/drivers/ub/cdma/cdma_eq.h @@ -15,4 +15,4 @@ void cdma_unreg_ae_event(struct auxiliary_device *adev); int cdma_reg_ce_event(struct auxiliary_device *adev); void cdma_unreg_ce_event(struct auxiliary_device *adev); -#endif +#endif /* __CDMA_EQ_H__ */ diff --git a/drivers/ub/cdma/cdma_event.h b/drivers/ub/cdma/cdma_event.h index 4ca14c3c5fcb..d11a2f681dc7 100644 --- a/drivers/ub/cdma/cdma_event.h +++ b/drivers/ub/cdma/cdma_event.h @@ -76,4 +76,5 @@ void cdma_release_comp_event(struct cdma_jfce *jfce, struct list_head *event_lis void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list); void cdma_put_jfae(struct cdma_context *ctx); -#endif /* CDMA_EVENT_H */ + +#endif /* __CDMA_EVENT_H__ */ diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 00cb8049778e..4d36f72c0d97 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -19,4 +19,4 @@ int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, int cdma_faa(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add); -#endif /* CDMA_HANDLE_H */ +#endif /* __CDMA_HANDLE_H__ */ diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 0a62e306d6f7..abcf39f0d021 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -71,8 +71,7 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, int ret; if (cfile->uctx) { - dev_err(cdev->dev, "create jfae failed, ctx handle = %d.\n", - ctx->handle); + dev_err(cdev->dev, "cdma context has been created.\n"); return -EEXIST; } @@ -92,7 +91,8 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, ctx->jfae = cdma_alloc_jfae(cfile); if (!ctx->jfae) { - 
dev_err(cdev->dev, "create jfae failed.\n"); + dev_err(cdev->dev, "create jfae failed, ctx handle = %d.\n", + ctx->handle); ret = -EFAULT; goto free_context; } diff --git a/drivers/ub/cdma/cdma_ioctl.h b/drivers/ub/cdma/cdma_ioctl.h index a5b20c99117e..160c8e0f4300 100644 --- a/drivers/ub/cdma/cdma_ioctl.h +++ b/drivers/ub/cdma/cdma_ioctl.h @@ -9,4 +9,4 @@ struct cdma_ioctl_hdr; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr); -#endif /* _CDMA_IOCTL_H_ */ +#endif /* __CDMA_IOCTL_H__ */ diff --git a/drivers/ub/cdma/cdma_jfc.h b/drivers/ub/cdma/cdma_jfc.h index 7f512150e50c..39535bf513f3 100644 --- a/drivers/ub/cdma/cdma_jfc.h +++ b/drivers/ub/cdma/cdma_jfc.h @@ -188,4 +188,4 @@ int cdma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, int cdma_poll_jfc(struct cdma_base_jfc *base_jfc, int cr_cnt, struct dma_cr *cr); -#endif /* CDMA_JFC_H */ +#endif /* __CDMA_JFC_H__ */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 3d0391b03d97..187d1f660db6 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -326,4 +326,4 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, struct cdma_jfs_wr **bad_wr); -#endif +#endif /* __CDMA_JFS_H__ */ diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 8ec5849ade39..b7748e791c5e 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -383,5 +383,4 @@ static void __exit cdma_exit(void) module_init(cdma_init); module_exit(cdma_exit); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0"); MODULE_DESCRIPTION("Hisilicon UBus Crystal DMA Driver"); diff --git a/drivers/ub/cdma/cdma_mbox.h b/drivers/ub/cdma/cdma_mbox.h index e8a00f5c9b97..0cddd4fb8df8 100644 --- a/drivers/ub/cdma/cdma_mbox.h +++ b/drivers/ub/cdma/cdma_mbox.h @@ -43,4 +43,4 @@ int cdma_post_mailbox_ctx(struct cdma_dev *cdev, void *ctx, u32 size, struct ubase_cmd_mailbox *cdma_mailbox_query_ctx(struct cdma_dev *cdev, struct ubase_mbx_attr *attr); -#endif /* CDMA_MBOX_H */ +#endif /* __CDMA_MBOX_H__ */ diff --git a/drivers/ub/cdma/cdma_mmap.h b/drivers/ub/cdma/cdma_mmap.h index 0dd6c609a85e..65abdb5e284e 100644 --- a/drivers/ub/cdma/cdma_mmap.h +++ b/drivers/ub/cdma/cdma_mmap.h @@ -11,4 +11,4 @@ void cdma_unmap_vma_pages(struct cdma_file *cfile); const struct vm_operations_struct *cdma_get_umap_ops(void); void cdma_umap_priv_init(struct cdma_umap_priv *priv, struct vm_area_struct *vma); -#endif /* CDMA_MMAP_H */ +#endif /* __CDMA_MMAP_H__ */ diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 9b03baef162c..ab7252a649f0 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -16,8 +16,7 @@ struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) struct cdma_queue *queue; spin_lock(&cdev->queue_table.lock); - queue = (struct cdma_queue *)idr_find(&cdev->queue_table.idr_tbl.idr, - queue_id); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); spin_unlock(&cdev->queue_table.lock); return queue; diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 08b24cb0b3fc..072e51c3a300 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -36,4 +36,5 @@ struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id); void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, enum cdma_queue_res_type type, void *res); -#endif + +#endif /* 
__CDMA_QUEUE_H__ */ diff --git a/drivers/ub/cdma/cdma_segment.h b/drivers/ub/cdma/cdma_segment.h index 113e357fcedd..ef1610205fae 100644 --- a/drivers/ub/cdma/cdma_segment.h +++ b/drivers/ub/cdma/cdma_segment.h @@ -32,4 +32,4 @@ void cdma_seg_ungrant(struct cdma_segment *seg); struct dma_seg *cdma_import_seg(struct dma_seg_cfg *cfg); void cdma_unimport_seg(struct dma_seg *seg); -#endif /* CDMA_SEGMENT_H */ +#endif /* __CDMA_SEGMENT_H__ */ diff --git a/drivers/ub/cdma/cdma_tid.h b/drivers/ub/cdma/cdma_tid.h index 8bbd8c0c979a..9b82d47281e1 100644 --- a/drivers/ub/cdma/cdma_tid.h +++ b/drivers/ub/cdma/cdma_tid.h @@ -13,4 +13,4 @@ struct cdma_dev; int cdma_alloc_dev_tid(struct cdma_dev *cdev); void cdma_free_dev_tid(struct cdma_dev *cdev); -#endif +#endif /* __CDMA_TID_H__ */ diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c index a77f1164b416..c5a6b9c7d395 100644 --- a/drivers/ub/cdma/cdma_tp.c +++ b/drivers/ub/cdma/cdma_tp.c @@ -23,7 +23,7 @@ static inline int cdma_ctrlq_msg_send(struct cdma_dev *cdev, static int cdma_ctrlq_create_ctp(struct cdma_dev *cdev, struct cdma_tp_cfg *cfg, u32 *tpn) { - struct cdma_ctrlq_tp_create_cfg ctrlq_tp; + struct cdma_ctrlq_tp_create_cfg ctrlq_tp = { 0 }; struct cdma_ctrlq_tp_ret tp_out = { 0 }; struct ubase_ctrlq_msg msg = { 0 }; int ret; @@ -75,25 +75,29 @@ static void cdma_ctrlq_delete_ctp(struct cdma_dev *cdev, u32 tpn, struct ubase_ctrlq_msg msg = { 0 }; int ret; - ctrlq_tp.seid_flag = CDMA_CTRLQ_FLAG_ON; - ctrlq_tp.deid_flag = CDMA_CTRLQ_FLAG_ON; - ctrlq_tp.scna = cfg->scna; - ctrlq_tp.dcna = cfg->dcna; - ctrlq_tp.seid[0] = cfg->seid; - ctrlq_tp.deid[0] = cfg->deid; - ctrlq_tp.tpn = tpn; - ctrlq_tp.route_type = CDMA_ROUTE_TYPE_CNA; - ctrlq_tp.trans_type = CDMA_TRANS_TYPE_CDMA_CTP; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = CDMA_CTRLQ_DELETE_CTP; - msg.need_resp = CDMA_CTRLQ_FLAG_ON; - msg.is_resp = CDMA_CTRLQ_FLAG_OFF; - msg.in_size = sizeof(ctrlq_tp); - msg.in = &ctrlq_tp; - msg.out_size = sizeof(tp_out); - msg.out = &tp_out; + ctrlq_tp = (struct cdma_ctrlq_tp_delete_cfg) { + .seid_flag = CDMA_CTRLQ_FLAG_ON, + .deid_flag = CDMA_CTRLQ_FLAG_ON, + .scna = cfg->scna, + .dcna = cfg->dcna, + .seid[0] = cfg->seid, + .deid[0] = cfg->deid, + .tpn = tpn, + .route_type = CDMA_ROUTE_TYPE_CNA, + .trans_type = CDMA_TRANS_TYPE_CDMA_CTP + }; + + msg = (struct ubase_ctrlq_msg) { + .service_ver = UBASE_CTRLQ_SER_VER_01, + .service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL, + .opcode = CDMA_CTRLQ_DELETE_CTP, + .need_resp = CDMA_CTRLQ_FLAG_ON, + .is_resp = CDMA_CTRLQ_FLAG_OFF, + .in_size = sizeof(ctrlq_tp), + .in = &ctrlq_tp, + .out_size = sizeof(tp_out), + .out = &tp_out + }; ret = cdma_ctrlq_msg_send(cdev, &msg); if (ret) diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h index 72019df35d74..dc18002785ed 100644 --- a/drivers/ub/cdma/cdma_tp.h +++ b/drivers/ub/cdma/cdma_tp.h @@ -73,4 +73,5 @@ struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); -#endif /* CDMA_TP_H */ + +#endif /* __CDMA_TP_H__ */ diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 947c360ba2ef..1a9aef127bc2 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -12,7 +12,7 @@ enum cdma_event_type { CDMA_EVENT_JFC_ERR, CDMA_EVENT_JFS_ERR, - CDMA_EVENT_DEV_INVALID, + CDMA_EVENT_DEV_INVALID }; enum cdma_remove_reason { @@ -73,7 
+73,6 @@ struct cdma_tp_cfg { struct cdma_base_tp { struct cdma_ucontext *uctx; struct cdma_tp_cfg cfg; - u64 usr_tp; u32 tpn; u32 tp_id; }; @@ -101,7 +100,6 @@ struct cdma_base_jfs { struct cdma_context *ctx; struct cdma_jfs_cfg cfg; cdma_event_callback_t jfae_handler; - u64 usr_jfs; u32 id; atomic_t use_cnt; struct cdma_jfs_event jfs_event; @@ -162,4 +160,4 @@ struct cdma_umap_priv { struct list_head node; }; -#endif +#endif /* __CDMA_TYPES_H__ */ diff --git a/drivers/ub/cdma/cdma_uobj.h b/drivers/ub/cdma/cdma_uobj.h index f343559a33ce..480db379901a 100644 --- a/drivers/ub/cdma/cdma_uobj.h +++ b/drivers/ub/cdma/cdma_uobj.h @@ -31,4 +31,4 @@ struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why); void cdma_close_uobj_fd(struct cdma_file *cfile); -#endif +#endif /* __CDMA_UOBJ_H__ */ diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 681854ed9765..d9c89e57019b 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -417,4 +417,4 @@ enum jfc_poll_state { JFC_POLL_ERR, }; -#endif +#endif /* _UAPI_UB_CDMA_CDMA_ABI_H_ */ diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 51acd722a74d..4f80012ef3e2 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -236,4 +236,4 @@ int dma_register_client(struct dma_client *client); void dma_unregister_client(struct dma_client *client); -#endif +#endif /* _UB_CDMA_CDMA_API_H_ */ -- Gitee From 777fd08b5f3fcdfb6cab8c0e0197c3b6dc4a5149 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 14:07:08 +0800 Subject: [PATCH 169/243] ub: cdma: fix kasan cdma jfae uaf commit 53a1a76fe01939f53d15d0e360d25d26de9a9514 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- fix kasan cdma jfae uaf Fixes: 35203448b9d1 ("ub: cdma: support reporting asynchronous events") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma_event.c | 5 ++++- drivers/ub/cdma/cdma_ioctl.c | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index 057bf2daefc3..bf0554c200a5 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -611,7 +611,10 @@ static int cdma_delete_jfae(struct inode *inode, struct file *filp) if (!mutex_trylock(&cfile->ctx_mutex)) return -ENOLCK; - jfae->ctx->jfae = NULL; + + if (jfae->ctx) + jfae->ctx->jfae = NULL; + cdma_uninit_jfe(&jfae->jfe); kfree(jfae); filp->private_data = NULL; diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index abcf39f0d021..dbd4bc78429e 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -127,6 +127,7 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfae *jfae; if (!cfile->uctx) { dev_err(cdev->dev, "cdma context has not been created.\n"); @@ -140,6 +141,10 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, return -EBUSY; } + jfae = cfile->uctx->jfae; + if (jfae) + jfae->ctx = NULL; + cdma_free_context(cdev, cfile->uctx); cfile->uctx = NULL; -- Gitee From 054265f22090d6461d22b52b07b30b4d3f6d769d Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 14:59:59 +0800 Subject: [PATCH 170/243] ub: cdma: fix the timing issue during 
flow-based deregistration. commit c3c1e92567d995d8fb362964ab696c5345746669 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- fix the timing issue during flow-based deregistration. Fixes: 710a287ef643 ("ub: cdma: support reset function") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma.h | 1 + drivers/ub/cdma/cdma_api.c | 14 +++++++------- drivers/ub/cdma/cdma_chardev.c | 8 ++++---- drivers/ub/cdma/cdma_context.c | 2 +- drivers/ub/cdma/cdma_event.c | 28 +++++++++++++++++----------- drivers/ub/cdma/cdma_ioctl.c | 4 ++-- drivers/ub/cdma/cdma_jfc.c | 18 ++++++++++++------ drivers/ub/cdma/cdma_jfs.c | 20 +++++++++++++------- drivers/ub/cdma/cdma_main.c | 21 ++++++++------------- drivers/ub/cdma/cdma_queue.c | 4 ++-- drivers/ub/cdma/cdma_tp.c | 4 ++-- drivers/ub/cdma/cdma_tp.h | 2 +- 12 files changed, 70 insertions(+), 56 deletions(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b77cf1350545..5fea4526e505 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -38,6 +38,7 @@ enum cdma_cqe_size { enum cdma_status { CDMA_NORMAL, CDMA_SUSPEND, + CDMA_INVALID }; enum cdma_client_ops { diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 36f037b97d0b..ce7461411d62 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -57,7 +57,7 @@ struct dma_device *dma_get_device_list(u32 *num_devices) xa_for_each(cdma_devs_tbl, index, cdev) { attr = &cdev->base.attr; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", attr->eid.dw0); continue; @@ -150,7 +150,7 @@ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) xa_for_each(cdma_devs_tbl, index, cdev) { attr = &cdev->base.attr; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", attr->eid.dw0); continue; @@ -203,7 +203,7 @@ int dma_create_context(struct dma_device *dma_dev) return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return -EINVAL; @@ -302,7 +302,7 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cf return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return -EINVAL; @@ -414,7 +414,7 @@ struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id, return NULL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return NULL; @@ -558,7 +558,7 @@ static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id, return -EINVAL; } - if (tmp_dev->status == CDMA_SUSPEND) { + if (tmp_dev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); return -EINVAL; } @@ -818,7 +818,7 @@ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); return -EINVAL; } diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 3614609d683e..51b19d614743 100644 --- 
a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -67,7 +67,7 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct cdma_ioctl_hdr hdr = { 0 }; int ret; - if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + if (!cfile->cdev || cfile->cdev->status >= CDMA_SUSPEND) { pr_info("ioctl cdev is invalid.\n"); return -ENODEV; } @@ -126,7 +126,7 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * u32 jfs_id; u32 cmd; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { dev_warn(cdev->dev, "cdev is resetting.\n"); return -EBUSY; } @@ -177,7 +177,7 @@ static int cdma_mmap(struct file *file, struct vm_area_struct *vma) struct cdma_umap_priv *priv; int ret; - if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + if (!cfile->cdev || cfile->cdev->status >= CDMA_SUSPEND) { pr_info("mmap cdev is invalid.\n"); return -ENODEV; } @@ -267,7 +267,7 @@ static int cdma_open(struct inode *inode, struct file *file) chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { dev_warn(cdev->dev, "cdev is resetting.\n"); return -EBUSY; } diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c index c95ccb0c28b4..ec55b03b49fc 100644 --- a/drivers/ub/cdma/cdma_context.c +++ b/drivers/ub/cdma/cdma_context.c @@ -151,7 +151,7 @@ static void cdma_cleanup_queue_res(struct cdma_dev *cdev, struct cdma_context *c cdma_delete_jfs(cdev, queue->jfs->id); if (queue->tp) - cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, ctx->invalid); if (queue->jfc) cdma_delete_jfc(cdev, queue->jfc->id, NULL); diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index bf0554c200a5..e8ecb7f8c4f6 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -512,6 +512,8 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, struct cdma_cmd_async_event async_event = { 0 }; struct cdma_jfe_event *event; struct list_head event_list; + struct cdma_context *ctx; + struct cdma_dev *cdev; u32 event_cnt; int ret; @@ -520,7 +522,10 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, return -EINVAL; } - if (!jfae->cfile->cdev || jfae->cfile->cdev->status == CDMA_SUSPEND) { + ctx = jfae->ctx; + cdev = jfae->cfile->cdev; + + if (!cdev || cdev->status == CDMA_INVALID || !ctx || ctx->invalid) { pr_info("wait dev invalid event success.\n"); async_event.event_data = 0; async_event.event_type = CDMA_EVENT_DEV_INVALID; @@ -562,11 +567,16 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait) { struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; + struct cdma_context *ctx; + struct cdma_dev *cdev; - if (!jfae || !jfae->cfile || !jfae->cfile->cdev) + if (!jfae || !jfae->cfile) return POLLERR; - if (jfae->cfile->cdev->status == CDMA_SUSPEND) + ctx = jfae->ctx; + cdev = jfae->cfile->cdev; + + if (!cdev || cdev->status == CDMA_INVALID || !ctx || ctx->invalid) return POLLIN | POLLRDNORM; return cdma_jfe_poll(&jfae->jfe, filp, wait); @@ -575,25 +585,21 @@ static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait static long cdma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cdma_jfae *jfae = (struct 
cdma_jfae *)filp->private_data; - unsigned int nr; - int ret; + unsigned int nr = (unsigned int)_IOC_NR(cmd); + long ret = -ENOIOCTLCMD; if (!jfae) return -EINVAL; - nr = (unsigned int)_IOC_NR(cmd); - switch (nr) { case JFAE_CMD_GET_ASYNC_EVENT: ret = cdma_get_async_event(jfae, filp, arg); break; default: - dev_err(jfae->cfile->cdev->dev, "nr = %u.\n", nr); - ret = -ENOIOCTLCMD; - break; + pr_err("jfae ioctl wrong nr = %u.\n", nr); } - return (long)ret; + return ret; } static int cdma_delete_jfae(struct inode *inode, struct file *filp) diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index dbd4bc78429e..4a30cbbd383f 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -215,7 +215,7 @@ static int cdma_cmd_create_ctp(struct cdma_ioctl_hdr *hdr, return 0; delete_ctp: - cdma_delete_ctp(cdev, ctp->tp_id); + cdma_delete_ctp(cdev, ctp->tp_id, false); delete_obj: cdma_uobj_delete(uobj); @@ -260,7 +260,7 @@ static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, } ctp = uobj->object; - cdma_delete_ctp(cdev, ctp->tp_id); + cdma_delete_ctp(cdev, ctp->tp_id, cfile->uctx->invalid); cdma_uobj_delete(uobj); cdma_set_queue_res(cdev, queue, QUEUE_RES_TP, NULL); diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index 0b3611c3d27d..9c6c82eaee93 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -249,12 +249,20 @@ static int cdma_query_jfc_destroy_done(struct cdma_dev *cdev, uint32_t jfcn) return ret; } -static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) +static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, struct cdma_jfc *jfc) { #define QUERY_MAX_TIMES 5 + struct cdma_context *ctx = jfc->base.ctx; + u32 jfcn = jfc->jfcn; u32 wait_times = 0; int ret; + if (cdev->status == CDMA_INVALID || (ctx && ctx->invalid)) { + dev_info(cdev->dev, + "resetting Ignore jfc ctx, jfcn = %u\n", jfcn); + return 0; + } + ret = cdma_post_destroy_jfc_mbox(cdev, jfcn, CDMA_JFC_STATE_INVALID); if (ret) { dev_err(cdev->dev, "post mbox to destroy jfc failed, id: %u.\n", jfcn); @@ -555,11 +563,9 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, return -EINVAL; } - if (!(jfc->base.ctx && jfc->base.ctx->invalid)) { - ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); - if (ret) - dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); - } + ret = cdma_destroy_and_flush_jfc(cdev, jfc); + if (ret) + dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); if (refcount_dec_and_test(&jfc->event_refcount)) complete(&jfc->event_comp); diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 8a62e2a2fd6b..437e48d1e9a6 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -498,10 +498,18 @@ static bool cdma_destroy_jfs_precondition(struct cdma_dev *cdev, } static int cdma_modify_and_destroy_jfs(struct cdma_dev *cdev, - struct cdma_jetty_queue *sq) + struct cdma_jfs *jfs) { + struct cdma_context *ctx = jfs->base_jfs.ctx; + struct cdma_jetty_queue *sq = &jfs->sq; int ret = 0; + if (cdev->status == CDMA_INVALID || (ctx && ctx->invalid)) { + dev_info(cdev->dev, + "resetting Ignore jfs ctx, id = %u.\n", sq->id); + return 0; + } + if (!cdma_destroy_jfs_precondition(cdev, sq)) return -EINVAL; @@ -538,11 +546,9 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return -EINVAL; } - if (!(jfs->base_jfs.ctx && jfs->base_jfs.ctx->invalid)) { - ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); - if (ret) - dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); - } + 
ret = cdma_modify_and_destroy_jfs(cdev, jfs); + if (ret) + dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); if (refcount_dec_and_test(&jfs->ae_ref_cnt)) complete(&jfs->ae_comp); @@ -1018,7 +1024,7 @@ static int cdma_post_sq_wr(struct cdma_dev *cdev, struct cdma_jetty_queue *sq, post_wr: if (wr_cnt) { - if (cdev->status != CDMA_SUSPEND) { + if (cdev->status == CDMA_NORMAL) { /* Ensure the order of write memory operations */ wmb(); if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index b7748e791c5e..83c6671dcf66 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -107,13 +107,13 @@ static void cdma_reset_down(struct auxiliary_device *adev) mutex_lock(&g_cdma_reset_mutex); cdev = get_cdma_dev(adev); - if (!cdev || cdev->status == CDMA_SUSPEND) { + if (!cdev || cdev->status >= CDMA_SUSPEND) { dev_warn(&adev->dev, "cdma device is not ready.\n"); mutex_unlock(&g_cdma_reset_mutex); return; } - cdev->status = CDMA_SUSPEND; + cdev->status = CDMA_INVALID; cdma_cmd_flush(cdev); cdma_reset_unmap_vma_pages(cdev, true); cdma_client_handler(cdev, CDMA_CLIENT_STOP); @@ -136,7 +136,7 @@ static void cdma_reset_uninit(struct auxiliary_device *adev) } stage = ubase_get_reset_stage(adev); - if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == CDMA_SUSPEND) { + if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == CDMA_INVALID) { cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); cdma_destroy_dev(cdev, is_rmmod); } @@ -225,12 +225,12 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", __func__, auxdev->name, auxdev->id); + ubase_reset_unregister(auxdev); mutex_lock(&g_cdma_reset_mutex); cdev = dev_get_drvdata(&auxdev->dev); if (!cdev) { - dev_err(&auxdev->dev, "get drvdata from ubase failed.\n"); - ubase_reset_unregister(auxdev); mutex_unlock(&g_cdma_reset_mutex); + dev_err(&auxdev->dev, "cdma device is not exist.\n"); return; } @@ -239,19 +239,15 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) cdma_client_handler(cdev, CDMA_CLIENT_STOP); cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); cdma_reset_unmap_vma_pages(cdev, false); - - if (!is_rmmod) { - ret = ubase_deactivate_dev(auxdev); - dev_info(&auxdev->dev, "ubase deactivate dev ret = %d.\n", ret); - } - - ubase_reset_unregister(auxdev); + ret = is_rmmod ? 
0 : ubase_deactivate_dev(auxdev); cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); cdma_free_cfile_uobj(cdev); cdma_destroy_dev(cdev, true); mutex_unlock(&g_cdma_reset_mutex); + + dev_info(&auxdev->dev, "cdma device remove success, ret = %d.\n", ret); } static void cdma_reset_init(struct auxiliary_device *adev) @@ -331,7 +327,6 @@ static int cdma_probe(struct auxiliary_device *auxdev, static void cdma_remove(struct auxiliary_device *auxdev) { cdma_uninit_dev(auxdev); - pr_info("cdma device remove success.\n"); } static const struct auxiliary_device_id cdma_id_table[] = { diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index ab7252a649f0..2d6a04d0bff9 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -100,7 +100,7 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, return 0; delete_tp: - cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, false); delete_jfc: cdma_delete_jfc(cdev, queue->jfc->id, NULL); @@ -112,7 +112,7 @@ static void cdma_delete_queue_res(struct cdma_dev *cdev, { cdma_delete_jfs(cdev, queue->jfs->id); queue->jfs = NULL; - cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, false); queue->tp = NULL; cdma_delete_jfc(cdev, queue->jfc->id, NULL); queue->jfc = NULL; diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c index c5a6b9c7d395..681f0be3a74c 100644 --- a/drivers/ub/cdma/cdma_tp.c +++ b/drivers/ub/cdma/cdma_tp.c @@ -202,7 +202,7 @@ struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, return NULL; } -void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id) +void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id, bool invalid) { struct cdma_tp_cfg cfg = { 0 }; struct cdma_tp *tp; @@ -219,7 +219,7 @@ void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id) spin_lock(&cdev->ctp_table.lock); refcount_dec(&tp->refcount); if (refcount_dec_if_one(&tp->refcount)) { - if (cdev->status != CDMA_SUSPEND) { + if (cdev->status == CDMA_NORMAL && !invalid) { flag = true; tpn = tp->base.tpn; cfg = tp->base.cfg; diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h index dc18002785ed..d291bbae68d3 100644 --- a/drivers/ub/cdma/cdma_tp.h +++ b/drivers/ub/cdma/cdma_tp.h @@ -70,7 +70,7 @@ struct cdma_ctrlq_tp_delete_cfg { struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, struct cdma_tp_cfg *cfg); -void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); +void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id, bool invalid); void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); -- Gitee From 6c8c7463c951a3850d8f641224a80d0ca57c43dd Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Tue, 9 Dec 2025 21:10:52 +0800 Subject: [PATCH 171/243] ub: cdma: modify the compatibility field according to the Linux KABI specification commit 874105a62f638a0d580524a6ac46bd2369edaa3e openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- modify the compatibility field according to the Linux KABI specification Fixes: 34c67ed84c10 ("ub: cdma: support for cdma kernelspace north-south compatibility requirements") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- include/ub/cdma/cdma_api.h | 49 +++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 
4f80012ef3e2..f3b90848bee1 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -5,6 +5,7 @@ #define _UB_CDMA_CDMA_API_H_ #include +#include #include /** @@ -19,8 +20,10 @@ struct dma_device { struct cdma_device_attr attr; atomic_t ref_cnt; void *private_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; enum dma_cr_opcode { @@ -68,8 +71,10 @@ struct dma_cr { u32 local_id; u32 remote_id; u32 tpn; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -89,8 +94,10 @@ struct queue_cfg { u32 dcna; struct dev_eid rmt_eid; u32 trans_mode; - u32 rsv_bitmap; - u32 rsvd[6]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -111,8 +118,10 @@ struct dma_seg { u32 tid; /* data valid only in bit 0-19 */ u32 token_value; bool token_value_valid; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; struct dma_seg_cfg { @@ -120,8 +129,10 @@ u64 len; u32 token_value; bool token_value_valid; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -149,8 +160,10 @@ enum dma_status { struct dma_cas_data { u64 compare_data; u64 swap_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -163,8 +176,10 @@ struct dma_cas_data { struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -183,8 +198,10 @@ struct dma_client { int (*add)(u32 eid); void (*remove)(u32 eid); void (*stop)(u32 eid); - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; struct dma_device *dma_get_device_list(u32 *num_devices); -- Gitee From 9c5f46dfd26e9bcc5bd9b164f2ea50cfb15fbc62 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 19 Nov 2025 13:38:29 +0800 Subject: [PATCH 172/243] ub:ubus: Fix X86 build error commit d742069465ca1953abb2591d392686fbf1d69714 openEuler driver inclusion category: feature bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID7DDH CVE: NA ----------------------------------------------------------- Fix X86 build error. When CONFIG_GENERIC_MSI_IRQ is disabled and CONFIG_UB is enabled, msi.domain cannot be found. Fix this by guarding the body of ub_update_msi_domain() with CONFIG_GENERIC_MSI_IRQ, as sketched below.
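For reference, with the guard applied the function reduces to roughly the following on builds without generic MSI irqdomains (a condensed sketch of the patched drivers/ub/ubfi/irq.c; the fwnode lookup and its error handling inside the #ifdef are elided here):

	int ub_update_msi_domain(struct device *dev,
				 enum irq_domain_bus_token bus_token)
	{
	#ifdef CONFIG_GENERIC_MSI_IRQ
		/* look up the irq_domain matching the new bus_token ... */
		dev_set_msi_domain(dev, domain);
	#endif
		return 0;	/* no-op when generic MSI irqdomains are compiled out */
	}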
Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubfi/irq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index 846af8d6c5f1..5835bc8421b3 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -12,6 +12,7 @@ int ub_update_msi_domain(struct device *dev, enum irq_domain_bus_token bus_token) { +#ifdef CONFIG_GENERIC_MSI_IRQ struct fwnode_handle *fwnode; struct irq_domain *domain; @@ -35,7 +36,7 @@ int ub_update_msi_domain(struct device *dev, /* Update msi domain with new bus_token */ dev_set_msi_domain(dev, domain); - +#endif return 0; } EXPORT_SYMBOL_GPL(ub_update_msi_domain); -- Gitee From 1beea27a7892dc5e1ce326b61c27d6dbb345b8f5 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 20 Nov 2025 20:02:53 +0800 Subject: [PATCH 173/243] ub: ubase: Fix CTRLQ init process commit 785047eef6d2a17a8016986ff9f79bff97b64d4b openEuler Fix the CTRLQ init process: the error-path uninit labels shouldn't sit in front of the success path, which may lead to unexpected mistakes. Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Chuan Wu Signed-off-by: Fengyan Mu Signed-off-by: zhao-lichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 5dcb25012d61..8827318f97a8 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -324,12 +324,12 @@ int ubase_ctrlq_init(struct ubase_dev *udev) udev->ctrlq.csq_next_seq = 1; atomic_set(&udev->ctrlq.req_cnt, 0); -err_crq_table_init: - ubase_ctrlq_msg_queue_uninit(udev); success: set_bit(UBASE_CTRLQ_STATE_ENABLE, &udev->ctrlq.state); return 0; +err_crq_table_init: + ubase_ctrlq_msg_queue_uninit(udev); err_msg_queue_init: ubase_ctrlq_queue_uninit(udev); return ret; -- Gitee From 376fdaf66cefaa68adf90467cd71ecd9371d9e8d Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Mon, 10 Nov 2025 11:34:23 +0800 Subject: [PATCH 174/243] ub: ubase: Fix freeing of pages when the ubase driver destroys a TA context commit c19452d5589463c279520ac69794b897c3069d36 openEuler Fix the problem that pages are freed when the ubase driver destroys a TA context; these pages must not be released, because the upper-layer driver still needs to use them. Fixes: 8d68017f37fa ("ub: ubase: support for command process") Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_mailbox.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/ub/ubase/ubase_mailbox.c b/drivers/ub/ubase/ubase_mailbox.c index 86da27cdf828..a92993f5ac0b 100644 --- a/drivers/ub/ubase/ubase_mailbox.c +++ b/drivers/ub/ubase/ubase_mailbox.c @@ -535,8 +535,7 @@ int __ubase_hw_upgrade_ctx_ex(struct ubase_dev *udev, } ret = __ubase_hw_upgrade_ctx(udev, attr, mailbox); - if ((ret && type == UBASE_MB_CREATE) || - (!ret && type == UBASE_MB_DESTROY)) + if (ret && type == UBASE_MB_CREATE) ubase_free_buf_ctx_page(udev, ctx_buf, attr->tag); return ret; -- Gitee From 1d6f0a94548e36b63a0fba962dc9dca936365b70 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 13 Nov 2025 17:56:49 +0800 Subject: [PATCH 175/243] net: unic: Use copy instead of ummu map in tx commit 5098688f41bcdc8467d225a6df5e119b87c87297 openEuler In the transmit direction, the skb data structure passed down by the protocol stack contains the packet information.
The driver needs to map the virtual addresses such as skb->data to DMA addresses. After the chip completes the transmission, the driver must unmap these addresses and release the packet memory. However, due to the poor performance of the UMMU's DMA mapping and unmapping operations, each packet requires the UMMU to perform both mapping and unmapping actions, which affects the UNIC I/O performance. To address this issue, the UNIC driver pre-allocates a block of physical memory during initialization and performs the UMMU DMA address mapping in advance. When sending packets, the driver directly copies the packets into the pre-allocated memory. The driver does not support non-linear areas such as skb->frag, and it supports a smaller MTU value. As a result, the simple copy operation achieves high performance, improving overall performance by up to three times. Fixes: d8164d3745d4 ("net: unic: add io basic Rx/Tx functionality for unic") Signed-off-by: Haiqing Fang Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_dev.c | 2 +- drivers/net/ub/unic/unic_netdev.c | 2 - drivers/net/ub/unic/unic_trace.h | 15 +- drivers/net/ub/unic/unic_tx.c | 368 ++++++++++++++---------------- drivers/net/ub/unic/unic_tx.h | 14 ++ 5 files changed, 193 insertions(+), 208 deletions(-) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index f8d5676bfc1f..a0b25e52695e 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -269,7 +269,7 @@ static int unic_init_channels_attr(struct unic_dev *unic_dev) channels->vl.vl_num = 1; channels->rss_vl_num = 1; - channels->rss_size = unic_channels_max_num(unic_dev->comdev.adev); + channels->rss_size = 1; channels->num = channels->rss_size * channels->rss_vl_num; channels->sqebb_depth = unic_caps->jfs.depth; channels->rqe_depth = unic_caps->jfr.depth; diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index d2d213c90d0b..a9d8240d59b0 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -403,7 +403,6 @@ static void unic_fetch_stats_tx(struct rtnl_link_stats64 *stats, stats->tx_bytes += channel->sq->stats.bytes; stats->tx_packets += channel->sq->stats.packets; stats->tx_errors += channel->sq->stats.pad_err; - stats->tx_errors += channel->sq->stats.map_err; stats->tx_errors += channel->sq->stats.over_max_sge_num; stats->tx_errors += channel->sq->stats.csum_err; stats->tx_errors += channel->sq->stats.vlan_err; @@ -412,7 +411,6 @@ static void unic_fetch_stats_tx(struct rtnl_link_stats64 *stats, stats->tx_errors += channel->sq->stats.cfg5_drop_cnt; stats->tx_dropped += channel->sq->stats.pad_err; - stats->tx_dropped += channel->sq->stats.map_err; stats->tx_dropped += channel->sq->stats.over_max_sge_num; stats->tx_dropped += channel->sq->stats.csum_err; stats->tx_dropped += channel->sq->stats.vlan_err; diff --git a/drivers/net/ub/unic/unic_trace.h b/drivers/net/ub/unic/unic_trace.h index b6cec3303175..5b6bc7f19bd2 100644 --- a/drivers/net/ub/unic/unic_trace.h +++ b/drivers/net/ub/unic/unic_trace.h @@ -69,15 +69,15 @@ DEFINE_EVENT(unic_cqe_template, unic_rx_cqe, TP_ARGS(netdev, cq, pi, ci, cqe_mask)); TRACE_EVENT(unic_tx_sqe, - TP_PROTO(struct unic_sq *sq, u16 sqebb_num, u16 sqebb_mask, - bool doorbell), - TP_ARGS(sq, sqebb_num, sqebb_mask, doorbell), + TP_PROTO(struct unic_sq *sq, u16 sqebb_num, u16 sqebb_mask), + TP_ARGS(sq, sqebb_num, sqebb_mask), TP_STRUCT__entry(__field(u32, jfcn) __field(u16, pi) __field(u16, ci) + 
__field(u16, buff_pi) + __field(u16, buff_ci) __field(u16, sqebb_num) - __field(bool, doorbell) __field(u16, real_pi) __array(u32, sqebb, trace_sqebb_num(trace_tx_max_sqebb_num)) __string(devname, sq->netdev->name) @@ -86,8 +86,9 @@ TRACE_EVENT(unic_tx_sqe, TP_fast_assign(__entry->jfcn = sq->cq->jfcn; __entry->pi = sq->pi; __entry->ci = sq->ci; + __entry->buff_pi = sq->tx_buff->pi; + __entry->buff_ci = sq->tx_buff->ci; __entry->sqebb_num = sqebb_num; - __entry->doorbell = doorbell; __entry->real_pi = sq->pi & sqebb_mask; if (__entry->real_pi + sqebb_num - 1 > sqebb_mask) { memcpy(__entry->sqebb, &sq->sqebb[__entry->real_pi], @@ -105,9 +106,9 @@ TRACE_EVENT(unic_tx_sqe, __assign_str(devname, sq->netdev->name); ), - TP_printk("%s-%u-%u/%u-%d sqe: %s", + TP_printk("%s-%u-%u/%u-%u/%u sqe: %s", __get_str(devname), __entry->jfcn, __entry->pi, - __entry->ci, __entry->doorbell, + __entry->ci, __entry->buff_pi, __entry->buff_ci, __print_array(__entry->sqebb, trace_sqebb_num(__entry->sqebb_num), sizeof(u32)) diff --git a/drivers/net/ub/unic/unic_tx.c b/drivers/net/ub/unic/unic_tx.c index f0e97f1c3184..5a7969b8c49a 100644 --- a/drivers/net/ub/unic/unic_tx.c +++ b/drivers/net/ub/unic/unic_tx.c @@ -47,6 +47,7 @@ #define UNIC_SKB_FRAGS_START_INDEX 1 #define UNIC_SQEBB_POINT_REVERSE (USHRT_MAX + 1) #define UNIC_RCV_SEND_MAX_DIFF_VAL 512U +#define UNIC_TX_PAGES_NUM (1024 * 1024 * 2 / PAGE_SIZE) #define unic_sqebb_cnt(sge_num) DIV_ROUND_UP((sge_num), 4) @@ -62,53 +63,27 @@ static inline u16 unic_get_sqe_mask(struct unic_sq *sq) return unic_get_sqe_depth(sq) - 1; } -static u16 unic_get_spare_sqebb(struct unic_sq *sq) +static u16 unic_get_spare_sqebb_num(struct unic_sq *sq) { u16 sqe_depth = unic_get_sqe_depth(sq); u32 pi = sq->pi; + u32 ci = sq->ci; - if (unlikely(sq->pi < sq->ci)) + if (unlikely(pi < ci)) pi += UNIC_SQEBB_POINT_REVERSE; - return sqe_depth - (pi - sq->ci); + return sqe_depth - (pi - ci); } -static void unic_unmap_skb_buffer(struct unic_sq *sq, u32 info_index, - u8 skb_map_num, u16 sqebb_mask) +static inline u16 unic_get_spare_page_num(struct unic_tx_buff *tx_buff) { - struct unic_dev *unic_dev = netdev_priv(sq->netdev); - struct unic_sqe_ctrl_section *ctrl; - struct unic_sqe_sge_section *sge; - struct device *dev; - u8 i; - - ctrl = (struct unic_sqe_ctrl_section *)&sq->sqebb[info_index & sqebb_mask]; - sge = (struct unic_sqe_sge_section *)(ctrl + 1); - dev = &unic_dev->comdev.adev->dev; - dma_unmap_single(dev->parent, sge->address, sge->length, DMA_TO_DEVICE); - - for (i = UNIC_SKB_FRAGS_START_INDEX; i < skb_map_num; i++) { - if (!((i + UNIC_SQE_CTRL_SECTION_NUM) % UNIC_SQEBB_MAX_SGE_NUM)) { - info_index++; - sge = (struct unic_sqe_sge_section *) - &sq->sqebb[info_index & sqebb_mask]; - } else { - sge++; - } - - dma_unmap_page(dev->parent, sge->address, sge->length, DMA_TO_DEVICE); - } -} + u32 pi = tx_buff->pi; + u32 ci = tx_buff->ci; -static void unic_consume_skb(struct sk_buff *skb, u32 info_index, - struct unic_sq *sq, int budget, u16 sqebb_mask) -{ - struct unic_sqe_ctrl_section *ctrl; - - ctrl = (struct unic_sqe_ctrl_section *)&sq->sqebb[info_index & sqebb_mask]; - unic_unmap_skb_buffer(sq, info_index, ctrl->sge_num, sqebb_mask); + if (unlikely(pi < ci)) + pi += UNIC_SQEBB_POINT_REVERSE; - napi_consume_skb(skb, budget); + return tx_buff->num - (pi - ci); } static bool unic_check_hw_ci_valid(u16 hw_ci, u16 sq_ci, struct unic_sq *sq) @@ -147,6 +122,7 @@ static void unic_reclaim_single_sqe_space(struct unic_sq *sq, u16 sqebb_mask, } *sq_ci += sqebb_cnt; + sq->tx_buff->ci += sge_num; } 
static void unic_flush_unused_sqe(struct unic_sq *sq, u16 sqebb_mask, @@ -156,7 +132,7 @@ static void unic_flush_unused_sqe(struct unic_sq *sq, u16 sqebb_mask, while (*sq_ci != sq->pi) { skb = sq->skbs[*sq_ci & sqebb_mask]; - unic_consume_skb(skb, *sq_ci, sq, 0, sqebb_mask); + napi_consume_skb(skb, 0); unic_reclaim_single_sqe_space(sq, sqebb_mask, sq_ci); } } @@ -217,11 +193,11 @@ static bool unic_reclaim_sq_space(struct unic_sq *sq, int budget, u64 *bytes, skb = sq->skbs[sq_ci & sqebb_mask]; if (!clear && likely(!unic_check_hw_ci_late(sq, sq_ci))) { - *bytes += skb->len; + *bytes += skb_headlen(skb); (*packets)++; } - unic_consume_skb(skb, sq_ci, sq, budget, sqebb_mask); + napi_consume_skb(skb, budget); unic_reclaim_single_sqe_space(sq, sqebb_mask, &sq_ci); reclaimed = true; @@ -236,9 +212,8 @@ static bool unic_reclaim_sq_space(struct unic_sq *sq, int budget, u64 *bytes, void unic_poll_tx(struct unic_sq *sq, int budget) { -#define UNIC_MIN_SPARE_SQEBB DIV_ROUND_UP(UNIC_SQE_CTRL_SECTION_NUM + \ - UNIC_SQE_MAX_SGE_NUM, \ - UNIC_SQEBB_MAX_SGE_NUM) +#define UNIC_MIN_SPARE_SQEBB 1 +#define UNIC_MIN_SPARE_PAGE 2 struct net_device *netdev = sq->netdev; struct netdev_queue *dev_queue; @@ -258,7 +233,8 @@ void unic_poll_tx(struct unic_sq *sq, int budget) netdev_tx_completed_queue(dev_queue, packets, bytes); if (unlikely(netif_carrier_ok(netdev) && - unic_get_spare_sqebb(sq) >= UNIC_MIN_SPARE_SQEBB)) { + unic_get_spare_sqebb_num(sq) >= UNIC_MIN_SPARE_SQEBB && + unic_get_spare_page_num(sq->tx_buff) >= UNIC_MIN_SPARE_PAGE)) { unic_dev = netdev_priv(netdev); if (netif_tx_queue_stopped(dev_queue) && !test_bit(UNIC_STATE_DOWN, &unic_dev->state)) { @@ -631,10 +607,80 @@ static void unic_destroy_multi_jfs(struct unic_dev *unic_dev, u32 num, unic_destroy_multi_jfs_context(adev, num, start_idx); } -static int unic_sq_alloc_resource(struct auxiliary_device *adev, - struct unic_sq *sq, u32 sqebb_depth) +static void unic_sq_free_tx_buff_resources(struct auxiliary_device *adev, + struct unic_tx_buff *tx_buff) +{ + struct unic_tx_page_info *page_info; + u16 i; + + for (i = 0; i < tx_buff->num; i++) { + page_info = &tx_buff->page_info[i]; + dma_unmap_page(adev->dev.parent, page_info->sge_dma_addr, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(page_info->p); + } + + devm_kfree(&adev->dev, tx_buff->page_info); +} + +static int unic_sq_alloc_tx_buff_resources(struct auxiliary_device *adev, + struct unic_tx_buff *tx_buff, + u16 page_num) +{ + struct unic_tx_page_info *page_info; + int ret; + u16 i; + + tx_buff->page_info = devm_kcalloc(&adev->dev, page_num, + sizeof(struct unic_tx_page_info), + GFP_KERNEL); + if (!tx_buff->page_info) { + dev_err(adev->dev.parent, "failed to alloc unic tx page info.\n"); + return -ENOMEM; + } + + for (i = 0; i < page_num; i++) { + page_info = &tx_buff->page_info[i]; + page_info->p = alloc_page(GFP_KERNEL); + if (!page_info->p) { + dev_err(adev->dev.parent, + "failed to alloc %uth tx page.\n", i); + ret = -ENOMEM; + goto err_alloc_pages; + } + + page_info->sge_dma_addr = dma_map_page(adev->dev.parent, + page_info->p, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(adev->dev.parent, + page_info->sge_dma_addr))) { + dev_err(adev->dev.parent, + "failed to dma map %uth tx page.\n", i); + __free_page(page_info->p); + ret = -ENOMEM; + goto err_alloc_pages; + } + + page_info->sge_va_addr = page_address(page_info->p); + tx_buff->num++; + } + + return 0; + +err_alloc_pages: + unic_sq_free_tx_buff_resources(adev, tx_buff); + + return ret; +} + +static int 
unic_sq_alloc_resource(struct unic_dev *unic_dev, struct unic_sq *sq) { + u16 page_num = UNIC_TX_PAGES_NUM / unic_dev->channels.num; + struct auxiliary_device *adev = unic_dev->comdev.adev; + u16 sqebb_depth = unic_dev->channels.sqebb_depth; u32 size = sqebb_depth * sizeof(struct unic_sqebb); + int ret; sq->skbs = devm_kcalloc(&adev->dev, sqebb_depth, sizeof(struct sk_buff *), GFP_KERNEL); @@ -647,24 +693,45 @@ static int unic_sq_alloc_resource(struct auxiliary_device *adev, &sq->sqebb_dma_addr, GFP_KERNEL); if (!sq->sqebb) { dev_err(adev->dev.parent, "failed to dma alloc unic sqebb.\n"); + ret = -ENOMEM; goto err_alloc_unic_sqebb; } + sq->tx_buff = devm_kzalloc(&adev->dev, sizeof(struct unic_tx_buff), + GFP_KERNEL); + if (!sq->tx_buff) { + ret = -ENOMEM; + goto err_alloc_tx_buff; + } + + ret = unic_sq_alloc_tx_buff_resources(adev, sq->tx_buff, page_num); + if (ret) { + dev_err(adev->dev.parent, "failed to alloc sqebb resources.\n"); + goto err_alloc_tx_buff_resources; + } + return 0; +err_alloc_tx_buff_resources: + devm_kfree(&adev->dev, sq->tx_buff); +err_alloc_tx_buff: + dma_free_coherent(adev->dev.parent, size, sq->sqebb, sq->sqebb_dma_addr); err_alloc_unic_sqebb: devm_kfree(&adev->dev, sq->skbs); - return -ENOMEM; + return ret; } -static void unic_sq_free_resource(struct auxiliary_device *adev, - struct unic_sq *sq, u32 sqebb_depth) +static void unic_sq_free_resource(struct unic_dev *unic_dev, struct unic_sq *sq) { + struct auxiliary_device *adev = unic_dev->comdev.adev; + u16 sqebb_depth = unic_dev->channels.sqebb_depth; u32 size = sqebb_depth * sizeof(struct unic_sqebb); u16 sqebb_mask = unic_get_sqe_mask(sq); unic_flush_unused_sqe(sq, sqebb_mask, &sq->ci); + unic_sq_free_tx_buff_resources(adev, sq->tx_buff); + devm_kfree(&adev->dev, sq->tx_buff); dma_free_coherent(adev->dev.parent, size, sq->sqebb, sq->sqebb_dma_addr); devm_kfree(&adev->dev, sq->skbs); } @@ -677,7 +744,6 @@ int unic_create_sq(struct unic_dev *unic_dev, u32 idx) struct unic_channel *channel = &unic_dev->channels.c[idx]; struct unic_sq *sq; u32 jfs_start_idx; - u16 sqebb_depth; u32 offset; int ret; @@ -697,8 +763,7 @@ int unic_create_sq(struct unic_dev *unic_dev, u32 idx) return -ENOMEM; } - sqebb_depth = unic_dev->channels.sqebb_depth; - ret = unic_sq_alloc_resource(adev, sq, sqebb_depth); + ret = unic_sq_alloc_resource(unic_dev, sq); if (ret) goto err_alloc_res; @@ -724,7 +789,7 @@ int unic_create_sq(struct unic_dev *unic_dev, u32 idx) return 0; err_mbx_create_jfs_context: - unic_sq_free_resource(adev, sq, sqebb_depth); + unic_sq_free_resource(unic_dev, sq); err_alloc_res: devm_kfree(&adev->dev, sq); return ret; @@ -738,8 +803,7 @@ static void unic_free_multi_sq_resource(struct unic_dev *unic_dev, u32 num) for (i = 0; i < num; i++) { channel = &unic_dev->channels.c[i]; - unic_sq_free_resource(adev, channel->sq, - unic_dev->channels.sqebb_depth); + unic_sq_free_resource(unic_dev, channel->sq); devm_kfree(&adev->dev, channel->sq); channel->sq = NULL; } @@ -769,8 +833,7 @@ static int unic_apply_ub_pkt(struct unic_dev *unic_dev, struct unic_sq *sq, struct ublhdr *ubl = (struct ublhdr *)skb->data; if (unic_dev_ubl_supported(unic_dev)) { - if (unlikely(ubl->cfg == UB_NOIP_CFG_TYPE && - unic_is_port_down(unic_dev))) { + if (unlikely(ubl->cfg == UB_NOIP_CFG_TYPE)) { unic_sq_stats_inc(sq, cfg5_drop_cnt); return -EIO; } @@ -786,47 +849,31 @@ static int unic_apply_ub_pkt(struct unic_dev *unic_dev, struct unic_sq *sq, } #endif -static u32 unic_tx_need_sge_num(struct sk_buff *skb) -{ - u32 sge_num = 1, i, size; - skb_frag_t 
*frag; - - if (likely(skb->len <= UNIC_SGE_MAX_PAYLOAD && !skb_has_frag_list(skb))) - return skb_shinfo(skb)->nr_frags + 1U; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - size = skb_frag_size(frag); - if (!size) - continue; - - sge_num++; - } - - return sge_num; -} - static int unic_maybe_stop_tx(struct net_device *netdev, struct unic_sq *sq, u32 sge_num) { - u32 sqebb_num = unic_sqebb_cnt(sge_num + UNIC_SQE_CTRL_SECTION_NUM); - struct unic_dev *unic_dev = netdev_priv(netdev); - u16 spare_num = unic_get_spare_sqebb(sq); + u16 spare_page, spare_sqebb, need_sqebb; + struct unic_dev *unic_dev; if (unlikely(sge_num > UNIC_SQE_MAX_SGE_NUM)) { unic_sq_stats_inc(sq, over_max_sge_num); return -ENOMEM; } - if (likely(spare_num >= sqebb_num)) + spare_sqebb = unic_get_spare_sqebb_num(sq); + spare_page = unic_get_spare_page_num(sq->tx_buff); + need_sqebb = unic_sqebb_cnt(sge_num + UNIC_SQE_CTRL_SECTION_NUM); + if (likely(spare_sqebb >= need_sqebb && spare_page >= sge_num)) return 0; netif_stop_subqueue(netdev, sq->queue_index); smp_mb(); /* Memory barrier before checking sqebb space */ + unic_dev = netdev_priv(netdev); if (unlikely(netif_carrier_ok(netdev) && !test_bit(UNIC_STATE_DOWN, &unic_dev->state) && - unic_get_spare_sqebb(sq) > sqebb_num)) { + unic_get_spare_sqebb_num(sq) >= need_sqebb && + unic_get_spare_page_num(sq->tx_buff) >= sge_num)) { netif_start_subqueue(netdev, sq->queue_index); return 0; } @@ -1044,24 +1091,23 @@ static int unic_handle_ctrl_section(struct sk_buff *skb, struct unic_sq *sq, return ret; } -static inline void unic_fill_sge_section(struct unic_sqe_sge_section *sge, u64 addr, - u32 length, u32 tid) +static inline void unic_fill_sge_section(struct unic_sqe_sge_section *sge, + u64 addr, u32 length) { sge->length = length; - sge->token_id = tid; sge->address = addr; } static inline struct unic_sqe_sge_section * unic_get_next_sge(u8 *sec_num, struct unic_sqe_sge_section *sge, - struct unic_sq *sq, u16 sqebb_mask) + struct unic_sq *sq, u16 sqebb_mask, u16 *sq_pi) { (*sec_num)++; if (*sec_num >= UNIC_SQEBB_MAX_SGE_NUM) { - sq->pi++; + (*sq_pi)++; *sec_num = 0; sge = (struct unic_sqe_sge_section *) - &sq->sqebb[sq->pi & sqebb_mask]; + &sq->sqebb[*sq_pi & sqebb_mask]; } else { sge++; } @@ -1069,104 +1115,36 @@ unic_get_next_sge(u8 *sec_num, struct unic_sqe_sge_section *sge, return sge; } -static int unic_map_skb_frags(struct unic_sq *sq, struct sk_buff *skb, - u8 *skb_map_num, u8 *sec_num, - struct unic_sqe_sge_section *sge) -{ - struct net_device *netdev = sq->netdev; - u16 sqebb_mask = unic_get_sqe_mask(sq); - struct unic_dev *unic_dev; - struct device *dev; - skb_frag_t *frag; - dma_addr_t dma; - u32 size, i; - - unic_dev = netdev_priv(netdev); - dev = &unic_dev->comdev.adev->dev; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - size = skb_frag_size(frag); - if (!size) - continue; - - dma = skb_frag_dma_map(dev->parent, frag, 0, size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, dma))) { - unic_sq_stats_inc(sq, map_err); - return -ENOMEM; - } - - sge = unic_get_next_sge(sec_num, sge, sq, sqebb_mask); - unic_fill_sge_section(sge, (u64)dma, size, unic_dev->tid); - - (*skb_map_num)++; - } - - return 0; -} - -static void unic_clear_sge_section(struct unic_sq *sq, u16 idx, u8 skb_map_num, - u16 sqebb_mask) -{ - struct unic_sqe_ctrl_section *ctrl; - u16 index = idx & sqebb_mask; - u8 sqebb_cnt; - - ctrl = (struct unic_sqe_ctrl_section *)&sq->sqebb[index]; - sqebb_cnt = 
unic_sqebb_cnt(skb_map_num + UNIC_SQE_CTRL_SECTION_NUM); - if (unlikely(index + sqebb_cnt - 1 > sqebb_mask)) { - memset(ctrl, 0, (sqebb_mask - index + 1) * sizeof(struct unic_sqebb)); - memset(&sq->sqebb[0], 0, - (index + sqebb_cnt - 1 - sqebb_mask) * sizeof(struct unic_sqebb)); - } else { - memset(ctrl, 0, sqebb_cnt * sizeof(struct unic_sqebb)); - } -} - -static int unic_handle_sge_section(struct unic_sq *sq, struct sk_buff *skb, - struct unic_sqe_sge_section *sge, - u16 sqebb_mask) +static void unic_handle_sge_section(struct unic_sq *sq, struct sk_buff *skb, + struct unic_sqe_sge_section *sge, + u16 sqebb_mask, u8 sge_num) { + struct unic_tx_buff *tx_buff = sq->tx_buff; u8 sec_num = UNIC_SQE_CTRL_SECTION_NUM; - struct net_device *netdev = sq->netdev; - struct unic_dev *unic_dev; - struct device *dev; - u8 skb_map_num = 0; - u16 pi = sq->pi; - dma_addr_t dma; - u32 size; - - size = skb_headlen(skb); - unic_dev = netdev_priv(netdev); - dev = &unic_dev->comdev.adev->dev; - - dma = dma_map_single(dev->parent, skb->data, size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, dma))) { - unic_sq_stats_inc(sq, map_err); - return -ENOMEM; + struct unic_tx_page_info *page_info; + u32 size = skb_headlen(skb); + u16 sq_pi = sq->pi; + u32 i, len; + + for (i = 0; i < sge_num; i++) { + page_info = &tx_buff->page_info[tx_buff->pi % tx_buff->num]; + len = i < sge_num - 1 ? PAGE_SIZE : size; + memcpy(page_info->sge_va_addr, skb->data + i * PAGE_SIZE, len); + unic_fill_sge_section(sge, (u64)page_info->sge_dma_addr, len); + + sge = unic_get_next_sge(&sec_num, sge, sq, sqebb_mask, &sq_pi); + size -= PAGE_SIZE; + tx_buff->pi++; } - - unic_fill_sge_section(sge, (u64)dma, size, unic_dev->tid); - skb_map_num++; - - if (unlikely(unic_map_skb_frags(sq, skb, &skb_map_num, &sec_num, sge))) - goto dma_map_err; - - sq->pi = pi; - return 0; - -dma_map_err: - unic_unmap_skb_buffer(sq, pi, skb_map_num, sqebb_mask); - unic_clear_sge_section(sq, pi, skb_map_num, sqebb_mask); - sq->pi = pi; - return -ENOMEM; } -static int unic_handle_sqe(struct sk_buff *skb, struct unic_sq *sq, u8 sge_num) +static int unic_handle_sqe(struct sk_buff *skb, struct unic_sq *sq) { + u8 sge_num = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); u16 sqebb_mask = unic_get_sqe_mask(sq); struct unic_sqe_ctrl_section *ctrl; struct unic_sqe_sge_section *sge; + u16 sqebb_num; int ret; ctrl = (struct unic_sqe_ctrl_section *)&sq->sqebb[sq->pi & sqebb_mask]; @@ -1175,25 +1153,20 @@ static int unic_handle_sqe(struct sk_buff *skb, struct unic_sq *sq, u8 sge_num) return ret; sge = (struct unic_sqe_sge_section *)(ctrl + 1); - ret = unic_handle_sge_section(sq, skb, sge, sqebb_mask); - if (unlikely(ret)) - return ret; + unic_handle_sge_section(sq, skb, sge, sqebb_mask, sge_num); sq->skbs[sq->pi & sqebb_mask] = skb; + sqebb_num = unic_sqebb_cnt(sge_num + UNIC_SQE_CTRL_SECTION_NUM); - return ret; + trace_unic_tx_sqe(sq, sqebb_num, sqebb_mask); + sq->pi += sqebb_num; + return 0; } -static void unic_tx_doorbell(struct unic_sq *sq, u16 sge_num, bool doorbell) +static void unic_tx_doorbell(struct unic_sq *sq, bool doorbell) { - u16 sqebb_num = unic_sqebb_cnt(sge_num + UNIC_SQE_CTRL_SECTION_NUM); - u16 sqebb_mask = unic_get_sqe_mask(sq); struct unic_jfs_db jfs_db = {0}; - trace_unic_tx_sqe(sq, sqebb_num, sqebb_mask, doorbell); - - sq->pi += sqebb_num; - if (!doorbell) { unic_sq_stats_inc(sq, more); return; @@ -1211,7 +1184,7 @@ static void unic_tx_compensate_doorbell(struct unic_sq *sq) if (sq->last_pi == sq->pi) return; - trace_unic_tx_sqe(sq, 0, unic_get_sqe_mask(sq), 
true); + trace_unic_tx_sqe(sq, 0, unic_get_sqe_mask(sq)); jfs_db.pi = cpu_to_le16(sq->pi); writel(*(u32 *)&jfs_db, sq->db_addr); sq->last_pi = sq->pi; @@ -1237,7 +1210,7 @@ netdev_tx_t unic_start_xmit(struct sk_buff *skb, struct net_device *netdev) /* Prefetch the data used later */ prefetch(skb->data); - sge_num = unic_tx_need_sge_num(skb); + sge_num = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); ret = unic_maybe_stop_tx(netdev, sq, sge_num); if (unlikely(ret < 0)) { if (ret == -EBUSY) { @@ -1250,19 +1223,19 @@ netdev_tx_t unic_start_xmit(struct sk_buff *skb, struct net_device *netdev) } #ifdef CONFIG_UB_UNIC_UBL - if (unlikely(unic_apply_ub_pkt(unic_dev, sq, skb))) + if (unic_apply_ub_pkt(unic_dev, sq, skb)) goto xmit_drop_pkt; #endif - ret = unic_handle_sqe(skb, sq, sge_num); + ret = unic_handle_sqe(skb, sq); if (unlikely(ret)) goto xmit_drop_pkt; dev_queue = netdev_get_tx_queue(netdev, sq->queue_index); - doorbell = __netdev_tx_sent_queue(dev_queue, skb->len, + doorbell = __netdev_tx_sent_queue(dev_queue, skb_headlen(skb), netdev_xmit_more()); - unic_tx_doorbell(sq, sge_num, doorbell); + unic_tx_doorbell(sq, doorbell); return NETDEV_TX_OK; @@ -1315,7 +1288,6 @@ void unic_dump_sq_stats(struct net_device *netdev, u32 queue_idx) "pad_err: %llu\n" "bytes: %llu\n" "packets: %llu\n" - "map_err: %llu\n" "busy: %llu\n" "more: %llu\n" "restart_queue: %llu\n" @@ -1327,10 +1299,10 @@ void unic_dump_sq_stats(struct net_device *netdev, u32 queue_idx) "cfg5_drop_cnt: %llu\n", queue_idx, queue->state, sq->pi, sq->ci, sq_stats->pad_err, sq_stats->bytes, sq_stats->packets, - sq_stats->map_err, sq_stats->busy, sq_stats->more, - sq_stats->restart_queue, sq_stats->over_max_sge_num, - sq_stats->csum_err, sq_stats->ci_mismatch, sq_stats->fd_cnt, - sq_stats->drop_cnt, sq_stats->cfg5_drop_cnt); + sq_stats->busy, sq_stats->more, sq_stats->restart_queue, + sq_stats->over_max_sge_num, sq_stats->csum_err, + sq_stats->ci_mismatch, sq_stats->fd_cnt, sq_stats->drop_cnt, + sq_stats->cfg5_drop_cnt); } void unic_mask_key_words(void *sqebb) diff --git a/drivers/net/ub/unic/unic_tx.h b/drivers/net/ub/unic/unic_tx.h index ff74c6bfda2f..b472065af4d8 100644 --- a/drivers/net/ub/unic/unic_tx.h +++ b/drivers/net/ub/unic/unic_tx.h @@ -273,6 +273,19 @@ struct unic_jfs_db { __le16 rsv; }; +struct unic_tx_page_info { + struct page *p; + void *sge_va_addr; + dma_addr_t sge_dma_addr; +}; + +struct unic_tx_buff { + struct unic_tx_page_info *page_info; + u16 num; + u16 pi; + u16 ci; +}; + struct unic_sq { void __iomem *db_addr; struct device *parent_dev; @@ -290,6 +303,7 @@ struct unic_sq { bool check_ci_late; u16 queue_index; struct net_device *netdev; + struct unic_tx_buff *tx_buff; }; void unic_poll_tx(struct unic_sq *sq, int budget); -- Gitee From 968498b3af32dd08c2eb621f10b59aba1a3caf42 Mon Sep 17 00:00:00 2001 From: Yixi Shen Date: Mon, 24 Nov 2025 22:32:24 +0800 Subject: [PATCH 176/243] net: unic: Fix UE deactivate message cleaning commit 73d01da0b977a2fc83848751f28bbe07df7a7934 openEuler In the process of stopping the data stream, the control plane sends cleanup messages for EID and DIP. However, since there is currently no unic ue, the messages cannot be properly processed after being sent, leading to error messages being printed. Modify the message handling mechanism so that the response message only specifies the opcode and does not include data. 
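In code terms, the fix makes the forwarded response attach a payload pointer only when the request actually carried data; condensed from the ubase_dev.c hunk below (field names as in the driver):

	/* build the ctrlq response: header fields always, payload only if present */
	msg.is_resp = cmd->is_resp;
	msg.resp_seq = cmd->seq;
	msg.in = cmd->in_size ? (u8 *)head + UBASE_CTRLQ_HDR_LEN : NULL;
	msg.in_size = cmd->in_size;
	msg.out = NULL;		/* a pure response carries no out buffer */
	msg.out_size = 0;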
Signed-off-by: Yixi Shen Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_dev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index d48582bf90a9..f3f9ab6c7e96 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -461,7 +461,7 @@ static int ubase_handle_ue2ue_ctrlq_req(struct ubase_dev *udev, msg.is_resp = cmd->is_resp; msg.is_async = cmd->is_async; msg.resp_seq = cmd->seq; - msg.in = (u8 *)head + UBASE_CTRLQ_HDR_LEN; + msg.in = cmd->in_size ? (u8 *)head + UBASE_CTRLQ_HDR_LEN : NULL; msg.in_size = cmd->in_size; msg.out = NULL; msg.out_size = 0; @@ -474,8 +474,9 @@ static int ubase_handle_ue2ue_ctrlq_req(struct ubase_dev *udev, ret = __ubase_ctrlq_send(udev, &msg, &ue_info); if (ret) - ubase_err(udev, "failed to send opc(0x%x) ctrlq, ret = %d.\n", - head->opcode, ret); + ubase_err(udev, + "failed to send ue's ctrlq msg, ser_type = 0x%x, opc = 0x%x, ret = %d.\n", + head->service_type, head->opcode, ret); return ret; } -- Gitee From 9e8c848afba3b2451c50f76a0c2376b171004aca Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Tue, 25 Nov 2025 17:14:05 +0800 Subject: [PATCH 177/243] ub: ubase: adapt to response message structure change for ctrlq sl query commit 904e31bebd6176b88f632b263c307a479ab6662c openEuler In rack server mode, the tp sl and ctp sl available to urma are changed to be directly obtained through ctrlq query, and the corresponding tp vl and ctp vl don't need to be obtained by querying vl aging configuration. This patch adapts to the response message structure change for ctrlq sl query. Signed-off-by: Zhang Lei Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.h | 6 +- drivers/ub/ubase/ubase_hw.h | 5 - drivers/ub/ubase/ubase_qos_hw.c | 183 ++++++++------------------------ 3 files changed, 49 insertions(+), 145 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index c3d5f55db87d..881f3342393a 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ b/drivers/ub/ubase/ubase_ctrlq.h @@ -69,8 +69,10 @@ struct ubase_ctrlq_query_vl_req { struct ubase_ctrlq_query_sl_resp { __le16 unic_sl_bitmap; - __le16 udma_sl_bitmap; - u8 rsv[16]; + u8 rsv0[2]; + __le16 udma_tp_sl_bitmap; + __le16 udma_ctp_sl_bitmap; + u8 rsv1[12]; }; struct ubase_ctrlq_query_sl_req { diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index 12bc101cd04f..2c7ed2264aab 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -158,11 +158,6 @@ struct ubase_ctx_buf_map { u16 mb_cmd; }; -struct ubase_query_vl_ageing_cmd { - __le16 vl_ageing_en; - u8 rsv[22]; -}; - struct ubase_query_ctp_vl_offset_cmd { u8 ctp_vl_offset; u8 rsv[23]; diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index bb1a9db877d8..b67b7f7e5c91 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -568,33 +568,6 @@ int ubase_get_priqos_info(struct device *dev, struct ubase_sl_priqos *sl_priqos) } EXPORT_SYMBOL(ubase_get_priqos_info); -static int ubase_query_vl_ageing(struct ubase_dev *udev, u16 *vl_ageing_en) -{ - struct ubase_query_vl_ageing_cmd resp = {0}; - struct ubase_query_vl_ageing_cmd req = {0}; - struct ubase_cmd_buf in, out; - int ret; - - ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_VL_AGEING_EN, true, - sizeof(req), &req); - ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_VL_AGEING_EN, false, - sizeof(resp), 
&resp); - - ret = __ubase_cmd_send_inout(udev, &in, &out); - if (ret) { - ubase_err(udev, - "failed to query vl ageing configuration, ret = %d.\n", - ret); - return ret; - } - - *vl_ageing_en = le16_to_cpu(resp.vl_ageing_en); - - ubase_dbg(udev, "vl_ageing_en bitmap:%u.\n", *vl_ageing_en); - - return 0; -} - static int ubase_query_ctp_vl_offset(struct ubase_dev *udev, u8 *ctp_vl_offset) { struct ubase_query_ctp_vl_offset_cmd resp = {0}; @@ -629,38 +602,6 @@ static inline void ubase_parse_udma_req_vl_uboe(struct ubase_dev *udev) memcpy(qos->tp_req_vl, qos->vl, qos->vl_num); } -static int ubase_parse_udma_req_vl_ub(struct ubase_dev *udev) -{ - struct ubase_adev_qos *qos = &udev->qos; - unsigned long vl_ageing_en; - int ret; - u8 i; - - ret = ubase_query_vl_ageing(udev, (u16 *)&vl_ageing_en); - if (ret) - return ret; - - for (i = 0; i < qos->vl_num; i++) { - if (test_bit(qos->vl[i], &vl_ageing_en)) - qos->tp_req_vl[qos->tp_vl_num++] = - qos->vl[i]; - else - qos->ctp_req_vl[qos->ctp_vl_num++] = - qos->vl[i]; - } - - return 0; -} - -static int ubase_parse_udma_req_vl(struct ubase_dev *udev) -{ - if (ubase_dev_ubl_supported(udev)) - return ubase_parse_udma_req_vl_ub(udev); - - ubase_parse_udma_req_vl_uboe(udev); - return 0; -} - static int ubase_check_ctp_resp_vl(struct ubase_dev *udev, u8 ctp_vl_offset) { struct ubase_adev_qos *qos = &udev->qos; @@ -713,57 +654,6 @@ static bool ubase_get_vl_sl(struct ubase_dev *udev, u8 vl, u8 *sl, u8 *sl_num) return sl_exist; } -static int ubase_parse_udma_tp_sl(struct ubase_dev *udev) -{ - struct ubase_adev_qos *qos = &udev->qos; - bool exist; - u8 i; - - for (i = 0; i < qos->tp_vl_num; i++) { - exist = ubase_get_vl_sl(udev, qos->tp_req_vl[i], - qos->tp_sl, &qos->tp_sl_num); - if (!exist) { - ubase_err(udev, - "udma tp req vl(%u) doesn't have a corresponding sl.\n", - qos->tp_req_vl[i]); - return -EINVAL; - } - } - - return 0; -} - -static int ubase_parse_udma_ctp_sl(struct ubase_dev *udev) -{ - struct ubase_adev_qos *qos = &udev->qos; - bool exist; - u8 i; - - for (i = 0; i < qos->ctp_vl_num; i++) { - exist = ubase_get_vl_sl(udev, qos->ctp_req_vl[i], - qos->ctp_sl, &qos->ctp_sl_num); - if (!exist) { - ubase_err(udev, - "udma ctp req vl(%u) doesn't have a corresponding sl.\n", - qos->ctp_req_vl[i]); - return -EINVAL; - } - } - - return 0; -} - -static int ubase_parse_udma_sl(struct ubase_dev *udev) -{ - int ret; - - ret = ubase_parse_udma_tp_sl(udev); - if (ret) - return ret; - - return ubase_parse_udma_ctp_sl(udev); -} - static void ubase_gather_udma_req_resp_vl(struct ubase_dev *udev, u8 *req_vl, u8 req_vl_num, u8 resp_vl_off) @@ -861,17 +751,28 @@ static int ubase_parse_rack_nic_vl(struct ubase_dev *udev) udev->qos.nic_vl, &udev->qos.nic_vl_num); } -static int ubase_parse_rack_udma_req_vl(struct ubase_dev *udev) +static int ubase_parse_rack_udma_req_vl_ub(struct ubase_dev *udev) { + struct ubase_adev_qos *qos = &udev->qos; int ret; - ret = ubase_assign_urma_vl(udev, udev->qos.sl, - udev->qos.sl_num, udev->qos.vl, - &udev->qos.vl_num); + ret = ubase_assign_urma_vl(udev, qos->tp_sl, qos->tp_sl_num, + qos->tp_req_vl, &qos->tp_vl_num); if (ret) return ret; - return ubase_parse_udma_req_vl(udev); + return ubase_assign_urma_vl(udev, qos->ctp_sl, qos->ctp_sl_num, + qos->ctp_req_vl, &qos->ctp_vl_num); +} + +static int ubase_parse_rack_udma_req_vl(struct ubase_dev *udev) +{ + if (ubase_dev_ubl_supported(udev)) + return ubase_parse_rack_udma_req_vl_ub(udev); + + ubase_parse_udma_req_vl_uboe(udev); + + return 0; } static int ubase_parse_rack_udma_vl(struct ubase_dev 
*udev) @@ -932,17 +833,6 @@ static inline int ubase_parse_rack_nic_sl_vl(struct ubase_dev *udev) return ubase_parse_rack_nic_vl(udev); } -static inline int ubase_parse_rack_udma_sl_vl(struct ubase_dev *udev) -{ - int ret; - - ret = ubase_parse_rack_udma_vl(udev); - if (ret) - return ret; - - return ubase_parse_udma_sl(udev); -} - static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) { int ret; @@ -952,7 +842,7 @@ static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) return ret; if (ubase_dev_udma_supported(udev)) { - ret = ubase_parse_rack_udma_sl_vl(udev); + ret = ubase_parse_rack_udma_vl(udev); if (ret) return ret; } @@ -1004,6 +894,17 @@ static void ubase_parse_max_vl(struct ubase_dev *udev) udma_caps->rc_max_cnt *= (max_vl + 1); } +static int ubase_get_nic_max_vl(struct ubase_dev *udev) +{ + struct ubase_adev_qos *qos = &udev->qos; + u8 i, nic_max_vl = 0; + + for (i = 0; i < qos->nic_vl_num; i++) + nic_max_vl = max(qos->nic_vl[i], nic_max_vl); + + return nic_max_vl; +} + static int ubase_parse_sl_vl(struct ubase_dev *udev) { int ret; @@ -1020,7 +921,7 @@ static int ubase_parse_sl_vl(struct ubase_dev *udev) ubase_init_udma_dscp_vl(udev); if (ubase_utp_supported(udev) && ubase_dev_urma_supported(udev)) - udev->caps.unic_caps.tpg.max_cnt = udev->qos.nic_vl_num; + udev->caps.unic_caps.tpg.max_cnt = ubase_get_nic_max_vl(udev) + 1; ubase_parse_max_vl(udev); @@ -1071,13 +972,13 @@ static int ubase_ctrlq_query_vl(struct ubase_dev *udev) static int ubase_ctrlq_query_sl(struct ubase_dev *udev) { + unsigned long unic_sl_bitmap, udma_tp_sl_bitmap, udma_ctp_sl_bitmap; + u8 unic_sl_cnt = 0, udma_tp_sl_cnt = 0, udma_ctp_sl_cnt = 0; struct ubase_ctrlq_query_sl_resp resp = {0}; struct ubase_ctrlq_query_sl_req req = {0}; - u8 i, unic_sl_cnt = 0, udma_sl_cnt = 0; struct ubase_ctrlq_msg msg = {0}; - unsigned long unic_sl_bitmap; - unsigned long udma_sl_bitmap; int ret; + u8 i; msg.service_ver = UBASE_CTRLQ_SER_VER_01; msg.service_type = UBASE_CTRLQ_SER_TYPE_QOS; @@ -1097,13 +998,16 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) } unic_sl_bitmap = le16_to_cpu(resp.unic_sl_bitmap); - udma_sl_bitmap = le16_to_cpu(resp.udma_sl_bitmap); + udma_tp_sl_bitmap = le16_to_cpu(resp.udma_tp_sl_bitmap); + udma_ctp_sl_bitmap = le16_to_cpu(resp.udma_ctp_sl_bitmap); for (i = 0; i < UBASE_MAX_SL_NUM; i++) { if (test_bit(i, &unic_sl_bitmap)) udev->qos.nic_sl[unic_sl_cnt++] = i; - if (test_bit(i, &udma_sl_bitmap)) - udev->qos.sl[udma_sl_cnt++] = i; + if (test_bit(i, &udma_tp_sl_bitmap)) + udev->qos.tp_sl[udma_tp_sl_cnt++] = i; + if (test_bit(i, &udma_ctp_sl_bitmap)) + udev->qos.ctp_sl[udma_ctp_sl_cnt++] = i; } if (!unic_sl_cnt) { @@ -1111,16 +1015,19 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) return -EIO; } - if (ubase_dev_udma_supported(udev) && !udma_sl_cnt) { + if (ubase_dev_udma_supported(udev) && ++ !(udma_tp_sl_cnt + udma_ctp_sl_cnt)) { ubase_err(udev, "udma doesn't have any sl.\n"); return -EIO; } udev->qos.nic_sl_num = unic_sl_cnt; - udev->qos.sl_num = udma_sl_cnt; + udev->qos.tp_sl_num = udma_tp_sl_cnt; + udev->qos.ctp_sl_num = udma_ctp_sl_cnt; - ubase_dbg(udev, "ctrlq query unic_sl_bitmap = 0x%lx, udma_sl_bitmap = 0x%lx.\n", - unic_sl_bitmap, udma_sl_bitmap); + ubase_dbg(udev, + "ctrlq query unic_sl_bitmap = 0x%lx, udma_tp_sl_bitmap = 0x%lx, udma_ctp_sl_bitmap = 0x%lx.\n", + unic_sl_bitmap, udma_tp_sl_bitmap, udma_ctp_sl_bitmap); return 0; } -- Gitee From c15ddf732140b4dfda089d5a0c9f1519e45c9f4f Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Tue, 25 Nov 2025 17:19:31 
+0800 Subject: [PATCH 178/243] ub: ubase: change cmdq/mbx timeout val & fix spelling errors commit b2e112c56ae703aac37f659cf2910a9358707647 openEuler This commit changes the cmdq/mbx timeout values. The current timeout intervals for cmdq/mbx command execution failure are debugging values from a slower environment, and commands are executed more frequently in the new environment than in the old one. Therefore, the timeout intervals are shortened (for example, UBASE_CMDQ_MBX_TX_TIMEOUT drops from 30000 to 50) so that teardown does not take a long time in abnormal scenarios. At the same time, fix some spelling errors. Signed-off-by: Xiaobo Zhang Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 34 ++++++++++++------------ drivers/ub/ubase/ubase_cmd.h | 4 +-- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 9476cf17fa01..3b66f2558845 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -36,24 +36,24 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data) static void ubase_dbg_dump_caps_bits(struct seq_file *s, struct ubase_dev *udev) { #define CAP_FMT(name) "\tsupport_" #name ": %d\n" -#define PTRINT_CAP(name, func) seq_printf(s, CAP_FMT(name), func(udev)) - - PTRINT_CAP(ub_link, ubase_dev_ubl_supported); - PTRINT_CAP(ta_extdb_buffer_config, ubase_dev_ta_extdb_buf_supported); - PTRINT_CAP(ta_timer_buffer_config, ubase_dev_ta_timer_buf_supported); - PTRINT_CAP(err_handle, ubase_dev_err_handle_supported); - PTRINT_CAP(ctrlq, ubase_dev_ctrlq_supported); - PTRINT_CAP(eth_mac, ubase_dev_eth_mac_supported); - PTRINT_CAP(mac_stats, ubase_dev_mac_stats_supported); - PTRINT_CAP(prealloc, __ubase_dev_prealloc_supported); - PTRINT_CAP(udma, ubase_dev_udma_supported); - PTRINT_CAP(unic, ubase_dev_unic_supported); - PTRINT_CAP(uvb, ubase_dev_uvb_supported); - PTRINT_CAP(ip_over_urma, ubase_ip_over_urma_supported); +#define PRTINT_CAP(name, func) seq_printf(s, CAP_FMT(name), func(udev)) + + PRTINT_CAP(ub_link, ubase_dev_ubl_supported); + PRTINT_CAP(ta_extdb_buffer_config, ubase_dev_ta_extdb_buf_supported); + PRTINT_CAP(ta_timer_buffer_config, ubase_dev_ta_timer_buf_supported); + PRTINT_CAP(err_handle, ubase_dev_err_handle_supported); + PRTINT_CAP(ctrlq, ubase_dev_ctrlq_supported); + PRTINT_CAP(eth_mac, ubase_dev_eth_mac_supported); + PRTINT_CAP(mac_stats, ubase_dev_mac_stats_supported); + PRTINT_CAP(prealloc, __ubase_dev_prealloc_supported); + PRTINT_CAP(udma, ubase_dev_udma_supported); + PRTINT_CAP(unic, ubase_dev_unic_supported); + PRTINT_CAP(uvb, ubase_dev_uvb_supported); + PRTINT_CAP(ip_over_urma, ubase_ip_over_urma_supported); if (ubase_ip_over_urma_supported(udev)) - PTRINT_CAP(ip_over_urma_utp, ubase_ip_over_urma_utp_supported); - PTRINT_CAP(activate_proxy, ubase_activate_proxy_supported); - PTRINT_CAP(utp, ubase_utp_supported); + PRTINT_CAP(ip_over_urma_utp, ubase_ip_over_urma_utp_supported); + PRTINT_CAP(activate_proxy, ubase_activate_proxy_supported); + PRTINT_CAP(utp, ubase_utp_supported); } static void ubase_dbg_dump_caps_info(struct seq_file *s, struct ubase_dev *udev) diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index a99422b59dc4..ae34dccfdd01 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -13,8 +13,8 @@ #define UBASE_CMDQ_DESC_NUM_S 3 #define UBASE_CMDQ_DESC_NUM 1024 -#define UBASE_CMDQ_TX_TIMEOUT 3000000
-#define UBASE_CMDQ_MBX_TX_TIMEOUT 30000 +#define UBASE_CMDQ_TX_TIMEOUT 300000 +#define UBASE_CMDQ_MBX_TX_TIMEOUT 50 #define UBASE_CMDQ_CLEAR_WAIT_TIME 200 #define UBASE_CMDQ_WAIT_TIME 10 -- Gitee From e929f55a27fcabbf1dad53888c104eef2a3370ba Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Wed, 10 Dec 2025 10:09:59 +0800 Subject: [PATCH 179/243] ub:ubus: hi_msg_sync_wait polls cq first commit eea5fe7853d982ab3bef4665e731f58f3747a116 openEuler In cluster mode, ub_get_ue_by_entity_idx() should not check the entity idx. Currently hi_msg_sync_wait() first checks whether the message has timed out and only then polls the cq; this can be misjudged when the CPU does not get dispatched for a long time even though the message's cq entry has already arrived. Change the order using do {} while() so that the driver polls the cq at least once. Fixes: 0f9e2dbe888d ("ub:ubus: Support for enabling and disabling ue") Signed-off-by: Yahui Liu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/ubus_entity.c | 3 --- drivers/ub/ubus/vendor/hisilicon/msg.c | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 105c6f396b44..3dbc196d4da4 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -1057,9 +1057,6 @@ static struct ub_entity *ub_get_ue_by_entity_idx(struct ub_entity *pue, u32 enti { struct ub_entity *ue; - if (ub_check_ue_para(pue, entity_idx)) - return NULL; - list_for_each_entry(ue, &pue->ue_list, node) { if (ue->entity_idx == entity_idx) return ue; diff --git a/drivers/ub/ubus/vendor/hisilicon/msg.c b/drivers/ub/ubus/vendor/hisilicon/msg.c index 5c4e672aa55e..6d4e9d422c49 100644 --- a/drivers/ub/ubus/vendor/hisilicon/msg.c +++ b/drivers/ub/ubus/vendor/hisilicon/msg.c @@ -249,14 +249,14 @@ static int hi_msg_sync_wait(struct hi_message_device *hmd, int task_type, unsigned long flags; int idx; - while (!time_after64(get_jiffies_64(), end_time)) { + do { idx = hi_msg_cq_poll(hmc, task_type, msn); if (idx >= 0) return idx; if (flag) usleep_range(SLEEP_MIN_US, SLEEP_MAX_US); - } + } while (!time_after64(get_jiffies_64(), end_time)); timeout_msg = kzalloc(TIMEOUT_MSG_INFO_SZ, GFP_ATOMIC); if (!timeout_msg) -- Gitee From 139a64d2bab4bfc13ccb05e759eee3863c75f0a3 Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 14:30:33 +0800 Subject: [PATCH 180/243] ub:ubus: Move the decoder's queue operations to hisi-ubus commit 0b1aae123308a1c20d76f275a5015d8a6b2754bf openEuler Move the decoder's queue operations to hisi-ubus. Fixes: abc591c50df5 ("ub:ubus: Supports decoder event processing") Signed-off-by: Yuhao Xiang Signed-off-by: zhao-lichang <943677312@qq.com> --- drivers/ub/ubus/decoder.c | 421 ++---------------- drivers/ub/ubus/decoder.h | 2 - drivers/ub/ubus/ubus_controller.h | 5 +- drivers/ub/ubus/vendor/hisilicon/controller.c | 4 +- .../ub/ubus/vendor/hisilicon/hisi-decoder.c | 394 +++++++++++++++- .../ub/ubus/vendor/hisilicon/hisi-decoder.h | 8 +- 6 files changed, 428 insertions(+), 406 deletions(-) diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index 56d20dbbf0aa..c32df61a1d1d 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "ubus.h" @@ -25,52 +24,28 @@ #define CMDQ_SIZE_USE_MASK GENMASK(11, 8) #define CMDQ_SIZE_USE_OFFSET 8 #define CMDQ_ENABLE 0x1 -#define CMD_ENTRY_SIZE 16 #define EVTQ_SIZE_USE_MASK GENMASK(11, 8) #define EVTQ_SIZE_USE_OFFSET 8 #define EVTQ_ENABLE 0x1 -#define EVT_ENTRY_SIZE 16 -#define
DECODER_QUEUE_TIMEOUT_US 1000000 /* 1s */ - -static void ub_decoder_uninit_queue(struct ub_decoder *decoder) +static void ub_decoder_uninit_queue(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - iounmap(decoder->cmdq.qbase); - iounmap(decoder->evtq.qbase); + if (ubc->ops->uninit_decoder_queue) + ubc->ops->uninit_decoder_queue(decoder); + else + ub_err(ubc->uent, "ub bus controller can't uninit decoder queue\n"); } static int ub_decoder_init_queue(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { - struct ub_entity *uent = ubc->uent; + if (ubc->ops->init_decoder_queue && ubc->ops->uninit_decoder_queue) + return ubc->ops->init_decoder_queue(decoder); - if (ubc->ops->register_decoder_base_addr) { - ubc->ops->register_decoder_base_addr(ubc, &decoder->cmdq.base, - &decoder->evtq.base); - } else { - ub_err(uent, - "ub_bus_controller_ops does not provide register_decoder_base_addr func, exit\n"); - return -EINVAL; - } - - if (decoder->cmdq.qs == 0 || decoder->evtq.qs == 0) { - ub_err(uent, "decoder cmdq or evtq qs is 0\n"); - return -EINVAL; - } - - decoder->cmdq.qbase = ioremap(decoder->cmdq.base, - (1 << decoder->cmdq.qs) * CMD_ENTRY_SIZE); - if (!decoder->cmdq.qbase) - return -ENOMEM; - - decoder->evtq.qbase = ioremap(decoder->evtq.base, - (1 << decoder->evtq.qs) * EVT_ENTRY_SIZE); - if (!decoder->evtq.qbase) { - iounmap(decoder->cmdq.qbase); - return -ENOMEM; - } - return 0; + ub_err(ubc->uent, "ub bus controller can't init decoder queue\n"); + return -EINVAL; } static u32 set_mmio_base_reg(struct ub_decoder *decoder) @@ -242,11 +217,11 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) static int ub_decoder_create_page_table(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { - if (ubc->ops->create_decoder_table) + if (ubc->ops->create_decoder_table && ubc->ops->free_decoder_table) return ubc->ops->create_decoder_table(decoder); ub_err(decoder->uent, "ub bus controller can't create decoder table\n"); - return -EPERM; + return -EINVAL; } static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, @@ -308,6 +283,11 @@ static int ub_get_decoder_cap(struct ub_decoder *decoder) decoder->cmdq.qs = (val & CMDQ_SIZE_MASK) >> CMDQ_SIZE_OFFSET; decoder->evtq.qs = (val & EVTQ_SIZE_MASK) >> EVTQ_SIZE_OFFSET; + if (decoder->cmdq.qs == 0 || decoder->evtq.qs == 0) { + ub_err(uent, "decoder cmdq or evtq qs is 0\n"); + return -EINVAL; + } + size = decoder->mmio_end_addr - decoder->mmio_base_addr + 1; if (size > mmio_size[decoder->mmio_size_sup]) decoder->mmio_end_addr = decoder->mmio_base_addr + @@ -367,7 +347,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) release_page_table: ub_decoder_free_page_table(ubc, decoder); release_queue: - ub_decoder_uninit_queue(decoder); + ub_decoder_uninit_queue(ubc, decoder); release_decoder: kfree(decoder); return ret; @@ -416,374 +396,29 @@ static void ub_remove_decoder(struct ub_bus_controller *ubc) ub_decoder_free_page_table(ubc, decoder); - ub_decoder_uninit_queue(decoder); + ub_decoder_uninit_queue(ubc, decoder); kfree(decoder); ubc->decoder = NULL; } -struct sync_entry { - u64 op : 8; - u64 reserve0 : 4; - u64 cm : 2; - u64 ntf_sh : 2; - u64 ntf_attr : 4; - u64 reserve1 : 12; - u64 notify_data : 32; - u64 reserve2 : 2; - u64 notify_addr : 50; - u64 reserve3 : 12; -}; - -struct tlbi_all_entry { - u32 op : 8; - u32 reserve0 : 24; - u32 reserve1; - u32 reserve2; - u32 reserve3; -}; - -struct tlbi_partial_entry { - u32 op : 8; - u32 reserve0 : 24; - u32 tlbi_addr_base : 28; - u32 reserve1 : 4; - 
u32 tlbi_addr_limt : 28; - u32 reserve2 : 4; - u32 reserve3; -}; - -#define TLBI_ADDR_MASK GENMASK_ULL(43, 20) -#define TLBI_ADDR_OFFSET 20 -#define CMDQ_ENT_DWORDS 2 - -#define NTF_SH_NSH 0b00 -#define NTF_SH_OSH 0b10 -#define NTF_SH_ISH 0b11 -#define NTF_ATTR_IR_NC 0b00 -#define NTF_ATTR_IR_WBRA 0b01 -#define NTF_ATTR_IR_WT 0b10 -#define NTF_ATTR_IR_WB 0b11 -#define NTF_ATTR_OR_NC 0b0000 -#define NTF_ATTR_OR_WBRA 0b0100 -#define NTF_ATTR_OR_WT 0b1000 -#define NTF_ATTR_OR_WB 0b1100 - -#define Q_IDX(qs, p) ((p) & ((1 << (qs)) - 1)) -#define Q_WRP(qs, p) ((p) & (1 << (qs))) -#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) - -enum NOTIFY_TYPE { - DISABLE_NOTIFY = 0, - ENABLE_NOTIFY = 1, -}; - -static bool queue_has_space(struct ub_decoder_queue *q, u32 n) -{ - u32 space, prod, cons; - - prod = Q_IDX(q->qs, q->prod.cmdq_wr_idx); - cons = Q_IDX(q->qs, q->cons.cmdq_rd_idx); - - if (Q_WRP(q->qs, q->prod.cmdq_wr_idx) == - Q_WRP(q->qs, q->cons.cmdq_rd_idx)) - space = (1 << q->qs) - (prod - cons); - else - space = cons - prod; - - return space >= n; -} - -static u32 queue_inc_prod_n(struct ub_decoder_queue *q, u32 n) -{ - u32 prod = (Q_WRP(q->qs, q->prod.cmdq_wr_idx) | - Q_IDX(q->qs, q->prod.cmdq_wr_idx)) + n; - return Q_WRP(q->qs, prod) | Q_IDX(q->qs, prod); -} - -#define CMD_0_OP GENMASK_ULL(7, 0) -#define CMD_0_ADDR_BASE GENMASK_ULL(59, 32) -#define CMD_1_ADDR_LIMT GENMASK_ULL(27, 0) - -static void decoder_cmdq_issue_cmd(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct tlbi_partial_entry entry = {}; - u64 cmd[CMDQ_ENT_DWORDS] = {}; - void *pi; - int i; - - entry.op = op; - entry.tlbi_addr_base = (addr & TLBI_ADDR_MASK) >> TLBI_ADDR_OFFSET; - entry.tlbi_addr_limt = ((addr + size - 1U) & TLBI_ADDR_MASK) >> - TLBI_ADDR_OFFSET; - - cmd[0] |= FIELD_PREP(CMD_0_OP, entry.op); - cmd[0] |= FIELD_PREP(CMD_0_ADDR_BASE, entry.tlbi_addr_base); - cmd[1] |= FIELD_PREP(CMD_1_ADDR_LIMT, entry.tlbi_addr_limt); - - pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct tlbi_partial_entry); - - for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); -} - -#define NTF_DMA_ADDR_OFFSERT 2 -#define SYNC_0_OP GENMASK_ULL(7, 0) -#define SYNC_0_CM GENMASK_ULL(13, 12) -#define SYNC_0_NTF_ISH GENMASK_ULL(15, 14) -#define SYNC_0_NTF_ATTR GENMASK_ULL(19, 16) -#define SYNC_0_NTF_DATA GENMASK_ULL(63, 32) -#define SYNC_1_NTF_ADDR GENMASK_ULL(51, 2) -#define SYNC_NTF_DATA 0xffffffff - -static void decoder_cmdq_issue_sync(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - u64 cmd[CMDQ_ENT_DWORDS] = {}; - struct sync_entry entry = {}; - phys_addr_t sync_dma; - void __iomem *pi; - int i; - - entry.op = SYNC; - entry.cm = ENABLE_NOTIFY; - sync_dma = cmdq->base + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct sync_entry); - entry.ntf_sh = NTF_SH_NSH; - entry.ntf_attr = NTF_ATTR_IR_NC | NTF_ATTR_OR_NC; - entry.notify_data = SYNC_NTF_DATA; - entry.notify_addr = sync_dma >> NTF_DMA_ADDR_OFFSERT; - - cmd[0] |= FIELD_PREP(SYNC_0_OP, entry.op); - cmd[0] |= FIELD_PREP(SYNC_0_CM, entry.cm); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_ISH, entry.ntf_sh); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_ATTR, entry.ntf_attr); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_DATA, entry.notify_data); - cmd[1] |= FIELD_PREP(SYNC_1_NTF_ADDR, entry.notify_addr); - - pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct sync_entry); - 
for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - decoder->notify = pi; - cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); -} - -static void decoder_cmdq_update_prod(struct ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - struct queue_idx q; - int ret; - - ret = ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &q.val); - if (ret) - ub_err(uent, "update pi, read decoder cmdq prod failed\n"); - - decoder->cmdq.prod.cmdq_err_resp = q.cmdq_err_resp; - ret = ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, - decoder->cmdq.prod.val); - if (ret) - ub_err(uent, "update pi, write decoder cmdq prod failed\n"); -} - -static int wait_for_cmdq_free(struct ub_decoder *decoder, u32 n) -{ - ktime_t timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct ub_entity *uent = decoder->uent; - int ret; - - while (true) { - ret = ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, - &(cmdq->cons.val)); - if (ret) - return ret; - - if (queue_has_space(cmdq, n + 1)) - return 0; - - if (ktime_compare(ktime_get(), timeout) > 0) { - ub_err(uent, "decoder cmdq wait free entry timeout\n"); - return -ETIMEDOUT; - } - cpu_relax(); - } -} - -static int wait_for_cmdq_notify(struct ub_decoder *decoder) -{ - ktime_t timeout; - u32 val; - - timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); - while (true) { - val = readl(decoder->notify); - if (val == SYNC_NTF_DATA) - return 0; - - if (ktime_compare(ktime_get(), timeout) > 0) { - ub_err(decoder->uent, "decoder cmdq wait notify timeout\n"); - return -ETIMEDOUT; - } - cpu_relax(); - } -} - -int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op) -{ - int ret; - - ret = wait_for_cmdq_free(decoder, 1); - if (ret) - return ret; - - decoder_cmdq_issue_cmd(decoder, addr, size, op); - decoder_cmdq_issue_sync(decoder); - decoder_cmdq_update_prod(decoder); - - ret = wait_for_cmdq_notify(decoder); - return ret; -} -EXPORT_SYMBOL_GPL(ub_decoder_cmd_request); - -static bool queue_empty(struct ub_decoder_queue *q) -{ - return (Q_IDX(q->qs, q->prod.eventq_wr_idx) == - Q_IDX(q->qs, q->cons.eventq_rd_idx)) && - (Q_WRP(q->qs, q->prod.eventq_wr_idx) == - Q_WRP(q->qs, q->cons.eventq_rd_idx)); -} - -static void queue_inc_cons(struct ub_decoder_queue *q) -{ - u32 cons = (Q_WRP(q->qs, q->cons.eventq_rd_idx) | - Q_IDX(q->qs, q->cons.eventq_rd_idx)) + 1; - q->cons.eventq_rd_idx = Q_WRP(q->qs, cons) | Q_IDX(q->qs, cons); -} - -enum event_op_type { - RESERVED = 0x00, - EVENT_ADDR_OUT_OF_RANGE = 0x01, - EVENT_ILLEGAL_CMD = 0x02, -}; - -#define EVTQ_0_ID GENMASK_ULL(7, 0) -#define EVTQ_0_ADDR GENMASK_ULL(59, 32) -#define EVTQ_0_CMD_OPCODE GENMASK_ULL(39, 32) -#define EVTQ_ENT_DWORDS 2 -#define MAX_REASON_NUM 3 - -static const char * const cmd_err_reason[MAX_REASON_NUM] = { - "no error", - "illegal command", - "abort error(read command with 2bit ecc)" -}; - -static void fix_err_cmd(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct ub_entity *uent = decoder->uent; - u64 cmd[CMDQ_ENT_DWORDS] = {}; - struct queue_idx prod, cons; - void *pi; - int i; - - if (ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, &cons.val)) { - ub_err(uent, "decoder fix error cmd, read ci failed\n"); - return; - } - if (ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &prod.val)) { - ub_err(uent, "decoder fix error cmd, read pi failed\n"); - return; - } - - cmd[0] |= FIELD_PREP(CMD_0_OP, TLBI_ALL); - pi = cmdq->qbase + Q_IDX(cmdq->qs, 
cons.cmdq_rd_idx) * - sizeof(struct tlbi_partial_entry); - - for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - if (cons.cmdq_err_reason >= MAX_REASON_NUM) - ub_err(uent, "cmdq err reason is invalid, reason=%u\n", - cons.cmdq_err_reason); - else - ub_err(uent, "cmdq err reason is %s\n", cmd_err_reason[cons.cmdq_err_reason]); - - prod.cmdq_err_resp = cons.cmdq_err; - - if (ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, prod.val)) - ub_err(uent, "decoder fix error cmd, write pi err resp failed\n"); -} - -static void handle_evt(struct ub_decoder *decoder, u64 *evt) -{ - struct ub_entity *uent = decoder->uent; - - switch (FIELD_GET(EVTQ_0_ID, evt[0])) { - case EVENT_ADDR_OUT_OF_RANGE: - ub_err(uent, "decoder event, input addr out of range, addr=%#.7x00000\n", - (u32)FIELD_GET(EVTQ_0_ADDR, evt[0])); - break; - case EVENT_ILLEGAL_CMD: - ub_err(uent, "decoder event, illegal cmd, cmd_opcode=%#x\n", - (u32)FIELD_GET(EVTQ_0_CMD_OPCODE, evt[0])); - fix_err_cmd(decoder); - break; - default: - ub_err(uent, "invalid event opcode, opcode=%#x\n", - (u32)FIELD_GET(EVTQ_0_ID, evt[0])); - } -} - -static void decoder_event_deal(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *evtq = &decoder->evtq; - struct ub_entity *uent = decoder->uent; - u64 evt[EVTQ_ENT_DWORDS]; - void *ci; - int i; - - if (ub_cfg_read_dword(uent, DECODER_EVENTQ_PROD, &(evtq->prod.val))) { - ub_err(uent, "decoder handle event, read eventq pi failed\n"); - return; - } - - while (!queue_empty(evtq)) { - ci = evtq->qbase + Q_IDX(evtq->qs, evtq->cons.eventq_rd_idx) * - EVT_ENTRY_SIZE; - - for (i = 0; i < EVTQ_ENT_DWORDS; i++) - evt[i] = readq(ci + i * sizeof(u64)); - - handle_evt(decoder, evt); - queue_inc_cons(evtq); - - if (ub_cfg_write_dword(uent, DECODER_EVENTQ_CONS, - evtq->cons.val)) - ub_err(uent, "decoder handle event, write eventq ci failed\n"); - } -} static irqreturn_t decoder_event_deal_handle(int irq, void *data) { struct ub_entity *uent = (struct ub_entity *)data; struct ub_decoder *decoder = uent->ubc->decoder; - if (!decoder) { ub_err(uent, "decoder does not exist\n"); - return IRQ_HANDLED; + return IRQ_NONE; + } + + if (!uent->ubc->ops->decoder_event_deal) { + ub_err(uent, "decoder event deal does not exist\n"); + return IRQ_NONE; } - decoder_event_deal(decoder); + uent->ubc->ops->decoder_event_deal(decoder); return IRQ_HANDLED; } @@ -885,8 +520,8 @@ int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) } ubc = decoder->uent->ubc; - if (!ubc->ops->decoder_map) { - pr_err("decoder_map ops not exist\n"); + if (!ubc->ops->decoder_map && !ubc->ops->decoder_unmap) { + pr_err("decoder_map or decoder_unmap ops not exist\n"); return -EINVAL; } diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 6667d07e9219..48ffe9102a46 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -107,8 +107,6 @@ void ub_decoder_init(struct ub_entity *uent); void ub_decoder_uninit(struct ub_entity *uent); void ub_init_decoder_usi(struct ub_entity *uent); void ub_uninit_decoder_usi(struct ub_entity *uent); -int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op); int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); #endif /* __DECODER_H__ */ diff --git a/drivers/ub/ubus/ubus_controller.h b/drivers/ub/ubus/ubus_controller.h index 04eb4a3d7648..7ef19e3eaa73 100644 --- a/drivers/ub/ubus/ubus_controller.h 
+++ b/drivers/ub/ubus/ubus_controller.h @@ -18,8 +18,8 @@ struct ub_bus_controller_ops { void (*mem_decoder_remove)(struct ub_bus_controller *ubc); void (*register_ubmem_irq)(struct ub_bus_controller *ubc); void (*unregister_ubmem_irq)(struct ub_bus_controller *ubc); - void (*register_decoder_base_addr)(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue); + int (*init_decoder_queue)(struct ub_decoder *decoder); + void (*uninit_decoder_queue)(struct ub_decoder *decoder); int (*entity_enable)(struct ub_entity *uent, u8 enable); int (*create_decoder_table)(struct ub_decoder *decoder); void (*free_decoder_table)(struct ub_decoder *decoder); @@ -27,6 +27,7 @@ struct ub_bus_controller_ops { struct decoder_map_info *info); int (*decoder_unmap)(struct ub_decoder *decoder, phys_addr_t addr, u64 size); + void (*decoder_event_deal)(struct ub_decoder *decoder); KABI_RESERVE(1) KABI_RESERVE(2) diff --git a/drivers/ub/ubus/vendor/hisilicon/controller.c b/drivers/ub/ubus/vendor/hisilicon/controller.c index 6c9c8e320479..b9a4e6dc02d0 100644 --- a/drivers/ub/ubus/vendor/hisilicon/controller.c +++ b/drivers/ub/ubus/vendor/hisilicon/controller.c @@ -22,12 +22,14 @@ static struct ub_bus_controller_ops hi_ubc_ops = { .mem_decoder_remove = hi_mem_decoder_remove, .register_ubmem_irq = hi_register_ubmem_irq, .unregister_ubmem_irq = hi_unregister_ubmem_irq, - .register_decoder_base_addr = hi_register_decoder_base_addr, + .init_decoder_queue = hi_init_decoder_queue, + .uninit_decoder_queue = hi_uninit_decoder_queue, .entity_enable = hi_send_entity_enable_msg, .create_decoder_table = hi_create_decoder_table, .free_decoder_table = hi_free_decoder_table, .decoder_map = hi_decoder_map, .decoder_unmap = hi_decoder_unmap, + .decoder_event_deal = hi_decoder_event_deal, }; static void ub_bus_controller_debugfs_init(struct ub_bus_controller *ubc) diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c index ac1fa0498ffc..00f958696b66 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) "ubus hisi decoder: " fmt #include +#include #include #include "../../ubus.h" #include "hisi-ubus.h" @@ -136,6 +137,10 @@ struct range_table_entry { DECODER_SUB_PAGE_TABLE_MASK) >> \ DECODER_SUB_PAGE_TABLE_LOC) +#define DECODER_QUEUE_TIMEOUT_US 1000000 /* 1s */ +#define CMD_ENTRY_SIZE 16 +#define EVT_ENTRY_SIZE 16 + static void fill_page_entry(struct page_entry *page, struct decoder_map_info *info, u64 offset) { @@ -616,13 +621,37 @@ static void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_b } } -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue) +int hi_init_decoder_queue(struct ub_decoder *decoder) { - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; + struct hi_ubc_private_data *data; + struct ub_bus_controller *ubc; + + if (!decoder) + return -EINVAL; + + ubc = decoder->uent->ubc; + data = (struct hi_ubc_private_data *)ubc->data; + decoder->cmdq.base = data->io_decoder_cmdq; + decoder->evtq.base = data->io_decoder_evtq; + + decoder->cmdq.qbase = ioremap(decoder->cmdq.base, + (1 << decoder->cmdq.qs) * CMD_ENTRY_SIZE); + if (!decoder->cmdq.qbase) + return -ENOMEM; + + decoder->evtq.qbase = ioremap(decoder->evtq.base, + (1 << decoder->evtq.qs) * EVT_ENTRY_SIZE); + if (!decoder->evtq.qbase) { + iounmap(decoder->cmdq.qbase); + return -ENOMEM; + } + return 0; +} - *cmd_queue = 
data->io_decoder_cmdq; - *event_queue = data->io_decoder_evtq; +void hi_uninit_decoder_queue(struct ub_decoder *decoder) +{ + iounmap(decoder->cmdq.qbase); + iounmap(decoder->evtq.qbase); } int hi_create_decoder_table(struct ub_decoder *decoder) @@ -697,7 +726,7 @@ int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) ret = handle_table(decoder, &info, false); if (ret) return ret; - return ub_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); + return hi_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); } int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) @@ -712,3 +741,356 @@ int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) return handle_table(decoder, info, true); } + +struct sync_entry { + u64 op : 8; + u64 reserve0 : 4; + u64 cm : 2; + u64 ntf_sh : 2; + u64 ntf_attr : 4; + u64 reserve1 : 12; + u64 notify_data : 32; + u64 reserve2 : 2; + u64 notify_addr : 50; + u64 reserve3 : 12; +}; + +struct tlbi_all_entry { + u32 op : 8; + u32 reserve0 : 24; + u32 reserve1; + u32 reserve2; + u32 reserve3; +}; + +struct tlbi_partial_entry { + u32 op : 8; + u32 reserve0 : 24; + u32 tlbi_addr_base : 28; + u32 reserve1 : 4; + u32 tlbi_addr_limt : 28; + u32 reserve2 : 4; + u32 reserve3; +}; + +#define TLBI_ADDR_MASK GENMASK_ULL(43, 20) +#define TLBI_ADDR_OFFSET 20 +#define CMDQ_ENT_DWORDS 2 + +#define NTF_SH_NSH 0b00 +#define NTF_SH_OSH 0b10 +#define NTF_SH_ISH 0b11 + +#define NTF_ATTR_IR_NC 0b00 +#define NTF_ATTR_IR_WBRA 0b01 +#define NTF_ATTR_IR_WT 0b10 +#define NTF_ATTR_IR_WB 0b11 +#define NTF_ATTR_OR_NC 0b0000 +#define NTF_ATTR_OR_WBRA 0b0100 +#define NTF_ATTR_OR_WT 0b1000 +#define NTF_ATTR_OR_WB 0b1100 + +#define Q_IDX(qs, p) ((p) & ((1 << (qs)) - 1)) +#define Q_WRP(qs, p) ((p) & (1 << (qs))) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) + +enum NOTIFY_TYPE { + DISABLE_NOTIFY = 0, + ENABLE_NOTIFY = 1, +}; + +static bool queue_has_space(struct ub_decoder_queue *q, u32 n) +{ + u32 space, prod, cons; + + prod = Q_IDX(q->qs, q->prod.cmdq_wr_idx); + cons = Q_IDX(q->qs, q->cons.cmdq_rd_idx); + + if (Q_WRP(q->qs, q->prod.cmdq_wr_idx) == + Q_WRP(q->qs, q->cons.cmdq_rd_idx)) + space = (1 << q->qs) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + +static u32 queue_inc_prod_n(struct ub_decoder_queue *q, u32 n) +{ + u32 prod = (Q_WRP(q->qs, q->prod.cmdq_wr_idx) | + Q_IDX(q->qs, q->prod.cmdq_wr_idx)) + n; + return Q_WRP(q->qs, prod) | Q_IDX(q->qs, prod); +} + +#define CMD_0_OP GENMASK_ULL(7, 0) +#define CMD_0_ADDR_BASE GENMASK_ULL(59, 32) +#define CMD_1_ADDR_LIMT GENMASK_ULL(27, 0) + +static void decoder_cmdq_issue_cmd(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct tlbi_partial_entry entry = {}; + u64 cmd[CMDQ_ENT_DWORDS] = {}; + void *pi; + int i; + + entry.op = op; + entry.tlbi_addr_base = (addr & TLBI_ADDR_MASK) >> TLBI_ADDR_OFFSET; + entry.tlbi_addr_limt = ((addr + size - 1U) & TLBI_ADDR_MASK) >> + TLBI_ADDR_OFFSET; + + cmd[0] |= FIELD_PREP(CMD_0_OP, entry.op); + cmd[0] |= FIELD_PREP(CMD_0_ADDR_BASE, entry.tlbi_addr_base); + cmd[1] |= FIELD_PREP(CMD_1_ADDR_LIMT, entry.tlbi_addr_limt); + + pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct tlbi_partial_entry); + + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); +} + +#define NTF_DMA_ADDR_OFFSERT 2 +#define SYNC_0_OP GENMASK_ULL(7, 0) +#define 
SYNC_0_CM GENMASK_ULL(13, 12) +#define SYNC_0_NTF_ISH GENMASK_ULL(15, 14) +#define SYNC_0_NTF_ATTR GENMASK_ULL(19, 16) +#define SYNC_0_NTF_DATA GENMASK_ULL(63, 32) +#define SYNC_1_NTF_ADDR GENMASK_ULL(51, 2) +#define SYNC_NTF_DATA 0xffffffff + +static void decoder_cmdq_issue_sync(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + u64 cmd[CMDQ_ENT_DWORDS] = {}; + struct sync_entry entry = {}; + phys_addr_t sync_dma; + void __iomem *pi; + int i; + + entry.op = SYNC; + entry.cm = ENABLE_NOTIFY; + sync_dma = cmdq->base + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct sync_entry); + entry.ntf_sh = NTF_SH_NSH; + entry.ntf_attr = NTF_ATTR_IR_NC | NTF_ATTR_OR_NC; + entry.notify_data = SYNC_NTF_DATA; + entry.notify_addr = sync_dma >> NTF_DMA_ADDR_OFFSERT; + + cmd[0] |= FIELD_PREP(SYNC_0_OP, entry.op); + cmd[0] |= FIELD_PREP(SYNC_0_CM, entry.cm); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_ISH, entry.ntf_sh); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_ATTR, entry.ntf_attr); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_DATA, entry.notify_data); + cmd[1] |= FIELD_PREP(SYNC_1_NTF_ADDR, entry.notify_addr); + + pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct sync_entry); + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + decoder->notify = pi; + cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); +} + +static void decoder_cmdq_update_prod(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + struct queue_idx q; + int ret; + + ret = ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &q.val); + if (ret) + ub_err(uent, "update pi, read decoder cmdq prod fail\n"); + + decoder->cmdq.prod.cmdq_err_resp = q.cmdq_err_resp; + ret = ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, + decoder->cmdq.prod.val); + if (ret) + ub_err(uent, "update pi, write decoder cmdq prod fail\n"); +} + +static int wait_for_cmdq_free(struct ub_decoder *decoder, u32 n) +{ + ktime_t timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct ub_entity *uent = decoder->uent; + int ret; + + while (true) { + ret = ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, + &(cmdq->cons.val)); + if (ret) + return ret; + + if (queue_has_space(cmdq, n + 1)) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ub_err(uent, "decoder cmdq wait free entry timeout\n"); + return -ETIMEDOUT; + } + cpu_relax(); + } +} + +static int wait_for_cmdq_notify(struct ub_decoder *decoder) +{ + ktime_t timeout; + u32 val; + + timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); + while (true) { + val = readl(decoder->notify); + if (val == SYNC_NTF_DATA) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ub_err(decoder->uent, "decoder cmdq wait notify timeout\n"); + return -ETIMEDOUT; + } + cpu_relax(); + } +} + +int hi_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op) +{ + int ret; + + ret = wait_for_cmdq_free(decoder, 1); + if (ret) + return ret; + + decoder_cmdq_issue_cmd(decoder, addr, size, op); + decoder_cmdq_issue_sync(decoder); + decoder_cmdq_update_prod(decoder); + + ret = wait_for_cmdq_notify(decoder); + return ret; +} +#ifdef UBUS_KP_TOOL_STUB +EXPORT_SYMBOL_GPL(hi_decoder_cmd_request); +#endif + +static bool queue_empty(struct ub_decoder_queue *q) +{ + return (Q_IDX(q->qs, q->prod.eventq_wr_idx) == + Q_IDX(q->qs, q->cons.eventq_rd_idx)) && + (Q_WRP(q->qs, q->prod.eventq_wr_idx) == + Q_WRP(q->qs, q->cons.eventq_rd_idx)); +} + +static 
void queue_inc_cons(struct ub_decoder_queue *q) +{ + u32 cons = (Q_WRP(q->qs, q->cons.eventq_rd_idx) | + Q_IDX(q->qs, q->cons.eventq_rd_idx)) + 1; + q->cons.eventq_rd_idx = Q_WRP(q->qs, cons) | Q_IDX(q->qs, cons); +} + +enum event_op_type { + RESERVED = 0x0, + EVENT_ADDR_OUT_OF_RANGE = 0x01, + EVENT_ILLEGAL_CMD = 0x02, +}; + +#define EVTQ_0_ID GENMASK_ULL(7, 0) +#define EVTQ_0_ADDR GENMASK_ULL(59, 32) +#define EVTQ_0_CMD_OPCODE GENMASK_ULL(39, 32) +#define EVTQ_ENT_DWORDS 2 +#define MAX_REASON_NUM 3 + +static const char *cmd_err_reason[MAX_REASON_NUM] = { + "no error", + "illegal command", + "abort error(read command with 2bit ecc)" +}; + +static void fix_err_cmd(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct ub_entity *uent = decoder->uent; + u64 cmd[CMDQ_ENT_DWORDS] = {}; + struct queue_idx prod, cons; + void *pi; + int i; + + if (ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, &cons.val)) { + ub_err(uent, "decoder fix error cmd, read ci failed\n"); + return; + } + if (ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &prod.val)) { + ub_err(uent, "decoder fix error cmd, read pi failed\n"); + return; + } + + cmd[0] |= FIELD_PREP(CMD_0_OP, TLBI_ALL); + pi = cmdq->qbase + Q_IDX(cmdq->qs, cons.cmdq_rd_idx) * + sizeof(struct tlbi_partial_entry); + + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + if (cons.cmdq_err_reason >= MAX_REASON_NUM) + ub_err(uent, "cmdq err reason is invalid, reason=%u\n", + cons.cmdq_err_reason); + else + ub_err(uent, "cmdq err reason is %s\n", cmd_err_reason[cons.cmdq_err_reason]); + + prod.cmdq_err_resp = cons.cmdq_err; + + if (ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, prod.val)) + ub_err(uent, "decoder fix error cmd, write pi err resp failed\n"); +} + +static void handle_evt(struct ub_decoder *decoder, u64 *evt) +{ + struct ub_entity *uent = decoder->uent; + + switch (FIELD_GET(EVTQ_0_ID, evt[0])) { + case EVENT_ADDR_OUT_OF_RANGE: + ub_err(uent, "decoder event, input addr out of range, addr=%#.7x00000\n", + (u32)FIELD_GET(EVTQ_0_ADDR, evt[0])); + break; + case EVENT_ILLEGAL_CMD: + ub_err(uent, "decoder event, illegal cmd, cmd_opcode=%#x\n", + (u32)FIELD_GET(EVTQ_0_CMD_OPCODE, evt[0])); + fix_err_cmd(decoder); + break; + default: + ub_err(uent, "invalid event opcode, opcode=%#x\n", + (u32)FIELD_GET(EVTQ_0_ID, evt[0])); + } +} + +void hi_decoder_event_deal(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *evtq = &decoder->evtq; + struct ub_entity *uent = decoder->uent; + u64 evt[EVTQ_ENT_DWORDS]; + void *ci; + int i; + + if (ub_cfg_read_dword(uent, DECODER_EVENTQ_PROD, &(evtq->prod.val))) { + ub_err(uent, "decoder handle event, read eventq pi fail\n"); + return; + } + + while (!queue_empty(evtq)) { + ci = evtq->qbase + Q_IDX(evtq->qs, evtq->cons.eventq_rd_idx) * + EVT_ENTRY_SIZE; + + for (i = 0; i < EVTQ_ENT_DWORDS; i++) + evt[i] = readq(ci + i * sizeof(u64)); + + handle_evt(decoder, evt); + queue_inc_cons(evtq); + + if (ub_cfg_write_dword(uent, DECODER_EVENTQ_CONS, + evtq->cons.val)) + ub_err(uent, "decoder handle event, write eventq ci fail\n"); + } +} diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h index 50658ef7b9cb..fc49a25b80d6 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h @@ -38,8 +38,8 @@ DECODER_PAGE_SIZE * \ RGTLB_TO_PGTLB) -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue); +int 
hi_init_decoder_queue(struct ub_decoder *decoder); +void hi_uninit_decoder_queue(struct ub_decoder *decoder); int hi_create_decoder_table(struct ub_decoder *decoder); void hi_free_decoder_table(struct ub_decoder *decoder); @@ -47,4 +47,8 @@ void hi_free_decoder_table(struct ub_decoder *decoder); int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); +int hi_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op); +void hi_decoder_event_deal(struct ub_decoder *decoder); + #endif /* __HISI_DECODER_H__ */ -- Gitee From 5317c3a4a4f4a77025b2ed3c91df9ab609815b93 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 23 Dec 2025 13:56:35 +0800 Subject: [PATCH 181/243] ub:ubus: fix ub_get_bus_controller getting null commit e7ad8b587f8fd0ef0cb74b894c7e53363c1a9c68 openEuler fix ub_get_bus_controller getting null Fixes: 26a640c9d1b1 ("ub:ubus: add ubus controller framework") Signed-off-by: Jianquan Lin Signed-off-by: zhao-lichang <943677312@qq.com> --- drivers/ub/ubus/ubus_driver.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index 974020bf3c38..6d5594156c3d 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -61,6 +61,12 @@ int ub_get_bus_controller(struct ub_entity *ubc_dev[], unsigned int max_num, { struct ub_bus_controller *ubc; unsigned int ubc_num = 0; + int ret; + + if (!manage_subsystem_ops) { + pr_err("manage subsystem ops is null\n"); + return -EINVAL; + } if (!real_num || !ubc_dev) { pr_err("%s: input parameters invalid\n", __func__); @@ -70,16 +76,25 @@ int ub_get_bus_controller(struct ub_entity *ubc_dev[], unsigned int max_num, list_for_each_entry(ubc, &ubc_list, node) { if (ubc_num >= max_num) { pr_err("ubc list num over max num %u\n", max_num); - ub_put_bus_controller(ubc_dev, max_num); - return -ENOMEM; + ret = -ENOMEM; + goto ubc_put; } - ubc_dev[ubc_num] = ub_entity_get(ubc->uent); + if (!ub_entity_get(ubc->uent)) { + pr_err("The ub_entity of ubc is null\n"); + ret = -EINVAL; + goto ubc_put; + } + ubc_dev[ubc_num] = ubc->uent; + ubc_num++; } *real_num = ubc_num; return 0; + +ubc_put: + ub_put_bus_controller(ubc_dev, max_num); + return ret; } EXPORT_SYMBOL_GPL(ub_get_bus_controller); -- Gitee From 1230eb406975fee16615ffec469787429abc9ba2 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Fri, 5 Dec 2025 15:22:43 +0800 Subject: [PATCH 182/243] ub: ub_fwctl: Fix the problem of incorrect data when querying entry information commit 7eb5e8a2cf2f97fa71075bbd55d292f58e10dd9d openEuler Fix the problem of incorrect data when querying entry information.
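The root cause is the pointer arithmetic on the MMIO source: stepping the pointer by the raw loop index advanced it one byte per iteration (with GCC's void-pointer arithmetic), while the destination consumed one 32-bit word per iteration. A minimal sketch of the corrected access pattern, assuming <linux/io.h>; the helper name and signature are illustrative, only readl() and the __iomem annotation come from the driver:

static void copy_msgq_entry_words(u32 *dst, const void __iomem *src,
				  u32 block_size)
{
	u32 words = block_size / sizeof(u32);
	u32 i;

	/* Advance the MMIO source in u32 units, not bytes. */
	for (i = 0; i < words; i++)
		dst[i] = readl((const u32 __iomem *)src + i);
}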
Fixes: 7ed154d74ca3 ("ub: ub_fwctl: query the MSG queue information and entry details within UB.") Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang --- drivers/fwctl/ub/ub_cmd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c index 5b3895107b31..4447c5da391b 100644 --- a/drivers/fwctl/ub/ub_cmd.c +++ b/drivers/fwctl/ub/ub_cmd.c @@ -608,13 +608,14 @@ static int ubctl_msgq_entry_move_data(struct ubctl_query_cmd_param *query_cmd_pa { u32 msgq_entry_data_size = block_size + offset * sizeof(u32); u32 *data_offset = query_cmd_param->out->data + offset; + u32 block_num = block_size / sizeof(u32); u32 i; if (msgq_entry_data_size > query_cmd_param->out_len) return -EINVAL; - for (i = 0; i < block_size / sizeof(u32); i++) - data_offset[i] = readl(entry_addr + i); + for (i = 0; i < block_num; i++) + data_offset[i] = readl((void __iomem *)((u32 *)entry_addr + i)); return 0; } -- Gitee From 3e7c4d4a69cf61bcb7ce22f8900ed09800eb5c76 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Tue, 9 Dec 2025 19:11:54 +0800 Subject: [PATCH 183/243] ub: ub_fwctl: Modify TP/TA/SCC register query process. commit f2923dd8d91ebeaf37dc393ccfdbfc71e0139f74 openEuler Modify TP/TA/SCC register query process. Fixes: 12da5b6ce2af ("ub: ub_fwctl: supports querying TP, BA related register information") Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/fwctl/ub/ub_cmd_reg.c | 45 +++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 12 +++++++++- drivers/fwctl/ub/ub_common.c | 6 +++++ drivers/fwctl/ub/ub_common.h | 4 ++++ include/uapi/fwctl/ub_fwctl.h | 12 ++++++++++ 5 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index 026ac3f2fe90..c75de6543622 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -203,6 +203,7 @@ static int ubctl_query_tp_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, @@ -251,6 +252,7 @@ static int ubctl_query_tp_pkt_stats_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, }; return ubctl_query_data(ucdev, query_cmd_param, query_func, @@ -277,6 +279,7 @@ static int ubctl_query_ta_data(struct ubctl_dev *ucdev, { struct ubctl_query_dp query_dp[] = { { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, }; @@ -290,6 +293,7 @@ static int ubctl_query_ta_pkt_stats(struct ubctl_dev *ucdev, { struct ubctl_query_dp query_dp[] = { { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, 
UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, }; return ubctl_query_data(ucdev, query_cmd_param, query_func, @@ -572,6 +576,42 @@ static int ubctl_config_prbs(struct ubctl_dev *ucdev, return ret; } +static int ubctl_query_nl_ssu_sw(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_SW_DFX, UBCTL_NL_SSU_SW_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_nl_ssu_oq(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_OQ_DFX, UBCTL_NL_SSU_OQ_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_nl_ssu_p2p(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_P2P_DFX, UBCTL_NL_SSU_P2P_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static int ubctl_query_dump_data(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_func_dispatch *query_func) @@ -585,6 +625,7 @@ static int ubctl_query_dump_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, @@ -594,6 +635,7 @@ static int ubctl_query_dump_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_ABN_STATS_DFX, UBCTL_TP_REG_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_DL_PKT_STATS_DFX, UBCTL_DL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, @@ -623,6 +665,9 @@ static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_NL_SSU_STATS, ubctl_query_nl_ssu_stats_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_ABN, ubctl_query_nl_abn_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_SW, ubctl_query_nl_ssu_sw, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_OQ, ubctl_query_nl_ssu_oq, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_P2P, ubctl_query_nl_ssu_p2p, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_DL, ubctl_query_dl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_DL_PKT_STATS, ubctl_query_dl_pkt_stats_data, diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index a8a4e63c42e3..691869977e65 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -9,9 +9,13 @@ #define UBCTL_QUERY_NL_PKT_STATS_DFX 0xA001 #define UBCTL_QUERY_NL_SSU_STATS_DFX 0xA002 #define UBCTL_QUERY_NL_ABN_DFX 0xA003 +#define UBCTL_QUERY_NL_SSU_SW_DFX 0xA028 +#define 
UBCTL_QUERY_NL_SSU_OQ_DFX 0xA029 +#define UBCTL_QUERY_NL_SSU_P2P_DFX 0xA02A #define UBCTL_QUERY_TP_TX_DFX 0xA004 #define UBCTL_QUERY_TP_RX_DFX 0xA005 +#define UBCTL_QUERY_TP_RX_EX_DFX 0xA02B #define UBCTL_QUERY_TP_TX_ROUTE_DFX 0xA01A #define UBCTL_QUERY_TP_RX_BANK_DFX 0xA01C #define UBCTL_QUERY_TP_ABN_STATS_DFX 0xA01D @@ -19,6 +23,7 @@ #define UBCTL_QUERY_TP_STATE_DFX 0xA024 #define UBCTL_QUERY_TA_PKT_STATS_DFX 0xA006 +#define UBCTL_QUERY_TA_PKT_STATS_EX_DFX 0xA02C #define UBCTL_QUERY_TA_ABN_STATS_DFX 0xA023 #define UBCTL_QUERY_DL_PKT_STATS_DFX 0xA007 @@ -58,9 +63,13 @@ #define UBCTL_NL_PKT_STATS_LEN 632 #define UBCTL_NL_SSU_STATS_LEN 408 #define UBCTL_NL_ABN_LEN 56 +#define UBCTL_NL_SSU_SW_LEN 24 +#define UBCTL_NL_SSU_OQ_LEN 24 +#define UBCTL_NL_SSU_P2P_LEN 24 #define UBCTL_TP_TX_STATS_LEN 904 #define UBCTL_TP_RX_STATS_LEN 704 +#define UBCTL_TP_RX_STATS_EX_LEN 120 #define UBCTL_TP_TX_ABN_LEN 948 #define UBCTL_TP_RX_ABN_LEN 760 #define UBCTL_TP_REG_LEN 24 @@ -70,7 +79,8 @@ #define UBCTL_TP_STATE_DFX_LEN 376 #define UBCTL_TA_PKT_STATS_LEN 920 -#define UBCTL_TA_ABN_STATS_LEN 168 +#define UBCTL_TA_PKT_STATS_EX_LEN 60 +#define UBCTL_TA_ABN_STATS_LEN 180 #define UBCTL_DL_PKT_STATS_LEN 984 #define UBCTL_DL_REPL_LEN 120 diff --git a/drivers/fwctl/ub/ub_common.c b/drivers/fwctl/ub/ub_common.c index 23d67829c8de..3827689c7565 100644 --- a/drivers/fwctl/ub/ub_common.c +++ b/drivers/fwctl/ub/ub_common.c @@ -96,6 +96,7 @@ static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev, struct ubctl_query_dp *query_dp, struct ubctl_query_cmd_dp *cmd_data, u32 offset) { +#define UTOOL_EOPNOTSUPP (-95) int *retval = &query_cmd_param->out->retval; struct ubctl_cmd cmd = {}; int ret = 0; @@ -109,6 +110,11 @@ static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev, } *retval = ubctl_ubase_cmd_send(ucdev->adev, &cmd); + if (*retval == UTOOL_EOPNOTSUPP) { + ubctl_warn(ucdev, "opcode is not support.\n"); + *retval = 0; + } + if (*retval) { ubctl_err(ucdev, "ubctl ubase cmd send failed, retval = %d.\n", *retval); diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index ab10576e3914..225218431254 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -26,6 +26,10 @@ dev_info(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ ##__VA_ARGS__) +#define ubctl_warn(ucdev, format, ...) 
\ + dev_warn(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ + ##__VA_ARGS__) + #define UBCTL_GET_PHY_ADDR(high, low) ((((u64)(high)) << 32) | (low)) #define UBCTL_EXTRACT_BITS(value, start, end) \ (((value) >> (start)) & ((1UL << ((end) - (start) + 1)) - 1)) diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index 38787e5cc8ca..5457c04585fb 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -58,6 +58,18 @@ enum ub_fwctl_cmdrpc_type { * @UTOOL_CMD_QUERY_NL_ABN: Query NL layer NL_ABN related registers */ UTOOL_CMD_QUERY_NL_ABN = 0x0004, + /** + * @UTOOL_CMD_QUERY_NL_SSU_SW: Query ssu_sw non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_SW = 0x0005, + /** + * @UTOOL_CMD_QUERY_NL_SSU_OQ: Query ssu_oq non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_OQ = 0x0006, + /** + * @UTOOL_CMD_QUERY_NL_SSU_P2P: Query ssu_p2p queue non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_P2P = 0x0007, /** * @UTOOL_CMD_QUERY_TP: Query all registers at the TP layer -- Gitee From 2e60d21ff25db489157533ba39ec8967800b06f1 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 10 Dec 2025 22:11:05 +0800 Subject: [PATCH 184/243] ub: ubase: Fix spell error of month commit 1c6b154e151f218d7bf293d3bed956b87c321b92 openEuler Fix spell error of month. Fixes: 5515a4226467 ("ub: ubase: support for activate/deactivate dev interface") Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 3b66f2558845..a562ef8543b2 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -864,7 +864,7 @@ int ubase_dbg_format_time(time64_t time, struct seq_file *s) { #define YEAR_OFFSET 1900 const char week[7][4] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; - const char mouth[12][4] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", + const char month[12][4] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; struct tm t; @@ -874,7 +874,7 @@ int ubase_dbg_format_time(time64_t time, struct seq_file *s) time64_to_tm(time, 0, &t); seq_printf(s, "%s %s %02d %02d:%02d:%02d %ld", week[t.tm_wday], - mouth[t.tm_mon], t.tm_mday, t.tm_hour, t.tm_min, + month[t.tm_mon], t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, t.tm_year + YEAR_OFFSET); return 0; } -- Gitee From df5c33d7317144ee4e36224d52c0523a9541d3b7 Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 18 Nov 2025 16:39:47 +0800 Subject: [PATCH 185/243] iommu/ummu: Fix ubmem unmap return value error commit 5272139386db86969e427b04578a8671f4221ef6 openEuler Fix ubmem_mmu_unmap_pages return value error. In the original code, mdom->iova_len is directly returned. 
mmu_domain_cfg_clear(), however, clears mdom->iova_len before that return, so the unmap path reported 0 instead of the unmapped length. Snapshot the length before clearing and return the snapshot. Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c b/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c index 4fe5d4635ea4..b8d22b7bfc64 100644 --- a/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c +++ b/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c @@ -516,6 +516,7 @@ static size_t ubmem_mmu_unmap_pages(struct iommu_domain *domain, { struct ubmem_mmu_domain *mdom = to_ubmem_mmu_domain(domain); struct maple_tree *mt = (struct maple_tree *)mdom->cached_pa_list; + unsigned long unmapped = 0; struct pa_info *info; MA_STATE(mas, mt, 0, 0); @@ -539,10 +540,11 @@ static size_t ubmem_mmu_unmap_pages(struct iommu_domain *domain, mdom->pte_valid = false; } + unmapped = mdom->iova_len; clear_cached_pa_list(mt); mmu_domain_cfg_clear(mdom); - return mdom->iova_len; + return unmapped; } static int ubmem_mmu_iotlb_sync_map(struct iommu_domain *domain, -- Gitee From a0464bac550a30e3da334ab9dc9c5227a8e113ef Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 18 Nov 2025 18:39:53 +0800 Subject: [PATCH 186/243] iommu/ummu: Fix 2P virtualization error commit 32a6cbe9d7b61fa24701f930c914f6f17d948ab4 openEuler In the 2-socket virtualization scenario, the invalidation command needs to be issued on both UMMUs. In the original code, the invalidation command is issued twice on one UMMU. Signed-off-by: Jingbin Wu Signed-off-by: Liming An --- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 1 + drivers/iommu/hisilicon/nested.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index 448ad6e5514b..cd859bee39ec 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -1003,6 +1003,7 @@ logic_ummu_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, if (!domain->pgsize_bitmap) domain->pgsize_bitmap = drv_ops->pgsize_bitmap; nested_base_domain = to_ummu_base_domain(domain); + nested_base_domain->core_dev = &ummu->core_dev; nested_base_domain->parent = logic_vummu->parent; if (!domain->ops) { ret = -EOPNOTSUPP; diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 366d47f7821e..0eed1d9f8d25 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -210,7 +210,7 @@ int ummu_viommu_cache_invalidate_user(struct iommu_domain *domain, nested_domain = to_nested_domain(domain); tecte_tag = nested_domain->s2_parent->cfgs.tecte_tag; - ummu = core_to_ummu_device(nested_domain->s2_parent->base_domain.core_dev); + ummu = core_to_ummu_device(nested_domain->base_domain.core_dev); cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); if (!cmds) -- Gitee From b9eb5ae8d7143fc2104e6ed1cf6d1d539a7fa9ad Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 19 Nov 2025 14:49:53 +0800 Subject: [PATCH 187/243] iommu/ummu: Fix builds under different kconfigs commit 00f81c7cc17c2e969149b4b51a6b9ae1f1f5b64a openEuler 1. Fix builds with UB_UMMU_SVA disabled 2. Fix builds with UB_UMMU_BYPASSDEV disabled 3. Fix builds with UB_UMMU_CORE_DRIVER disabled Signed-off-by: WangJie Signed-off-by: Liming An --- drivers/iommu/hisilicon/Kconfig | 4 ++-- drivers/iommu/hisilicon/iommu.c | 3 ++- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 2 ++ drivers/iommu/hisilicon/sva.h | 5 +++++
include/linux/ummu_core.h | 2 -- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/hisilicon/Kconfig b/drivers/iommu/hisilicon/Kconfig index 9b1a2acddcbc..e41f492a7ca6 100644 --- a/drivers/iommu/hisilicon/Kconfig +++ b/drivers/iommu/hisilicon/Kconfig @@ -24,7 +24,8 @@ config UB_UMMU select IOMMU_IO_PGTABLE_LPAE select GENERIC_MSI_IRQ select IOMMUFD_DRIVER if IOMMUFD - select UMMU_CORE + select UB_UMMU_CORE + select UB_UMMU_CORE_DRIVER select UB_UMMU_BASE help Support for implementations of the hisilicon UMMU architecture. @@ -39,7 +40,6 @@ config UB_UMMU_SVA select IOMMU_SVA select IOMMU_KSVA select IOMMU_IOPF - select MMU_NOTIFIER default n help Support for sharing process address spaces with devices using diff --git a/drivers/iommu/hisilicon/iommu.c b/drivers/iommu/hisilicon/iommu.c index 5581b8a6ae98..2a50d7bed835 100644 --- a/drivers/iommu/hisilicon/iommu.c +++ b/drivers/iommu/hisilicon/iommu.c @@ -506,12 +506,13 @@ static int ummu_dev_disable_feat(struct device *dev, static int ummu_def_domain_type(struct device *dev) { +#ifdef CONFIG_UB_UMMU_BYPASSDEV int ret; ret = ummu_bypass_dev_domain_type(dev); if (ret) return ret; - +#endif if (iommu_default_passthrough()) return IOMMU_DOMAIN_IDENTITY; return 0; } diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index cd859bee39ec..fd66ca5fa05f 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -1287,6 +1287,7 @@ static int logic_ummu_def_domain_type(struct device *dev) return ops->def_domain_type(dev); } +#ifdef CONFIG_UB_UMMU_SVA static void logic_ummu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct iommu_domain *domain) { @@ -1322,6 +1323,7 @@ static void logic_ummu_remove_dev_pasid(struct device *dev, ioasid_t pasid, /* release the tid */ ummu_core_free_tid(&logic_ummu.core_dev, tid); } +#endif /* depend on MPAM static int logic_ummu_set_group_qos_params(struct iommu_group *group, diff --git a/drivers/iommu/hisilicon/sva.h b/drivers/iommu/hisilicon/sva.h index 2f6a9a383444..e91fa1e11920 100644 --- a/drivers/iommu/hisilicon/sva.h +++ b/drivers/iommu/hisilicon/sva.h @@ -44,6 +44,11 @@ static inline int ummu_master_enable_sva(struct ummu_master *master, return -ENODEV; } +static inline bool ummu_master_sva_enabled(struct ummu_master *master) +{ + return false; +} + static inline int ummu_master_disable_sva(struct ummu_master *master, enum iommu_dev_features feat) { diff --git a/include/linux/ummu_core.h b/include/linux/ummu_core.h index 29d0952e35e7..843a6ba336d0 100644 --- a/include/linux/ummu_core.h +++ b/include/linux/ummu_core.h @@ -423,8 +423,6 @@ enum ummu_device_config_type { #if IS_ENABLED(CONFIG_UB_UMMU_CORE_DRIVER) extern const struct tid_ops *ummu_core_tid_ops[TID_OPS_MAX]; -#else -static const struct tid_ops *ummu_core_tid_ops[TID_OPS_MAX]; #endif /* CONFIG_UB_UMMU_CORE_DRIVER */ static inline struct ummu_core_device *to_ummu_core(struct iommu_device *iommu) -- Gitee From 1b6586f5868b58958202f8e41ad9150259daa019 Mon Sep 17 00:00:00 2001 From: Liming-An Date: Thu, 27 Nov 2025 21:50:11 +0800 Subject: [PATCH 188/243] iommu/ummu: Fix UMMU documentation error commit d6b2477ee785bf4f05cd9e6c0076aa21dde2bd1e openEuler This patch fixes the UMMU documentation index error. Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ub/index.rst
b/Documentation/ub/index.rst index 22276b791363..ee12df0aa405 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -14,6 +14,6 @@ UnifiedBus Subsystem ubase/index ubfi/index ubus/index - ummu-core + ummu/index cdma/index urma/udma/index -- Gitee From 550a2afedb262d3d55595c22439c1027586fcf0a Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Thu, 27 Nov 2025 09:22:03 +0800 Subject: [PATCH 189/243] ub: ubase: Remove useless character '+' commit a28376d6ef2503e88a0bf7c8976aa1a396f227fc openEuler Remove useless character '+'. Fixes: 904e31bebd61 ("ub: ubase: adapt to response message structure change for ctrlq sl query") Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_qos_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index b67b7f7e5c91..ca65878d9515 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -1016,7 +1016,7 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) } if (ubase_dev_udma_supported(udev) && -+ !(udma_tp_sl_cnt + udma_ctp_sl_cnt)) { + !(udma_tp_sl_cnt + udma_ctp_sl_cnt)) { ubase_err(udev, "udma doesn't have any sl.\n"); return -EIO; } -- Gitee From 6db569a14ac5fa1cc28055b5c55b15c000b9d8cc Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 23 Dec 2025 21:47:29 +0800 Subject: [PATCH 190/243] ub:ubus delete undefined class code commit 387bd97726ce3e73bad28ad8539d06d75b976efc openEuler Delete undefined class code macro. Fixes: e42bc0097589 ("ub:ubus: Support for ub bus driver framework") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: zhao-lichang <943677312@qq.com> --- include/ub/ubus/ubus_ids.h | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/include/ub/ubus/ubus_ids.h b/include/ub/ubus/ubus_ids.h index 5e5158ba5527..62ead65e5061 100644 --- a/include/ub/ubus/ubus_ids.h +++ b/include/ub/ubus/ubus_ids.h @@ -19,29 +19,16 @@ #define UB_BASE_CODE_STORAGE 0x01 #define UB_CLASS_STORAGE_LPC 0x0001 #define UB_CLASS_STORAGE_LBC 0x0101 -#define UB_CLASS_STORAGE_RAID 0x0201 #define UB_BASE_CODE_NETWORK 0x02 #define UB_CLASS_NETWORK_UB 0x0002 #define UB_CLASS_NETWORK_ETH 0x0102 -#define UB_BASE_CODE_DISPLAY 0x03 +#define UB_BASE_CODE_SWITCH 0x03 +#define UB_CLASS_SWITCH_UB 0x0003 -#define UB_BASE_CODE_SWITCH 0x04 -#define UB_CLASS_SWITCH_UB 0x0004 - -#define UB_BASE_CODE_VIRTIO 0x05 -#define UB_CLASS_LEGACY_VIRTIO_NETWORK 0x0005 -#define UB_CLASS_LEGACY_VIRTIO_BLOCK 0x0105 -#define UB_CLASS_LEGACY_VIRTIO_SCSI 0x0205 -#define UB_CLASS_LEGACY_VIRTIO_GRAPHIC 0x0305 -#define UB_CLASS_LEGACY_VIRTIO_SOCKET 0x0405 -#define UB_CLASS_LEGACY_VIRTIO_FS 0x0505 - -#define UB_BASE_CODE_VIRTUAL 0x06 - -#define UB_BASE_CODE_NPU 0x07 -#define UB_CLASS_NPU_UB 0x0007 +#define UB_BASE_CODE_NPU 0x04 +#define UB_CLASS_NPU_UB 0x0004 #define UB_BASE_CODE_UNKNOWN 0xFF #define UB_CLASS_UNKNOWN 0x00FF -- Gitee From e570d40cd793e0c5f9dc68090f892c5049e2c5eb Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Fri, 14 Nov 2025 10:33:25 +0800 Subject: [PATCH 191/243] net: unic: Add support for setting link speed, duplex, and autonegotiation via ethtool. commit 5d87e7c0cb6d518fa8809983e8052daa755c6cf0 openEuler This patch adds support for setting link speed, duplex, and autonegotiation parameters via ethtool. It includes validation checks for supported speeds and lanes, and handles changes in link configurations. 
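As a rough sketch of the validation idea (a hedged reconstruction, not the driver's exact code: the table entries and bit values below are invented, while the real map and ability mask live in unic_hw.c), each supported (speed, lanes) pair is folded into one capability bit, and a request passes only if that bit is set in the MAC's advertised ability mask:

/* Kernel helpers BIT() and ARRAY_SIZE() assumed. */
struct speed_bit_map {
	u32 speed;	/* Mb/s */
	u32 lanes;
	u32 speed_bit;
};

static const struct speed_bit_map bit_map[] = {
	{ 10000, 1, BIT(0) },	/* 10G x1 */
	{ 100000, 4, BIT(1) },	/* 100G x4, bit assignment assumed */
};

static bool speed_lanes_supported(u32 speed, u32 lanes, u32 ability_mask)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(bit_map); i++)
		if (bit_map[i].speed == speed && bit_map[i].lanes == lanes)
			return !!(bit_map[i].speed_bit & ability_mask);

	return false;	/* unknown combinations are rejected */
}

From user space the new path is exercised with something like "ethtool -s <ifname> speed 100000 lanes 4 duplex full autoneg off" (interface name illustrative; the lanes parameter needs a reasonably recent ethtool).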
The patch modifies the unic_ethtool.c, unic_hw.c, and unic_hw.h files to implement these features. Signed-off-by: Chuan Wu Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_dev.c | 5 ++ drivers/net/ub/unic/unic_ethtool.c | 80 ++++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_hw.c | 45 +++++++++++++++++ drivers/net/ub/unic/unic_hw.h | 4 ++ 4 files changed, 134 insertions(+) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index a0b25e52695e..7617ad2cf3f6 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -969,6 +969,11 @@ static struct net_device *unic_alloc_netdev(struct auxiliary_device *adev) dev_warn(adev->dev.parent, "failed to alloc netdev because of ubl macro is not enabled.\n"); #endif + } else { + snprintf(name, IFNAMSIZ, "ethc%ud%ue%u", caps->chip_id, + caps->die_id, caps->ue_id); + netdev = alloc_netdev_mq(sizeof(struct unic_dev), name, + NET_NAME_USER, ether_setup, channel_num); } return netdev; diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index f2cfa3df1126..3fef26c625b1 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -82,6 +82,85 @@ static int unic_get_link_ksettings(struct net_device *netdev, return 0; } +static bool unic_speed_supported(struct unic_dev *unic_dev, u32 speed, u32 lanes) +{ + u32 speed_bit = 0; + + if (unic_get_speed_bit(speed, lanes, &speed_bit)) + return false; + + return !!(speed_bit & unic_dev->hw.mac.speed_ability); +} + +static int unic_check_ksettings_param(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_mac *mac = &unic_dev->hw.mac; + u32 lanes; + + if (cmd->base.autoneg && !mac->support_autoneg) { + unic_err(unic_dev, "hw not support autoneg.\n"); + return -EINVAL; + } + + /* when autoneg is on, hw not support specified speed params, + * unnecessary to check them. + */ + if (cmd->base.autoneg) + return 0; + + /* if user not specify lanes, use current lanes */ + lanes = cmd->lanes ? cmd->lanes : mac->lanes; + if (!unic_speed_supported(unic_dev, cmd->base.speed, lanes)) { + unic_err(unic_dev, "speed(%u) and lanes(%u) is not supported.\n", + cmd->base.speed, lanes); + return -EINVAL; + } + + if (cmd->base.duplex != DUPLEX_FULL) { + unic_err(unic_dev, "only support full duplex.\n"); + return -EINVAL; + } + + return 0; +} + +static bool unic_link_ksettings_changed(struct unic_mac *mac, + const struct ethtool_link_ksettings *cmd) +{ + /* when autoneg is disabled and lanes not specified, lanes is 0.
*/ + if (cmd->base.autoneg == mac->autoneg && + cmd->base.duplex == mac->duplex && + cmd->base.speed == mac->speed && + (cmd->lanes == mac->lanes || (!cmd->lanes && !cmd->base.autoneg))) + return false; + + return true; +} + +static int unic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_mac *mac = &unic_dev->hw.mac; + int ret; + + if (!unic_link_ksettings_changed(mac, cmd)) + return 0; + + ret = unic_check_ksettings_param(netdev, cmd); + if (ret) + return ret; + + unic_info(unic_dev, + "set link: autoneg = %u, speed = %u, duplex = %u, lanes = %u.\n", + cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, cmd->lanes); + + return unic_set_mac_link_ksettings(unic_dev, cmd); +} + static void unic_get_driver_info(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -357,6 +436,7 @@ static const struct ethtool_ops unic_ethtool_ops = { .supported_coalesce_params = UNIC_ETHTOOL_COALESCE, .get_link = unic_get_link_status, .get_link_ksettings = unic_get_link_ksettings, + .set_link_ksettings = unic_set_link_ksettings, .get_drvinfo = unic_get_driver_info, .get_regs_len = unic_get_regs_len, .get_regs = unic_get_regs, diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 565ac56fb638..144fe958c4ac 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -31,6 +31,21 @@ static const struct unic_speed_bit_map speed_bit_map[] = { {UNIC_MAC_SPEED_10G, UNIC_LANES_1, UNIC_SUPPORT_10G_X1_BIT}, }; +int unic_get_speed_bit(u32 speed, u32 lanes, u32 *speed_bit) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed && + lanes == speed_bit_map[i].lanes) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + static int unic_get_port_info(struct unic_dev *unic_dev) { struct unic_query_port_info_resp resp = {0}; @@ -113,6 +128,36 @@ int unic_set_mac_speed_duplex(struct unic_dev *unic_dev, u32 speed, u8 duplex, return ret; } +int unic_set_mac_link_ksettings(struct unic_dev *unic_dev, + const struct ethtool_link_ksettings *cmd) +{ + /* if user not specify lanes, use current lanes */ + u32 lanes = cmd->lanes ? cmd->lanes : unic_dev->hw.mac.lanes; + int ret; + + ret = unic_set_mac_autoneg(unic_dev, cmd->base.autoneg); + if (ret) + return ret; + + /* when autoneg is on, hw not support specified speed params. 
*/ + if (cmd->base.autoneg) { + unic_info(unic_dev, + "autoneg is on, ignore other speed params.\n"); + return 0; + } + + ret = unic_set_mac_speed_duplex(unic_dev, cmd->base.speed, + cmd->base.duplex, lanes); + if (ret) + return ret; + + unic_dev->hw.mac.speed = cmd->base.speed; + unic_dev->hw.mac.duplex = cmd->base.duplex; + unic_dev->hw.mac.lanes = lanes; + + return 0; +} + static void unic_set_fec_ability(struct unic_mac *mac) { linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); diff --git a/drivers/net/ub/unic/unic_hw.h b/drivers/net/ub/unic/unic_hw.h index ba64d398e44b..1e04e08b04f1 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -71,11 +71,15 @@ static inline bool unic_is_port_down(struct unic_dev *unic_dev) return unic_dev->hw.mac.link_status == UNIC_LINK_STATUS_DOWN; } +int unic_get_speed_bit(u32 speed, u32 lanes, u32 *speed_bit); + int unic_update_port_info(struct unic_dev *unic_dev); int unic_set_mac_speed_duplex(struct unic_dev *unic_dev, u32 speed, u8 duplex, u8 lanes); int unic_set_mac_autoneg(struct unic_dev *unic_dev, u8 autoneg); +int unic_set_mac_link_ksettings(struct unic_dev *unic_dev, + const struct ethtool_link_ksettings *cmd); int unic_query_dev_res(struct unic_dev *unic_dev); -- Gitee From 529410934c6f746ef40b2d2e2b38bf7e455f0984 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Fri, 14 Nov 2025 11:17:00 +0800 Subject: [PATCH 192/243] net: unic: Add debugfs support for dumping hardware MAC tables. commit e6edbf2ccbff427281b92f6ba46c36eb87e8c685 openEuler This patch adds support for dumping the hardware MAC tables through debugfs in the UNIC driver. It introduces new data structures and functions to query and display the MAC table entries, including both unicast and multicast MACs. The patch modifies several files to implement this feature, ensuring that the MAC table information can be easily accessed for debugging and monitoring purposes.
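The dump walks the hardware table in fixed-size batches: each firmware query returns up to UNIC_DBG_MAC_NUM entries plus a resume index, and the walk ends when a batch comes back short. A condensed sketch of that loop with placeholder types (the real code also collects entries into a list before printing):

#define BATCH_MAX 16	/* mirrors UNIC_DBG_MAC_NUM */

static int walk_hw_table(u32 table_size,
			 int (*query_batch)(u32 *resume_idx, bool *complete))
{
	u32 max_batches = table_size / BATCH_MAX + 1;	/* upper bound */
	u32 resume_idx = 0, batches = 0;
	bool complete = false;
	int ret;

	while (!complete && batches++ < max_batches) {
		ret = query_batch(&resume_idx, &complete);
		if (ret)
			return ret;
	}

	return 0;
}

Once the driver is loaded, the table can be read with, e.g., "cat /sys/kernel/debug/<parent>/mac_tbl/mac_tbl_list_hw", where <parent> depends on where ubase places the device's debugfs root.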
Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 12 ++ drivers/net/ub/unic/debugfs/unic_debugfs.h | 1 + .../net/ub/unic/debugfs/unic_entry_debugfs.c | 138 ++++++++++++++++++ .../net/ub/unic/debugfs/unic_entry_debugfs.h | 38 +++++ drivers/net/ub/unic/unic_dev.h | 9 +- drivers/net/ub/unic/unic_hw.c | 2 + include/ub/ubase/ubase_comm_cmd.h | 11 ++ 7 files changed, 210 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 4c7bf3bb83fc..2eb39b94cdf2 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -105,6 +105,7 @@ static const struct unic_dbg_cap_bit_info { {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, {"\tsupport_fec_stats: %u\n", &unic_dev_fec_stats_supported}, + {"\tsupport_cfg_mac: %u\n", &unic_dev_cfg_mac_supported}, }; static void unic_dbg_dump_caps_bits(struct unic_dev *unic_dev, @@ -328,6 +329,10 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { .name = "qos", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, .support = unic_dbg_dentry_support, + }, { + .name = "mac_tbl", + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, }, /* keep unic at the bottom and add new directory above */ { @@ -408,6 +413,13 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_caps_info, + }, { + .name = "mac_tbl_list_hw", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_mac_tbl_list_hw, }, { .name = "page_pool_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 853597b90f45..1feba9d78f83 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -17,6 +17,7 @@ enum unic_dbg_dentry_type { UNIC_DBG_DENTRY_CONTEXT, UNIC_DBG_DENTRY_VPORT, UNIC_DBG_DENTRY_QOS, + UNIC_DBG_DENTRY_MAC, /* must be the last entry. 
*/ UNIC_DBG_DENTRY_ROOT }; diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c index 74b5fdb95aaa..8313fd24bba7 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c @@ -16,6 +16,14 @@ static const char * const unic_entry_state_str[] = { "TO_ADD", "TO_DEL", "ACTIVE" }; +static int unic_dbg_check_dev_state(struct unic_dev *unic_dev) +{ + if (__unic_resetting(unic_dev)) + return -EBUSY; + + return 0; +} + int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data) { struct unic_dev *unic_dev = dev_get_drvdata(s->private); @@ -32,6 +40,35 @@ int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data) return 0; } +static int unic_common_query_addr_list(struct unic_dev *unic_dev, u32 total_size, + u32 size, struct list_head *list, + int (*query_list)(struct unic_dev *, u32 *, + struct list_head *, + bool *complete)) +{ +#define UNIC_LOOP_COUNT(total_size, size) ((total_size) / (size) + 1) + + u32 idx = 0, cnt = 0; + bool complete; + int ret = 0; + + while (cnt < UNIC_LOOP_COUNT(total_size, size)) { + complete = false; + ret = query_list(unic_dev, &idx, list, &complete); + if (ret) { + unic_err(unic_dev, + "failed to query addr list, ret = %d.\n", ret); + break; + } + + if (complete) + break; + cnt++; + } + + return ret == -EPERM ? -EOPNOTSUPP : ret; +} + int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) { struct unic_dev *unic_dev = dev_get_drvdata(s->private); @@ -57,3 +94,104 @@ int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) return 0; } + +static int unic_query_mac_list_hw(struct unic_dev *unic_dev, u32 *mac_idx, + struct list_head *list, bool *complete) +{ + struct unic_dbg_comm_addr_node *mac_node; + struct unic_dbg_mac_entry *mac_entry; + struct unic_dbg_mac_head req = {0}; + struct unic_dbg_mac_head *head; + struct ubase_cmd_buf in, out; + int ret; + u8 i; + + head = kzalloc(UNIC_QUERY_MAC_LEN, GFP_ATOMIC); + if (!head) + return -ENOMEM; + + mac_entry = head->mac_entry; + req.mac_idx = cpu_to_le32(*mac_idx); + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_MAC_TBL, true, sizeof(req), + &req); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_MAC_TBL, true, + UNIC_QUERY_MAC_LEN, head); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + unic_err(unic_dev, + "failed to query mac hw tbl, ret = %d.\n", ret); + goto err_out; + } + + if (head->cur_mac_cnt > UNIC_DBG_MAC_NUM) { + ret = -EINVAL; + unic_err(unic_dev, + "invalid cur_mac_cnt(%u).\n", head->cur_mac_cnt); + goto err_out; + } + + for (i = 0; i < head->cur_mac_cnt; i++) { + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + ret = -ENOMEM; + goto err_out; + } + + memcpy(&mac_node->mac_addr, &mac_entry[i].mac_addr, + sizeof(mac_node->mac_addr)); + mac_node->eport = le32_to_cpu(mac_entry[i].eport); + list_add_tail(&mac_node->node, list); + } + + *complete = head->cur_mac_cnt < UNIC_DBG_MAC_NUM; + + *mac_idx = le32_to_cpu(head->mac_idx); + +err_out: + kfree(head); + + return ret; +} + +int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_dbg_comm_addr_node *mac_node, *next_node; + struct unic_caps *caps = &unic_dev->caps; + struct list_head list; + int ret, cnt = 0; + u32 size; + + ret = unic_dbg_check_dev_state(unic_dev); + if (ret) + return ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + size = caps->uc_mac_tbl_size + 
caps->mc_mac_tbl_size; + + INIT_LIST_HEAD(&list); + ret = unic_common_query_addr_list(unic_dev, size, + UNIC_DBG_MAC_NUM, &list, + unic_query_mac_list_hw); + if (ret) + goto release_list; + + seq_printf(s, "No %-28sEXTEND_INFO\n", "MAC_ADDR"); + + list_for_each_entry(mac_node, &list, node) { + seq_printf(s, "%-7d", cnt++); + seq_printf(s, "%-28pM", &mac_node->mac_addr); + seq_printf(s, "0x%08x\n", mac_node->eport); + } + +release_list: + list_for_each_entry_safe(mac_node, next_node, &list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } + + return ret; +} diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h index 73ab85f4d5f3..0076d08758a4 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -10,7 +10,45 @@ #include #include +#include "unic_comm_addr.h" + +#define UNIC_BITMAP_LEN 8 +#define UNIC_DBG_MAC_NUM 16 +#define UNIC_QUERY_MAC_LEN (sizeof(struct unic_dbg_mac_head) + \ + sizeof(struct unic_dbg_mac_entry) * UNIC_DBG_MAC_NUM) + +struct unic_dbg_mac_entry { + u8 mac_addr[ETH_ALEN]; + __le32 eport; +}; + +struct unic_dbg_mac_head { + __le32 mac_idx; + u8 cur_mac_cnt; + u8 rsv[3]; + struct unic_dbg_mac_entry mac_entry[]; +}; + +struct unic_dbg_comm_addr_node { + struct list_head node; + u16 ue_id; + u32 ue_bitmap[UNIC_BITMAP_LEN]; + u32 port_bitmap; + union { + u8 guid[UNIC_ADDR_LEN]; + struct { + struct in6_addr ip_addr; + u32 extend_info; + }; + struct { + u8 mac_addr[ETH_ALEN]; + u32 eport; + }; + }; +}; + int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data); int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data); +int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data); #endif /* _UNIC_ENTRY_DEBUGFS_H */ diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 51708850e38d..c57ec2049539 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -161,7 +161,9 @@ struct unic_channels { struct unic_caps { u16 rx_buff_len; u16 total_ip_tbl_size; - u32 rsvd0[5]; + u32 uc_mac_tbl_size; + u32 mc_mac_tbl_size; + u32 rsvd0[2]; u16 max_trans_unit; u16 min_trans_unit; u32 vport_buf_size; /* unit: byte */ @@ -313,6 +315,11 @@ static inline bool unic_dev_fec_stats_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_STATS_B); } +static inline bool unic_dev_cfg_mac_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_MAC_B); +} + static inline bool __unic_removing(struct unic_dev *unic_dev) { return test_bit(UNIC_STATE_REMOVING, &unic_dev->state); diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 144fe958c4ac..5cabde8c535a 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -525,6 +525,8 @@ static void unic_parse_dev_caps(struct unic_dev *unic_dev, caps->rx_buff_len = le16_to_cpu(resp->rx_buff_len); caps->total_ip_tbl_size = le16_to_cpu(resp->total_ip_tbl_size); + caps->uc_mac_tbl_size = le32_to_cpu(resp->uc_mac_tbl_size); + caps->mc_mac_tbl_size = le32_to_cpu(resp->mc_mac_tbl_size); caps->max_trans_unit = le16_to_cpu(resp->max_trans_unit); caps->min_trans_unit = le16_to_cpu(resp->min_trans_unit); caps->vport_buf_size = le16_to_cpu(resp->vport_buf_size) * KB; diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index cadc707a23e6..4efbf8402d9d 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ 
b/include/ub/ubase/ubase_comm_cmd.h @@ -47,12 +47,19 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_UBCL_CONFIG = 0x0050, /* NL commands */ + UBASE_OPC_VLAN_FILTER_CTRL = 0x2100, + UBASE_OPC_VLAN_FILTER_CFG = 0x2101, + UBASE_OPC_QUERY_VLAN_TBL = 0x2102, UBASE_OPC_CFG_VL_MAP = 0x2206, UBASE_OPC_CFG_ETS_TC_INFO = 0x2340, UBASE_OPC_QUERY_ETS_TCG_INFO = 0x2341, UBASE_OPC_QUERY_ETS_PORT_INFO = 0x2342, UBASE_OPC_QUERY_VL_AGEING_EN = 0x2343, UBASE_OPC_CFG_PROMISC_MODE = 0x240A, + UBASE_OPC_QUERY_MAC = 0x241A, + UBASE_OPC_ADD_MAC_TBL = 0x241B, + UBASE_OPC_DEL_MAC_TBL = 0x241C, + UBASE_OPC_QUERY_MAC_TBL = 0x241E, /* TP commands */ UBASE_OPC_TP_TIMER_VA_CONFIG = 0x3007, @@ -76,6 +83,7 @@ enum ubase_opcode_type { /* DL commands */ UBASE_OPC_DL_CONFIG_MODE = 0x5100, + UBASE_OPC_DL_CONFIG_LB = 0x5101, UBASE_OPC_QUERY_FLUSH_STATUS = 0x5102, UBASE_OPC_START_PERF_STATS = 0x5103, UBASE_OPC_STOP_PERF_STATS = 0x5104, @@ -87,6 +95,9 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_PORT_INFO = 0x6200, UBASE_OPC_QUERY_CHIP_INFO = 0x6201, UBASE_OPC_QUERY_FEC_STATS = 0x6202, + UBASE_OPC_QUERY_LINK_DIAGNOSIS = 0x6203, + UBASE_OPC_CFG_MAC_PAUSE_EN = 0x6300, + UBASE_OPC_CFG_PFC_PAUSE_EN = 0x6301, UBASE_OPC_HIMAC_RESET = 0x6302, /* Mailbox commands */ -- Gitee From 7a471c5c5eefa221efa84682bff70b809321cdd5 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Fri, 14 Nov 2025 16:23:27 +0800 Subject: [PATCH 193/243] net: unic: Add loopback test support. commit f09caf9c7d53e7a935592abc478645b2046842d5 openEuler This patch adds comprehensive support for loopback testing in the UNIC driver. It introduces new files `unic_lb.c` and `unic_lb.h` to handle loopback configurations and tests. The patch includes: 1. Support for new loopback types, including application, serial SerDes, parallel SerDes, and external loopback modes. 2. Integration with ethtool self-tests, allowing users to run tests and retrieve results. 3. Support for setting up and verifying loopback modes. 4. Support to prepare, send, and verify test packets during loopback tests. This enhancement provides a robust framework for testing UNIC driver functionality in various loopback scenarios.
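For context (not part of the patch): the tests are driven through the standard ETHTOOL_TEST ioctl. A minimal userspace sketch, assuming the interface name "ub0" is a placeholder and that the result array has one u64 slot per mode in enum unic_lb_en_sub_cmd order (a non-zero entry means that mode failed; untested modes hold -EOPNOTSUPP):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct {
			struct ethtool_test hdr;
			__u64 data[4];	/* assumed: one slot per UNIC_LB_MAX mode */
		} test = { .hdr = { .cmd = ETHTOOL_TEST,
				    .flags = ETH_TEST_FL_OFFLINE, .len = 4 } };
		struct ifreq ifr = {0};
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "ub0", IFNAMSIZ - 1); /* placeholder name */
		ifr.ifr_data = (void *)&test;
		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_TEST");
			return 1;
		}
		printf("flags=0x%x app=%llu serdes_s=%llu serdes_p=%llu ext=%llu\n",
		       test.hdr.flags, test.data[0], test.data[1],
		       test.data[2], test.data[3]);
		close(fd);
		return 0;
	}

Running `ethtool -t ub0 offline` exercises the same path; ETH_TEST_FL_FAILED in the returned flags marks an overall failure.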
Signed-off-by: Zihao Sheng Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/Makefile | 1 + drivers/net/ub/unic/debugfs/unic_debugfs.c | 1 + .../net/ub/unic/debugfs/unic_entry_debugfs.h | 7 +- drivers/net/ub/unic/unic.h | 4 + drivers/net/ub/unic/unic_cmd.h | 16 + drivers/net/ub/unic/unic_dev.h | 5 + drivers/net/ub/unic/unic_ethtool.c | 4 +- drivers/net/ub/unic/unic_lb.c | 532 ++++++++++++++++++ drivers/net/ub/unic/unic_lb.h | 17 + drivers/net/ub/unic/unic_netdev.h | 2 + drivers/net/ub/unic/unic_stats.c | 4 + drivers/net/ub/unic/unic_tx.h | 1 - 12 files changed, 590 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ub/unic/unic_lb.c create mode 100644 drivers/net/ub/unic/unic_lb.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 041815219e0c..4fb7b82a9967 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,5 +9,6 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o +unic-objs += unic_lb.o unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 2eb39b94cdf2..8f62c65da184 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -104,6 +104,7 @@ static const struct unic_dbg_cap_bit_info { {"\tsupport_tc_speed_limit: %u\n", &unic_dev_tc_speed_limit_supported}, {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, + {"\tsupport_app_lb: %u\n", &unic_dev_app_lb_supported}, {"\tsupport_fec_stats: %u\n", &unic_dev_fec_stats_supported}, {"\tsupport_cfg_mac: %u\n", &unic_dev_cfg_mac_supported}, }; diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h index 0076d08758a4..3db8e90eaa72 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -8,9 +8,12 @@ #define __UNIC_ENTRY_DEBUGFS_H__ #include +#include #include -#include "unic_comm_addr.h" +#ifndef UBL_ALEN +#define UBL_ALEN 16 +#endif #define UNIC_BITMAP_LEN 8 #define UNIC_DBG_MAC_NUM 16 @@ -35,7 +38,7 @@ struct unic_dbg_comm_addr_node { u32 ue_bitmap[UNIC_BITMAP_LEN]; u32 port_bitmap; union { - u8 guid[UNIC_ADDR_LEN]; + u8 guid[UBL_ALEN]; struct { struct in6_addr ip_addr; u32 extend_info; diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index e63ee6e900ff..0b3fb090f1de 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -52,6 +52,10 @@ enum { #define UNIC_MPE (UNIC_USER_MPE | \ UNIC_OVERFLOW_MGP) +#define UNIC_SUPPORT_APP_LB BIT(0) +#define UNIC_SUPPORT_EXTERNAL_LB BIT(3) +#define UNIC_LB_TEST_FLAGS (UNIC_SUPPORT_APP_LB) + #define UNIC_RSS_MAX_VL_NUM UBASE_NIC_MAX_VL_NUM #define UNIC_INVALID_PRIORITY (0xff) #define UNIC_MAX_PRIO_NUM IEEE_8021QAZ_MAX_TCS diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index ac571815be6a..334390913e7c 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ 
b/drivers/net/ub/unic/unic_cmd.h @@ -164,4 +164,20 @@ struct unic_config_vl_speed_cmd { u8 resv1[20]; }; +enum unic_lb_en_sub_cmd { + UNIC_LB_APP = 0, + UNIC_LB_SERIAL_SERDES, + UNIC_LB_PARALLEL_SERDES, + UNIC_LB_EXTERNAL, + UNIC_LB_MAX, +}; + +struct unic_lb_en_cfg { + u8 sub_cmd; + u8 lb_en : 1; + u8 rsvd : 7; + u8 result; + u8 rsv[21]; +}; + #endif diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index c57ec2049539..b340e274841b 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -310,6 +310,11 @@ static inline bool unic_dev_rx_csum_offload_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_RX_CSUM_OFFLOAD_B); } +static inline bool unic_dev_app_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_APP_LB_B); +} + static inline bool unic_dev_fec_stats_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_STATS_B); diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 3fef26c625b1..9f2eac42f7ab 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -8,11 +8,12 @@ #include #include "unic.h" +#include "unic_channel.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_lb.h" #include "unic_netdev.h" #include "unic_stats.h" -#include "unic_channel.h" #include "unic_ethtool.h" static u32 unic_get_link_status(struct net_device *netdev) @@ -447,6 +448,7 @@ static const struct ethtool_ops unic_ethtool_ops = { .set_channels = unic_set_channels, .get_ringparam = unic_get_channels_param, .set_ringparam = unic_set_channels_param, + .self_test = unic_self_test, .get_fecparam = unic_get_fecparam, .set_fecparam = unic_set_fecparam, .get_fec_stats = unic_get_fec_stats, diff --git a/drivers/net/ub/unic/unic_lb.c b/drivers/net/ub/unic/unic_lb.c new file mode 100644 index 000000000000..285a15d852c2 --- /dev/null +++ b/drivers/net/ub/unic/unic_lb.c @@ -0,0 +1,532 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include +#include + +#include "unic.h" +#include "unic_cmd.h" +#include "unic_dev.h" +#include "unic_hw.h" +#include "unic_netdev.h" +#include "unic_lb.h" + +#define UNIC_LB_TEST_CHANNEL_ID 0 +#define UNIC_LB_TEST_PKT_NUM 1 +#define UNIC_LB_TEST_UNEXECUTED 1 +#define UNIC_LB_TEST_PACKET_SIZE 128 + +#define UNIC_SW_TYPE_LEN 1 +#define UNIC_HEX 16 +#define UNIC_DHCPV4_PROTO 0x0100 + +static void unic_set_selftest_param(struct unic_dev *unic_dev, int *st_param) +{ + st_param[UNIC_LB_APP] = + unic_dev->loopback_flags & UNIC_SUPPORT_APP_LB; +} + +static int unic_lb_link_status_wait(struct unic_dev *unic_dev, bool en) +{ +#define UNIC_LINK_STATUS_MS 100 +#define UNIC_MAC_LINK_STATUS_NUM 100 + + u8 link_status = UNIC_LINK_STATUS_DOWN; + u8 link_ret; + int i = 0; + int ret; + + link_ret = en ? 
UNIC_LINK_STATUS_UP : UNIC_LINK_STATUS_DOWN; + + do { + ret = unic_query_link_status(unic_dev, &link_status); + if (ret) + return ret; + if (link_status == link_ret) + return 0; + + msleep(UNIC_LINK_STATUS_MS); + } while (++i < UNIC_MAC_LINK_STATUS_NUM); + + unic_warn(unic_dev, "query mac link status timeout, en = %d.\n", en); + return -EBUSY; +} + +static int unic_set_app_lb(struct unic_dev *unic_dev, bool en) +{ + int ret; + + if (!unic_dev_app_lb_supported(unic_dev)) + return -EOPNOTSUPP; + + ret = unic_mac_cfg(unic_dev, en); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, en); +} + +static int unic_lb_config(struct net_device *ndev, int loop_type, bool en, + struct unic_promisc_en *promisc_en) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret = 0; + + switch (loop_type) { + case UNIC_LB_APP: + ret = unic_set_app_lb(unic_dev, en); + break; + case UNIC_LB_SERIAL_SERDES: + case UNIC_LB_PARALLEL_SERDES: + case UNIC_LB_EXTERNAL: + break; + default: + unic_info(unic_dev, + "loop_type is not supported, loop_type = %d.\n", + loop_type); + return -EOPNOTSUPP; + } + + if (ret && ret != -EOPNOTSUPP) + unic_err(unic_dev, + "lb_config return error, ret = %d, enable = %d.\n", + ret, en); + + unic_set_promisc_mode(unic_dev, promisc_en); + + return ret; +} + +static int unic_selftest_prepare(struct net_device *ndev, bool if_running, + u8 autoneg) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + ret = if_running ? unic_net_stop(ndev) : 0; + if (ret) { + unic_err(unic_dev, "failed to stop net, ret = %d.\n", ret); + return ret; + } + + ret = autoneg ? unic_set_mac_autoneg(unic_dev, false) : 0; + if (ret) { + unic_err(unic_dev, "failed to set mac autoneg, ret = %d.\n", ret); + goto restore_net; + } + + set_bit(UNIC_STATE_TESTING, &unic_dev->state); + + return 0; + +restore_net: + ret = if_running ? unic_net_open(ndev) : 0; + if (ret) + unic_err(unic_dev, "failed to restore net, ret = %d.\n", ret); + + return ret; +} + +static void unic_eth_lb_check_skb_data(struct unic_channel *c, + struct sk_buff *skb) +{ + struct unic_dev *unic_dev = netdev_priv(skb->dev); + struct net_device *ndev = skb->dev; + struct unic_rq *rq = c->rq; + u32 len = skb_headlen(skb); + u8 *packet = skb->data; + struct ethhdr *ethh; + u32 i; + + if (ZERO_OR_NULL_PTR(packet)) { + unic_err(unic_dev, "eth packet content is null.\n"); + goto out; + } + + if (len != UNIC_LB_TEST_PACKET_SIZE) { + unic_err(unic_dev, + "eth test packet size error, len = %u.\n", len); + goto out; + } + + ethh = (struct ethhdr *)(skb->data - ETH_HLEN); + if (memcmp(ethh->h_dest, ndev->dev_addr, ETH_ALEN) || + memcmp(ethh->h_source, ndev->dev_addr, ETH_ALEN) || + ethh->h_proto != htons(ETH_P_ARP)) { + unic_err(unic_dev, "eth segment error.\n"); + goto out; + } + + for (i = 0; i < len; i++) { + if (packet[i] != (i & 0xff)) { + unic_err(unic_dev, + "eth packet content error, i = %u.\n", i); + goto out; + } + } + + dev_kfree_skb_any(skb); + return; +out: + /* Due to the fact that incorrect packet content in the poll rx process + * can also increase packet and byte counts, the statistics should be + * subtracted when counting if the packets are incorrect. 
+ */ + u64_stats_update_begin(&rq->syncp); + rq->stats.packets--; + rq->stats.bytes -= skb->len; + u64_stats_update_end(&rq->syncp); + print_hex_dump(KERN_ERR, "eth selftest:", DUMP_PREFIX_OFFSET, + UNIC_HEX, 1, skb->data, len, true); + dev_kfree_skb_any(skb); +} + +static u32 unic_lb_check_rx(struct unic_dev *unic_dev, u32 budget, + struct sk_buff *skb) +{ + struct unic_channel *c; + u64 pre_pkt, pre_byte; + u32 pkt_total = 0; + u32 i; + + for (i = 0; i < unic_dev->channels.num; i++) { + c = &unic_dev->channels.c[i]; + pre_pkt = c->rq->stats.packets; + pre_byte = c->rq->stats.bytes; + + preempt_disable(); + unic_poll_rx(c, budget, unic_eth_lb_check_skb_data); + preempt_enable(); + + pkt_total += (c->rq->stats.packets - pre_pkt); + c->rq->stats.packets = pre_pkt; + c->rq->stats.bytes = pre_byte; + } + return pkt_total; +} + +static void unic_eth_lb_setup_skb(struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + struct ethhdr *ethh; + u8 *packet; + u32 i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, UNIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + memcpy(ethh->h_source, ndev->dev_addr, ETH_ALEN); + + ethh->h_proto = htons(ETH_P_ARP); + + for (i = 0; i < UNIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (i & 0xff); +} + +static struct sk_buff *unic_lb_skb_prepare(struct net_device *ndev) +{ + u32 size = UNIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN; + struct sk_buff *skb; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return NULL; + + skb->dev = ndev; + skb->queue_mapping = UNIC_LB_TEST_CHANNEL_ID; + + unic_eth_lb_setup_skb(skb); + + return skb; +} + +static void unic_lb_poll_tx(struct unic_dev *unic_dev, struct sk_buff *skb) +{ + u64 pre_pkt, pre_byte; + struct unic_sq *sq; + + sq = unic_dev->channels.c[skb->queue_mapping].sq; + + pre_pkt = sq->stats.packets; + pre_byte = sq->stats.bytes; + + unic_poll_tx(sq, 0); + if (sq->pi != sq->ci) { + unic_err(unic_dev, "cqe error, sq pi doesn't match sq ci.\n"); + kfree_skb(skb); + } + + sq->stats.packets = pre_pkt; + sq->stats.bytes = pre_byte; +} + +static int unic_lb_run_test(struct net_device *ndev, int loop_mode) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct sk_buff *skb; + netdev_tx_t tx_ret; + int ret_val = 0; + u32 i, cnt = 0; + + /* Avoid loopback failure caused by receiving packets after mac_en + * takes effect but before loopback_en takes effect. + */ + for (i = 0; i < unic_dev->channels.num; i++) + unic_clear_rq(unic_dev->channels.c[i].rq); + + skb = unic_lb_skb_prepare(ndev); + if (!skb) { + unic_err(unic_dev, "failed to alloc skb.\n"); + return -ENOMEM; + } + + /* Handle the release of the skb for the different xmit outcomes. + * 1. On success, the skb is released through poll_tx and the final kfree. + * 2. If the packet is dropped, the skb is released through dev_kfree_skb_any. + * 3. If the queue is busy, the skb is released through kfree.
+ */ + skb_get(skb); + + tx_ret = unic_start_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) { + cnt++; + } else { + kfree_skb(skb); + unic_err(unic_dev, "failed to xmit loopback skb, ret = %d.\n", + tx_ret); + } + + if (cnt != UNIC_LB_TEST_PKT_NUM) { + ret_val = -EBUSY; + unic_err(unic_dev, "mode %d sent fail, cnt = %u, budget = %d.\n", + loop_mode, cnt, UNIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + cnt = unic_lb_check_rx(unic_dev, UNIC_LB_TEST_PKT_NUM, skb); + if (cnt != UNIC_LB_TEST_PKT_NUM) { + ret_val = -EINVAL; + unic_err(unic_dev, "mode %d recv fail, cnt = %u, budget = %d.\n", + loop_mode, cnt, UNIC_LB_TEST_PKT_NUM); + } + +out: + unic_lb_poll_tx(unic_dev, skb); + kfree_skb(skb); + return ret_val; +} + +static void unic_external_selftest_prepare(struct net_device *ndev) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (test_and_set_bit(UNIC_STATE_DOWN, &unic_dev->state)) + return; + + netif_carrier_off(ndev); + netif_tx_disable(ndev); + + unic_disable_channels(unic_dev); + + unic_clear_all_queue(ndev); + + unic_reset_tx_queue(ndev); +} + +static void unic_do_external_selftest(struct net_device *ndev, int *st_param, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_vport *vport = &unic_dev->vport; + struct unic_promisc_en promisc_all_en; + struct unic_promisc_en promisc_en; + + if (!st_param[UNIC_LB_EXTERNAL]) + return; + + unic_fill_promisc_en(&promisc_en, + unic_dev->netdev_flags | vport->last_promisc_flags); + memset(&promisc_all_en, 1, sizeof(promisc_all_en)); + data[UNIC_LB_EXTERNAL] = unic_lb_config(ndev, UNIC_LB_EXTERNAL, + true, &promisc_all_en); + if (!data[UNIC_LB_EXTERNAL]) + data[UNIC_LB_EXTERNAL] = unic_lb_run_test(ndev, UNIC_LB_EXTERNAL); + unic_lb_config(ndev, UNIC_LB_EXTERNAL, false, &promisc_en); + + if (data[UNIC_LB_EXTERNAL]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; +} + +static void unic_external_selftest_restore(struct net_device *ndev) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (unic_resetting(ndev)) + return; + + if (!test_bit(UNIC_STATE_DOWN, &unic_dev->state)) + return; + + unic_clear_all_queue(ndev); + + unic_enable_channels(unic_dev); + + netif_tx_wake_all_queues(ndev); + + clear_bit(UNIC_STATE_DOWN, &unic_dev->state); +} + +static void unic_do_selftest(struct net_device *ndev, int *st_param, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_vport *vport = &unic_dev->vport; + struct unic_promisc_en promisc_all_en; + struct unic_promisc_en promisc_en; + int lb_type; + + unic_fill_promisc_en(&promisc_en, + unic_dev->netdev_flags | vport->last_promisc_flags); + memset(&promisc_all_en, 1, sizeof(promisc_all_en)); + for (lb_type = UNIC_LB_APP; lb_type < UNIC_LB_EXTERNAL; lb_type++) { + if (!st_param[lb_type]) + continue; + + data[lb_type] = unic_lb_config(ndev, lb_type, true, + &promisc_all_en); + if (!data[lb_type]) + data[lb_type] = unic_lb_run_test(ndev, lb_type); + + unic_lb_config(ndev, lb_type, false, &promisc_en); + + if (data[lb_type]) + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static void unic_selftest_restore(struct net_device *ndev, bool if_running, + u8 autoneg) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + clear_bit(UNIC_STATE_TESTING, &unic_dev->state); + + ret = autoneg ? 
unic_set_mac_autoneg(unic_dev, true) : 0; + if (ret) + unic_err(unic_dev, "failed to restore mac autoneg, ret = %d.\n", + ret); + + ret = if_running ? unic_net_open(ndev) : 0; + if (ret) + unic_err(unic_dev, "failed to restore unic ndev, ret = %d.\n", + ret); +} + +static bool unic_self_test_is_unexecuted(struct net_device *ndev, + struct ethtool_test *eth_test, + u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (unic_dev_ubl_supported(unic_dev)) { + unic_err(unic_dev, + "failed to self test, due to dev in ub mode.\n"); + return true; + } + + if (test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) { + unic_err(unic_dev, + "failed to self test, due to dev deactivated.\n"); + return true; + } + + if (unic_resetting(ndev)) { + unic_err(unic_dev, + "failed to self test, due to dev resetting.\n"); + return true; + } + + if (!(eth_test->flags & ETH_TEST_FL_OFFLINE)) { + unic_err(unic_dev, + "failed to self test, due to offline test flag not set.\n"); + return true; + } + + if (unic_dev->loopback_flags & UNIC_SUPPORT_EXTERNAL_LB) + data[UNIC_LB_EXTERNAL] = UNIC_LB_TEST_UNEXECUTED; + + return false; +} + +int unic_get_selftest_count(struct unic_dev *unic_dev) +{ + int count = 0; + + /* clear loopback bit flags at first */ + unic_dev->loopback_flags &= (~UNIC_LB_TEST_FLAGS); + + if (unic_dev_app_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_APP_LB; + count++; + } + + return count == 0 ? -EOPNOTSUPP : UNIC_LB_MAX; +} + +void unic_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_mac *mac = &unic_dev->hw.mac; + bool if_running = netif_running(ndev); + int st_param[UNIC_LB_MAX]; + int ret, i; + + ret = unic_get_selftest_count(unic_dev); + if (ret <= 0) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* initialize the loopback test results, to avoid marking unsupported + * loopback tests as PASS. + */ + for (i = 0; i < UNIC_LB_MAX; i++) + data[i] = -EOPNOTSUPP; + + if (unic_self_test_is_unexecuted(ndev, eth_test, data)) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + unic_set_selftest_param(unic_dev, st_param); + + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + if (if_running) { + unic_external_selftest_prepare(ndev); + unic_do_external_selftest(ndev, st_param, eth_test, data); + unic_external_selftest_restore(ndev); + } else { + unic_warn(unic_dev, + "skip external selftest, due to interface down.\n"); + } + } + + ret = unic_selftest_prepare(ndev, if_running, mac->autoneg); + if (ret) + return; + + unic_do_selftest(ndev, st_param, eth_test, data); + unic_selftest_restore(ndev, if_running, mac->autoneg); +} diff --git a/drivers/net/ub/unic/unic_lb.h b/drivers/net/ub/unic/unic_lb.h new file mode 100644 index 000000000000..97e1e87763ef --- /dev/null +++ b/drivers/net/ub/unic/unic_lb.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved.
+ * + */ + +#ifndef __UNIC_LB_H__ +#define __UNIC_LB_H__ + +#include +#include + +int unic_get_selftest_count(struct unic_dev *unic_dev); +void unic_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data); + +#endif diff --git a/drivers/net/ub/unic/unic_netdev.h b/drivers/net/ub/unic/unic_netdev.h index fd8fd16f1e92..c3683361dd55 100644 --- a/drivers/net/ub/unic/unic_netdev.h +++ b/drivers/net/ub/unic/unic_netdev.h @@ -20,6 +20,8 @@ void unic_link_status_update(struct unic_dev *unic_dev); int unic_register_ipaddr_notifier(void); void unic_unregister_ipaddr_notifier(void); void unic_link_status_change(struct net_device *netdev, bool linkup); +void unic_enable_channels(struct unic_dev *unic_dev); +void unic_disable_channels(struct unic_dev *unic_dev); int unic_query_link_status(struct unic_dev *unic_dev, u8 *link_status); #endif diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index 4647ba5e3e5e..ec929328db37 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -11,6 +11,7 @@ #include "unic.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_lb.h" #include "unic_netdev.h" #include "unic_stats.h" @@ -430,6 +431,9 @@ int unic_get_sset_count(struct net_device *netdev, int stringset) count = ARRAY_SIZE(unic_sq_stats_str) * channel_num; count += ARRAY_SIZE(unic_rq_stats_str) * channel_num; break; + case ETH_SS_TEST: + count = unic_get_selftest_count(unic_dev); + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ub/unic/unic_tx.h b/drivers/net/ub/unic/unic_tx.h index b472065af4d8..755795d5ce81 100644 --- a/drivers/net/ub/unic/unic_tx.h +++ b/drivers/net/ub/unic/unic_tx.h @@ -87,7 +87,6 @@ struct unic_sq_stats { u64 pad_err; u64 bytes; u64 packets; - u64 map_err; u64 busy; u64 more; u64 restart_queue; -- Gitee From d0fe97acca55fcba5f35ea5617bba47cd948920b Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 10:21:11 +0800 Subject: [PATCH 194/243] net: unic: Extend loopback test support to include serdes loopback modes. commit 35902cddb3a8dc3f1009848fbcab90f9bb540c4c openEuler This patch extends the loopback test support in the UNIC driver to include serial and parallel Serdes loopback modes. It introduces new functions to enable and disable these loopback modes and integrates them with the existing self-test framework. The patch also updates the capability checks to support these new loopback types. Key changes include: 1. Adding support for serial and parallel Serdes loopback in `unic_lb.c`. 2. Introducing new helper functions to manage Serdes loopback configurations. 3. Updating capability checks in `unic_dev.h` to support Serdes loopback. 4. Modifying `unic.h` to include new loopback flags for Serdes modes. This enhancement provides a more comprehensive set of loopback testing options for the UNIC driver, supporting both application and Serdes-based loopbacks.
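A note for readers of the enable/disable helpers added below: the two paths are deliberately asymmetric. Condensed from the patch's own functions:

	/* enable:  unic_mac_cfg(dev, true)  -> unic_set_lb_mode(dev, true, type)
	 *          -> unic_lb_link_status_wait(dev, true)   (wait for link up)
	 * disable: unic_set_lb_mode(dev, false, type) -> unic_mac_cfg(dev, false)
	 *          -> unic_lb_link_status_wait(dev, false)  (wait for link down)
	 */

The MAC is brought up before the loopback is switched on, and the loopback is switched off before the MAC is brought down, so the link-state wait at the end of each path is unambiguous.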
Signed-off-by: Zihao Sheng Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 2 + drivers/net/ub/unic/unic.h | 6 +- drivers/net/ub/unic/unic_dev.h | 10 +++ drivers/net/ub/unic/unic_lb.c | 76 ++++++++++++++++++++++ 4 files changed, 93 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 8f62c65da184..d07d94140829 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -105,6 +105,8 @@ static const struct unic_dbg_cap_bit_info { {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, {"\tsupport_app_lb: %u\n", &unic_dev_app_lb_supported}, + {"\tsupport_serial_serdes_lb: %u\n", &unic_dev_serial_serdes_lb_supported}, + {"\tsupport_parallel_serdes_lb: %u\n", unic_dev_parallel_serdes_lb_supported}, {"\tsupport_fec_stats: %u\n", &unic_dev_fec_stats_supported}, {"\tsupport_cfg_mac: %u\n", &unic_dev_cfg_mac_supported}, }; diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index 0b3fb090f1de..34b92c0f365f 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -53,8 +53,12 @@ enum { UNIC_OVERFLOW_MGP) #define UNIC_SUPPORT_APP_LB BIT(0) +#define UNIC_SUPPORT_SERIAL_SERDES_LB BIT(1) +#define UNIC_SUPPORT_PARALLEL_SERDES_LB BIT(2) #define UNIC_SUPPORT_EXTERNAL_LB BIT(3) -#define UNIC_LB_TEST_FLAGS (UNIC_SUPPORT_APP_LB) +#define UNIC_LB_TEST_FLAGS (UNIC_SUPPORT_APP_LB | \ + UNIC_SUPPORT_SERIAL_SERDES_LB | \ + UNIC_SUPPORT_PARALLEL_SERDES_LB) #define UNIC_RSS_MAX_VL_NUM UBASE_NIC_MAX_VL_NUM #define UNIC_INVALID_PRIORITY (0xff) diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index b340e274841b..5a5c993f4d50 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -295,6 +295,11 @@ static inline bool unic_dev_fec_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_B); } +static inline bool unic_dev_serial_serdes_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_SERIAL_SERDES_LB_B); +} + static inline bool unic_dev_tc_speed_limit_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_TC_SPEED_LIMIT_B); @@ -320,6 +325,11 @@ static inline bool unic_dev_fec_stats_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_STATS_B); } +static inline bool unic_dev_parallel_serdes_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PARALLEL_SERDES_LB_B); +} + static inline bool unic_dev_cfg_mac_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_MAC_B); diff --git a/drivers/net/ub/unic/unic_lb.c b/drivers/net/ub/unic/unic_lb.c index 285a15d852c2..3928e39bdb85 100644 --- a/drivers/net/ub/unic/unic_lb.c +++ b/drivers/net/ub/unic/unic_lb.c @@ -27,6 +27,31 @@ static void unic_set_selftest_param(struct unic_dev *unic_dev, int *st_param) { st_param[UNIC_LB_APP] = unic_dev->loopback_flags & UNIC_SUPPORT_APP_LB; + st_param[UNIC_LB_SERIAL_SERDES] = + unic_dev->loopback_flags & UNIC_SUPPORT_SERIAL_SERDES_LB; + st_param[UNIC_LB_PARALLEL_SERDES] = + unic_dev->loopback_flags & UNIC_SUPPORT_PARALLEL_SERDES_LB; +} + +static int unic_set_lb_mode(struct unic_dev *unic_dev, bool en, int loop_type) +{ + struct 
unic_lb_en_cfg req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.lb_en = en ? 1 : 0; + req.sub_cmd = loop_type; + + ubase_fill_inout_buf(&in, UBASE_OPC_DL_CONFIG_LB, false, sizeof(req), + &req); + + ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + unic_err(unic_dev, + "failed to config loopback mode, ret = %d, loop_type = %d.\n", + ret, loop_type); + + return ret; } static int unic_lb_link_status_wait(struct unic_dev *unic_dev, bool en) @@ -55,6 +80,45 @@ static int unic_lb_link_status_wait(struct unic_dev *unic_dev, bool en) return -EBUSY; } +static int unic_enable_serdes_lb(struct unic_dev *unic_dev, int loop_type) +{ + int ret; + + ret = unic_mac_cfg(unic_dev, true); + if (ret) + return ret; + + ret = unic_set_lb_mode(unic_dev, true, loop_type); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, true); +} + +static int unic_disable_serdes_lb(struct unic_dev *unic_dev, int loop_type) +{ + int ret; + + ret = unic_set_lb_mode(unic_dev, false, loop_type); + if (ret) + return ret; + + ret = unic_mac_cfg(unic_dev, false); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, false); +} + +static int unic_set_serdes_lb(struct unic_dev *unic_dev, bool en, int loop_type) +{ + if (!unic_dev_parallel_serdes_lb_supported(unic_dev)) + return -EOPNOTSUPP; + + return en ? unic_enable_serdes_lb(unic_dev, loop_type) : + unic_disable_serdes_lb(unic_dev, loop_type); +} + static int unic_set_app_lb(struct unic_dev *unic_dev, bool en) { int ret; @@ -81,6 +145,8 @@ static int unic_lb_config(struct net_device *ndev, int loop_type, bool en, break; case UNIC_LB_SERIAL_SERDES: case UNIC_LB_PARALLEL_SERDES: + ret = unic_set_serdes_lb(unic_dev, en, loop_type); + break; case UNIC_LB_EXTERNAL: break; default: @@ -481,6 +547,16 @@ int unic_get_selftest_count(struct unic_dev *unic_dev) count++; } + if (unic_dev_serial_serdes_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_SERIAL_SERDES_LB; + count++; + } + + if (unic_dev_parallel_serdes_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_PARALLEL_SERDES_LB; + count++; + } + return count == 0 ? -EOPNOTSUPP : UNIC_LB_MAX; } -- Gitee From c2ed6e5d3900ee5393e13060e8cdc8fe77bcf7f7 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 10:27:13 +0800 Subject: [PATCH 195/243] net: unic: Extend loopback test support to include external loopback modes commit f8796e29249e1e527fc9e5d6a91d74a6236ff976 openEuler This patch extends the loopback test support in the UNIC driver to include external loopback mode. It introduces new functions to enable and disable this loopback mode and integrates it with the existing self-test framework. The patch also updates the capability checks to support the new external loopback type. Key changes include: 1. Adding support for external loopback in `unic_lb.c`. 2. Introducing new helper functions to manage external loopback configurations. 3. Updating capability checks in `unic_dev.h` to support external loopback. 4. Modifying `unic.h` to include a new loopback flag for external loopback. This enhancement provides an additional loopback testing option for the UNIC driver, supporting external loopbacks in addition to application and Serdes-based loopbacks.
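For context (not part of the patch): userspace opts into the external test explicitly, and the driver reports back whether it actually ran. A fragment extending the earlier ETHTOOL_TEST sketch:

	/* request the external loopback in addition to the offline tests */
	test.hdr.flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 &&
	    !(test.hdr.flags & ETH_TEST_FL_EXTERNAL_LB_DONE))
		fprintf(stderr, "external loopback skipped (interface down?)\n");

External mode typically also needs a physical loopback plug on the port; on the driver side, the UNIC_LB_EXTERNAL case configures nothing in hardware and only toggles promiscuous mode around the packet test.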
Signed-off-by: Zihao Sheng Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic.h | 3 ++- drivers/net/ub/unic/unic_dev.h | 5 +++++ drivers/net/ub/unic/unic_lb.c | 7 +++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index 34b92c0f365f..af11cefb6577 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -58,7 +58,8 @@ enum { #define UNIC_SUPPORT_EXTERNAL_LB BIT(3) #define UNIC_LB_TEST_FLAGS (UNIC_SUPPORT_APP_LB | \ UNIC_SUPPORT_SERIAL_SERDES_LB | \ - UNIC_SUPPORT_PARALLEL_SERDES_LB) + UNIC_SUPPORT_PARALLEL_SERDES_LB | \ + UNIC_SUPPORT_EXTERNAL_LB) #define UNIC_RSS_MAX_VL_NUM UBASE_NIC_MAX_VL_NUM #define UNIC_INVALID_PRIORITY (0xff) diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 5a5c993f4d50..4baeff01349e 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -325,6 +325,11 @@ static inline bool unic_dev_fec_stats_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_STATS_B); } +static inline bool unic_dev_external_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_EXTERNAL_LB_B); +} + static inline bool unic_dev_parallel_serdes_lb_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PARALLEL_SERDES_LB_B); diff --git a/drivers/net/ub/unic/unic_lb.c b/drivers/net/ub/unic/unic_lb.c index 3928e39bdb85..d0e33f9a52a1 100644 --- a/drivers/net/ub/unic/unic_lb.c +++ b/drivers/net/ub/unic/unic_lb.c @@ -31,6 +31,8 @@ static void unic_set_selftest_param(struct unic_dev *unic_dev, int *st_param) unic_dev->loopback_flags & UNIC_SUPPORT_SERIAL_SERDES_LB; st_param[UNIC_LB_PARALLEL_SERDES] = unic_dev->loopback_flags & UNIC_SUPPORT_PARALLEL_SERDES_LB; + st_param[UNIC_LB_EXTERNAL] = + unic_dev->loopback_flags & UNIC_SUPPORT_EXTERNAL_LB; } static int unic_set_lb_mode(struct unic_dev *unic_dev, bool en, int loop_type) @@ -557,6 +559,11 @@ int unic_get_selftest_count(struct unic_dev *unic_dev) count++; } + if (unic_dev_external_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_EXTERNAL_LB; + count++; + } + return count == 0 ? -EOPNOTSUPP : UNIC_LB_MAX; } -- Gitee From b703ec04dc502254dd754e2587d682618cb5b0fb Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 13:58:07 +0800 Subject: [PATCH 196/243] net: unic: Add support for querying link diagnosis status commit f886fbc13970994f666388da74295ef8b00ffe11 openEuler This patch adds support for querying link diagnosis status in the UNIC driver. It introduces a new function to retrieve and interpret link failure status codes, mapping them to ethtool link extended states. The patch also modifies the ethtool operations to include the new link diagnosis query. Key changes include: 1. Adding a new structure to hold link diagnosis response data. 2. Implementing a function to query and interpret link diagnosis status. 3. Mapping UNIC-specific status codes to ethtool link extended states. 4. Integrating the new functionality with the existing ethtool operations. This enhancement provides more detailed information about link failures, allowing for troubleshooting of network issues in UNIC driver. 
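Concretely, the mapping added below covers these command status codes (anything else, including 0, is reported to ethtool as -ENODATA, with a warning logged for unknown non-zero codes):

	/* 516  -> LINK_LOGICAL_MISMATCH / PCS did not acquire AM lock
	 * 768  -> BAD_SIGNAL_INTEGRITY / large number of physical errors
	 * 770  -> BAD_SIGNAL_INTEGRITY / serdes ALOS
	 * 1024 -> NO_CABLE
	 */

Recent ethtool versions surface these extended states in `ethtool <dev>` output while the carrier is down.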
Signed-off-by: Xiaobo Zhang Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_cmd.h | 13 ++++++ drivers/net/ub/unic/unic_crq.c | 28 ++++++++++++ drivers/net/ub/unic/unic_dcbnl.c | 5 +-- drivers/net/ub/unic/unic_ethtool.c | 71 ++++++++++++++++++++++++++++++ 4 files changed, 114 insertions(+), 3 deletions(-) diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index 334390913e7c..9f9ab6460985 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -52,6 +52,14 @@ struct unic_ld_config_mode_cmd { u8 rsv[20]; }; +enum unic_link_fail_code { + UNIC_LF_NORMAL, + UNIC_LF_REF_CLOCK_LOST, + UNIC_LF_XSFP_TX_DISABLE, + UNIC_LF_XSFP_ABSENT, + UNIC_LF_REF_MAX +}; + struct unic_link_status_cmd_resp { u8 status; u8 link_fail_code; @@ -180,4 +188,9 @@ struct unic_lb_en_cfg { u8 rsv[21]; }; +struct unic_query_link_diagnosis_resp { + __le32 status_code; + u8 rsv[20]; +}; + #endif diff --git a/drivers/net/ub/unic/unic_crq.c b/drivers/net/ub/unic/unic_crq.c index 29a27c6c547d..fa01e880a005 100644 --- a/drivers/net/ub/unic/unic_crq.c +++ b/drivers/net/ub/unic/unic_crq.c @@ -37,6 +37,31 @@ static void __unic_handle_link_status_event(struct auxiliary_device *adev, clear_bit(UNIC_STATE_LINK_UPDATING, &unic_dev->state); } +static void unic_link_fail_parse(struct auxiliary_device *adev, + u8 link_fail_code) +{ + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + static const struct { + u8 link_fail_code; + const char *str; + } codes[] = { + {UNIC_LF_REF_CLOCK_LOST, "Reference clock lost!\n"}, + {UNIC_LF_XSFP_TX_DISABLE, "SFP tx is disabled!\n"}, + {UNIC_LF_XSFP_ABSENT, "SFP is absent!\n"} + }; + + if (link_fail_code == UNIC_LF_NORMAL) + return; + + if (link_fail_code >= UNIC_LF_REF_MAX) { + unic_warn(unic_dev, "unknown fail code, fail_code = %u.\n", + link_fail_code); + return; + } + + unic_warn(unic_dev, "link fail cause: %s", codes[link_fail_code - 1].str); +} + int unic_handle_link_status_event(void *dev, void *data, u32 len) { struct unic_link_status_cmd_resp *resp = data; @@ -45,5 +70,8 @@ int unic_handle_link_status_event(void *dev, void *data, u32 len) __unic_handle_link_status_event(adev, hw_link_status); + if (!hw_link_status && !ubase_adev_ubl_supported(adev)) + unic_link_fail_parse(adev, resp->link_fail_code); + return 0; } diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 5f14cebac540..9f85044aeb05 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -121,7 +121,7 @@ static int unic_setets_preconditions(struct net_device *net_dev) if (netif_running(net_dev)) { unic_err(unic_dev, - "failed to set ets, due to network interface is up, pls down it first and try again.\n"); + "failed to set ets, due to network interface is up, please down it first and try again.\n"); return -EBUSY; } @@ -260,8 +260,7 @@ static int unic_dscp_prio_check(struct net_device *netdev, struct dcb_app *app) return -EOPNOTSUPP; if (netif_running(netdev)) { - unic_err(unic_dev, - "failed to set dscp-prio, due to network interface is up, pls down it first and try again.\n"); + unic_err(unic_dev, "failed to set dscp-prio, due to network interface is up, please down it first and try again.\n"); return -EBUSY; } diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 9f2eac42f7ab..a20bb3a05462 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -356,6 +356,12 @@ 
static int unic_set_coalesce(struct net_device *netdev, struct unic_coalesce old_tx_coal, old_rx_coal; int ret, ret1; + if (netif_running(netdev)) { + unic_err(unic_dev, + "failed to set coalesce param, due to network interface is up, please down it first and try again.\n"); + return -EBUSY; + } + if (unic_resetting(netdev)) return -EBUSY; @@ -425,6 +431,70 @@ static int unic_reset(struct net_device *ndev, u32 *flags) return 0; } +struct unic_ethtool_link_ext_state_mapping { + u32 status_code; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct unic_ethtool_link_ext_state_mapping +unic_link_ext_state_map[] = { + {516, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, +}; + +static int unic_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *info) +{ + const struct unic_ethtool_link_ext_state_mapping *map; + struct unic_query_link_diagnosis_resp resp = {0}; + struct unic_dev *unic_dev = netdev_priv(netdev); + struct ubase_cmd_buf in, out; + u32 status_code; + int ret; + u8 i; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (unic_dev_ubl_supported(unic_dev)) + return -EOPNOTSUPP; + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_LINK_DIAGNOSIS, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_LINK_DIAGNOSIS, false, + sizeof(resp), &resp); + + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + unic_err(unic_dev, "failed to query link diagnosis, ret = %d.\n", + ret); + return ret; + } + + status_code = le32_to_cpu(resp.status_code); + if (!status_code) + return -ENODATA; + + for (i = 0; i < ARRAY_SIZE(unic_link_ext_state_map); i++) { + map = &unic_link_ext_state_map[i]; + if (map->status_code == status_code) { + info->link_ext_state = map->link_ext_state; + info->__link_ext_substate = map->link_ext_substate; + return 0; + } + } + + unic_warn(unic_dev, "unknown link failure status_code = %u.\n", + status_code); + + return -ENODATA; +} + #define UNIC_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ ETHTOOL_RING_USE_TX_PUSH) #define UNIC_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ @@ -455,6 +525,7 @@ static const struct ethtool_ops unic_ethtool_ops = { .get_coalesce = unic_get_coalesce, .set_coalesce = unic_set_coalesce, .reset = unic_reset, + .get_link_ext_state = unic_get_link_ext_state, }; void unic_set_ethtool_ops(struct net_device *netdev) -- Gitee From 028335ad1a4a37b687f0b8f3e87d2bdb331a040e Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 14:58:28 +0800 Subject: [PATCH 197/243] net: unic: Add vlan filtering support. commit f225d7e71474747aaf2cc94405cca9d172a785c4 openEuler This patch adds comprehensive support for VLAN filtering in the UNIC driver. It introduces new data structures and functions to manage VLAN configurations, including adding and removing VLANs, enabling/disabling VLAN filters, and synchronizing VLAN states. The patch also modifies the initialization and cleanup routines to handle VLAN configurations and integrates with the existing network device operations. Key changes include: 1. Adding new structures to manage VLAN configurations and states. 2. Implementing functions to add/remove VLANs and enable/disable VLAN filters. 3.
Integrating VLAN support with the UNIC device initialization and cleanup process. 4. Synchronizing VLAN states during periodic tasks and device resets. 5. Adding support for VLAN filtering in the UNIC hardware interface. This enhancement provides robust VLAN filtering capabilities for UNIC driver, allowing for better network segmentation and traffic management. Signed-off-by: Guangwei Zhang Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/unic_cmd.h | 11 ++ drivers/net/ub/unic/unic_dev.c | 18 ++ drivers/net/ub/unic/unic_dev.h | 22 ++- drivers/net/ub/unic/unic_hw.c | 45 +++++ drivers/net/ub/unic/unic_hw.h | 2 + drivers/net/ub/unic/unic_netdev.c | 1 + drivers/net/ub/unic/unic_vlan.c | 309 ++++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_vlan.h | 19 ++ 9 files changed, 427 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ub/unic/unic_vlan.c create mode 100644 drivers/net/ub/unic/unic_vlan.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 4fb7b82a9967..24a19961a5a3 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,6 +9,6 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o -unic-objs += unic_lb.o +unic-objs += unic_lb.o unic_vlan.o unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index 9f9ab6460985..5d3a180f8f3b 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -152,6 +152,17 @@ struct unic_query_flush_status_resp { u8 rsv[23]; }; +struct unic_vlan_filter_cfg_cmd { + u16 vlan_id; + u8 is_add; + u8 rsv[21]; +}; + +struct unic_vlan_filter_ctrl_cmd { + u8 filter_en; + u8 rsv[23]; +}; + enum unic_vl_map_type { UNIC_PRIO_VL_MAP, UNIC_DSCP_VL_MAP, diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 7617ad2cf3f6..9e083873a119 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -26,6 +26,7 @@ #include "unic_qos_hw.h" #include "unic_netdev.h" #include "unic_rack_ip.h" +#include "unic_vlan.h" #include "unic_dev.h" #define UNIC_WATCHDOG_TIMEOUT (5 * HZ) @@ -543,6 +544,14 @@ static void unic_set_netdev_attr(struct net_device *netdev) if (unic_dev_ubl_supported(unic_dev)) { netdev->features |= NETIF_F_VLAN_CHALLENGED; netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + } else { + netdev->flags |= IFF_BROADCAST | IFF_MULTICAST; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if (!unic_dev_cfg_vlan_filter_supported(unic_dev)) { + netdev->features |= NETIF_F_VLAN_CHALLENGED; + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } if (unic_dev_tx_csum_offload_supported(unic_dev)) @@ -656,6 +665,7 @@ static void unic_periodic_service_task(struct unic_dev *unic_dev) unic_update_port_info(unic_dev); unic_sync_rack_ip_table(unic_dev); unic_sync_promisc_mode(unic_dev); + unic_sync_vlan_filter(unic_dev); if (!(unic_dev->serv_processed_cnt % UNIC_UPDATE_STATS_TIMER_INTERVAL)) unic_update_stats_for_all(unic_dev); @@ -764,12 +774,20 @@ static int 
unic_init_vport(struct unic_dev *unic_dev) unic_init_vport_info(unic_dev); + ret = unic_init_vlan_config(unic_dev); + if (ret) + unic_uninit_vport_buf(unic_dev); + return ret; } static void unic_uninit_vport(struct unic_dev *unic_dev) { unic_uninit_rack_ip_table(unic_dev); + + if (unic_dev_eth_mac_supported(unic_dev)) + unic_uninit_vlan_config(unic_dev); + unic_uninit_vport_buf(unic_dev); } diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 4baeff01349e..1d79f1924791 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -43,6 +43,7 @@ enum unic_vport_state { UNIC_VPORT_STATE_ALIVE, UNIC_VPORT_STATE_PROMISC_CHANGE, UNIC_VPORT_STATE_IP_TBL_CHANGE, + UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, UNIC_VPORT_STATE_IP_QUERYING, }; @@ -163,7 +164,8 @@ struct unic_caps { u16 total_ip_tbl_size; u32 uc_mac_tbl_size; u32 mc_mac_tbl_size; - u32 rsvd0[2]; + u32 vlan_tbl_size; + u32 rsvd0[1]; u16 max_trans_unit; u16 min_trans_unit; u32 vport_buf_size; /* unit: byte */ @@ -208,6 +210,18 @@ struct unic_addr_tbl { struct list_head tmp_ip_list; /* Store temprary ip table */ }; +struct unic_vlan_tbl { + bool cur_vlan_fltr_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct list_head vlan_list; /* Store vlan table */ + spinlock_t vlan_lock; /* protect vlan list */ +}; + +struct unic_vlan_cfg { + struct list_head node; + u16 vlan_id; +}; + struct unic_vport_buf { void *buf; dma_addr_t dma_addr; @@ -216,6 +230,7 @@ struct unic_vport_buf { struct unic_vport { struct unic_dev *back; struct unic_addr_tbl addr_tbl; + struct unic_vlan_tbl vlan_tbl; u8 overflow_promisc_flags; u8 last_promisc_flags; unsigned long state; @@ -335,6 +350,11 @@ static inline bool unic_dev_parallel_serdes_lb_supported(struct unic_dev *unic_d return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PARALLEL_SERDES_LB_B); } +static inline bool unic_dev_cfg_vlan_filter_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_VLAN_FILTER_B); +} + static inline bool unic_dev_cfg_mac_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_MAC_B); diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 5cabde8c535a..0fafc8369d20 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -827,6 +827,51 @@ int unic_update_fec_stats(struct unic_dev *unic_dev) return ret; } +int unic_set_vlan_filter_hw(struct unic_dev *unic_dev, bool filter_en) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_vlan_filter_ctrl_cmd req = {0}; + struct ubase_cmd_buf in; + u32 time_out; + int ret; + + req.filter_en = filter_en ? 1 : 0; + + ubase_fill_inout_buf(&in, UBASE_OPC_VLAN_FILTER_CTRL, false, + sizeof(req), &req); + + time_out = unic_cmd_timeout(unic_dev); + ret = ubase_cmd_send_in_ex(unic_dev->comdev.adev, &in, time_out); + if (ret) + dev_err(adev->dev.parent, + "failed to set vlan filter, ret = %d.\n", ret); + + return ret; +} + +int unic_set_port_vlan_hw(struct unic_dev *unic_dev, u16 vlan_id, bool is_add) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_vlan_filter_cfg_cmd req = {0}; + struct ubase_cmd_buf in; + u32 time_out; + int ret; + + req.vlan_id = cpu_to_le16(vlan_id); + req.is_add = is_add ? 
1 : 0; + + ubase_fill_inout_buf(&in, UBASE_OPC_VLAN_FILTER_CFG, false, sizeof(req), + &req); + + time_out = unic_cmd_timeout(unic_dev); + ret = ubase_cmd_send_in_ex(unic_dev->comdev.adev, &in, time_out); + if (ret) + dev_err(adev->dev.parent, + "failed to send port vlan command, ret = %d.\n", ret); + + return ret; +} + static void unic_set_rss_tc0_param(struct unic_channels *channels, u16 jfr_cnt, __le16 *jfr_idx) { diff --git a/drivers/net/ub/unic/unic_hw.h b/drivers/net/ub/unic/unic_hw.h index 1e04e08b04f1..68db5cf9ebe7 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -103,6 +103,8 @@ int unic_query_vport_ctx(struct unic_dev *unic_dev, u16 offset, struct unic_vport_ctx_cmd *resp); int unic_set_fec_mode(struct unic_dev *unic_dev, u32 fec_mode); int unic_update_fec_stats(struct unic_dev *unic_dev); +int unic_set_vlan_filter_hw(struct unic_dev *unic_dev, bool filter_en); +int unic_set_port_vlan_hw(struct unic_dev *unic_dev, u16 vlan_id, bool is_kill); int unic_set_rss_tc_mode(struct unic_dev *unic_dev, u8 tc_vaild); int unic_query_rss_cfg(struct unic_dev *unic_dev, struct unic_cfg_rss_cmd *resp); diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index a9d8240d59b0..135a4745500b 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -27,6 +27,7 @@ #include "unic_rx.h" #include "unic_tx.h" #include "unic_txrx.h" +#include "unic_vlan.h" #include "unic_netdev.h" #include "unic_rack_ip.h" diff --git a/drivers/net/ub/unic/unic_vlan.c b/drivers/net/ub/unic/unic_vlan.c new file mode 100644 index 000000000000..4a2e10f32272 --- /dev/null +++ b/drivers/net/ub/unic/unic_vlan.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid + +#include "unic_hw.h" +#include "unic_vlan.h" + +static int unic_init_vlan_filter(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + + vlan_tbl->cur_vlan_fltr_en = false; + + INIT_LIST_HEAD(&vlan_tbl->vlan_list); + + return unic_set_vlan_filter_hw(unic_dev, false); +} + +int unic_init_vlan_config(struct unic_dev *unic_dev) +{ + int ret; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return 0; + + ret = unic_init_vlan_filter(unic_dev); + if (ret) + return ret; + + return unic_set_vlan_table(unic_dev, htons(ETH_P_8021Q), 0, true); +} + +void unic_uninit_vlan_config(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + struct list_head tmp_del_list; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return; + + INIT_LIST_HEAD(&tmp_del_list); + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) + list_move_tail(&vlan->node, &tmp_del_list); + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &tmp_del_list, node) { + (void)unic_set_port_vlan_hw(unic_dev, vlan->vlan_id, false); + list_del(&vlan->node); + kfree(vlan); + } +} + +static bool unic_need_update_port_vlan(struct unic_dev *unic_dev, u16 vlan_id, + bool is_add) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + bool exist = false; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) + if (vlan->vlan_id == vlan_id) { + exist = true; + break; + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + /* vlan 0 may be added twice when 8021q module is enabled */ + if (is_add && !vlan_id && exist) + return false; + + if (is_add && exist) { + dev_warn(unic_dev->comdev.adev->dev.parent, + "failed to add port vlan(%u), which is already in hw.\n", + vlan_id); + return false; + } + + if (!is_add && !exist) { + dev_warn(unic_dev->comdev.adev->dev.parent, + "failed to delete port vlan(%u), which is not in hw.\n", + vlan_id); + return false; + } + + return true; +} + +static int unic_set_port_vlan(struct unic_dev *unic_dev, u16 vlan_id, + bool is_add) +{ + if (!is_add && !vlan_id) + return 0; + + if (!unic_need_update_port_vlan(unic_dev, vlan_id, is_add)) + return 0; + + return unic_set_port_vlan_hw(unic_dev, vlan_id, is_add); +} + +static void unic_add_vlan_table(struct unic_dev *unic_dev, u16 vlan_id) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id == vlan_id) + goto out; + } + + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); + if (!vlan) + goto out; + + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vlan_tbl->vlan_list); + +out: + spin_unlock_bh(&vlan_tbl->vlan_lock); +} + +static void unic_rm_vlan_table(struct unic_dev *unic_dev, u16 vlan_id) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + list_del(&vlan->node); + kfree(vlan); + break; + } + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); +} + +static void 
unic_set_vlan_filter_change(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + if (unic_dev_cfg_vlan_filter_supported(unic_dev)) + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, &vport->state); +} + +int unic_set_vlan_table(struct unic_dev *unic_dev, __be16 proto, u16 vlan_id, + bool is_add) +{ +#define UNIC_MAX_VLAN_ID 4095 + + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + int ret; + + if (vlan_id > UNIC_MAX_VLAN_ID) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + if (is_add && test_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + } else if (test_bit(UNIC_STATE_RESETTING, &unic_dev->state) && + !is_add) { + set_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + spin_unlock_bh(&vlan_tbl->vlan_lock); + return -EBUSY; + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + ret = unic_set_port_vlan(unic_dev, vlan_id, is_add); + if (!ret) { + if (is_add) + unic_add_vlan_table(unic_dev, vlan_id); + else if (!is_add && vlan_id != 0) + unic_rm_vlan_table(unic_dev, vlan_id); + } else if (!is_add) { + /* when removing the hw vlan filter fails, record the vlan id + * and try to remove it from hw later, to stay consistent + * with the stack. + */ + spin_lock_bh(&vlan_tbl->vlan_lock); + set_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + spin_unlock_bh(&vlan_tbl->vlan_lock); + } + + unic_set_vlan_filter_change(unic_dev); + + return ret; +} + +static bool unic_need_enable_vlan_filter(struct unic_dev *unic_dev, bool enable) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + if ((unic_dev->netdev_flags & UNIC_USER_UPE) || !enable) + return false; + + spin_lock_bh(&vlan_tbl->vlan_lock); + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id != 0) { + spin_unlock_bh(&vlan_tbl->vlan_lock); + return true; + } + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + return false; +} + +int unic_set_vlan_filter(struct unic_dev *unic_dev, bool enable) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + bool need_en; + int ret = 0; + + need_en = unic_need_enable_vlan_filter(unic_dev, enable); + if (need_en == vlan_tbl->cur_vlan_fltr_en) + return ret; + + ret = unic_set_vlan_filter_hw(unic_dev, need_en); + if (ret) + return ret; + + vlan_tbl->cur_vlan_fltr_en = need_en; + + return ret; +} + +static void unic_sync_vlan_filter_state(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + int ret; + + if (!test_and_clear_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + &vport->state)) + return; + + ret = unic_set_vlan_filter(unic_dev, true); + if (ret) { + unic_err(unic_dev, + "failed to sync vlan filter state, ret = %d.\n", ret); + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, &vport->state); + } +} + +static u16 unic_find_del_fail_vlan(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + u16 vlan_id; + + spin_lock_bh(&vlan_tbl->vlan_lock); + vlan_id = find_first_bit(vlan_tbl->vlan_del_fail_bmap, VLAN_N_VID); + spin_unlock_bh(&vlan_tbl->vlan_lock); + + return vlan_id; +} + +void unic_sync_vlan_filter(struct unic_dev *unic_dev) +{ +#define UNIC_MAX_SYNC_COUNT 60 + + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + int ret, sync_cnt = 0; + u16 vlan_id; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return; + + vlan_id = unic_find_del_fail_vlan(unic_dev);
while (vlan_id != VLAN_N_VID) { + ret = unic_set_port_vlan(unic_dev, vlan_id, false); + if (ret) + break; + + clear_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + unic_rm_vlan_table(unic_dev, vlan_id); + unic_set_vlan_filter_change(unic_dev); + + if (++sync_cnt >= UNIC_MAX_SYNC_COUNT) + break; + + vlan_id = unic_find_del_fail_vlan(unic_dev); + } + + unic_sync_vlan_filter_state(unic_dev); +} diff --git a/drivers/net/ub/unic/unic_vlan.h b/drivers/net/ub/unic/unic_vlan.h new file mode 100644 index 000000000000..3842bfd05db2 --- /dev/null +++ b/drivers/net/ub/unic/unic_vlan.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UNIC_VLAN_H__ +#define __UNIC_VLAN_H__ + +#include "unic_dev.h" + +int unic_init_vlan_config(struct unic_dev *unic_dev); +void unic_uninit_vlan_config(struct unic_dev *unic_dev); +int unic_set_vlan_table(struct unic_dev *unic_dev, __be16 proto, u16 vlan_id, + bool is_add); +int unic_set_vlan_filter(struct unic_dev *unic_dev, bool enable); +void unic_sync_vlan_filter(struct unic_dev *unic_dev); + +#endif -- Gitee From c89f4cf3e156a785e73d9d44d797de3537bdd026 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 15:59:37 +0800 Subject: [PATCH 198/243] net: unic: Integrate vlan filtering features with network device operations. commit e93b2bde002080864f23cd30e1ab809e3eb8872e openEuler This patch integrates VLAN filtering with the network device operations in the UNIC driver. It adds support for adding and removing VLANs, as well as enabling/disabling VLAN filtering, through the network device operations. The patch also ensures that the VLAN filtering capability stays synchronized with the device's features. Key changes include: 1. Adding `unic_vlan_rx_add_vid` and `unic_vlan_rx_kill_vid` functions to handle VLAN addition and removal. 2. Implementing `unic_set_features` to enable or disable VLAN filtering based on the device's features. 3. Integrating the new functions with the `net_device_ops` structure. This integration ensures that VLAN filtering operations are handled seamlessly by the network stack, providing smooth and efficient VLAN management for the UNIC driver.
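For reference, the stack only exercises these callbacks once the driver advertises hardware VLAN filtering; a minimal sketch of that wiring, assuming the UNIC driver sets the feature bit during netdev initialization (the exact location is outside this patch):

	/* Illustrative only, not part of this patch: advertising
	 * NETIF_F_HW_VLAN_CTAG_FILTER makes the 8021q core invoke
	 * ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() as VLAN devices
	 * are created and destroyed, and routes
	 * "ethtool -K <dev> rx-vlan-filter on|off" through
	 * ndo_set_features().
	 */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;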
Signed-off-by: Guangwei Zhang Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_netdev.c | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index 135a4745500b..bc19e45bee15 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -605,6 +605,44 @@ static u16 unic_select_queue(struct net_device *netdev, struct sk_buff *skb, return netdev_pick_tx(netdev, skb, sb_dev); } +static int unic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vlan_id) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + + return unic_set_vlan_table(unic_dev, proto, vlan_id, true); +} + +static int unic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vlan_id) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + + return unic_set_vlan_table(unic_dev, proto, vlan_id, false); +} + +static int unic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct unic_dev *unic_dev = netdev_priv(netdev); + bool enable; + int ret; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return 0; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + ret = unic_set_vlan_filter(unic_dev, enable); + if (ret) + return ret; + } + + return 0; +} + static const struct net_device_ops unic_netdev_ops = { .ndo_get_stats64 = unic_get_stats64, .ndo_start_xmit = unic_start_xmit, @@ -614,6 +652,9 @@ static const struct net_device_ops unic_netdev_ops = { .ndo_stop = unic_net_stop, .ndo_set_rx_mode = unic_set_rx_mode, .ndo_select_queue = unic_select_queue, + .ndo_vlan_rx_add_vid = unic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = unic_vlan_rx_kill_vid, + .ndo_set_features = unic_set_features, }; void unic_set_netdev_ops(struct net_device *netdev) -- Gitee From 03dbaf759ed0fffa063deafd58e2232d24614e00 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 16:44:32 +0800 Subject: [PATCH 199/243] net: unic: Add debugfs support for dumping the VLAN table commit 4dbb83c60d3b9e9d7a703176b8fe786f7ad82e1d openEuler This patch adds support for dumping the VLAN table through DebugFS in the UNIC driver. It introduces new data structures and functions to query and display the VLAN table entries. The patch also modifies the initialization and cleanup routines to handle VLAN configurations and integrates with the existing network device operations. Key changes include: 1. Adding new structures to manage VLAN configurations and states. 2. Implementing functions to query and display the VLAN table entries. 3. Integrating the new functions with the DebugFS framework. 4. Adding support for VLAN filtering in the UNIC hardware interface. This enhancement provides a convenient way to view and debug VLAN configurations in the UNIC driver, allowing for better network segmentation and traffic management.
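The hardware dump is paginated: each query command carries a start index, and the firmware returns at most UNIC_DBG_VLAN_NUM entries together with the index to resume from. A hedged sketch of the caller loop assumed behind unic_common_query_addr_list() (that helper's body is not in this patch, so the loop below is illustrative only):

	/* Walk the whole vlan table, vlan_tbl_size entries in total,
	 * in chunks of at most UNIC_DBG_VLAN_NUM entries per command.
	 */
	struct list_head list;
	bool complete = false;
	u32 idx = 0, queried = 0;
	int ret = 0;

	INIT_LIST_HEAD(&list);
	while (!ret && queried < unic_dev->caps.vlan_tbl_size) {
		/* unic_query_vlan_list_hw() advances idx to the resume
		 * index returned by the firmware.
		 */
		ret = unic_query_vlan_list_hw(unic_dev, &idx, &list,
					      &complete);
		queried += UNIC_DBG_VLAN_NUM;
	}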
Signed-off-by: Guangwei Zhang Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 12 +++ drivers/net/ub/unic/debugfs/unic_debugfs.h | 1 + .../net/ub/unic/debugfs/unic_entry_debugfs.c | 91 +++++++++++++++++++ .../net/ub/unic/debugfs/unic_entry_debugfs.h | 21 +++++ drivers/net/ub/unic/unic_hw.c | 1 + 5 files changed, 126 insertions(+) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index d07d94140829..3701a7df575b 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -129,6 +129,7 @@ static void unic_dbg_dump_caps(struct unic_dev *unic_dev, struct seq_file *s) u32 caps_info; } unic_caps_info[] = { {"\ttotal_ip_tbl_size: %hu\n", unic_caps->total_ip_tbl_size}, + {"\tvlan_tbl_size: %u\n", unic_caps->vlan_tbl_size}, {"\tmax_trans_unit: %hu\n", unic_caps->max_trans_unit}, {"\tmin_trans_unit: %hu\n", unic_caps->min_trans_unit}, {"\tvport_buf_size: %u\n", unic_caps->vport_buf_size}, @@ -332,6 +333,10 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { .name = "qos", .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, .support = unic_dbg_dentry_support, + }, { + .name = "vlan_tbl", + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, }, { .name = "mac_tbl", .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, @@ -423,6 +428,13 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_mac_tbl_list_hw, + }, { + .name = "vlan_tbl_list_hw", + .dentry_index = UNIC_DBG_DENTRY_VLAN, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_vlan_tbl_list_hw, }, { .name = "page_pool_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 1feba9d78f83..869be0526ae9 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -17,6 +17,7 @@ enum unic_dbg_dentry_type { UNIC_DBG_DENTRY_CONTEXT, UNIC_DBG_DENTRY_VPORT, UNIC_DBG_DENTRY_QOS, + UNIC_DBG_DENTRY_VLAN, UNIC_DBG_DENTRY_MAC, /* must be the last entry. 
*/ UNIC_DBG_DENTRY_ROOT diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c index 8313fd24bba7..c6373196bd4e 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c @@ -195,3 +195,94 @@ int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data) return ret; } + +static int unic_query_vlan_list_hw(struct unic_dev *unic_dev, u32 *idx, + struct list_head *list, bool *complete) +{ + struct unic_dbg_vlan_entry *vlan_entry; + struct unic_dbg_vlan_node *vlan_node; + struct unic_dbg_vlan_head req = {0}; + struct unic_dbg_vlan_head *resp; + struct ubase_cmd_buf in, out; + u16 vlan_cnt, i; + int ret; + + resp = kzalloc(UNIC_QUERY_VLAN_LEN, GFP_ATOMIC); + if (!resp) + return -ENOMEM; + + vlan_entry = resp->vlan_entry; + req.idx = cpu_to_le16((u16)*idx); + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_VLAN_TBL, true, + sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_VLAN_TBL, true, + UNIC_QUERY_VLAN_LEN, resp); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret && ret != -EPERM) { + unic_err(unic_dev, "failed to query vlan hw tbl, ret = %d.\n", + ret); + goto err_out; + } + + vlan_cnt = le16_to_cpu(resp->vlan_cnt); + if (vlan_cnt > UNIC_DBG_VLAN_NUM) { + ret = -EINVAL; + unic_err(unic_dev, "invalid vlan_cnt(%u).\n", vlan_cnt); + goto err_out; + } + + for (i = 0; i < vlan_cnt; i++) { + vlan_node = kzalloc(sizeof(*vlan_node), GFP_ATOMIC); + if (!vlan_node) { + ret = -ENOMEM; + goto err_out; + } + vlan_node->ue_id = le16_to_cpu(vlan_entry[i].ue_id); + vlan_node->vlan_id = le16_to_cpu(vlan_entry[i].vlan_id); + list_add_tail(&vlan_node->node, list); + } + + *idx = le16_to_cpu(resp->idx); + +err_out: + kfree(resp); + + return ret; +} + +int unic_dbg_dump_vlan_tbl_list_hw(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_dbg_vlan_node *vlan_node, *tmp_node; + struct list_head list; + int ret, cnt = 0; + u32 size; + + ret = unic_dbg_check_dev_state(unic_dev); + if (ret) + return ret; + + INIT_LIST_HEAD(&list); + size = unic_dev->caps.vlan_tbl_size; + ret = unic_common_query_addr_list(unic_dev, size, UNIC_DBG_VLAN_NUM, + &list, unic_query_vlan_list_hw); + if (ret) + goto release_list; + + seq_puts(s, "No UE_ID VLAN_ID\n"); + + list_for_each_entry(vlan_node, &list, node) { + seq_printf(s, "%-7d", cnt++); + seq_printf(s, "%-13u", vlan_node->ue_id); + seq_printf(s, "%-12u\n", vlan_node->vlan_id); + } + +release_list: + list_for_each_entry_safe(vlan_node, tmp_node, &list, node) { + list_del(&vlan_node->node); + kfree(vlan_node); + } + + return ret; +} diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h index 3db8e90eaa72..d4d25c41d42b 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -17,8 +17,11 @@ #define UNIC_BITMAP_LEN 8 #define UNIC_DBG_MAC_NUM 16 +#define UNIC_DBG_VLAN_NUM 250 #define UNIC_QUERY_MAC_LEN (sizeof(struct unic_dbg_mac_head) + \ sizeof(struct unic_dbg_mac_entry) * UNIC_DBG_MAC_NUM) +#define UNIC_QUERY_VLAN_LEN (sizeof(struct unic_dbg_vlan_head) + \ + sizeof(struct unic_dbg_vlan_entry) * UNIC_DBG_VLAN_NUM) struct unic_dbg_mac_entry { u8 mac_addr[ETH_ALEN]; @@ -32,6 +35,23 @@ struct unic_dbg_mac_head { struct unic_dbg_mac_entry mac_entry[]; }; +struct unic_dbg_vlan_entry { + __le16 ue_id; + __le16 vlan_id; +}; + +struct unic_dbg_vlan_head { + __le16 
idx; + __le16 vlan_cnt; + struct unic_dbg_vlan_entry vlan_entry[]; +}; + +struct unic_dbg_vlan_node { + struct list_head node; + u16 ue_id; + u16 vlan_id; +}; + struct unic_dbg_comm_addr_node { struct list_head node; u16 ue_id; @@ -52,6 +72,7 @@ struct unic_dbg_comm_addr_node { int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data); int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data); +int unic_dbg_dump_vlan_tbl_list_hw(struct seq_file *s, void *data); int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data); #endif /* _UNIC_ENTRY_DEBUGFS_H */ diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 0fafc8369d20..40fa7f50560d 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -527,6 +527,7 @@ static void unic_parse_dev_caps(struct unic_dev *unic_dev, caps->total_ip_tbl_size = le16_to_cpu(resp->total_ip_tbl_size); caps->uc_mac_tbl_size = le32_to_cpu(resp->uc_mac_tbl_size); caps->mc_mac_tbl_size = le32_to_cpu(resp->mc_mac_tbl_size); + caps->vlan_tbl_size = le32_to_cpu(resp->vlan_tbl_size); caps->max_trans_unit = le16_to_cpu(resp->max_trans_unit); caps->min_trans_unit = le16_to_cpu(resp->min_trans_unit); caps->vport_buf_size = le16_to_cpu(resp->vport_buf_size) * KB; -- Gitee From aa8f4fcd4142a2f58df4786c11a0fbac2d3eb773 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 26 Nov 2025 11:03:36 +0800 Subject: [PATCH 200/243] net: unic: Support MAC filtering and allow users to add and delete MAC addresses commit 8173414e9f4ebb206d5efcc43202c2e65af5391f openEuler This patch adds the following functionality: 1. Currently, the UNIC driver uses the random MAC address mode and does not obtain the fixed MAC address of the device. In this submission, the fixed MAC address is obtained during initialization. Only the MUE can obtain the fixed MAC address; in other cases, the obtained MAC address is all zeros and a random MAC address is generated in the driver. 2. This commit adds the ability to modify the unicast MAC address and to configure unicast/multicast MAC addresses. It implements the .ndo_set_mac_address and .ndo_set_rx_mode interfaces.
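The rx-mode path relies on the standard __dev_uc_sync()/__dev_mc_sync() contract: the core walks netdev->uc and netdev->mc, calling the add callback for addresses newly present and the del callback for addresses the stack has dropped. The callbacks only update the driver's shadow lists and set UNIC_VPORT_STATE_MAC_TBL_CHANGE; the periodic service task flushes the lists to the hardware MAC table later. The call site added in this patch boils down to:

	/* From unic_set_rx_mode(); the callbacks must not sleep, hence
	 * the deferred hardware write via unic_sync_mac_table().
	 */
	__dev_uc_sync(netdev, unic_add_uc_mac, unic_del_uc_mac);
	__dev_mc_sync(netdev, unic_add_mc_mac, unic_del_mc_mac);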
Signed-off-by: jianqiang Li Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/unic.h | 8 +- drivers/net/ub/unic/unic_cmd.h | 14 + drivers/net/ub/unic/unic_comm_addr.c | 1 + drivers/net/ub/unic/unic_comm_addr.h | 16 + drivers/net/ub/unic/unic_dev.c | 28 +- drivers/net/ub/unic/unic_dev.h | 5 + drivers/net/ub/unic/unic_event.c | 7 + drivers/net/ub/unic/unic_hw.c | 9 + drivers/net/ub/unic/unic_hw.h | 3 + drivers/net/ub/unic/unic_mac.c | 599 +++++++++++++++++++++++++++ drivers/net/ub/unic/unic_mac.h | 27 ++ drivers/net/ub/unic/unic_netdev.c | 90 +++- drivers/net/ub/unic/unic_reset.c | 4 + drivers/ub/ubase/ubase_dev.c | 53 +++ drivers/ub/ubase/ubase_dev.h | 2 + include/ub/ubase/ubase_comm_dev.h | 4 + 17 files changed, 860 insertions(+), 12 deletions(-) create mode 100644 drivers/net/ub/unic/unic_mac.c create mode 100644 drivers/net/ub/unic/unic_mac.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 24a19961a5a3..3d8d175c2984 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -9,6 +9,6 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o -unic-objs += unic_lb.o unic_vlan.o +unic-objs += unic_lb.o unic_vlan.o unic_mac.o unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index af11cefb6577..f2aef47451bf 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -47,10 +47,14 @@ enum { #define UNIC_USER_BPE BIT(2) /* broadcast promisc enabled by user */ #define UNIC_OVERFLOW_MGP BIT(3) /* mulitcast guid overflow */ #define UNIC_OVERFLOW_IPP BIT(4) /* unicast ip overflow */ +#define UNIC_OVERFLOW_UP_MAC BIT(5) /* unicast mac overflow */ +#define UNIC_OVERFLOW_MP_MAC BIT(6) /* multicast mac overflow */ #define UNIC_UPE (UNIC_USER_UPE | \ - UNIC_OVERFLOW_IPP) + UNIC_OVERFLOW_IPP | \ + UNIC_OVERFLOW_UP_MAC) #define UNIC_MPE (UNIC_USER_MPE | \ - UNIC_OVERFLOW_MGP) + UNIC_OVERFLOW_MGP | \ + UNIC_OVERFLOW_MP_MAC) #define UNIC_SUPPORT_APP_LB BIT(0) #define UNIC_SUPPORT_SERIAL_SERDES_LB BIT(1) diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index 5d3a180f8f3b..4161deeaf2f1 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -152,6 +152,20 @@ struct unic_query_flush_status_resp { u8 rsv[23]; }; +struct unic_query_mac_addr_resp { + u8 mac[ETH_ALEN]; + u8 rsv[18]; +}; + +struct unic_mac_tbl_entry_cmd { + u8 resp_code; + u8 mac_type; + u8 is_pfc; + u8 rsvd0; + u8 mac_addr[ETH_ALEN]; + u8 rsvd1[14]; +}; + struct unic_vlan_filter_cfg_cmd { u16 vlan_id; u8 is_add; diff --git a/drivers/net/ub/unic/unic_comm_addr.c b/drivers/net/ub/unic/unic_comm_addr.c index cc2822453614..9edfd9c351ae 100644 --- a/drivers/net/ub/unic/unic_comm_addr.c +++ b/drivers/net/ub/unic/unic_comm_addr.c @@ -180,6 +180,7 @@ bool unic_comm_sync_addr_table(struct unic_vport *vport, memcpy(new_node->unic_addr, addr_node->unic_addr, UNIC_ADDR_LEN); new_node->state = addr_node->state; + new_node->is_pfc = addr_node->is_pfc; new_node->node_mask = 
addr_node->node_mask; list_add_tail(&new_node->node, &tmp_add_list); break; diff --git a/drivers/net/ub/unic/unic_comm_addr.h b/drivers/net/ub/unic/unic_comm_addr.h index ae390e4a7f07..fd142d51b21c 100644 --- a/drivers/net/ub/unic/unic_comm_addr.h +++ b/drivers/net/ub/unic/unic_comm_addr.h @@ -22,16 +22,32 @@ enum UNIC_COMM_ADDR_STATE { #define UNIC_IPV4_PREFIX 0xffff0000 #define UNIC_ADDR_LEN 16 + struct unic_comm_addr_node { struct list_head node; enum UNIC_COMM_ADDR_STATE state; union { u8 unic_addr[UNIC_ADDR_LEN]; struct in6_addr ip_addr; + u8 mac_addr[ETH_ALEN]; }; + u8 is_pfc; u16 node_mask; }; +#define UNIC_FORMAT_MAC_LEN 18 +#define UNIC_FORMAT_MAC_OFFSET_0 0 +#define UNIC_FORMAT_MAC_OFFSET_4 4 +#define UNIC_FORMAT_MAC_OFFSET_5 5 +static inline void unic_comm_format_mac_addr(char *format_mac, const u8 *mac) +{ + (void)snprintf(format_mac, UNIC_FORMAT_MAC_LEN, + "%02x:**:**:**:%02x:%02x", + mac[UNIC_FORMAT_MAC_OFFSET_0], + mac[UNIC_FORMAT_MAC_OFFSET_4], + mac[UNIC_FORMAT_MAC_OFFSET_5]); +} + static inline bool unic_comm_addr_equal(const u8 *addr1, const u8 *addr2, u16 mask1, u16 mask2) { diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 9e083873a119..23da49fa6777 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -24,6 +24,7 @@ #include "unic_guid.h" #include "unic_hw.h" #include "unic_qos_hw.h" +#include "unic_mac.h" #include "unic_netdev.h" #include "unic_rack_ip.h" #include "unic_vlan.h" @@ -619,6 +620,12 @@ static void unic_uninit_mac(struct unic_dev *unic_dev) mutex_destroy(&record->lock); } +static void unic_uninit_dev_addr(struct unic_dev *unic_dev) +{ + if (unic_dev_eth_mac_supported(unic_dev)) + unic_uninit_mac_addr(unic_dev); +} + int unic_set_mtu(struct unic_dev *unic_dev, int new_mtu) { u16 max_frame_size; @@ -664,6 +671,10 @@ static void unic_periodic_service_task(struct unic_dev *unic_dev) unic_link_status_update(unic_dev); unic_update_port_info(unic_dev); unic_sync_rack_ip_table(unic_dev); + + if (unic_dev_eth_mac_supported(unic_dev)) + unic_sync_mac_table(unic_dev); + unic_sync_promisc_mode(unic_dev); unic_sync_vlan_filter(unic_dev); @@ -690,6 +701,12 @@ static void unic_init_vport_info(struct unic_dev *unic_dev) spin_lock_init(&unic_dev->vport.addr_tbl.tmp_ip_lock); INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.ip_list); spin_lock_init(&unic_dev->vport.addr_tbl.ip_list_lock); + + if (unic_dev_eth_mac_supported(unic_dev)) { + INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.uc_mac_list); + INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.mc_mac_list); + spin_lock_init(&unic_dev->vport.addr_tbl.mac_list_lock); + } } static int unic_alloc_vport_buf(struct unic_dev *unic_dev) @@ -785,8 +802,10 @@ static void unic_uninit_vport(struct unic_dev *unic_dev) { unic_uninit_rack_ip_table(unic_dev); - if (unic_dev_eth_mac_supported(unic_dev)) + if (unic_dev_eth_mac_supported(unic_dev)) { + unic_uninit_mac_table(unic_dev); unic_uninit_vlan_config(unic_dev); + } unic_uninit_vport_buf(unic_dev); } @@ -808,7 +827,7 @@ static int unic_init_dev_addr(struct unic_dev *unic_dev) if (unic_dev_ubl_supported(unic_dev)) return unic_init_guid(unic_dev); - return 0; + return unic_init_mac_addr(unic_dev); } static int unic_init_netdev_priv(struct net_device *netdev, @@ -848,7 +867,7 @@ static int unic_init_netdev_priv(struct net_device *netdev, ret = unic_init_channels_attr(priv); if (ret) - goto unic_unint_mac; + goto err_uninit_dev_addr; ret = unic_init_channels(priv, priv->channels.num); if (ret) { @@ -862,6 +881,8 @@ static int 
unic_init_netdev_priv(struct net_device *netdev, err_uninit_channels_attr: unic_uninit_channels_attr(priv); +err_uninit_dev_addr: + unic_uninit_dev_addr(priv); unic_unint_mac: unic_uninit_mac(priv); err_uninit_vport: @@ -878,6 +899,7 @@ static void unic_uninit_netdev_priv(struct net_device *netdev) unic_uninit_channels(priv); unic_uninit_channels_attr(priv); + unic_uninit_dev_addr(priv); unic_uninit_mac(priv); unic_uninit_vport(priv); mutex_destroy(&priv->act_info.mutex); diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 1d79f1924791..2d1830af72f0 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -44,6 +44,7 @@ enum unic_vport_state { UNIC_VPORT_STATE_PROMISC_CHANGE, UNIC_VPORT_STATE_IP_TBL_CHANGE, UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + UNIC_VPORT_STATE_MAC_TBL_CHANGE, UNIC_VPORT_STATE_IP_QUERYING, }; @@ -208,6 +209,10 @@ struct unic_addr_tbl { spinlock_t tmp_ip_lock; /* protect ip address from controller */ struct list_head tmp_ip_list; /* Store temprary ip table */ + + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; /* store unicast mac table */ + struct list_head mc_mac_list; /* store multicast mac table */ }; struct unic_vlan_tbl { diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 1785e0aad7f1..b26dd89c6c0b 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -20,6 +20,7 @@ #include "unic_dcbnl.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_mac.h" #include "unic_netdev.h" #include "unic_qos_hw.h" #include "unic_rack_ip.h" @@ -86,6 +87,9 @@ static void unic_activate_event_process(struct unic_dev *unic_dev) else clear_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &unic_dev->vport.state); + if (unic_dev_eth_mac_supported(unic_dev)) + unic_activate_mac_table(unic_dev); + out: mutex_lock(&act_info->mutex); act_info->deactivate = false; @@ -119,6 +123,9 @@ static void unic_deactivate_event_process(struct unic_dev *unic_dev) act_info->deactivate = true; mutex_unlock(&act_info->mutex); + if (unic_dev_eth_mac_supported(unic_dev)) + unic_deactivate_mac_table(unic_dev); + ret = unic_activate_promisc_mode(unic_dev, false); if (ret) unic_warn(unic_dev, "failed to close promisc, ret = %d.\n", ret); diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 40fa7f50560d..02033c4637d5 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -342,6 +342,10 @@ static void unic_setup_promisc_req(struct unic_promisc_cfg_cmd *req, req->promisc_mc_ind = 1; req->promisc_rx_mc_en = promisc_en->en_mc; + + req->promisc_rx_uc_mac_en = promisc_en->en_uc_mac; + req->promisc_rx_mc_mac_en = promisc_en->en_mc_mac; + req->promisc_rx_bc_en = promisc_en->en_bc; } int unic_get_promisc_mode(struct unic_dev *unic_dev, @@ -372,6 +376,9 @@ int unic_set_promisc_mode(struct unic_dev *unic_dev, u32 time_out; int ret; + if (!unic_dev_ubl_supported(unic_dev)) + promisc_en->en_bc = 1; + unic_setup_promisc_req(&req, promisc_en); ubase_fill_inout_buf(&in, UBASE_OPC_CFG_PROMISC_MODE, false, @@ -390,6 +397,8 @@ void unic_fill_promisc_en(struct unic_promisc_en *promisc_en, u8 flags) { promisc_en->en_uc_ip = !!(flags & UNIC_UPE); promisc_en->en_mc = !!(flags & UNIC_MPE); + promisc_en->en_uc_mac = !!(flags & UNIC_UPE); + promisc_en->en_mc_mac = !!(flags & UNIC_MPE); } int unic_activate_promisc_mode(struct unic_dev *unic_dev, bool activate) diff --git a/drivers/net/ub/unic/unic_hw.h 
b/drivers/net/ub/unic/unic_hw.h index 68db5cf9ebe7..59ac0ee4fa36 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -55,6 +55,9 @@ struct unic_promisc_en { u8 en_uc_ip; u8 en_uc_guid; u8 en_mc; + u8 en_uc_mac; + u8 en_mc_mac; + u8 en_bc; }; #define UNIC_RSS_MAX_CNT 10U diff --git a/drivers/net/ub/unic/unic_mac.c b/drivers/net/ub/unic/unic_mac.c new file mode 100644 index 000000000000..e3eb43f366df --- /dev/null +++ b/drivers/net/ub/unic/unic_mac.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei UNIC Linux driver + * Copyright (c) 2024-2025 Hisilicon Limited. + * + */ + +#define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid + +#include +#include + +#include "unic.h" +#include "unic_comm_addr.h" +#include "unic_cmd.h" +#include "unic_mac.h" + +int unic_cfg_mac_address(struct unic_dev *unic_dev, u8 *mac_addr) +{ + struct unic_comm_addr_node *new_node, *old_node; + struct unic_vport *vport = &unic_dev->vport; + u8 *old_mac = unic_dev->hw.mac.mac_addr; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct list_head *list; + + list = &vport->addr_tbl.uc_mac_list; + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + new_node = unic_comm_find_addr_node(list, mac_addr, + UNIC_COMM_ADDR_NO_MASK); + if (new_node) { + if (new_node->state == UNIC_COMM_ADDR_TO_DEL) + new_node->state = UNIC_COMM_ADDR_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after reset. + */ + new_node->is_pfc = 1; + list_move(&new_node->node, list); + } else { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) { + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + return -ENOMEM; + } + + new_node->state = UNIC_COMM_ADDR_TO_ADD; + new_node->is_pfc = 1; + ether_addr_copy(new_node->mac_addr, mac_addr); + list_add_tail(&new_node->node, list); + } + + ether_addr_copy(unic_addr, old_mac); + old_node = unic_comm_find_addr_node(list, unic_addr, + UNIC_COMM_ADDR_NO_MASK); + if (old_node) { + if (old_node->state == UNIC_COMM_ADDR_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = UNIC_COMM_ADDR_TO_DEL; + old_node->is_pfc = 0; + } + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + ether_addr_copy(unic_dev->hw.mac.mac_addr, mac_addr); + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + + return 0; +} + +static int unic_add_mac_addr_common(struct unic_vport *vport, u8 *mac_addr, + enum unic_mac_addr_type mac_type, + u8 is_pfc) +{ + struct unic_mac_promisc { + const char *type_str; + u8 promisc_mode; + } promisc[] = { + [UNIC_MAC_ADDR_UC] = {"unicast", UNIC_OVERFLOW_UP_MAC}, + [UNIC_MAC_ADDR_MC] = {"multicast", UNIC_OVERFLOW_MP_MAC}, + }; + + struct auxiliary_device *adev = vport->back->comdev.adev; + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + struct unic_mac_tbl_entry_cmd resp = {0}; + struct unic_mac_tbl_entry_cmd req = {0}; + u8 format_mac[UNIC_FORMAT_MAC_LEN]; + struct ubase_cmd_buf in, out; + int ret; + + req.mac_type = mac_type; + req.is_pfc = is_pfc; + ether_addr_copy(req.mac_addr, mac_addr); + unic_comm_format_mac_addr(format_mac, mac_addr); + ubase_fill_inout_buf(&in, UBASE_OPC_ADD_MAC_TBL, false, sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_ADD_MAC_TBL, true, sizeof(resp), &resp); + ret = ubase_cmd_send_inout(adev, &in, &out); + ret = ret ? 
ret : -resp.resp_code; + if (!ret) { + return 0; + } else if (ret == -EEXIST && mac_type == UNIC_MAC_ADDR_UC) { + unic_info(unic_dev, "mac addr(%s) exists.\n", format_mac); + return -EEXIST; + } else if (ret != -ENOSPC) { + unic_err(unic_dev, + "failed to add mac addr(%s), ret = %d.\n", format_mac, + ret); + return ret; + } + + if (!(vport->overflow_promisc_flags & promisc[mac_type].promisc_mode)) + unic_warn(unic_dev, "%s mac table is full.\n", + promisc[mac_type].type_str); + + return ret; +} + +static int unic_del_mac_addr_common(struct unic_vport *vport, u8 *mac_addr, + enum unic_mac_addr_type mac_type, + u8 is_pfc) +{ + struct auxiliary_device *adev = vport->back->comdev.adev; + struct unic_mac_tbl_entry_cmd resp = {0}; + struct unic_mac_tbl_entry_cmd req = {0}; + u8 format_mac[UNIC_FORMAT_MAC_LEN]; + struct ubase_cmd_buf in, out; + u32 time_out; + int ret; + + req.mac_type = mac_type; + req.is_pfc = is_pfc; + ether_addr_copy(req.mac_addr, mac_addr); + ubase_fill_inout_buf(&in, UBASE_OPC_DEL_MAC_TBL, false, sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_DEL_MAC_TBL, true, sizeof(resp), &resp); + time_out = unic_cmd_timeout(vport->back); + ret = ubase_cmd_send_inout_ex(adev, &in, &out, time_out); + ret = ret ? ret : -resp.resp_code; + if (ret) { + unic_comm_format_mac_addr(format_mac, mac_addr); + dev_err(adev->dev.parent, "failed to rm mac addr(%s), ret = %d.\n", + format_mac, ret); + } + + return ret; +} + +static void unic_sync_mac_list(struct unic_vport *vport, struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_add_mac_addr_common(vport, mac_node->mac_addr, mac_type, + mac_node->is_pfc); + if (!ret) { + mac_node->state = UNIC_COMM_ADDR_ACTIVE; + } else { + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + if ((mac_type == UNIC_MAC_ADDR_UC && ret != -EEXIST) || + (mac_type == UNIC_MAC_ADDR_MC && ret != -ENOSPC)) + break; + } + } +} + +static void unic_unsync_mac_list(struct unic_vport *vport, + struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_del_mac_addr_common(vport, mac_node->mac_addr, mac_type, + mac_node->is_pfc); + if (!ret) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + break; + } + } +} + +static void unic_sync_uc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_sync_mac_list(vport, list, UNIC_MAC_ADDR_UC); +} + +static void unic_sync_mc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_sync_mac_list(vport, list, UNIC_MAC_ADDR_MC); +} + +static void unic_unsync_uc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_unsync_mac_list(vport, list, UNIC_MAC_ADDR_UC); +} + +static void unic_unsync_mc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_unsync_mac_list(vport, list, UNIC_MAC_ADDR_MC); +} + +static void unic_sync_mac_table_common(struct unic_vport *vport, + enum unic_mac_addr_type mac_type) +{ + void (*unsync)(struct unic_vport *vport, struct list_head *list); + void (*sync)(struct unic_vport *vport, struct list_head *list); + struct list_head *mac_list; + bool all_added; + + if (mac_type == UNIC_MAC_ADDR_UC) { + mac_list = &vport->addr_tbl.uc_mac_list; + sync = unic_sync_uc_mac_list; + unsync = 
unic_unsync_uc_mac_list; + } else { + mac_list = &vport->addr_tbl.mc_mac_list; + sync = unic_sync_mc_mac_list; + unsync = unic_unsync_mc_mac_list; + } + + all_added = unic_comm_sync_addr_table(vport, mac_list, + &vport->addr_tbl.mac_list_lock, + sync, unsync); + if (mac_type == UNIC_MAC_ADDR_UC) { + if (all_added) + vport->overflow_promisc_flags &= ~UNIC_OVERFLOW_UP_MAC; + else + vport->overflow_promisc_flags |= UNIC_OVERFLOW_UP_MAC; + } else { + if (all_added) + vport->overflow_promisc_flags &= ~UNIC_OVERFLOW_MP_MAC; + else + vport->overflow_promisc_flags |= UNIC_OVERFLOW_MP_MAC; + } +} + +void unic_sync_mac_table(struct unic_dev *unic_dev) +{ + struct unic_act_info *act_info = &unic_dev->act_info; + struct unic_vport *vport = &unic_dev->vport; + + if (!mutex_trylock(&act_info->mutex)) + return; + + if (act_info->deactivate) + goto out; + + if (!test_and_clear_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + goto out; + + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_UC); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_MC); + +out: + mutex_unlock(&act_info->mutex); +} + +static int unic_update_mac_list(struct net_device *netdev, + enum UNIC_COMM_ADDR_STATE state, + enum unic_mac_addr_type mac_type, + const u8 *mac_addr) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_vport *vport = &unic_dev->vport; + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct list_head *list; + bool valid; + int ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + if (mac_type == UNIC_MAC_ADDR_UC) { + list = &vport->addr_tbl.uc_mac_list; + valid = is_valid_ether_addr(mac_addr); + } else { + list = &vport->addr_tbl.mc_mac_list; + valid = is_multicast_ether_addr(mac_addr); + } + + unic_comm_format_mac_addr(format_mac, mac_addr); + if (!valid) { + unic_err(unic_dev, "failed to %s %s mac addr(%s).\n", + state == UNIC_COMM_ADDR_TO_ADD ? "add" : "del", + mac_type == UNIC_MAC_ADDR_UC ? "uc" : "mc", format_mac); + return -EINVAL; + } + + ether_addr_copy(unic_addr, mac_addr); + ret = unic_comm_update_addr_list(list, &vport->addr_tbl.mac_list_lock, + state, unic_addr); + if (ret) { + unic_err(unic_dev, + "failed to update mac addr(%s). mac_type = %s.\n", + format_mac, mac_type == UNIC_MAC_ADDR_UC ? 
"uc" : "mc"); + return ret; + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return ret; +} + +int unic_add_uc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_ADD, + UNIC_MAC_ADDR_UC, mac_addr); +} + +int unic_del_uc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + if (ether_addr_equal(mac_addr, netdev->dev_addr)) + return 0; + + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_DEL, + UNIC_MAC_ADDR_UC, mac_addr); +} + +int unic_add_mc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_ADD, + UNIC_MAC_ADDR_MC, mac_addr); +} + +int unic_del_mc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_DEL, + UNIC_MAC_ADDR_MC, mac_addr); +} + +static int unic_get_mac_addr(struct unic_dev *unic_dev, u8 *p) +{ + struct unic_query_mac_addr_resp resp = {0}; + struct ubase_cmd_buf in, out; + int ret; + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_MAC, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_MAC, false, ETH_ALEN, + &resp); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to get mac address, ret = %d.\n", ret); + return ret; + } + + ether_addr_copy(p, resp.mac); + + return 0; +} + +int unic_init_mac_addr(struct unic_dev *unic_dev) +{ + struct net_device *netdev = unic_dev->comdev.netdev; + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + int ret; + + ret = unic_get_mac_addr(unic_dev, unic_addr); + if (ret) + return ret; + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(unic_addr)) { + eth_hw_addr_random(netdev); + ether_addr_copy(unic_addr, netdev->dev_addr); + unic_comm_format_mac_addr(format_mac, unic_addr); + dev_warn(unic_dev->comdev.adev->dev.parent, + "using random MAC address %s.\n", format_mac); + } else if (!ether_addr_equal(netdev->dev_addr, unic_addr)) { + dev_addr_set(netdev, unic_addr); + ether_addr_copy(netdev->perm_addr, unic_addr); + } else { + return 0; + } + + if (!unic_dev_cfg_mac_supported(unic_dev)) { + ether_addr_copy(unic_dev->hw.mac.mac_addr, unic_addr); + return 0; + } + + ret = unic_cfg_mac_address(unic_dev, unic_addr); + if (ret) { + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to cfg MAC address, ret = %d!\n", ret); + return ret; + } + + ubase_set_dev_mac(unic_dev->comdev.adev, netdev->dev_addr, + netdev->addr_len); + + return 0; +} + +void unic_uninit_mac_addr(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + ether_addr_copy(unic_addr, unic_dev->comdev.netdev->dev_addr); + mac_node = unic_comm_find_addr_node(&vport->addr_tbl.uc_mac_list, + unic_addr, UNIC_COMM_ADDR_NO_MASK); + if (mac_node) { + if (mac_node->state == UNIC_COMM_ADDR_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = UNIC_COMM_ADDR_TO_DEL; + } + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); +} + +static void unic_uninit_mac_table_common(struct unic_dev *unic_dev, + enum unic_mac_addr_type mac_type) +{ + struct unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node, *tmp; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + 
+ list = (mac_type == UNIC_MAC_ADDR_UC) ? + &vport->addr_tbl.uc_mac_list : &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case UNIC_COMM_ADDR_TO_DEL: + case UNIC_COMM_ADDR_ACTIVE: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case UNIC_COMM_ADDR_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; + default: + break; + } + } + + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + + unic_unsync_mac_list(vport, &tmp_del_list, mac_type); + + if (!list_empty(&tmp_del_list)) + dev_warn(unic_dev->comdev.adev->dev.parent, + "uninit mac list not completely.\n"); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +void unic_uninit_mac_table(struct unic_dev *unic_dev) +{ + unic_uninit_mac_table_common(unic_dev, UNIC_MAC_ADDR_UC); + unic_uninit_mac_table_common(unic_dev, UNIC_MAC_ADDR_MC); +} + +static void unic_deactivate_unsync_mac_list(struct unic_vport *vport, + struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_del_mac_addr_common(vport, mac_node->mac_addr, + mac_type, mac_node->is_pfc); + if (ret) + break; + + if (mac_node->state == UNIC_COMM_ADDR_ACTIVE) { + mac_node->state = UNIC_COMM_ADDR_TO_ADD; + } else if (mac_node->state == UNIC_COMM_ADDR_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void unic_deactivate_update_mac_state(struct unic_comm_addr_node *mac_node, + enum UNIC_COMM_ADDR_STATE state) +{ + switch (state) { + case UNIC_COMM_ADDR_TO_ADD: + if (mac_node->state == UNIC_COMM_ADDR_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == UNIC_COMM_ADDR_ACTIVE) { + mac_node->state = UNIC_COMM_ADDR_TO_ADD; + } + break; + case UNIC_COMM_ADDR_TO_DEL: + mac_node->state = UNIC_COMM_ADDR_ACTIVE; + break; + default: + break; + } +} + +static void unic_deactivate_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct unic_comm_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = unic_comm_find_addr_node(mac_list, + mac_node->unic_addr, + UNIC_COMM_ADDR_NO_MASK); + if (new_node) { + unic_deactivate_update_mac_state(new_node, + mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_move_tail(&mac_node->node, mac_list); + } + } +} + +static void unic_deactivate_sync_mac_table(struct unic_vport *vport, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp, *new_node; + struct list_head *mac_list, tmp_list; + + INIT_LIST_HEAD(&tmp_list); + + if (mac_type == UNIC_MAC_ADDR_UC) + mac_list = &vport->addr_tbl.uc_mac_list; + else + mac_list = &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, mac_list, node) { + switch (mac_node->state) { + case UNIC_COMM_ADDR_TO_DEL: + list_move_tail(&mac_node->node, &tmp_list); + break; + case UNIC_COMM_ADDR_ACTIVE: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + memcpy(new_node->unic_addr, mac_node->unic_addr, + UNIC_ADDR_LEN); + new_node->state = mac_node->state; + new_node->is_pfc = mac_node->is_pfc; + list_add_tail(&new_node->node, &tmp_list); + break; + default: + break; + } + } + 
+stop_traverse: + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + unic_deactivate_unsync_mac_list(vport, &tmp_list, mac_type); + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + unic_deactivate_sync_from_del_list(&tmp_list, mac_list); + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); +} + +void unic_deactivate_mac_table(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + unic_deactivate_sync_mac_table(vport, UNIC_MAC_ADDR_UC); + unic_deactivate_sync_mac_table(vport, UNIC_MAC_ADDR_MC); + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); +} + +void unic_activate_mac_table(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + clear_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_UC); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_MC); +} diff --git a/drivers/net/ub/unic/unic_mac.h b/drivers/net/ub/unic/unic_mac.h new file mode 100644 index 000000000000..3c45d4ab3687 --- /dev/null +++ b/drivers/net/ub/unic/unic_mac.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei UNIC Linux driver + * Copyright (c) 2024-2025 Hisilicon Limited. + * + */ + +#ifndef UNIC_MAC_H +#define UNIC_MAC_H + +enum unic_mac_addr_type { + UNIC_MAC_ADDR_UC, + UNIC_MAC_ADDR_MC, +}; + +int unic_cfg_mac_address(struct unic_dev *unic_dev, u8 *mac_addr); +int unic_add_uc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_del_uc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_add_mc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_del_mc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_init_mac_addr(struct unic_dev *unic_dev); +void unic_uninit_mac_addr(struct unic_dev *unic_dev); +void unic_sync_mac_table(struct unic_dev *unic_dev); +void unic_uninit_mac_table(struct unic_dev *unic_dev); +void unic_deactivate_mac_table(struct unic_dev *unic_dev); +void unic_activate_mac_table(struct unic_dev *unic_dev); + +#endif diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index bc19e45bee15..53556f62c0b2 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -24,6 +24,7 @@ #include "unic_dev.h" #include "unic_event.h" #include "unic_hw.h" +#include "unic_mac.h" #include "unic_rx.h" #include "unic_tx.h" #include "unic_txrx.h" @@ -527,6 +528,42 @@ static int unic_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +static int unic_set_mac_address(struct net_device *netdev, void *addr) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct sockaddr *mac_addr = addr; + int ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) { + unic_err(unic_dev, "invalid user mac.\n"); + return -EADDRNOTAVAIL; + } + + unic_comm_format_mac_addr(format_mac, mac_addr->sa_data); + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + unic_info(unic_dev, "already using mac(%s).\n", format_mac); + return 0; + } + + ether_addr_copy(unic_addr, mac_addr->sa_data); + ret = unic_cfg_mac_address(unic_dev, unic_addr); + if (ret) { + unic_err(unic_dev, "failed to set mac address, ret = %d.\n", ret); + return ret; + } + + dev_addr_set(netdev, unic_addr); + ubase_set_dev_mac(unic_dev->comdev.adev, netdev->dev_addr, + netdev->addr_len); + + return ret; +} + static u8 unic_get_netdev_flags(struct net_device *netdev) { 
struct unic_dev *unic_dev = netdev_priv(netdev); @@ -535,6 +572,8 @@ static u8 unic_get_netdev_flags(struct net_device *netdev) if (netdev->flags & IFF_PROMISC) { if (unic_dev_ubl_supported(unic_dev)) flags = UNIC_USER_UPE | UNIC_USER_MPE; + else + flags = UNIC_USER_UPE | UNIC_USER_MPE | UNIC_USER_BPE; } else if (netdev->flags & IFF_ALLMULTI) { flags = UNIC_USER_MPE; } @@ -546,9 +585,19 @@ static void unic_set_rx_mode(struct net_device *netdev) { struct unic_dev *unic_dev = netdev_priv(netdev); struct unic_vport *vport = &unic_dev->vport; + u8 promisc_changed; u8 new_flags; new_flags = unic_get_netdev_flags(netdev); + if (unic_dev_eth_mac_supported(unic_dev)) { + __dev_uc_sync(netdev, unic_add_uc_mac, unic_del_uc_mac); + __dev_mc_sync(netdev, unic_add_mc_mac, unic_del_mc_mac); + promisc_changed = unic_dev->netdev_flags ^ new_flags; + if (promisc_changed & UNIC_USER_UPE) + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + &vport->state); + } + unic_dev->netdev_flags = new_flags; set_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &vport->state); @@ -650,6 +699,7 @@ static const struct net_device_ops unic_netdev_ops = { .ndo_change_mtu = unic_change_mtu, .ndo_open = unic_net_open, .ndo_stop = unic_net_stop, + .ndo_set_mac_address = unic_set_mac_address, .ndo_set_rx_mode = unic_set_rx_mode, .ndo_select_queue = unic_select_queue, .ndo_vlan_rx_add_vid = unic_vlan_rx_add_vid, @@ -667,15 +717,31 @@ static bool unic_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &unic_netdev_ops; } -static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, - struct sockaddr *sa, struct net_device *ndev, - u16 ip_mask) +static int unic_eth_ip_event(struct sockaddr *sa, struct net_device *ndev, + u16 ip_mask, unsigned long event) { - struct unic_dev *unic_dev; - int ret; + enum UNIC_COMM_ADDR_STATE state; + int ret = NOTIFY_OK; - if (ndev->type != ARPHRD_UB) + switch (event) { + case NETDEV_UP: + state = UNIC_COMM_ADDR_TO_ADD; + break; + case NETDEV_DOWN: + state = UNIC_COMM_ADDR_TO_DEL; + break; + default: return NOTIFY_DONE; + } + + return ret; +} + +static int unic_ub_ip_event(struct sockaddr *sa, struct net_device *ndev, + u16 ip_mask, unsigned long event) +{ + struct unic_dev *unic_dev; + int ret; if (!unic_port_dev_check(ndev)) return NOTIFY_DONE; @@ -700,6 +766,18 @@ static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, return NOTIFY_OK; } +static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, + struct sockaddr *sa, struct net_device *ndev, + u16 ip_mask) +{ + if (ndev->type == ARPHRD_ETHER) + return unic_eth_ip_event(sa, ndev, ip_mask, event); + else if (ndev->type == ARPHRD_UB) + return unic_ub_ip_event(sa, ndev, ip_mask, event); + else + return NOTIFY_DONE; +} + static int unic_inetaddr_event(struct notifier_block *nb, unsigned long event, void *ptr) { diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c index ff1239cc5131..4035fa9cf15c 100644 --- a/drivers/net/ub/unic/unic_reset.c +++ b/drivers/net/ub/unic/unic_reset.c @@ -9,6 +9,7 @@ #include "unic_cmd.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_mac.h" #include "unic_netdev.h" #include "unic_rack_ip.h" #include "unic_reset.h" @@ -42,6 +43,9 @@ static void unic_reset_down(struct auxiliary_device *adev) * to prevent that concurrent deactivate event ubable to close promisc * when resetting */ + if (unic_dev_eth_mac_supported(priv)) + unic_deactivate_mac_table(priv); + ret = unic_activate_promisc_mode(priv, false); if (ret) unic_warn(priv, 
"failed to close promisc, ret = %d.\n", ret); diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index f3f9ab6c7e96..92d81a658576 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -4,6 +4,7 @@ * */ +#include #include #include @@ -1634,3 +1635,55 @@ int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid) return __ubase_get_bus_eid(udev, eid); } EXPORT_SYMBOL(ubase_get_bus_eid); + +/** + * ubase_set_dev_mac() - Record the MAC address of the device + * @adev: auxiliary device + * @dev_addr: MAC address of the device + * @addr_len: MAC address length + * + * This function is used to record the MAC address of the device, and store the + * MAC address in the ubase_dev structure. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ +int ubase_set_dev_mac(struct auxiliary_device *adev, const u8 *dev_addr, + u8 addr_len) +{ + struct ubase_dev *udev; + + if (!adev || !dev_addr || addr_len < ETH_ALEN) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + ether_addr_copy(udev->dev_mac, dev_addr); + + return 0; +} +EXPORT_SYMBOL(ubase_set_dev_mac); + +/** + * ubase_get_dev_mac() - Obtain the device MAC address and output it. + * @adev: auxiliary device + * @dev_addr: Output parameter, save the obtained MAC address array. + * @addr_len: Length of the array for storing MAC addresses + * + * This function is used to get the device MAC address from ubase_dev. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ +int ubase_get_dev_mac(struct auxiliary_device *adev, u8 *dev_addr, u8 addr_len) +{ + struct ubase_dev *udev; + + if (!adev || !dev_addr || addr_len < ETH_ALEN) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + ether_addr_copy(dev_addr, udev->dev_mac); + + return 0; +} +EXPORT_SYMBOL(ubase_get_dev_mac); diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index 45605409de6b..ee7c5f605e65 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -291,6 +292,7 @@ struct ubase_dev { struct ubase_act_ctx act_ctx; struct ubase_arq_msg_ring arq; struct ubase_prealloc_mem_info pmem_info; + u8 dev_mac[ETH_ALEN]; }; #define UBASE_ERR_MSG_LEN 128 diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 8dfbb2dc91bd..c2fbd65268b9 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -387,4 +387,8 @@ int ubase_activate_dev(struct auxiliary_device *adev); int ubase_deactivate_dev(struct auxiliary_device *adev); int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid); +int ubase_get_dev_mac(struct auxiliary_device *adev, u8 *dev_addr, u8 addr_len); +int ubase_set_dev_mac(struct auxiliary_device *adev, const u8 *dev_addr, + u8 addr_len); + #endif -- Gitee From 71de769cac2379d20c903d95042e46094e631d83 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 20:00:18 +0800 Subject: [PATCH 201/243] net: unic: Add debugfs support for dumping MAC tables. commit cf1333dc9411fd5d45ce6f3ae26e0a0232fe496d openEuler This patch adds support for dumping UNICast and multicast MAC tables through DebugFS in the UNIC driver. It introduces new functions to query and display the MAC table entries, as well as the MAC table sizes. The patch also integrates these new functions with the existing DebugFS framework. Key changes include: 1. 
Adding new functions to dump UNICast and multicast MAC tables. 2. Implementing a function to display MAC table sizes. 3. Integrating the new functions with the DebugFS framework. This enhancement provides a convenient way to view and debug MAC configurations in UNIC driver, allowing for better network address management and troubleshooting. Signed-off-by: jianqiang Li Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 72 ++++++++++++------- .../net/ub/unic/debugfs/unic_entry_debugfs.c | 60 +++++++++++++++- .../net/ub/unic/debugfs/unic_entry_debugfs.h | 3 + drivers/net/ub/unic/unic_dev.h | 2 +- drivers/net/ub/unic/unic_hw.c | 1 + 5 files changed, 112 insertions(+), 26 deletions(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 3701a7df575b..e66d6fe37fcd 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -129,7 +129,10 @@ static void unic_dbg_dump_caps(struct unic_dev *unic_dev, struct seq_file *s) u32 caps_info; } unic_caps_info[] = { {"\ttotal_ip_tbl_size: %hu\n", unic_caps->total_ip_tbl_size}, + {"\tuc_mac_tbl_size: %u\n", unic_caps->uc_mac_tbl_size}, + {"\tmc_mac_tbl_size: %u\n", unic_caps->mc_mac_tbl_size}, {"\tvlan_tbl_size: %u\n", unic_caps->vlan_tbl_size}, + {"\tmng_tbl_size: %u\n", unic_caps->mng_tbl_size}, {"\tmax_trans_unit: %hu\n", unic_caps->max_trans_unit}, {"\tmin_trans_unit: %hu\n", unic_caps->min_trans_unit}, {"\tvport_buf_size: %u\n", unic_caps->vport_buf_size}, @@ -319,19 +322,19 @@ static bool unic_dbg_dentry_support(struct device *dev, u32 property) static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { { .name = "ip_tbl", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "context", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "vport", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "qos", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "vlan_tbl", @@ -345,7 +348,7 @@ static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { /* keep unic at the bottom and add new directory above */ { .name = "unic", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, }; @@ -365,38 +368,59 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_ip_tbl_list, + }, { + .name = "uc_mac_tbl_list", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_uc_mac_tbl_list, + }, { + .name = "mc_mac_tbl_list", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_mc_mac_tbl_list, + }, { + .name = "mac_tbl_spec", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + 
.read_func = unic_dbg_dump_mac_tbl_spec, }, { .name = "jfs_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfs_ctx_sw, }, { .name = "jfr_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfr_ctx_sw, }, { .name = "sq_jfc_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_sq_jfc_ctx_sw, }, { .name = "rq_jfc_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rq_jfc_ctx_sw, }, { .name = "dev_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dev_info, @@ -417,7 +441,7 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { }, { .name = "caps_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_caps_info, @@ -438,91 +462,91 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { }, { .name = "page_pool_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_page_pool_info, }, { .name = "jfs_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfs_context_hw, }, { .name = "jfr_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfr_context_hw, }, { .name = "sq_jfc_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_sq_jfc_context_hw, }, { .name = "rq_jfc_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rq_jfc_context_hw, }, { .name = "vl_queue", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_vl_queue, }, { .name = "rss_cfg_hw", .dentry_index = 
UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rss_cfg_hw, }, { .name = "promisc_cfg_hw", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_promisc_cfg_hw, }, { .name = "dscp_vl_map", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dscp_vl_map, }, { .name = "prio_vl_map", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_prio_vl_map, }, { .name = "dscp_prio", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dscp_prio, }, { .name = "link_status_record", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_query_link_record, }, { .name = "clear_link_status_record", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_clear_link_record, diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c index c6373196bd4e..aecae53cce9a 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c @@ -69,6 +69,64 @@ static int unic_common_query_addr_list(struct unic_dev *unic_dev, u32 total_size return ret == -EPERM ? -EOPNOTSUPP : ret; } +static int unic_dbg_dump_mac_tbl_list(struct seq_file *s, void *data, + bool is_unicast) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node, *tmp; + struct list_head *list; + int i = 0; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + seq_printf(s, "%s mac_list:\n", is_unicast ? "unicast" : "multicast"); + seq_printf(s, "No. %-28sSTATE\n", "MAC_ADDR"); + + list = is_unicast ? 
+ &vport->addr_tbl.uc_mac_list : &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + list_for_each_entry_safe(mac_node, tmp, list, node) { + seq_printf(s, "%-8d", i++); + seq_printf(s, "%-28pM", mac_node->mac_addr); + seq_printf(s, "%s\n", unic_entry_state_str[mac_node->state]); + } + + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + return 0; +} + +int unic_dbg_dump_uc_mac_tbl_list(struct seq_file *s, void *data) +{ + return unic_dbg_dump_mac_tbl_list(s, data, true); +} + +int unic_dbg_dump_mc_mac_tbl_list(struct seq_file *s, void *data) +{ + return unic_dbg_dump_mac_tbl_list(s, data, false); +} + +int unic_dbg_dump_mac_tbl_spec(struct seq_file *s, void *data) +{ + u32 mac_tbl_size, priv_uc_mac_tbl_size, priv_mc_mac_tbl_size; + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + priv_mc_mac_tbl_size = unic_dev->caps.mc_mac_tbl_size; + priv_uc_mac_tbl_size = unic_dev->caps.uc_mac_tbl_size; + mac_tbl_size = priv_mc_mac_tbl_size + priv_uc_mac_tbl_size; + + seq_printf(s, "mac_tbl_size\t: %u\n", mac_tbl_size); + seq_printf(s, "priv_uc_mac_tbl_size\t: %u\n", priv_uc_mac_tbl_size); + seq_printf(s, "priv_mc_mac_tbl_size\t: %u\n", priv_mc_mac_tbl_size); + + return 0; +} + int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) { struct unic_dev *unic_dev = dev_get_drvdata(s->private); @@ -83,7 +141,7 @@ int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) list = &ip_tbl->ip_list; spin_lock_bh(&ip_tbl->ip_list_lock); list_for_each_entry(ip_node, list, node) { - seq_printf(s, "%-4d", i++); + seq_printf(s, "%-4u", i++); seq_printf(s, "%-43pI6c", &ip_node->ip_addr.s6_addr); seq_printf(s, "%-9s", unic_entry_state_str[ip_node->state]); seq_printf(s, "%-3u", ip_node->node_mask); diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h index d4d25c41d42b..706180f70078 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -71,6 +71,9 @@ struct unic_dbg_comm_addr_node { }; int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data); +int unic_dbg_dump_mac_tbl_spec(struct seq_file *s, void *data); +int unic_dbg_dump_mc_mac_tbl_list(struct seq_file *s, void *data); +int unic_dbg_dump_uc_mac_tbl_list(struct seq_file *s, void *data); int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data); int unic_dbg_dump_vlan_tbl_list_hw(struct seq_file *s, void *data); int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data); diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 2d1830af72f0..aa74795d3534 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -166,7 +166,7 @@ struct unic_caps { u32 uc_mac_tbl_size; u32 mc_mac_tbl_size; u32 vlan_tbl_size; - u32 rsvd0[1]; + u32 mng_tbl_size; u16 max_trans_unit; u16 min_trans_unit; u32 vport_buf_size; /* unit: byte */ diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index 02033c4637d5..b50cf20eed2f 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -537,6 +537,7 @@ static void unic_parse_dev_caps(struct unic_dev *unic_dev, caps->uc_mac_tbl_size = le32_to_cpu(resp->uc_mac_tbl_size); caps->mc_mac_tbl_size = le32_to_cpu(resp->mc_mac_tbl_size); caps->vlan_tbl_size = le32_to_cpu(resp->vlan_tbl_size); + caps->mng_tbl_size = le32_to_cpu(resp->mng_tbl_size); caps->max_trans_unit = 
le16_to_cpu(resp->max_trans_unit); caps->min_trans_unit = le16_to_cpu(resp->min_trans_unit); caps->vport_buf_size = le16_to_cpu(resp->vport_buf_size) * KB; -- Gitee From 85414220874c10a77c08e969af6dd24c228ab3e9 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 20:43:02 +0800 Subject: [PATCH 202/243] net: unic: Add Priority Flow Control (PFC) support commit 50cb1a91b23660bad64730306b1fb0d8fd46cd80 openEuler This patch adds support for Priority Flow Control (PFC) in the UNIC driver. It introduces new functions to get and set PFC parameters, as well as display PFC statistics. The patch also integrates these new functions with the existing DCB and ethtool operations. Key changes include: 1. Adding new functions to get and set PFC parameters. 2. Implementing a function to display PFC statistics. 3. Integrating the new functions with the DCB and ethtool operations. This enhancement provides a comprehensive way to manage PFC in the UNIC driver, allowing for better flow control and network performance. Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 7 ++ .../net/ub/unic/debugfs/unic_qos_debugfs.c | 44 +++++++ .../net/ub/unic/debugfs/unic_qos_debugfs.h | 1 + drivers/net/ub/unic/unic_cmd.h | 14 +++ drivers/net/ub/unic/unic_dcbnl.c | 107 +++++++++++++++++- drivers/net/ub/unic/unic_dcbnl.h | 22 ++++ drivers/net/ub/unic/unic_dev.c | 17 +++ drivers/net/ub/unic/unic_dev.h | 11 ++ drivers/net/ub/unic/unic_ethtool.h | 5 + drivers/net/ub/unic/unic_qos_hw.c | 52 +++++++++ drivers/net/ub/unic/unic_qos_hw.h | 3 + drivers/net/ub/unic/unic_stats.c | 104 +++++++++++++++++ drivers/net/ub/unic/unic_stats.h | 13 +++ drivers/ub/ubase/ubase_stats.c | 35 ++++++ drivers/ub/ubase/ubase_stats.h | 2 + include/ub/ubase/ubase_comm_stats.h | 2 + 16 files changed, 438 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index e66d6fe37fcd..75fd5b11632c 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -529,6 +529,13 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_prio_vl_map, + }, { + .name = "pfc_info", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_pfc_param, }, { .name = "dscp_prio", .dentry_index = UNIC_DBG_DENTRY_QOS, diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c index 85b3288f0bec..8b56fb4b7c8d 100644 --- a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c @@ -146,3 +146,47 @@ int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data) return 0; } + +int unic_dbg_dump_pfc_param(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct ubase_eth_mac_stats eth_stats = {0}; + u64 stats_tx[IEEE_8021QAZ_MAX_TCS]; + u64 stats_rx[IEEE_8021QAZ_MAX_TCS]; + u8 pfc_cap, pfc_en; + int i, ret; + + if (!unic_dev_pfc_supported(unic_dev)) + return -EOPNOTSUPP; + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + ret = ubase_get_eth_port_stats(unic_dev->comdev.adev, &eth_stats); + if (ret) + return ret; + + pfc_en = unic_dev->channels.vl.pfc_info.pfc_en; + pfc_cap = UNIC_MAX_PRIO_NUM; + + seq_printf(s, 
"mac_pfc_capacity: %d\n", pfc_cap); + seq_puts(s, "mac_pfc_enable: "); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + seq_printf(s, "%d", + (pfc_en >> (IEEE_8021QAZ_MAX_TCS - i - 1)) & 1); + + seq_puts(s, "\n"); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + stats_tx[i] = unic_get_pfc_tx_pkts(ð_stats, i); + seq_printf(s, "mac_tx_pri%d_pfc_pkts: %llu\n", i, stats_tx[i]); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + stats_rx[i] = unic_get_pfc_rx_pkts(ð_stats, i); + seq_printf(s, "mac_rx_pri%d_pfc_pkts: %llu\n", i, stats_rx[i]); + } + + return 0; +} diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h index f55616ab1617..91f136e99cff 100644 --- a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h @@ -11,5 +11,6 @@ int unic_dbg_dump_vl_queue(struct seq_file *s, void *data); int unic_dbg_dump_dscp_vl_map(struct seq_file *s, void *data); int unic_dbg_dump_prio_vl_map(struct seq_file *s, void *data); int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data); +int unic_dbg_dump_pfc_param(struct seq_file *s, void *data); #endif diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index 4161deeaf2f1..92e051fc603c 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -213,6 +213,20 @@ struct unic_lb_en_cfg { u8 rsv[21]; }; +struct unic_cfg_mac_pause_cmd { + __le32 tx_en; + __le32 rx_en; + u8 rsv[16]; +}; + +struct unic_cfg_pfc_pause_cmd { + u8 tx_enable : 1; + u8 rx_enable : 1; + u8 rsvd0 : 6; + u8 pri_bitmap; + u8 rsv1[22]; +}; + struct unic_query_link_diagnosis_resp { __le32 status_code; u8 rsv[20]; diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 9f85044aeb05..82746e3781f3 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -252,6 +252,108 @@ static int unic_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) return unic_setets_config(ndev, ets, changed, vl_num); } +static int unic_check_pfc_preconditions(struct net_device *net_dev) +{ + struct unic_dev *unic_dev = netdev_priv(net_dev); + + if (!unic_dev_pfc_supported(unic_dev)) + return -EOPNOTSUPP; + + if (unic_resetting(net_dev)) + return -EBUSY; + + if (!(unic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + return 0; +} + +static int unic_pfc_down(struct unic_dev *unic_dev, struct ieee_pfc *pfc) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + u8 tx_pause, rx_pause; + int ret; + + ret = unic_pfc_pause_cfg(unic_dev, pfc->pfc_en); + if (ret) + return ret; + + pfc_info->fc_mode &= ~(UNIC_FC_PFC_EN); + pfc_info->pfc_en = pfc->pfc_en; + + tx_pause = pfc_info->fc_mode & UNIC_TX_PAUSE_EN ? 1 : 0; + rx_pause = pfc_info->fc_mode & UNIC_RX_PAUSE_EN ? 
1 : 0; + + return unic_mac_pause_en_cfg(unic_dev, tx_pause, rx_pause); +} + +static int unic_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct ubase_eth_mac_stats eth_stats = {0}; + int i, ret; + + ret = unic_check_pfc_preconditions(ndev); + if (ret) + return ret; + + ret = ubase_get_eth_port_stats(unic_dev->comdev.adev, &eth_stats); + if (ret) + return ret; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_en = unic_dev->channels.vl.pfc_info.pfc_en; + pfc->pfc_cap = UNIC_MAX_PRIO_NUM; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + pfc->requests[i] = unic_get_pfc_tx_pkts(&eth_stats, i); + pfc->indications[i] = unic_get_pfc_rx_pkts(&eth_stats, i); + } + + return 0; +} + +static int unic_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_pfc_info *pfc_info; + int ret; + + pfc_info = &unic_dev->channels.vl.pfc_info; + + ret = unic_check_pfc_preconditions(ndev); + if (ret) + return ret; + + if (pfc->pfc_en == pfc_info->pfc_en) + return 0; + + if (!pfc->pfc_en) + return unic_pfc_down(unic_dev, pfc); + + if (!(pfc_info->fc_mode & UNIC_FC_PFC_EN)) { + ret = unic_mac_pause_en_cfg(unic_dev, false, false); + if (ret) { + unic_info(unic_dev, "failed to disable pause, ret = %d.\n", + ret); + return ret; + } + } + + ret = unic_pfc_pause_cfg(unic_dev, pfc->pfc_en); + if (ret) { + unic_info(unic_dev, + "failed to set pfc tx rx enable or priority, ret = %d.\n", + ret); + return ret; + } + + pfc_info->fc_mode |= UNIC_FC_PFC_EN; + pfc_info->pfc_en = pfc->pfc_en; + + return ret; +} + static int unic_dscp_prio_check(struct net_device *netdev, struct dcb_app *app) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -481,6 +583,8 @@ static const struct dcbnl_rtnl_ops unic_dcbnl_ops = { .ieee_setets = unic_dcbnl_ieee_setets, .ieee_getmaxrate = unic_ieee_getmaxrate, .ieee_setmaxrate = unic_ieee_setmaxrate, + .ieee_getpfc = unic_dcbnl_ieee_getpfc, + .ieee_setpfc = unic_dcbnl_ieee_setpfc, .ieee_setapp = unic_dcbnl_ieee_setapp, .ieee_delapp = unic_dcbnl_ieee_delapp, .getdcbx = unic_dcbnl_getdcbx, @@ -491,7 +595,8 @@ void unic_set_dcbnl_ops(struct net_device *netdev) { struct unic_dev *unic_dev = netdev_priv(netdev); - if (!unic_dev_ets_supported(unic_dev)) + if (!unic_dev_ets_supported(unic_dev) && + !unic_dev_pfc_supported(unic_dev)) return; netdev->dcbnl_ops = &unic_dcbnl_ops; diff --git a/drivers/net/ub/unic/unic_dcbnl.h b/drivers/net/ub/unic/unic_dcbnl.h index a721fa51d6e4..fc8219ba40f5 100644 --- a/drivers/net/ub/unic/unic_dcbnl.h +++ b/drivers/net/ub/unic/unic_dcbnl.h @@ -8,6 +8,28 @@ #define __UNIC_DCBNL_H__ #include +#include + +#include "unic_stats.h" +#include "unic_ethtool.h" + +static inline u64 unic_get_pfc_tx_pkts(struct ubase_eth_mac_stats *eth_stats, + u32 pri) +{ + u16 offset = UNIC_ETH_MAC_STATS_FIELD_OFF(tx_pri0_pfc_pkts) + + pri * sizeof(eth_stats->tx_pri0_pfc_pkts); + + return UNIC_STATS_READ(eth_stats, offset); +} + +static inline u64 unic_get_pfc_rx_pkts(struct ubase_eth_mac_stats *eth_stats, + u32 pri) +{ + u16 offset = UNIC_ETH_MAC_STATS_FIELD_OFF(rx_pri0_pfc_pkts) + + pri * sizeof(eth_stats->rx_pri0_pfc_pkts); + + return UNIC_STATS_READ(eth_stats, offset); +} #ifdef CONFIG_UB_UNIC_DCB void unic_set_dcbnl_ops(struct net_device *netdev); diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 23da49fa6777..c0f8ff33193d 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -234,6 +234,19 @@ 
static int unic_init_vl_maxrate(struct unic_dev *unic_dev) unic_dev->channels.vl.vl_bitmap); } +static int unic_init_pfc(struct unic_dev *unic_dev) +{ + if (!unic_dev_pfc_supported(unic_dev)) + return 0; + + return unic_pfc_pause_cfg(unic_dev, 0); +} + +static int unic_init_fc_mode(struct unic_dev *unic_dev) +{ + return unic_init_pfc(unic_dev); +} + static int unic_init_vl_info(struct unic_dev *unic_dev) { int ret; @@ -609,6 +622,10 @@ static int unic_init_mac(struct unic_dev *unic_dev) return ret; } + ret = unic_init_fc_mode(unic_dev); + if (ret) + return ret; + mutex_init(&record->lock); return 0; } diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index aa74795d3534..86e45314115a 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -126,6 +126,11 @@ struct unic_coal_txrx { struct unic_coalesce rx_coal; }; +struct unic_pfc_info { + u8 fc_mode; + u8 pfc_en; +}; + struct unic_vl { u8 vl_num; u8 dscp_app_cnt; @@ -138,6 +143,7 @@ struct unic_vl { u8 vl_sl[UBASE_MAX_VL_NUM]; u64 vl_maxrate[UBASE_MAX_VL_NUM]; u16 vl_bitmap; + struct unic_pfc_info pfc_info; }; struct unic_channels { @@ -305,6 +311,11 @@ static inline bool unic_dev_eth_mac_supported(struct unic_dev *unic_dev) return ubase_adev_eth_mac_supported(unic_dev->comdev.adev); } +static inline bool unic_dev_pfc_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PFC_B); +} + static inline bool unic_dev_ets_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_ETS_B); diff --git a/drivers/net/ub/unic/unic_ethtool.h b/drivers/net/ub/unic/unic_ethtool.h index 4ba10f0b54b2..0de82b0cb062 100644 --- a/drivers/net/ub/unic/unic_ethtool.h +++ b/drivers/net/ub/unic/unic_ethtool.h @@ -9,9 +9,14 @@ #include #include +#include #define UNIC_TXRX_MIN_DEPTH 64 +#define UNIC_TX_PAUSE_EN BIT(0) +#define UNIC_RX_PAUSE_EN BIT(1) +#define UNIC_FC_PFC_EN BIT(2) + struct unic_reset_type_map { enum ethtool_reset_flags reset_flags; enum ubase_reset_type reset_type; diff --git a/drivers/net/ub/unic/unic_qos_hw.c b/drivers/net/ub/unic/unic_qos_hw.c index bba05964156b..171de80357c3 100644 --- a/drivers/net/ub/unic/unic_qos_hw.c +++ b/drivers/net/ub/unic/unic_qos_hw.c @@ -6,6 +6,9 @@ #define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid +#include + +#include "unic_cmd.h" #include "unic_hw.h" #include "unic_qos_hw.h" @@ -77,3 +80,52 @@ int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, return ret; } + +int unic_mac_pause_en_cfg(struct unic_dev *unic_dev, u32 tx_pause, u32 rx_pause) +{ + struct unic_cfg_mac_pause_cmd req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.tx_en = cpu_to_le32(tx_pause); + req.rx_en = cpu_to_le32(rx_pause); + + ubase_fill_inout_buf(&in, UBASE_OPC_CFG_MAC_PAUSE_EN, false, sizeof(req), &req); + + ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to config pause on|off, ret = %d.\n", ret); + + return ret; +} + +int unic_pfc_pause_cfg(struct unic_dev *unic_dev, u8 pfc_en) +{ +#define UNIC_PFC_TX_RX_ON 1 +#define UNIC_PFC_TX_RX_OFF 0 + + struct unic_cfg_pfc_pause_cmd req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.pri_bitmap = pfc_en; + + if (pfc_en) { + req.tx_enable = UNIC_PFC_TX_RX_ON; + req.rx_enable = UNIC_PFC_TX_RX_ON; + } else { + req.tx_enable = UNIC_PFC_TX_RX_OFF; + req.rx_enable = UNIC_PFC_TX_RX_OFF; + } + + ubase_fill_inout_buf(&in, UBASE_OPC_CFG_PFC_PAUSE_EN, false, sizeof(req), + &req); + + ret = 
ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to config pfc enable, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ub/unic/unic_qos_hw.h b/drivers/net/ub/unic/unic_qos_hw.h index 82822cc871b6..67c76d5bfaac 100644 --- a/drivers/net/ub/unic/unic_qos_hw.h +++ b/drivers/net/ub/unic/unic_qos_hw.h @@ -17,5 +17,8 @@ int unic_query_vl_map(struct unic_dev *unic_dev, struct unic_config_vl_map_cmd *resp); int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, u16 vl_bitmap); +int unic_mac_pause_en_cfg(struct unic_dev *unic_dev, u32 tx_pause, + u32 rx_pause); +int unic_pfc_pause_cfg(struct unic_dev *unic_dev, u8 pfc_en); #endif diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index ec929328db37..29248a90675e 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -109,6 +109,110 @@ static const struct unic_stats_desc unic_rq_stats_str[] = { {"csum_complete", UNIC_RQ_STATS_FIELD_OFF(csum_complete)}, }; +static const struct unic_mac_stats_desc unic_eth_stats_str[] = { + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pause_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri0_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri1_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri2_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri3_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri4_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri5_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri6_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri7_pfc_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pause_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri0_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri1_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri2_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri3_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri4_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri5_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri6_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri7_pfc_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_64_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_65_127_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_128_255_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_256_511_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_512_1023_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1024_1518_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_2047_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_2048_4095_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_4096_8191_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_8192_9216_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_9217_12287_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_12288_16383_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_max_octets_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_max_octets_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_oversize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_jabber_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_bad_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_good_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_total_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_total_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_unicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_multicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_broadcast_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_fragment_pkts), + 
UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_undersize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_undermin_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_mac_ctrl_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_unfilter_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1588_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_err_all_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_from_app_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_from_app_bad_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_64_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_65_127_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_128_255_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_256_511_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_512_1023_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1024_1518_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_2047_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_2048_4095_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_4096_8191_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_8192_9216_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_9217_12287_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_12288_16383_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_max_octets_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_max_octets_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_oversize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_jabber_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_bad_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_good_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_total_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_total_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_unicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_multicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_broadcast_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_fragment_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_undersize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_undermin_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_mac_ctrl_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_unfilter_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_symbol_err_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_fcs_err_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_send_app_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_send_app_bad_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_ass_error_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_ass_ok_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_frag_count), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_ass_error_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_ass_ok_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_frag_count), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_smd_error_pkts), +}; + static int unic_get_dfx_reg_num(struct unic_dev *unic_dev, u32 *reg_num, u32 reg_arr_size) { diff --git a/drivers/net/ub/unic/unic_stats.h b/drivers/net/ub/unic/unic_stats.h index 623b732f3d8e..0aef76abe3b7 100644 --- a/drivers/net/ub/unic/unic_stats.h +++ b/drivers/net/ub/unic/unic_stats.h @@ -12,6 +12,13 @@ #include #include +struct unic_dev; + +#define UNIC_ETH_MAC_STATS_CAP_1 95 + +#define UNIC_ETH_MAC_STATS_FIELD_OFF(fld) offsetof(struct ubase_eth_mac_stats, fld) +#define UNIC_ETH_MAC_STATS_FLD_CAP_1(fld) {#fld, UNIC_ETH_MAC_STATS_CAP_1, \ + UNIC_ETH_MAC_STATS_FIELD_OFF(fld)} #define UNIC_SQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_sq, stats) + \ offsetof(struct unic_sq_stats, fld)) #define UNIC_RQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_rq, stats) + \ @@ -100,6 +107,12 @@ struct unic_stats_desc { u16 offset; }; +struct unic_mac_stats_desc { + char desc[ETH_GSTRING_LEN]; + u32 stats_num; + 
u16 offset; +}; + int unic_get_regs_len(struct net_device *netdev); void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, void *data); diff --git a/drivers/ub/ubase/ubase_stats.c b/drivers/ub/ubase/ubase_stats.c index 4d6e4678686d..ba0970867501 100644 --- a/drivers/ub/ubase/ubase_stats.c +++ b/drivers/ub/ubase/ubase_stats.c @@ -77,6 +77,41 @@ int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, } EXPORT_SYMBOL(ubase_get_ub_port_stats); +int __ubase_get_eth_port_stats(struct ubase_dev *udev, + struct ubase_eth_mac_stats *data) +{ + struct ubase_eth_mac_stats *eth_stats = &udev->stats.eth_stats; + u32 stats_num = sizeof(*eth_stats) / sizeof(u64); + int ret; + + mutex_lock(&udev->stats.stats_lock); + ret = ubase_update_mac_stats(udev, udev->caps.dev_caps.io_port_logic_id, + (u64 *)eth_stats, stats_num, true); + if (ret) { + mutex_unlock(&udev->stats.stats_lock); + return ret; + } + + memcpy(data, &udev->stats.eth_stats, sizeof(*data)); + mutex_unlock(&udev->stats.stats_lock); + + return 0; +} + +int ubase_get_eth_port_stats(struct auxiliary_device *adev, + struct ubase_eth_mac_stats *data) +{ + struct ubase_dev *udev; + + if (!adev || !data) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + + return __ubase_get_eth_port_stats(udev, data); +} +EXPORT_SYMBOL(ubase_get_eth_port_stats); + void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, int result) { diff --git a/drivers/ub/ubase/ubase_stats.h b/drivers/ub/ubase/ubase_stats.h index a6826dd461c7..6b8c70708177 100644 --- a/drivers/ub/ubase/ubase_stats.h +++ b/drivers/ub/ubase/ubase_stats.h @@ -15,6 +15,8 @@ struct ubase_query_mac_stats_cmd { __le64 stats_val[]; }; +int __ubase_get_eth_port_stats(struct ubase_dev *udev, + struct ubase_eth_mac_stats *data); void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, int result); diff --git a/include/ub/ubase/ubase_comm_stats.h b/include/ub/ubase/ubase_comm_stats.h index 52a766e7bab0..ac0ae25b8631 100644 --- a/include/ub/ubase/ubase_comm_stats.h +++ b/include/ub/ubase/ubase_comm_stats.h @@ -234,6 +234,8 @@ struct ubase_perf_stats_result { int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, struct ubase_ub_dl_stats *data); +int ubase_get_eth_port_stats(struct auxiliary_device *adev, + struct ubase_eth_mac_stats *data); int ubase_perf_stats(struct auxiliary_device *adev, u64 port_bitmap, u32 period, struct ubase_perf_stats_result *data, u32 data_size); -- Gitee From 4a8d92df2dd9a781f979b969d84fc8272058036b Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 17 Nov 2025 22:09:29 +0800 Subject: [PATCH 203/243] net: unic: Add pause frame support commit 2a2a41b49d2830b3be3b2976cf9bcf60add76527 openEuler This patch adds support for pause frames in the UNIC driver. It introduces new functions to get and set pause parameters, as well as update the pause state. The patch also integrates these new functions with the existing ethtool operations. Key changes include: 1. Adding new functions to get and set pause parameters. 2. Implementing a function to update the pause state. 3. Integrating the new functions with the ethtool operations. This enhancement provides a convenient way to manage pause frames in the UNIC driver, allowing for better flow control and network performance. 
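For reference, the new get/set callbacks are reachable through the standard ethtool interface once this patch is applied. The snippet below is an illustrative sketch only and is not part of the patch; the interface name "eth0" is an assumption, and the generic SIOCETHTOOL ioctl shown here is what the kernel routes to the driver's .get_pauseparam callback:

    /* pause_query.c - minimal userspace sketch (illustrative only) */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };
            struct ifreq ifr = { 0 };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            /* "eth0" is an assumed name; substitute the UNIC netdev */
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&pp;
            /* ETHTOOL_GPAUSEPARAM is served by unic_get_pauseparam() */
            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("autoneg=%u rx_pause=%u tx_pause=%u\n",
                           pp.autoneg, pp.rx_pause, pp.tx_pause);
            close(fd);
            return 0;
    }

The same settings can be toggled with "ethtool -A <dev> rx on|off tx on|off", which exercises unic_set_pauseparam() below.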
Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_dev.c | 24 +++++++++ drivers/net/ub/unic/unic_dev.h | 5 ++ drivers/net/ub/unic/unic_ethtool.c | 84 ++++++++++++++++++++++++++++++ drivers/net/ub/unic/unic_ethtool.h | 3 ++ drivers/net/ub/unic/unic_hw.c | 23 ++++++++ 5 files changed, 139 insertions(+) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index c0f8ff33193d..a93e4689a235 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -234,6 +234,24 @@ static int unic_init_vl_maxrate(struct unic_dev *unic_dev) unic_dev->channels.vl.vl_bitmap); } +static int unic_init_pause(struct unic_dev *unic_dev) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + int ret; + + if (!unic_dev_pause_supported(unic_dev)) + return 0; + + ret = unic_mac_pause_en_cfg(unic_dev, UNIC_RX_TX_PAUSE_ON, + UNIC_RX_TX_PAUSE_ON); + if (ret) + return ret; + + pfc_info->fc_mode = UNIC_TX_PAUSE_EN | UNIC_RX_PAUSE_EN; + + return ret; +} + static int unic_init_pfc(struct unic_dev *unic_dev) { if (!unic_dev_pfc_supported(unic_dev)) @@ -244,6 +262,12 @@ static int unic_init_pfc(struct unic_dev *unic_dev) static int unic_init_fc_mode(struct unic_dev *unic_dev) { + int ret; + + ret = unic_init_pause(unic_dev); + if (ret) + return ret; + return unic_init_pfc(unic_dev); } diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 86e45314115a..a261ef9deeb3 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -326,6 +326,11 @@ static inline bool unic_dev_fec_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_B); } +static inline bool unic_dev_pause_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PAUSE_B); +} + static inline bool unic_dev_serial_serdes_lb_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_SERIAL_SERDES_LB_B); diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index a20bb3a05462..886f6d691733 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -188,6 +188,88 @@ static void unic_get_driver_info(struct net_device *netdev, u32_get_bits(fw_version, UBASE_FW_VERSION_BYTE0_MASK)); } +static void unic_update_pause_state(u8 pause_mode, + struct ethtool_pauseparam *eth_pauseparam) +{ + eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_OFF; + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_OFF; + + if (pause_mode & UNIC_TX_PAUSE_EN) + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_ON; + + if (pause_mode & UNIC_RX_PAUSE_EN) + eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_ON; +} + +static void unic_record_user_pauseparam(struct unic_dev *unic_dev, + struct ethtool_pauseparam *eth_pauseparam) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + u32 rx_en = eth_pauseparam->rx_pause; + u32 tx_en = eth_pauseparam->tx_pause; + + pfc_info->fc_mode = 0; + + if (tx_en) + pfc_info->fc_mode = UNIC_TX_PAUSE_EN; + + if (rx_en) + pfc_info->fc_mode |= UNIC_RX_PAUSE_EN; +} + +static void unic_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *eth_pauseparam) +{ +#define PAUSE_AUTONEG_OFF 0 + + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (!unic_dev_pause_supported(unic_dev)) + return; + + eth_pauseparam->autoneg = PAUSE_AUTONEG_OFF; + + if (unic_dev->channels.vl.pfc_info.fc_mode & UNIC_FC_PFC_EN) { + 
eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_OFF; + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_OFF; + return; + } + + unic_update_pause_state(unic_dev->channels.vl.pfc_info.fc_mode, + eth_pauseparam); +} + +static int unic_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *eth_pauseparam) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + if (!unic_dev_pause_supported(unic_dev)) + return -EOPNOTSUPP; + + if (eth_pauseparam->autoneg) { + unic_warn(unic_dev, + "failed to set pause, set autoneg not supported.\n"); + return -EOPNOTSUPP; + } + + if (unic_dev->channels.vl.pfc_info.fc_mode & UNIC_FC_PFC_EN) { + unic_warn(unic_dev, + "failed to set pause, priority flow control enabled.\n"); + return -EOPNOTSUPP; + } + + ret = unic_mac_pause_en_cfg(unic_dev, eth_pauseparam->tx_pause, + eth_pauseparam->rx_pause); + if (ret) + return ret; + + unic_record_user_pauseparam(unic_dev, eth_pauseparam); + + return ret; +} + static int unic_get_fecparam(struct net_device *ndev, struct ethtool_fecparam *fec) { @@ -509,6 +591,8 @@ static const struct ethtool_ops unic_ethtool_ops = { .get_link_ksettings = unic_get_link_ksettings, .set_link_ksettings = unic_set_link_ksettings, .get_drvinfo = unic_get_driver_info, + .get_pauseparam = unic_get_pauseparam, + .set_pauseparam = unic_set_pauseparam, .get_regs_len = unic_get_regs_len, .get_regs = unic_get_regs, .get_ethtool_stats = unic_get_stats, diff --git a/drivers/net/ub/unic/unic_ethtool.h b/drivers/net/ub/unic/unic_ethtool.h index 0de82b0cb062..fe9e06b3e4e9 100644 --- a/drivers/net/ub/unic/unic_ethtool.h +++ b/drivers/net/ub/unic/unic_ethtool.h @@ -13,6 +13,9 @@ #define UNIC_TXRX_MIN_DEPTH 64 +#define UNIC_RX_TX_PAUSE_ON 1 +#define UNIC_RX_TX_PAUSE_OFF 0 + #define UNIC_TX_PAUSE_EN BIT(0) #define UNIC_RX_PAUSE_EN BIT(1) #define UNIC_FC_PFC_EN BIT(2) diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c index b50cf20eed2f..9d43c07569f0 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -295,6 +295,20 @@ static void unic_update_fec_advertising(struct unic_mac *mac) mac->advertising); } +static void unic_update_pause_advertising(struct unic_dev *unic_dev) +{ + u8 fc_mode = unic_dev->channels.vl.pfc_info.fc_mode; + struct unic_mac *mac = &unic_dev->hw.mac; + bool rx_en = false, tx_en = false; + + if (!(fc_mode & UNIC_FC_PFC_EN)) { + rx_en = !!(fc_mode & UNIC_RX_PAUSE_EN); + tx_en = !!(fc_mode & UNIC_TX_PAUSE_EN); + } + + linkmode_set_pause(mac->advertising, tx_en, rx_en); +} + static void unic_update_advertising(struct unic_dev *unic_dev) { struct unic_mac *mac = &unic_dev->hw.mac; @@ -303,6 +317,9 @@ static void unic_update_advertising(struct unic_dev *unic_dev) unic_update_speed_advertising(mac); unic_update_fec_advertising(mac); + + if (unic_dev_pause_supported(unic_dev)) + unic_update_pause_advertising(unic_dev); } static void unic_update_port_capability(struct unic_dev *unic_dev) @@ -462,6 +479,9 @@ static void unic_parse_fiber_link_mode(struct unic_dev *unic_dev, unic_set_linkmode_lr(speed_ability, mac->supported); unic_set_linkmode_cr(speed_ability, mac->supported); + if (unic_dev_pause_supported(unic_dev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -473,6 +493,9 @@ static void unic_parse_backplane_link_mode(struct unic_dev *unic_dev, unic_set_linkmode_kr(speed_ability, mac->supported); + if 
(unic_dev_pause_supported(unic_dev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } -- Gitee From 678a17ed0c7512f4ed21348d88c3740b57f58f74 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 26 Nov 2025 11:07:22 +0800 Subject: [PATCH 204/243] net: unic: Add support for MAC statistics commit ebdbff9694c44e432a83a5646f4bd21f3a629c62 openEuler This patch adds support for MAC statistics in the UNIC driver. It introduces new functions to get and display MAC statistics, as well as integrate these statistics with the existing ethtool operations. The patch also ensures that the new statistics are properly counted and displayed. Key changes include: 1. Adding new functions to get and display MAC statistics. 2. Integrating the new statistics with the ethtool operations. 3. Ensuring proper counting and display of the new statistics. This enhancement provides a comprehensive view of MAC statistics, allowing for better monitoring and troubleshooting of network performance in the UNIC driver. Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/debugfs/unic_debugfs.c | 7 +- drivers/net/ub/unic/unic_dev.h | 5 + drivers/net/ub/unic/unic_netdev.c | 2 + drivers/net/ub/unic/unic_stats.c | 139 +++++++++++++++++++++ drivers/ub/ubase/ubase_dev.c | 29 ++++- drivers/ub/ubase/ubase_stats.c | 43 +++++++ drivers/ub/ubase/ubase_stats.h | 1 + include/ub/ubase/ubase_comm_dev.h | 1 + include/ub/ubase/ubase_comm_stats.h | 1 + 9 files changed, 226 insertions(+), 2 deletions(-) diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 75fd5b11632c..093e3de5e657 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -99,16 +99,21 @@ static const struct unic_dbg_cap_bit_info { bool (*get_bit)(struct unic_dev *dev); } unic_cap_bits[] = { {"\tsupport_ubl: %u\n", &unic_dev_ubl_supported}, + {"\tsupport_pfc: %u\n", &unic_dev_pfc_supported}, {"\tsupport_ets: %u\n", &unic_dev_ets_supported}, {"\tsupport_fec: %u\n", &unic_dev_fec_supported}, + {"\tsupport_pause: %u\n", &unic_dev_pause_supported}, + {"\tsupport_eth: %u\n", &unic_dev_eth_supported}, {"\tsupport_tc_speed_limit: %u\n", &unic_dev_tc_speed_limit_supported}, {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, {"\tsupport_app_lb: %u\n", &unic_dev_app_lb_supported}, + {"\tsupport_external_lb: %u\n", &unic_dev_external_lb_supported}, {"\tsupport_serial_serdes_lb: %u\n", &unic_dev_serial_serdes_lb_supported}, - {"\tsupport_parallel_serdes_lb: %u\n", unic_dev_parallel_serdes_lb_supported}, + {"\tsupport_parallel_serdes_lb: %u\n", &unic_dev_parallel_serdes_lb_supported}, {"\tsupport_fec_stats: %u\n", &unic_dev_fec_stats_supported}, {"\tsupport_cfg_mac: %u\n", &unic_dev_cfg_mac_supported}, + {"\tsupport_cfg_vlan_filter: %u\n", &unic_dev_cfg_vlan_filter_supported}, }; static void unic_dbg_dump_caps_bits(struct unic_dev *unic_dev, diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index a261ef9deeb3..c8368040fa92 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -331,6 +331,11 @@ static inline bool unic_dev_pause_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PAUSE_B); } 
+static inline bool unic_dev_eth_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_ETH_B); +} + static inline bool unic_dev_serial_serdes_lb_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_SERIAL_SERDES_LB_B); diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index 53556f62c0b2..681709ed341a 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -208,6 +208,7 @@ void unic_link_status_change(struct net_device *netdev, bool linkup) netif_tx_wake_all_queues(netdev); netif_carrier_on(netdev); unic_clear_fec_stats(unic_dev); + ubase_clear_eth_port_stats(unic_dev->comdev.adev); } } else { netif_carrier_off(netdev); @@ -333,6 +334,7 @@ int unic_net_open_no_link_change(struct net_device *netdev) netif_tx_wake_all_queues(netdev); netif_carrier_on(netdev); unic_clear_fec_stats(unic_dev); + ubase_clear_eth_port_stats(unic_dev->comdev.adev); } return 0; diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index 29248a90675e..1685ac0639ef 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -6,6 +6,7 @@ #include #include +#include #include #include "unic.h" @@ -451,6 +452,36 @@ static u64 *unic_get_queues_stats(struct unic_dev *unic_dev, return data; } +static void unic_get_mac_stats(struct unic_dev *unic_dev, u64 *data) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + const struct unic_mac_stats_desc *stats_desc; + struct ubase_eth_mac_stats mac_stats = {0}; + u32 stats_num = caps->mac_stats_num; + u32 i, stats_desc_num; + u8 *stats; + int ret; + + if (unic_dev_ubl_supported(unic_dev)) + return; + + stats_desc = unic_eth_stats_str; + stats_desc_num = ARRAY_SIZE(unic_eth_stats_str); + ret = ubase_get_eth_port_stats(adev, &mac_stats); + if (ret) + return; + + stats = (u8 *)&mac_stats; + for (i = 0; i < stats_desc_num; i++) { + if (stats_desc[i].stats_num > stats_num) + continue; + + *data = UNIC_STATS_READ(stats, stats_desc[i].offset); + data++; + } +} + void unic_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -470,6 +501,7 @@ void unic_get_stats(struct net_device *netdev, p = unic_get_queues_stats(unic_dev, unic_rq_stats_str, ARRAY_SIZE(unic_rq_stats_str), UNIC_QUEUE_TYPE_RQ, p); + unic_get_mac_stats(unic_dev, p); } static u8 *unic_get_strings(u8 *data, const char *prefix, u32 num, @@ -510,20 +542,74 @@ static u8 *unic_get_queues_strings(struct unic_dev *unic_dev, u8 *data) return data; } +static void +unic_get_mac_strings(struct unic_dev *unic_dev, u8 *data, + const struct unic_mac_stats_desc *strs, u32 size) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + u32 stats_num = caps->mac_stats_num; + u32 i; + + if (!ubase_adev_mac_stats_supported(adev)) + return; + + for (i = 0; i < size; i++) { + if (strs[i].stats_num > stats_num) + continue; + + (void)snprintf(data, ETH_GSTRING_LEN, "%s", strs[i].desc); + data += ETH_GSTRING_LEN; + } +} + void unic_get_stats_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct unic_dev *unic_dev = netdev_priv(netdev); + char unic_test_strs[][ETH_GSTRING_LEN] = { + "App Loopback test ", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", + "External Loopback test", + }; u8 *p = data; switch (stringset) { case ETH_SS_STATS: p = unic_get_queues_strings(unic_dev, p); + if 
(unic_dev_ubl_supported(unic_dev)) + break; + + unic_get_mac_strings(unic_dev, p, unic_eth_stats_str, + ARRAY_SIZE(unic_eth_stats_str)); + break; + case ETH_SS_TEST: + memcpy(data, unic_test_strs, sizeof(unic_test_strs)); break; default: break; } } +static int unic_get_mac_count(struct unic_dev *unic_dev, + const struct unic_mac_stats_desc strs[], u32 size) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + u32 stats_num = caps->mac_stats_num; + int count = 0; + u32 i; + + if (!ubase_adev_mac_stats_supported(adev)) + return 0; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= stats_num) + count++; + + return count; +} + int unic_get_sset_count(struct net_device *netdev, int stringset) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -534,6 +620,11 @@ int unic_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_STATS: count = ARRAY_SIZE(unic_sq_stats_str) * channel_num; count += ARRAY_SIZE(unic_rq_stats_str) * channel_num; + if (unic_dev_ubl_supported(unic_dev)) + break; + + count += unic_get_mac_count(unic_dev, unic_eth_stats_str, + ARRAY_SIZE(unic_eth_stats_str)); break; case ETH_SS_TEST: count = unic_get_selftest_count(unic_dev); @@ -558,6 +649,32 @@ static void unic_get_fec_stats_total(struct unic_dev *unic_dev, u8 stats_flags, fec_stats->corrected_bits.total = total->corr_bits; } +static void unic_get_fec_stats_lanes(struct unic_dev *unic_dev, u8 stats_flags, + struct ethtool_fec_stats *fec_stats) +{ + u8 lane_num = unic_dev->stats.fec_stats.lane_num; + u8 i; + + if (lane_num == 0 || lane_num > UNIC_FEC_STATS_MAX_LANE) { + unic_err(unic_dev, + "fec stats lane number is invalid, lane_num = %u.\n", + lane_num); + return; + } + + for (i = 0; i < lane_num; i++) { + if (stats_flags & UNIC_FEC_CORR_BLOCKS) + fec_stats->corrected_blocks.lanes[i] = + unic_dev->stats.fec_stats.lane[i].corr_blocks; + if (stats_flags & UNIC_FEC_UNCORR_BLOCKS) + fec_stats->uncorrectable_blocks.lanes[i] = + unic_dev->stats.fec_stats.lane[i].uncorr_blocks; + if (stats_flags & UNIC_FEC_CORR_BITS) + fec_stats->corrected_bits.lanes[i] = + unic_dev->stats.fec_stats.lane[i].corr_bits; + } +} + static void unic_get_ubl_fec_stats(struct unic_dev *unic_dev, struct ethtool_fec_stats *fec_stats) { @@ -577,6 +694,26 @@ static void unic_get_ubl_fec_stats(struct unic_dev *unic_dev, } } +static void unic_get_eth_fec_stats(struct unic_dev *unic_dev, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_mode = unic_dev->hw.mac.fec_mode; + u8 stats_flags = 0; + + switch (fec_mode) { + case ETHTOOL_FEC_RS: + stats_flags = UNIC_FEC_CORR_BLOCKS | UNIC_FEC_UNCORR_BLOCKS; + unic_get_fec_stats_total(unic_dev, stats_flags, fec_stats); + unic_get_fec_stats_lanes(unic_dev, UNIC_FEC_CORR_BITS, fec_stats); + break; + default: + unic_err(unic_dev, + "fec stats is not supported in mode(0x%x).\n", + fec_mode); + break; + } +} + void unic_get_fec_stats(struct net_device *ndev, struct ethtool_fec_stats *fec_stats) { @@ -591,4 +728,6 @@ void unic_get_fec_stats(struct net_device *ndev, if (unic_dev_ubl_supported(unic_dev)) unic_get_ubl_fec_stats(unic_dev, fec_stats); + else + unic_get_eth_fec_stats(unic_dev, fec_stats); } diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 92d81a658576..4765610b7f09 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -300,6 +300,21 @@ static void ubase_uninit_aux_devices(struct ubase_dev *udev) mutex_destroy(&udev->priv.uadev_lock); } +static void 
ubase_update_stats_for_all(struct ubase_dev *udev) +{ + int ret; + + if (ubase_dev_unic_supported(udev) && + ubase_dev_eth_mac_supported(udev) && + ubase_dev_mac_stats_supported(udev)) { + ret = ubase_update_eth_stats_trylock(udev); + if (ret) + ubase_err(udev, + "failed to update stats for eth, ret = %d.\n", + ret); + } +} + static void ubase_cancel_period_service_task(struct ubase_dev *udev) { if (udev->period_service_task.service_task.work.func) @@ -322,7 +337,6 @@ static int ubase_enable_period_service_task(struct ubase_dev *udev) static void ubase_period_service_task(struct work_struct *work) { #define UBASE_STATS_TIMER_INTERVAL (300000 / (UBASE_PERIOD_100MS)) -#define UBASE_QUERY_SL_TIMER_INTERVAL (1000 / (UBASE_PERIOD_100MS)) struct ubase_delay_work *ubase_work = container_of(work, struct ubase_delay_work, service_task.work); @@ -334,6 +348,10 @@ static void ubase_period_service_task(struct work_struct *work) return; } + if (test_bit(UBASE_STATE_INITED_B, &udev->state_bits) && + !(udev->serv_proc_cnt % UBASE_STATS_TIMER_INTERVAL)) + ubase_update_stats_for_all(udev); + udev->serv_proc_cnt++; ubase_enable_period_service_task(udev); } @@ -1330,6 +1348,15 @@ struct ubase_adev_qos *ubase_get_adev_qos(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_adev_qos); +bool ubase_adev_mac_stats_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_dev_mac_stats_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_mac_stats_supported); + static void ubase_activate_notify(struct ubase_dev *udev, struct auxiliary_device *adev, bool activate) { diff --git a/drivers/ub/ubase/ubase_stats.c b/drivers/ub/ubase/ubase_stats.c index ba0970867501..78ce9b6835d1 100644 --- a/drivers/ub/ubase/ubase_stats.c +++ b/drivers/ub/ubase/ubase_stats.c @@ -51,6 +51,33 @@ static int ubase_update_mac_stats(struct ubase_dev *udev, u16 port_id, u64 *data return ret; } +/** + * ubase_clear_eth_port_stats() - clear eth port stats + * @adev: auxiliary device + * + * The function is used to clear eth port stats. + * + * Context: Process context. Takes and releases &udev->stats.stats_lock. May sleep. + */ +void ubase_clear_eth_port_stats(struct auxiliary_device *adev) +{ + struct ubase_eth_mac_stats *eth_stats; + struct ubase_dev *udev; + + if (!adev) + return; + + udev = __ubase_get_udev_by_adev(adev); + eth_stats = &udev->stats.eth_stats; + if (ubase_dev_eth_mac_supported(udev)) { + mutex_lock(&udev->stats.stats_lock); + memset(eth_stats, 0, sizeof(*eth_stats)); + mutex_unlock(&udev->stats.stats_lock); + } +} +EXPORT_SYMBOL(ubase_clear_eth_port_stats); + /** * ubase_get_ub_port_stats() - get ub port stats @@ -133,3 +160,19 @@ void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, mutex_unlock(&record->lock); } + +int ubase_update_eth_stats_trylock(struct ubase_dev *udev) +{ + struct ubase_eth_mac_stats *eth_stats = &udev->stats.eth_stats; + u32 stats_num = sizeof(*eth_stats) / sizeof(u64); + int ret; + + if (!mutex_trylock(&udev->stats.stats_lock)) + return 0; + + ret = ubase_update_mac_stats(udev, udev->caps.dev_caps.io_port_logic_id, + (u64 *)eth_stats, stats_num, true); + mutex_unlock(&udev->stats.stats_lock); + + return ret; +} diff --git a/drivers/ub/ubase/ubase_stats.h b/drivers/ub/ubase/ubase_stats.h index 6b8c70708177..7b221f29474f 100644 --- a/drivers/ub/ubase/ubase_stats.h +++ b/drivers/ub/ubase/ubase_stats.h @@ -17,6 +17,7 @@ struct ubase_query_mac_stats_cmd { int __ubase_get_eth_port_stats(struct ubase_dev *udev, struct ubase_eth_mac_stats *data); +int ubase_update_eth_stats_trylock(struct ubase_dev *udev); void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, int result); diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index c2fbd65268b9..d364201f09c0 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -356,6 +356,7 @@ struct ubase_bus_eid { bool ubase_adev_ubl_supported(struct auxiliary_device *adev); bool ubase_adev_ctrlq_supported(struct auxiliary_device *adev); bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev); +bool ubase_adev_mac_stats_supported(struct auxiliary_device *aux_dev); bool ubase_adev_prealloc_supported(struct auxiliary_device *aux_dev); struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); diff --git a/include/ub/ubase/ubase_comm_stats.h b/include/ub/ubase/ubase_comm_stats.h index ac0ae25b8631..32b3d717d69b 100644 --- a/include/ub/ubase/ubase_comm_stats.h +++ b/include/ub/ubase/ubase_comm_stats.h @@ -232,6 +232,7 @@ struct ubase_perf_stats_result { u32 rx_vl_bw[UBASE_STATS_MAX_VL_NUM]; }; +void ubase_clear_eth_port_stats(struct auxiliary_device *adev); int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, struct ubase_ub_dl_stats *data); int ubase_get_eth_port_stats(struct auxiliary_device *adev, struct ubase_eth_mac_stats *data); -- Gitee From 4e50a57e563e33a041e121fc5c185f60acdff6d0 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Tue, 18 Nov 2025 15:30:24 +0800 Subject: [PATCH 205/243] net: unic: Add support for uboe reset commit 23acf62787cf667770a4bf31b0c5c066a353a801 openEuler This patch adds support for handling port link status changes in uboe devices. It introduces a new function unic_port_reset to manage the link status for these devices. The patch also modifies the existing unic_port_handler function to differentiate between ub and uboe devices when handling link status changes. Key changes include: 1. 
Adding a new function unic_port_reset to handle link status changes for uboe devices. 2. Modifying unic_port_handler to call the appropriate function based on whether the device is ub or uboe. This enhancement ensures that link status changes are handled correctly for all device types, improving the robustness and functionality of the UNIC driver. Signed-off-by: Yixi Shen Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_event.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index b26dd89c6c0b..200f6f6f03e7 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -167,6 +167,18 @@ static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_DOWN; } +static void unic_port_reset(struct net_device *netdev, bool link_up) +{ + rtnl_lock(); + + if (link_up) + unic_net_open(netdev); + else + unic_net_stop(netdev); + + rtnl_unlock(); +} + static void unic_port_handler(struct auxiliary_device *adev, bool link_up) { struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); @@ -175,7 +187,10 @@ static void unic_port_handler(struct auxiliary_device *adev, bool link_up) if (!netif_running(netdev)) return; - unic_rack_port_reset(unic_dev, link_up); + if (unic_dev_ubl_supported(unic_dev)) + unic_rack_port_reset(unic_dev, link_up); + else + unic_port_reset(netdev, link_up); } static struct ubase_ctrlq_event_nb unic_ctrlq_events[] = { -- Gitee From 7e0905fefab52129551ec981bb0cd1f23f018500 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Tue, 25 Nov 2025 21:48:51 +0800 Subject: [PATCH 206/243] ub: ubase: Remove non-cluster mode code commit 5e059812d3570c6b15a3a68c341474e1c021fe36 openEuler The UB driver only supports cluster mode, so the non-cluster code needs to be removed; the driver information will then be controlled by the control module. 
Signed-off-by: Guangwei Zhang Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/Makefile | 2 +- drivers/net/ub/unic/unic_dev.c | 8 +- drivers/net/ub/unic/unic_event.c | 2 +- .../net/ub/unic/{unic_rack_ip.c => unic_ip.c} | 8 +- .../net/ub/unic/{unic_rack_ip.h => unic_ip.h} | 10 +- drivers/net/ub/unic/unic_netdev.c | 2 +- drivers/net/ub/unic/unic_reset.c | 4 +- drivers/ub/ubase/Makefile | 4 +- drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c | 76 +----- drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h | 1 - drivers/ub/ubase/debugfs/ubase_debugfs.c | 12 - drivers/ub/ubase/ubase_ctrlq_tp.c | 226 ------------------ drivers/ub/ubase/ubase_ctrlq_tp.h | 16 -- drivers/ub/ubase/ubase_hw.c | 15 +- drivers/ub/ubase/ubase_hw.h | 22 +- drivers/ub/ubase/ubase_qos_hw.c | 20 +- drivers/ub/ubase/ubase_tp.c | 209 +++++++++++++++- drivers/ub/ubase/ubase_tp.h | 2 + include/ub/ubase/ubase_comm_dev.h | 9 - include/ub/ubase/ubase_comm_hw.h | 2 + 20 files changed, 262 insertions(+), 388 deletions(-) rename drivers/net/ub/unic/{unic_rack_ip.c => unic_ip.c} (99%) rename drivers/net/ub/unic/{unic_rack_ip.h => unic_ip.h} (88%) delete mode 100644 drivers/ub/ubase/ubase_ctrlq_tp.c delete mode 100644 drivers/ub/ubase/ubase_ctrlq_tp.h diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 3d8d175c2984..7d0a2632d00d 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -8,7 +8,7 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o -unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o +unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_ip.o unic_stats.o unic-objs += unic_lb.o unic_vlan.o unic_mac.o unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index a93e4689a235..c63ea8116ca8 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -23,10 +23,10 @@ #include "unic_event.h" #include "unic_guid.h" #include "unic_hw.h" +#include "unic_ip.h" #include "unic_qos_hw.h" #include "unic_mac.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" #include "unic_vlan.h" #include "unic_dev.h" @@ -711,7 +711,7 @@ static void unic_periodic_service_task(struct unic_dev *unic_dev) unic_link_status_update(unic_dev); unic_update_port_info(unic_dev); - unic_sync_rack_ip_table(unic_dev); + unic_sync_ip_table(unic_dev); if (unic_dev_eth_mac_supported(unic_dev)) unic_sync_mac_table(unic_dev); @@ -841,7 +841,7 @@ static int unic_init_vport(struct unic_dev *unic_dev) static void unic_uninit_vport(struct unic_dev *unic_dev) { - unic_uninit_rack_ip_table(unic_dev); + unic_uninit_ip_table(unic_dev); if (unic_dev_eth_mac_supported(unic_dev)) { unic_uninit_mac_table(unic_dev); @@ -1096,7 +1096,7 @@ int unic_dev_init(struct auxiliary_device *adev) goto err_unregister_event; } - unic_query_rack_ip(adev); + unic_query_ip_by_ctrlq(adev); unic_start_dev_period_task(netdev); return 0; diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 200f6f6f03e7..5e0df058d067 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ 
-20,10 +20,10 @@ #include "unic_dcbnl.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_ip.h" #include "unic_mac.h" #include "unic_netdev.h" #include "unic_qos_hw.h" -#include "unic_rack_ip.h" #include "unic_reset.h" #include "unic_event.h" diff --git a/drivers/net/ub/unic/unic_rack_ip.c b/drivers/net/ub/unic/unic_ip.c similarity index 99% rename from drivers/net/ub/unic/unic_rack_ip.c rename to drivers/net/ub/unic/unic_ip.c index 529856dcff73..e83baddded87 100644 --- a/drivers/net/ub/unic/unic_rack_ip.c +++ b/drivers/net/ub/unic/unic_ip.c @@ -13,7 +13,7 @@ #include "unic_comm_addr.h" #include "unic_trace.h" -#include "unic_rack_ip.h" +#include "unic_ip.h" static void unic_update_rack_addr_state(struct unic_vport *vport, struct unic_comm_addr_node *addr_node, @@ -334,7 +334,7 @@ static void unic_rack_sync_addr_table(struct unic_vport *vport, unic_sync_rack_ip_list(vport, &tmp_add_list, UNIC_CTRLQ_ADD_IP); } -void unic_sync_rack_ip_table(struct unic_dev *unic_dev) +void unic_sync_ip_table(struct unic_dev *unic_dev) { struct unic_vport *vport = &unic_dev->vport; @@ -655,7 +655,7 @@ static void unic_update_rack_ip_list(struct unic_vport *vport, spin_unlock_bh(&vport->addr_tbl.ip_list_lock); } -void unic_query_rack_ip(struct auxiliary_device *adev) +void unic_query_ip_by_ctrlq(struct auxiliary_device *adev) { #define UNIC_LOOP_COUNT(total_size, size) ((total_size) / (size) + 1) @@ -706,7 +706,7 @@ void unic_query_rack_ip(struct auxiliary_device *adev) } } -void unic_uninit_rack_ip_table(struct unic_dev *unic_dev) +void unic_uninit_ip_table(struct unic_dev *unic_dev) { struct unic_vport *vport = &unic_dev->vport; struct list_head *list = &vport->addr_tbl.ip_list; diff --git a/drivers/net/ub/unic/unic_rack_ip.h b/drivers/net/ub/unic/unic_ip.h similarity index 88% rename from drivers/net/ub/unic/unic_rack_ip.h rename to drivers/net/ub/unic/unic_ip.h index 48f62eb0fb70..a73e8490536c 100644 --- a/drivers/net/ub/unic/unic_rack_ip.h +++ b/drivers/net/ub/unic/unic_ip.h @@ -4,8 +4,8 @@ * */ -#ifndef __UNIC_RACK_IP_H__ -#define __UNIC_RACK_IP_H__ +#ifndef __UNIC_IP_H__ +#define __UNIC_IP_H__ #include "unic_dev.h" #include "unic_comm_addr.h" @@ -72,11 +72,11 @@ static inline void unic_format_masked_ip_addr(char *format_masked_ip_addr, ip_addr[IP_START_BYTE + 2], ip_addr[IP_START_BYTE + 3]); } -void unic_sync_rack_ip_table(struct unic_dev *unic_dev); +void unic_sync_ip_table(struct unic_dev *unic_dev); int unic_handle_notify_ip_event(struct auxiliary_device *adev, u8 service_ver, void *data, u16 len, u16 seq); -void unic_query_rack_ip(struct auxiliary_device *adev); -void unic_uninit_rack_ip_table(struct unic_dev *unic_dev); +void unic_query_ip_by_ctrlq(struct auxiliary_device *adev); +void unic_uninit_ip_table(struct unic_dev *unic_dev); int unic_add_ip_addr(struct unic_dev *unic_dev, struct sockaddr *addr, u16 ip_mask); int unic_rm_ip_addr(struct unic_dev *unic_dev, struct sockaddr *addr, diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index 681709ed341a..566496e8eb13 100644 --- a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -24,13 +24,13 @@ #include "unic_dev.h" #include "unic_event.h" #include "unic_hw.h" +#include "unic_ip.h" #include "unic_mac.h" #include "unic_rx.h" #include "unic_tx.h" #include "unic_txrx.h" #include "unic_vlan.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" static int unic_netdev_set_tcs(struct net_device *netdev) { diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c 
index 4035fa9cf15c..69995449b2df 100644 --- a/drivers/net/ub/unic/unic_reset.c +++ b/drivers/net/ub/unic/unic_reset.c @@ -9,9 +9,9 @@ #include "unic_cmd.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_ip.h" #include "unic_mac.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" #include "unic_reset.h" static void unic_dev_suspend(struct unic_dev *unic_dev) @@ -94,7 +94,7 @@ static void unic_reset_init(struct auxiliary_device *adev) if (ret) goto err_unic_resume; - unic_query_rack_ip(adev); + unic_query_ip_by_ctrlq(adev); unic_start_period_task(netdev); if_running = netif_running(netdev); diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile index 67440b08bcfe..f266263aa3af 100644 --- a/drivers/ub/ubase/Makefile +++ b/drivers/ub/ubase/Makefile @@ -12,8 +12,8 @@ MODULE_NAME := ubase UBASE_OBJS := ubase_main.o ubase_dev.o ubase_hw.o ubase_cmd.o ubase_ctrlq.o \ debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o \ debugfs/ubase_ctx_debugfs.o debugfs/ubase_qos_debugfs.o \ - ubase_qos_hw.o ubase_tp.o ubase_ctrlq_tp.o ubase_reset.o \ - ubase_err_handle.o ubase_pmem.o ubase_stats.o ubase_arq.o + ubase_qos_hw.o ubase_tp.o ubase_reset.o ubase_err_handle.o \ + ubase_stats.o ubase_arq.o ubase_pmem.o $(MODULE_NAME)-objs := $(UBASE_OBJS) obj-$(CONFIG_UB_UBASE) := ubase.o diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c index 63f27093aec1..7f0e5ca18484 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c @@ -51,26 +51,6 @@ static void ubase_dump_ceq_ctx(struct seq_file *s, struct ubase_dev *udev, u32 i ubase_dump_eq_ctx(s, eq); } -static void ubase_tpg_ctx_titles_print(struct seq_file *s) -{ - seq_puts(s, "CHANNEL_ID TPGN TP_SHIFT VALID_TP "); - seq_puts(s, "START_TPN TPG_STATE TP_CNT\n"); -} - -static void ubase_dump_tpg_ctx(struct seq_file *s, struct ubase_dev *udev, u32 idx) -{ - struct ubase_tpg *tpg = &udev->tp_ctx.tpg[idx]; - - seq_printf(s, "%-12u", idx); - seq_printf(s, "%-9u", tpg->mb_tpgn); - seq_printf(s, "%-10u", tpg->tp_shift); - seq_printf(s, "%-10lu", tpg->valid_tp); - seq_printf(s, "%-11u", tpg->start_tpn); - seq_printf(s, "%-11u", tpg->tpg_state); - seq_printf(s, "%-8u", tpg->tp_cnt); - seq_puts(s, "\n"); -} - enum ubase_dbg_ctx_type { UBASE_DBG_AEQ_CTX = 0, UBASE_DBG_CEQ_CTX, @@ -118,26 +98,14 @@ static int ubase_dbg_dump_context(struct seq_file *s, } dbg_ctx[] = { {ubase_eq_ctx_titles_print, ubase_dump_aeq_ctx}, {ubase_eq_ctx_titles_print, ubase_dump_ceq_ctx}, - {ubase_tpg_ctx_titles_print, ubase_dump_tpg_ctx}, }; struct ubase_dev *udev = dev_get_drvdata(s->private); - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - unsigned long port_bitmap; - u32 tp_pos, i; + u32 i; dbg_ctx[ctx_type].print_ctx_titles(s); - port_bitmap = unic_caps->utp_port_bitmap; - for (i = 0; i < ubase_get_ctx_num(udev, ctx_type, UBASE_DEFAULT_CTXGN); i++) { - if (ctx_type != UBASE_DBG_TP_CTX) { - dbg_ctx[ctx_type].get_ctx(s, udev, i); - continue; - } - - tp_pos = (i % unic_caps->tpg.depth) * UBASE_TP_PORT_BITMAP_STEP; - if (test_bit(tp_pos, &port_bitmap)) - dbg_ctx[ctx_type].get_ctx(s, udev, i); - } + for (i = 0; i < ubase_get_ctx_num(udev, ctx_type, UBASE_DEFAULT_CTXGN); i++) + dbg_ctx[ctx_type].get_ctx(s, udev, i); return 0; } @@ -152,10 +120,8 @@ struct ubase_ctx_info { static inline u32 ubase_get_ctx_group_num(struct ubase_dev *udev, enum ubase_dbg_ctx_type ctx_type) { - if (ctx_type == UBASE_DBG_TP_CTX) - return udev->caps.unic_caps.tpg.max_cnt; 
- - return 1; + return ctx_type == UBASE_DBG_TP_CTX ? udev->caps.unic_caps.tpg.max_cnt : + 1; } static void ubase_get_ctx_info(struct ubase_dev *udev, @@ -177,7 +143,7 @@ static void ubase_get_ctx_info(struct ubase_dev *udev, break; case UBASE_DBG_TPG_CTX: ctx_info->start_idx = udev->caps.unic_caps.tpg.start_idx; - ctx_info->ctx_size = udev->ctx_buf.tpg.entry_size; + ctx_info->ctx_size = UBASE_TPG_CTX_SIZE; ctx_info->op = UBASE_MB_QUERY_TPG_CONTEXT; ctx_info->ctx_name = "tpg"; break; @@ -187,7 +153,7 @@ static void ubase_get_ctx_info(struct ubase_dev *udev, udev->tp_ctx.tpg[ctxgn].start_tpn : 0; spin_unlock(&udev->tp_ctx.tpg_lock); - ctx_info->ctx_size = udev->ctx_buf.tp.entry_size; + ctx_info->ctx_size = UBASE_TP_CTX_SIZE; ctx_info->op = UBASE_MB_QUERY_TP_CONTEXT; ctx_info->ctx_name = "tp"; break; @@ -348,31 +314,6 @@ int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data) return ret; } -int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data) -{ - struct ubase_dev *udev = dev_get_drvdata(s->private); - int ret; - - if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) - return -EBUSY; - - if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) - return -EOPNOTSUPP; - - if (!spin_trylock(&udev->tp_ctx.tpg_lock)) - return -EBUSY; - - if (!udev->tp_ctx.tpg) { - spin_unlock(&udev->tp_ctx.tpg_lock); - return -EBUSY; - } - - ret = ubase_dbg_dump_context(s, UBASE_DBG_TPG_CTX); - spin_unlock(&udev->tp_ctx.tpg_lock); - - return ret; -} - int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data) { struct ubase_dev *udev = dev_get_drvdata(s->private); @@ -380,9 +321,6 @@ int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data) if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) return -EBUSY; - if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) - return -EOPNOTSUPP; - return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_TPG_CTX); } diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h index 532665141fc8..824c289b9700 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h @@ -11,7 +11,6 @@ struct device; int ubase_dbg_dump_aeq_context(struct seq_file *s, void *data); int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data); -int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data); int ubase_dbg_dump_tp_ctx_hw(struct seq_file *s, void *data); int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data); int ubase_dbg_dump_aeq_ctx_hw(struct seq_file *s, void *data); diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index a562ef8543b2..147bc8c0a3fe 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -130,12 +130,8 @@ static void ubase_dbg_dump_adev_caps(struct seq_file *s, {"\tjfr_depth: %u\n", caps->jfr.depth}, {"\tjfc_max_cnt: %u\n", caps->jfc.max_cnt}, {"\tjfc_depth: %u\n", caps->jfc.depth}, - {"\ttp_max_cnt: %u\n", caps->tp.max_cnt}, - {"\ttp_depth: %u\n", caps->tp.depth}, {"\ttpg_max_cnt: %u\n", caps->tpg.max_cnt}, - {"\ttpg_depth: %u\n", caps->tpg.depth}, {"\tcqe_size: %hu\n", caps->cqe_size}, - {"\tutp_port_bitmap: 0x%x\n", caps->utp_port_bitmap}, {"\tjtg_max_cnt: %u\n", caps->jtg_max_cnt}, {"\trc_max_cnt: %u\n", caps->rc_max_cnt}, {"\trc_depth: %u\n", caps->rc_que_depth}, @@ -535,14 +531,6 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_activate_record, }, - { - 
.name = "tpg_context", - .dentry_index = UBASE_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_URMA | UBASE_SUP_UBL_ETH, - .support = __ubase_dbg_dentry_support, - .init = __ubase_dbg_seq_file_init, - .read_func = ubase_dbg_dump_tpg_ctx, - }, { .name = "tp_context_hw", .dentry_index = UBASE_DBG_DENTRY_CONTEXT, diff --git a/drivers/ub/ubase/ubase_ctrlq_tp.c b/drivers/ub/ubase/ubase_ctrlq_tp.c deleted file mode 100644 index 5ebcb609a205..000000000000 --- a/drivers/ub/ubase/ubase_ctrlq_tp.c +++ /dev/null @@ -1,226 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. - * - */ - -#include - -#include "ubase_ctrlq.h" -#include "ubase_dev.h" -#include "ubase_tp.h" -#include "ubase_ctrlq_tp.h" - -#define UBASE_TRANS_TYPE_UM_TP 0x2 -#define UBASE_TPG_FLAG 0x1 - -int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tpn) -{ - struct ubase_tpg *tpg = udev->tp_ctx.tpg; - struct ubase_ctrlq_tp_fd_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int ret, tmp_resp; - u32 i; - - spin_lock(&udev->tp_ctx.tpg_lock); - if (!tpg) { - spin_unlock(&udev->tp_ctx.tpg_lock); - ubase_warn(udev, - "ubase tpg res does not exist, tpn = %u.\n", tpn); - return 0; - } - - for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { - if (tpn >= tpg[i].start_tpn && - tpn < tpg[i].start_tpn + tpg[i].tp_cnt) { - ubase_dbg(udev, - "receive tp flush done AE, tpn:%u, tpgn:%u.\n", - tpn, i); - break; - } - } - spin_unlock(&udev->tp_ctx.tpg_lock); - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_TP_FLUSH_DONE; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(tmp_resp); - msg.out = &tmp_resp; - req.tpn = cpu_to_le32(tpn); - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret) - ubase_err(udev, "failed to notify tp flush done, ret = %d.\n", - ret); - - spin_lock(&udev->tp_ctx.tpg_lock); - if (udev->tp_ctx.tpg && i < udev->caps.unic_caps.tpg.max_cnt) - atomic_inc(&tpg[i].tp_fd_cnt); - else - ubase_warn(udev, - "ubase tpg res does not exist, tpn = %u.\n", tpn); - spin_unlock(&udev->tp_ctx.tpg_lock); - - return ret; -} - -static int ubase_create_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) -{ - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - struct ubase_ctrlq_create_tp_resp resp = {0}; - struct ubase_ctrlq_create_tp_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int ret; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_CREATE_TP; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(resp); - msg.out = &resp; - - req.trans_type = UBASE_TRANS_TYPE_UM_TP; - req.vl = (u8)vl; - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret && ret != -EEXIST) { - ubase_err(udev, "failed to alloc tp tpg, ret = %d.\n", ret); - return ret; - } - - tp_ctx->tpg[vl].mb_tpgn = le32_to_cpu(resp.tpgn); - tp_ctx->tpg[vl].start_tpn = le32_to_cpu(resp.start_tpn); - tp_ctx->tpg[vl].tp_cnt = resp.tpn_cnt; - - if (tp_ctx->tpg[vl].mb_tpgn != vl) - ubase_warn(udev, "unexpected tpgn, vl = %u, tpgn = %u.\n", - vl, tp_ctx->tpg[vl].mb_tpgn); - - return 0; -} - -static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) -{ - struct ubase_tpg *tpg = &udev->tp_ctx.tpg[vl]; - int i; - - for (i = 0; i < UBASE_WAIT_TP_FLUSH_TOTAL_STEPS; i++) { - msleep(1 << i); - - if (atomic_read(&tpg->tp_fd_cnt) == tpg->tp_cnt) - return; - } - - 
ubase_warn(udev, - "wait tp flush done timeout, tpgn = %u, tp_fd_cnt = %u.\n", - vl, atomic_read(&tpg->tp_fd_cnt)); -} - -static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) -{ - struct ubase_ctrlq_destroy_tp_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int tmp_resp, ret; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_DESTROY_TP; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(tmp_resp); - msg.out = &tmp_resp; - - req.vl = (u8)vl; - req.trans_type = UBASE_TRANS_TYPE_UM_TP; - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret) { - ubase_err(udev, - "failed to send destroy tp tpg request, tpgn = %u, ret = %d.\n", - vl, ret); - return; - } - - ubase_wait_tp_flush_done_by_ctrlq(udev, vl); -} - -static void ubase_destroy_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 num) -{ - u32 idx; - - for (idx = 0; idx < num; idx++) - ubase_destroy_tp_tpg_by_ctrlq(udev, idx); -} - -static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) -{ - int ret; - u32 i; - - for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { - atomic_set(&udev->tp_ctx.tpg[i].tp_fd_cnt, 0); - ret = ubase_create_tp_tpg_by_ctrlq(udev, i); - if (ret) { - ubase_err(udev, "failed to create tp tpg, tpgn = %u, ret = %d.\n", - i, ret); - goto err_create_tp_tpg; - } - } - - return 0; - -err_create_tp_tpg: - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, i); - - return ret; -} - -void ubase_dev_uninit_rack_tp_tpg(struct ubase_dev *udev) -{ - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - u32 num = unic_caps->tpg.max_cnt; - - if (!tp_ctx->tpg) - return; - - if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, num); - - spin_lock(&tp_ctx->tpg_lock); - devm_kfree(udev->dev, tp_ctx->tpg); - tp_ctx->tpg = NULL; - spin_unlock(&tp_ctx->tpg_lock); -} - -int ubase_dev_init_rack_tp_tpg(struct ubase_dev *udev) -{ - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - int ret; - - spin_lock(&tp_ctx->tpg_lock); - tp_ctx->tpg = devm_kcalloc(udev->dev, unic_caps->tpg.max_cnt, - sizeof(struct ubase_tpg), GFP_ATOMIC); - if (!tp_ctx->tpg) { - spin_unlock(&tp_ctx->tpg_lock); - return -ENOMEM; - } - spin_unlock(&tp_ctx->tpg_lock); - - ret = ubase_create_multi_tp_tpg_by_ctrlq(udev); - if (ret) { - spin_lock(&tp_ctx->tpg_lock); - devm_kfree(udev->dev, tp_ctx->tpg); - tp_ctx->tpg = NULL; - spin_unlock(&tp_ctx->tpg_lock); - } - - return ret; -} diff --git a/drivers/ub/ubase/ubase_ctrlq_tp.h b/drivers/ub/ubase/ubase_ctrlq_tp.h deleted file mode 100644 index 3d647d5d1625..000000000000 --- a/drivers/ub/ubase/ubase_ctrlq_tp.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
- * - */ - -#ifndef __UBASE_CTRLQ_TP_H__ -#define __UBASE_CTRLQ_TP_H__ - -#include "ubase_dev.h" - -int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tp_num); -void ubase_dev_uninit_rack_tp_tpg(struct ubase_dev *udev); -int ubase_dev_init_rack_tp_tpg(struct ubase_dev *udev); - -#endif diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index 2b68ba5884f5..c473d12ddf72 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -162,7 +162,6 @@ static void ubase_parse_dev_caps_unic(struct ubase_dev *udev, unic_caps->jfc.max_cnt = le32_to_cpu(resp->nic_jfc_max_cnt); unic_caps->jfc.depth = le32_to_cpu(resp->nic_jfc_depth); unic_caps->cqe_size = le16_to_cpu(resp->nic_cqe_size); - unic_caps->utp_port_bitmap = le32_to_cpu(resp->port_bitmap); } static void ubase_parse_dev_caps_udma(struct ubase_dev *udev, @@ -585,7 +584,7 @@ static void ubase_uninit_dma_buf(struct ubase_dev *udev, buf->addr = NULL; } -static int ubase_init_ta_tp_ext_buf(struct ubase_dev *udev) +static int ubase_init_ta_ext_buf(struct ubase_dev *udev) { UBASE_DEFINE_DMA_BUFS(udev); int i, ret; @@ -609,7 +608,7 @@ static int ubase_init_ta_tp_ext_buf(struct ubase_dev *udev) return ret; } -static void ubase_uninit_ta_tp_ext_buf(struct ubase_dev *udev) +static void ubase_uninit_ta_ext_buf(struct ubase_dev *udev) { UBASE_DEFINE_DMA_BUFS(udev); int i; @@ -860,9 +859,9 @@ int ubase_hw_init(struct ubase_dev *udev) return ret; } - ret = ubase_init_ta_tp_ext_buf(udev); + ret = ubase_init_ta_ext_buf(udev); if (ret) - goto err_init_ta_tp_ext_buf; + goto err_init_ta_ext_buf; ret = ubase_dev_init_tp_tpg(udev); if (ret) { @@ -875,8 +874,8 @@ int ubase_hw_init(struct ubase_dev *udev) return 0; err_init_tp_tpg: - ubase_uninit_ta_tp_ext_buf(udev); -err_init_ta_tp_ext_buf: + ubase_uninit_ta_ext_buf(udev); +err_init_ta_ext_buf: ubase_uninit_ctx_buf(udev); return ret; @@ -887,7 +886,7 @@ void ubase_hw_uninit(struct ubase_dev *udev) clear_bit(UBASE_STATE_CTX_READY_B, &udev->state_bits); ubase_dev_uninit_tp_tpg(udev); - ubase_uninit_ta_tp_ext_buf(udev); + ubase_uninit_ta_ext_buf(udev); if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { ubase_ctrlq_disable_remote(udev); diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index 2c7ed2264aab..e292053905ec 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -70,39 +70,33 @@ struct ubase_res_cmd_resp { __le32 nic_jfc_max_cnt; u8 rsvd11[4]; __le32 nic_jfc_depth; - __le32 nic_tp_max_cnt; - __le32 nic_tp_reserved_cnt; - __le32 nic_tp_depth; - __le32 nic_tpg_max_cnt; + u8 rsvd12[16]; - __le32 nic_tpg_reserved_cnt; - __le32 nic_tpg_depth; + u8 rsvd13[8]; __le32 total_ue_num; - u8 rsvd12[16]; + u8 rsvd14[16]; __le16 rsvd_jetty_cnt; __le16 mac_stats_num; __le32 ta_extdb_buf_size; __le32 ta_timer_buf_size; __le32 public_jetty_cnt; - __le32 tp_extdb_buf_size; - __le32 tp_timer_buf_size; - u8 resv13; + u8 rsvd15[9]; u8 udma_vl_num; u8 udma_tp_resp_vl_offset; u8 ue_num; __le32 port_bitmap; - u8 rsvd14[4]; + u8 rsvd16[4]; /* include udma tp and ctp req vl */ u8 udma_req_vl[UBASE_MAX_REQ_VL_NUM]; __le32 udma_rc_depth; - u8 rsvd15[4]; + u8 rsvd17[4]; __le32 jtg_max_cnt; __le32 rc_max_cnt_per_vl; - u8 rsvd16[8]; + u8 rsvd18[8]; - u8 rsvd17[32]; + u8 rsvd19[32]; }; struct ubase_query_oor_resp { diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index ca65878d9515..ca5051ce4c44 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -745,7 +745,7 @@ static int 
ubase_assign_urma_vl(struct ubase_dev *udev, u8 *urma_sl, return 0; } -static int ubase_parse_rack_nic_vl(struct ubase_dev *udev) +static int ubase_parse_nic_vl(struct ubase_dev *udev) { return ubase_assign_urma_vl(udev, udev->qos.nic_sl, udev->qos.nic_sl_num, udev->qos.nic_vl, &udev->qos.nic_vl_num); @@ -812,7 +812,7 @@ static int ubase_parse_rack_cdma_req_sl_vl(struct ubase_dev *udev) return 0; } -static int ubase_parse_rack_cdma_sl_vl(struct ubase_dev *udev) +static int ubase_parse_cdma_sl_vl(struct ubase_dev *udev) { int ret; @@ -828,16 +828,16 @@ static int ubase_parse_rack_cdma_sl_vl(struct ubase_dev *udev) return 0; } -static inline int ubase_parse_rack_nic_sl_vl(struct ubase_dev *udev) +static inline int ubase_parse_nic_sl_vl(struct ubase_dev *udev) { - return ubase_parse_rack_nic_vl(udev); + return ubase_parse_nic_vl(udev); } -static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) +static int ubase_parse_urma_sl_vl(struct ubase_dev *udev) { int ret; - ret = ubase_parse_rack_nic_sl_vl(udev); + ret = ubase_parse_nic_sl_vl(udev); if (ret) return ret; @@ -851,13 +851,13 @@ static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) return 0; } -static int ubase_parse_rack_adev_sl_vl(struct ubase_dev *udev) +static int ubase_parse_adev_sl_vl(struct ubase_dev *udev) { if (ubase_dev_cdma_supported(udev)) - return ubase_parse_rack_cdma_sl_vl(udev); + return ubase_parse_cdma_sl_vl(udev); if (ubase_dev_urma_supported(udev)) - return ubase_parse_rack_urma_sl_vl(udev); + return ubase_parse_urma_sl_vl(udev); return 0; } @@ -913,7 +913,7 @@ static int ubase_parse_sl_vl(struct ubase_dev *udev) if (ret) return ret; - ret = ubase_parse_rack_adev_sl_vl(udev); + ret = ubase_parse_adev_sl_vl(udev); if (ret) return ret; diff --git a/drivers/ub/ubase/ubase_tp.c b/drivers/ub/ubase/ubase_tp.c index 2b11b39fb6a4..f18854fdc319 100644 --- a/drivers/ub/ubase/ubase_tp.c +++ b/drivers/ub/ubase/ubase_tp.c @@ -4,10 +4,66 @@ * */ -#include "ubase_ctrlq_tp.h" +#include + +#include "ubase_ctrlq.h" #include "ubase_reset.h" #include "ubase_tp.h" +int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tpn) +{ + struct ubase_ctrlq_tp_fd_req req = {0}; + struct ubase_ctrlq_msg msg = {0}; + struct ubase_tpg *tpg; + int ret, tmp_resp; + u32 i; + + spin_lock(&udev->tp_ctx.tpg_lock); + tpg = udev->tp_ctx.tpg; + if (!tpg) { + spin_unlock(&udev->tp_ctx.tpg_lock); + ubase_warn(udev, + "ubase tpg res does not exist, tpn = %u.\n", tpn); + return 0; + } + + for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { + if (tpn >= tpg[i].start_tpn && + tpn < tpg[i].start_tpn + tpg[i].tp_cnt) { + ubase_dbg(udev, + "receive tp flush done AE, tpn:%u, tpgn:%u.\n", + tpn, i); + break; + } + } + spin_unlock(&udev->tp_ctx.tpg_lock); + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_TP_FLUSH_DONE; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(tmp_resp); + msg.out = &tmp_resp; + req.tpn = cpu_to_le32(tpn); + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret) + ubase_err(udev, "failed to notify tp flush done, ret = %d.\n", + ret); + + spin_lock(&udev->tp_ctx.tpg_lock); + if (udev->tp_ctx.tpg && i < udev->caps.unic_caps.tpg.max_cnt) + atomic_inc(&tpg[i].tp_fd_cnt); + else + ubase_warn(udev, + "ubase tpg res does not exist, tpn = %u.\n", tpn); + spin_unlock(&udev->tp_ctx.tpg_lock); + + return ret; +} + int ubase_ae_tp_flush_done(struct notifier_block *nb, unsigned long event, void *data) { @@ -40,18 
+96,165 @@ int ubase_ae_tp_level_error(struct notifier_block *nb, unsigned long event, return 0; } +static int ubase_create_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + struct ubase_ctrlq_create_tp_resp resp = {0}; + struct ubase_ctrlq_create_tp_req req = {0}; + struct ubase_ctrlq_msg msg = {0}; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_CREATE_TP; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(resp); + msg.out = &resp; + + req.trans_type = UBASE_TRANS_TYPE_UM_TP; + req.vl = (u8)vl; + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret && ret != -EEXIST) { + ubase_err(udev, "failed to alloc tp tpg, ret = %d.\n", ret); + return ret; + } + + tp_ctx->tpg[vl].mb_tpgn = le32_to_cpu(resp.tpgn); + tp_ctx->tpg[vl].start_tpn = le32_to_cpu(resp.start_tpn); + tp_ctx->tpg[vl].tp_cnt = resp.tpn_cnt; + + if (tp_ctx->tpg[vl].mb_tpgn != vl) + ubase_warn(udev, "unexpected tpgn, vl = %u, tpgn = %u.\n", + vl, tp_ctx->tpg[vl].mb_tpgn); + + return 0; +} + +static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_tpg *tpg = &udev->tp_ctx.tpg[vl]; + int i; + + for (i = 0; i < UBASE_WAIT_TP_FLUSH_TOTAL_STEPS; i++) { + msleep(1 << i); + + if (atomic_read(&tpg->tp_fd_cnt) == tpg->tp_cnt) + return; + } + + ubase_warn(udev, + "wait tp flush done timeout, tpgn = %u, tp_fd_cnt = %u.\n", + vl, atomic_read(&tpg->tp_fd_cnt)); +} + +static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_ctrlq_destroy_tp_req req = {0}; + struct ubase_ctrlq_msg msg = {0}; + int tmp_resp, ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_DESTROY_TP; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(tmp_resp); + msg.out = &tmp_resp; + + req.vl = (u8)vl; + req.trans_type = UBASE_TRANS_TYPE_UM_TP; + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret) { + ubase_err(udev, + "failed to send destroy tp tpg request, tpgn = %u, ret = %d.\n", + vl, ret); + return; + } + + ubase_wait_tp_flush_done_by_ctrlq(udev, vl); +} + +static void ubase_destroy_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 num) +{ + u32 idx; + + for (idx = 0; idx < num; idx++) + ubase_destroy_tp_tpg_by_ctrlq(udev, idx); +} + +static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) +{ + int ret; + u32 i; + + for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { + ret = ubase_create_tp_tpg_by_ctrlq(udev, i); + if (ret) { + ubase_err(udev, "failed to create tp tpg, tpgn = %u, ret = %d.\n", + i, ret); + goto err_create_tp_tpg; + } + } + + return 0; + +err_create_tp_tpg: + ubase_destroy_multi_tp_tpg_by_ctrlq(udev, i); + + return ret; +} + int ubase_dev_init_tp_tpg(struct ubase_dev *udev) { + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + int ret; + if (!ubase_utp_supported(udev) || !ubase_dev_urma_supported(udev)) return 0; - return ubase_dev_init_rack_tp_tpg(udev); + spin_lock(&tp_ctx->tpg_lock); + tp_ctx->tpg = devm_kcalloc(udev->dev, unic_caps->tpg.max_cnt, + sizeof(struct ubase_tpg), GFP_ATOMIC); + if (!tp_ctx->tpg) { + spin_unlock(&tp_ctx->tpg_lock); + return -ENOMEM; + } + spin_unlock(&tp_ctx->tpg_lock); + + ret = ubase_create_multi_tp_tpg_by_ctrlq(udev); + if (ret) { + 
spin_lock(&tp_ctx->tpg_lock); + devm_kfree(udev->dev, tp_ctx->tpg); + tp_ctx->tpg = NULL; + spin_unlock(&tp_ctx->tpg_lock); + } + + return ret; } void ubase_dev_uninit_tp_tpg(struct ubase_dev *udev) { + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + u32 num = unic_caps->tpg.max_cnt; + if (!ubase_utp_supported(udev) || !ubase_dev_urma_supported(udev)) return; - ubase_dev_uninit_rack_tp_tpg(udev); + if (!tp_ctx->tpg) + return; + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_destroy_multi_tp_tpg_by_ctrlq(udev, num); + + spin_lock(&tp_ctx->tpg_lock); + devm_kfree(udev->dev, tp_ctx->tpg); + tp_ctx->tpg = NULL; + spin_unlock(&tp_ctx->tpg_lock); } diff --git a/drivers/ub/ubase/ubase_tp.h b/drivers/ub/ubase/ubase_tp.h index 0506e77c98f0..42a3cb4eb8c5 100644 --- a/drivers/ub/ubase/ubase_tp.h +++ b/drivers/ub/ubase/ubase_tp.h @@ -11,6 +11,8 @@ #include "ubase_dev.h" +#define UBASE_TRANS_TYPE_UM_TP 0x2 + #define UBASE_TP_PORT_BITMAP_STEP 2 #define UBASE_WAIT_TP_FLUSH_TOTAL_STEPS 12 diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index d364201f09c0..37950410345e 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -168,10 +168,8 @@ struct ubase_pmem_caps { * @jfs: jfs resource capabilities * @jfr: jfr resource capabilities * @jfc: jfc resource capabilities - * @tp: tp resource capabilities * @tpg: tp group resource capabilities * @pmem: physical memory capabilities - * @utp_port_bitmap: utp port bitmap * @jtg_max_cnt: jetty group max count * @rc_max_cnt: rc max count * @rc_que_depth: rc queue depth @@ -185,10 +183,8 @@ struct ubase_adev_caps { struct ubase_res_caps jfs; struct ubase_res_caps jfr; struct ubase_res_caps jfc; - struct ubase_res_caps tp; struct ubase_res_caps tpg; struct ubase_pmem_caps pmem; - u32 utp_port_bitmap; /* utp port bitmap */ u32 jtg_max_cnt; u32 rc_max_cnt; u32 rc_que_depth; @@ -222,8 +218,6 @@ struct ubase_ctx_buf_cap { * @jfc: jfc context buffer capability * @jtg: jetty group context buffer capability * @rc: rc context buffer capability - * @tp: tp context buffer capability - * @tpg: tp group context buffer capability */ struct ubase_ctx_buf { struct ubase_ctx_buf_cap jfs; @@ -231,9 +225,6 @@ struct ubase_ctx_buf { struct ubase_ctx_buf_cap jfc; struct ubase_ctx_buf_cap jtg; struct ubase_ctx_buf_cap rc; - - struct ubase_ctx_buf_cap tp; - struct ubase_ctx_buf_cap tpg; }; struct net_device; diff --git a/include/ub/ubase/ubase_comm_hw.h b/include/ub/ubase/ubase_comm_hw.h index 2efac24e3268..0a572b59fc10 100644 --- a/include/ub/ubase/ubase_comm_hw.h +++ b/include/ub/ubase/ubase_comm_hw.h @@ -17,6 +17,8 @@ #define UBASE_JFC_CTX_SIZE 128 #define UBASE_RC_CTX_SIZE 256 #define UBASE_JTG_CTX_SIZE 8 +#define UBASE_TP_CTX_SIZE 256 +#define UBASE_TPG_CTX_SIZE 64 #define UBASE_DESC_DATA_LEN 6 -- Gitee From 5218fc340a300e8297b19a4b097d055ffa354c3c Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 9 Dec 2025 19:18:04 +0800 Subject: [PATCH 207/243] iommu/ummu: Delete unnecessary commands commit 8bc95ffa3088636af7537724f7f1b9e910e8b096 openEuler 1. The chip does not support the CMD_CFGI_TECTS_PIDM command on N6 and N7. 2. QEMU does not use the CMD_CFGI_TECTS_PIDM command. 
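3. Therefore, drop the CMD_CFGI_TECTS_PIDM opcode definition and its fixup case in ummu_fix_user_cmd.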
Fixes: 7de87cb06076 ("iommu/ummu: Support UMMU nested mode") Signed-off-by: Yanlong Zhu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/nested.c | 1 - drivers/iommu/hisilicon/queue.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 0eed1d9f8d25..23a233cbfa66 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -184,7 +184,6 @@ static int ummu_fix_user_cmd(struct ummu_device *ummu, case CMD_CFGI_TECT_RANGE: case CMD_CFGI_TCT: case CMD_CFGI_TCT_ALL: - case CMD_CFGI_TECTS_PIDM: cmd[2] &= ~CMD_CFGI_2_TECTE_TAG; cmd[2] |= FIELD_PREP(CMD_CFGI_2_TECTE_TAG, tecte_tag); break; diff --git a/drivers/iommu/hisilicon/queue.h b/drivers/iommu/hisilicon/queue.h index a1e3f28928ac..df82d58fac0f 100644 --- a/drivers/iommu/hisilicon/queue.h +++ b/drivers/iommu/hisilicon/queue.h @@ -204,7 +204,6 @@ struct ummu_mcmdq_ent { #define CMD_CFGI_TECT_RANGE 0x09 #define CMD_CFGI_TCT 0x0A #define CMD_CFGI_TCT_ALL 0x0B -#define CMD_CFGI_TECTS_PIDM 0x0C struct { bool leaf; u32 tid; -- Gitee From d6f152d601de7355583f750c745691f4c89d3ade Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 08:26:21 +0800 Subject: [PATCH 208/243] iommu/ummu: UMMU doesn't send tect sync after delete eid commit b451b1e0768d121cb16cd0ceae754cfffd4e0ace openEuler Fixes: 322b574b8b2a ("iommu/ummu: Add tct/tect ops for configuration table") Signed-off-by: Sihui Jiang Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/cfg_table.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/hisilicon/cfg_table.c b/drivers/iommu/hisilicon/cfg_table.c index 7c7a486a7bdd..31f9a81317c3 100644 --- a/drivers/iommu/hisilicon/cfg_table.c +++ b/drivers/iommu/hisilicon/cfg_table.c @@ -1174,7 +1174,12 @@ void ummu_build_s2_domain_tecte(struct ummu_domain *u_domain, static bool check_tecte_can_set(const struct ummu_tecte_data *tecte, const struct ummu_tecte_data *src) { - u32 st_mode = FIELD_GET(TECT_ENT0_ST_MODE, le64_to_cpu(tecte->data[0])); + u32 st_mode; + + if (!src->data[0]) + return true; + + st_mode = FIELD_GET(TECT_ENT0_ST_MODE, le64_to_cpu(tecte->data[0])); switch (st_mode) { case TECT_ENT0_ST_MODE_ABORT: @@ -1298,8 +1303,11 @@ void ummu_del_eid(struct ummu_core_device *core_dev, guid_t *guid, eid_t eid, en } ummu_device_delete_kvtbl(ummu, meta->tecte_tag, eid, kv_index); - if (kref_read(&meta->ref) == 1) + /* 2 indicates that only the last EID remains. 
*/ + if (kref_read(&meta->ref) == 2) { ummu_device_write_tecte(ummu, meta->tecte_tag, &ummu_clear_tecte); + meta->valid = false; + } os_meta_del_eid(meta, eid); } -- Gitee From 499e5fbea7b6b5e43ce004618e13ea15992d046f Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 08:45:50 +0800 Subject: [PATCH 209/243] iommu/ummu: Fix compilation option configuration about CONFIG_ACPI commit 3a6d8249afbb244a4ce5308f9d2ea21f2ef2c1c9 openEuler Fixes: 0db2fc397b9d ("iommu/ummu: Support UMMU device") Signed-off-by: Jie Wang Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/perm_table.c | 16 +--------------- drivers/iommu/hisilicon/sva.h | 2 +- drivers/iommu/hisilicon/ummu_main.c | 10 ++++++++-- drivers/perf/hisilicon/ummu_pmu.c | 10 ++++++++-- drivers/ub/ubfi/ummu.c | 2 +- 5 files changed, 19 insertions(+), 21 deletions(-) diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index 3827fa7b017b..cc90d1e32364 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -75,20 +75,6 @@ static const u32 g_mapt_range_bits[MAPT_MAX_LVL_INDEX + 1][2] = { { 47, 39 }, (GET_BITS_MASK(g_mapt_range_bits[level][0] - \ g_mapt_range_bits[level][1] + 1))) -#define GET_LEVEL_INDEX_RANGE(base, limit, lvl, base_index, limit_index, \ - cross_level) \ - do { \ - (base_index) = GET_LEVEL_BLOCK_INDEX(base, lvl); \ - if ((limit) >> (g_mapt_range_bits[lvl][0] + 1) == \ - (base) >> (g_mapt_range_bits[lvl][0] + 1)) { \ - (limit_index) = GET_LEVEL_BLOCK_INDEX(limit, lvl); \ - cross_level = false; \ - } else { \ - (limit_index) = MAPT_MAX_ENTRY_INDEX - 1; \ - cross_level = true; \ - } \ - } while (0) - #define ENTRY_ADDR_LOW(addr) FIELD_GET(GENMASK(31, 0), (addr)) #define ENTRY_ADDR_HIGH(addr) FIELD_GET(GENMASK(47, 32), (addr)) @@ -945,7 +931,7 @@ static int ummu_table_clear_node_by_level(struct ummu_data_info *data_info, static int ummu_table_clear_head_node(struct ummu_data_info *data_info, u32 level, struct ummu_mapt_table_node *pre_node, struct ummu_mapt_table_node *cur_node, u64 node_base, - u64 node_limit) + u64 node_limit) { u16 loop_cnt, max_loop = MAPT_MAX_ENTRY_INDEX << MAPT_MAX_LVL_INDEX; u64 rest_node_base, cur_base, cur_limit; diff --git a/drivers/iommu/hisilicon/sva.h b/drivers/iommu/hisilicon/sva.h index e91fa1e11920..4b3189fcbcff 100644 --- a/drivers/iommu/hisilicon/sva.h +++ b/drivers/iommu/hisilicon/sva.h @@ -55,7 +55,7 @@ static inline int ummu_master_disable_sva(struct ummu_master *master, return -ENODEV; } -static void ummu_iopf_queue_free(struct ummu_device *ummu) +static inline void ummu_iopf_queue_free(struct ummu_device *ummu) { } diff --git a/drivers/iommu/hisilicon/ummu_main.c b/drivers/iommu/hisilicon/ummu_main.c index 53adf3d2fdcf..61e8f52a6a19 100644 --- a/drivers/iommu/hisilicon/ummu_main.c +++ b/drivers/iommu/hisilicon/ummu_main.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include "logic_ummu/logic_ummu.h" #include "ummu_impl.h" @@ -766,24 +768,28 @@ static void ummu_device_shutdown(struct platform_device *pdev) ummu_device_disable(ummu); } +#ifdef CONFIG_OF static const struct of_device_id hisi_ummu_of_match[] = { { .compatible = "ub,ummu", }, { } }; MODULE_DEVICE_TABLE(of, hisi_ummu_of_match); +#endif +#ifdef CONFIG_ACPI static const struct acpi_device_id hisi_ummu_acpi_match[] = { { "HISI0551", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_ummu_acpi_match); +#endif struct platform_driver ummu_driver = { .driver = { .name = UMMU_DRV_NAME, 
.suppress_bind_attrs = true, - .of_match_table = hisi_ummu_of_match, - .acpi_match_table = hisi_ummu_acpi_match, + .of_match_table = of_match_ptr(hisi_ummu_of_match), + .acpi_match_table = ACPI_PTR(hisi_ummu_acpi_match), }, .probe = ummu_device_probe, .remove = ummu_device_remove, diff --git a/drivers/perf/hisilicon/ummu_pmu.c b/drivers/perf/hisilicon/ummu_pmu.c index d145bcee10fa..69b79bf1efce 100644 --- a/drivers/perf/hisilicon/ummu_pmu.c +++ b/drivers/perf/hisilicon/ummu_pmu.c @@ -5,6 +5,8 @@ * Monitor Counter Groups (PMCG) associated with an UMMU node to monitor that node. */ +#include +#include #include #include #include @@ -1029,17 +1031,21 @@ static void ummu_pmu_shutdown(struct platform_device *pdev) ummu_pmu_disable(&ummu_pmu->pmu); } +#ifdef CONFIG_OF static const struct of_device_id hisi_ummu_pmu_of_match[] = { { .compatible = "ub,ummu_pmu", }, { } }; MODULE_DEVICE_TABLE(of, hisi_ummu_pmu_of_match); +#endif +#ifdef CONFIG_ACPI static const struct acpi_device_id hisi_ummu_pmu_acpi_match[] = { {"HISI0571", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_ummu_pmu_acpi_match); +#endif static ssize_t partid_store(struct device *kobj, struct device_attribute *attr, const char *buf, size_t count) @@ -1143,8 +1149,8 @@ static struct platform_driver ummu_pmu_driver = { .driver = { .name = UMMU_PMU_DRV_NAME, .suppress_bind_attrs = true, - .of_match_table = hisi_ummu_pmu_of_match, - .acpi_match_table = hisi_ummu_pmu_acpi_match, + .of_match_table = of_match_ptr(hisi_ummu_pmu_of_match), + .acpi_match_table = ACPI_PTR(hisi_ummu_pmu_acpi_match), .dev_groups = ummu_pmu_groups }, .probe = ummu_pmu_probe, diff --git a/drivers/ub/ubfi/ummu.c b/drivers/ub/ubfi/ummu.c index 93f6dcbf8aa6..a1f6dd61c51c 100644 --- a/drivers/ub/ubfi/ummu.c +++ b/drivers/ub/ubfi/ummu.c @@ -173,6 +173,7 @@ static int ummu_config_update(struct platform_device *pdev, return 0; } +#ifdef CONFIG_ACPI static acpi_status acpi_processor_ummu(acpi_handle handle, u32 lvl, void *context, void **rv) { @@ -239,7 +240,6 @@ static acpi_status acpi_processor_ummu(acpi_handle handle, u32 lvl, return status; } -#ifdef CONFIG_ACPI static int acpi_update_ummu_config(struct ummu_node *ummu_node, u32 index) { acpi_status status; -- Gitee From 5addc146f476f5b310e01ccd090469acf6ba78fa Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 10:33:13 +0800 Subject: [PATCH 210/243] iommu/ummu: NO plbi for grant, config permq need dma_wmb commit dc81fdc0cc1c6e5ee0c7f7eb53f66bcafafac115 openEuler A grant operation needs no plbi, so leave plb_gather->size at 0 for grants; also insert a dma_wmb() so the permq context-table entry is visible before the control-table entry is initialized. Fixes: f8bb769aa5fd ("iommu/ummu: Add UMMU permission queue") Signed-off-by: Lizhi He Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/perm_queue.c | 1 + drivers/iommu/hisilicon/perm_table.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/hisilicon/perm_queue.c b/drivers/iommu/hisilicon/perm_queue.c index ae85563bce78..a9eed282ebfc 100644 --- a/drivers/iommu/hisilicon/perm_queue.c +++ b/drivers/iommu/hisilicon/perm_queue.c @@ -335,6 +335,7 @@ int ummu_domain_config_permq(struct ummu_domain *domain) domain->qid = qid; ummu_init_permq_ctxtbl_ent(domain, permq); + dma_wmb(); ummu_init_permq_ctrltbl_ent(ummu->ucmdq_ctrl_page, qid); return 0; diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index cc90d1e32364..f984ccd5a9e8 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -1239,6 +1239,9 @@ int ummu_perm_grant(struct iommu_domain *domain, void *va, size_t size, ret = ummu_update_info(data_info.op,
mapt_info, &data_info); plb_gather->va = (void *)data_info.data_base; - plb_gather->size = data_info.data_size; + if (data_info.op == UMMU_GRANT) + plb_gather->size = 0; + else + plb_gather->size = data_info.data_size; data_info.tokenval = 0; return ret; -- Gitee From 787956d30d6c770f527bbbba5c1b1b7083953f33 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 10:49:24 +0800 Subject: [PATCH 211/243] iommu/ummu: Fix VM multi-instance problem commit 005bd47130be6f04dc315eb4322531714d1c64bf openEuler In the scenario of multiple virtual machine instances, there is an issue with traversing UMMU instances: ummu_cfg_sync derived the ummu device from the parent domain instead of the base domain being traversed. Use the base domain's core device and skip the config sync for nested domains. Fixes: 7876e979bbdb ("iommu/ummu: Implement domain and core ops in logic UMMU framework") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/iommu.c | 11 +++-------- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 8 +++----- drivers/iommu/hisilicon/ummu.h | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/hisilicon/iommu.c b/drivers/iommu/hisilicon/iommu.c index 2a50d7bed835..b1f694ad10c3 100644 --- a/drivers/iommu/hisilicon/iommu.c +++ b/drivers/iommu/hisilicon/iommu.c @@ -676,7 +676,7 @@ static void ummu_cfg_sync(struct ummu_base_domain *base_domain) else u_domain = to_ummu_domain(&base_domain->domain); - ummu = core_to_ummu_device(u_domain->base_domain.core_dev); + ummu = core_to_ummu_device(base_domain->core_dev); tag = u_domain->cfgs.tecte_tag; tid = u_domain->base_domain.tid; @@ -713,18 +713,13 @@ static int ummu_sync_dom_cfg(struct ummu_base_domain *src, dst_domain->cfgs.tecte_tag = src_domain->cfgs.tecte_tag; dst_domain->cfgs.stage = src_domain->cfgs.stage; break; - case SYNC_NESTED_DOM_MUTI_CFG: - src_domain = to_nested_domain(&src->domain)->s2_parent; - dst_domain = to_nested_domain(&dst->domain)->s2_parent; - dst_domain->base_domain.tid = src_domain->base_domain.tid; - dst_domain->cfgs.tecte_tag = src_domain->cfgs.tecte_tag; - dst_domain->cfgs.stage = src_domain->cfgs.stage; - break; case SYNC_CLEAR_DOM_ALL_CFG: dst_domain = to_ummu_domain(&dst->domain); memset(&dst_domain->cfgs, 0, sizeof(dst_domain->cfgs)); dst_domain->base_domain.tid = UMMU_INVALID_TID; break; + case SYNC_TYPE_NONE: + break; default: return -EINVAL; } diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index fd66ca5fa05f..b5cd7999e9ab 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -192,11 +192,11 @@ static int logic_ummu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct logic_ummu_domain *logic_domain = iommu_to_logic_domain(domain); + const struct ummu_device_helper *helper = get_agent_helper(); const struct ummu_core_ops *core_ops = get_agent_core_ops(); struct ummu_base_domain *ummu_base_domain, *agent_domain; - const struct ummu_device_helper *helper = get_agent_helper(); + enum ummu_dom_cfg_sync_type sync_type = SYNC_TYPE_NONE; const struct iommu_domain_ops *ops; - enum ummu_dom_cfg_sync_type sync_type; int ret; agent_domain = logic_domain->agent_domain; @@ -217,9 +217,7 @@ } /* the domain attributes might be changed, sync to logic domain */ logic_domain_update_attr(logic_domain); - if (domain->type == IOMMU_DOMAIN_NESTED) - sync_type = SYNC_NESTED_DOM_MUTI_CFG; - else + if (domain->type != IOMMU_DOMAIN_NESTED) sync_type = SYNC_DOM_MUTI_CFG; list_for_each_entry(ummu_base_domain,
&logic_domain->base_domain.list, diff --git a/drivers/iommu/hisilicon/ummu.h b/drivers/iommu/hisilicon/ummu.h index 95ff2c927742..a378e39ce93b 100644 --- a/drivers/iommu/hisilicon/ummu.h +++ b/drivers/iommu/hisilicon/ummu.h @@ -240,9 +240,9 @@ struct ummu_hash_table_cfg { /* ummu device inner helper functions */ enum ummu_dom_cfg_sync_type { + SYNC_TYPE_NONE, SYNC_DOM_ALL_CFG, SYNC_DOM_MUTI_CFG, - SYNC_NESTED_DOM_MUTI_CFG, SYNC_CLEAR_DOM_ALL_CFG, }; -- Gitee From 68e673e5d6d1f5cf326d0b21e754148f3f3837e3 Mon Sep 17 00:00:00 2001 From: Jiashun Wang Date: Mon, 1 Dec 2025 14:38:50 +0800 Subject: [PATCH 212/243] iommu/ummu: Optimize chip generational compatibility feature commit d0e9af046f4d46c4941d2a3a02b0addbd22d02ef openEuler To accommodate the functionalities of different generations of chips, some redundant fields had been designed into struct ummu_device. Replace them with feature and option flags to reduce the bloated data members of struct ummu_device. Fixes: 0db2fc397b9d ("iommu/ummu: Support UMMU device") Signed-off-by: Jiashun Wang Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/cfg_table.c | 2 +- drivers/iommu/hisilicon/flush.c | 20 +++++++------ drivers/iommu/hisilicon/nested.c | 4 +-- drivers/iommu/hisilicon/perm_queue.c | 9 +++--- drivers/iommu/hisilicon/perm_queue.h | 3 ++ drivers/iommu/hisilicon/perm_table.c | 2 +- drivers/iommu/hisilicon/queue.c | 3 +- drivers/iommu/hisilicon/sva.c | 2 +- drivers/iommu/hisilicon/ummu.h | 11 ++----- drivers/iommu/hisilicon/ummu_main.c | 45 +++++++++++++++++----------- 10 files changed, 55 insertions(+), 46 deletions(-) diff --git a/drivers/iommu/hisilicon/cfg_table.c b/drivers/iommu/hisilicon/cfg_table.c index 31f9a81317c3..fe208646a8f3 100644 --- a/drivers/iommu/hisilicon/cfg_table.c +++ b/drivers/iommu/hisilicon/cfg_table.c @@ -1125,7 +1125,7 @@ static void ummu_device_make_default_tecte(struct ummu_device *ummu, TECT_ENT0_TCR_EL2 : TECT_ENT0_TCR_NSEL1; target->data[0] = cpu_to_le64( TECT_ENT0_V | FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | - (ummu->cap.support_mapt ? TECT_ENT0_MAPT_EN : 0) | + ((ummu->cap.features & UMMU_FEAT_MAPT) ? TECT_ENT0_MAPT_EN : 0) | FIELD_PREP(TECT_ENT0_ST_MODE, TECT_ENT0_ST_MODE_S1) | FIELD_PREP(TECT_ENT0_PRIV_SEL, TECT_ENT0_PRIV_SEL_PRIV)); diff --git a/drivers/iommu/hisilicon/flush.c b/drivers/iommu/hisilicon/flush.c index ea2aa23100d3..064f964b51be 100644 --- a/drivers/iommu/hisilicon/flush.c +++ b/drivers/iommu/hisilicon/flush.c @@ -328,17 +328,19 @@ void ummu_sync_tct(struct ummu_device *ummu, u32 tecte_tag, u32 tid, .deid_0 = tecte_tag, }, }; - struct ummu_mcmdq_ent cmd_plbi_all = { - .opcode = CMD_PLBI_OS_EIDTID, - .plbi = { - .tid = tid, - .tecte_tag = tecte_tag, - }, - }; - trace_ummu_sync_tct(dev_name(ummu->dev), tecte_tag, tid, leaf); - if (!ummu->cap.prod_ver) + if (ummu->cap.options & UMMU_OPT_SYNC_WITH_PLBI) { + struct ummu_mcmdq_ent cmd_plbi_all = { + .opcode = CMD_PLBI_OS_EIDTID, + .plbi = { + .tid = tid, + .tecte_tag = tecte_tag, + }, + }; ummu_mcmdq_issue_cmd(ummu, &cmd_plbi_all); + } + + trace_ummu_sync_tct(dev_name(ummu->dev), tecte_tag, tid, leaf); ummu_mcmdq_issue_cmd_with_sync(ummu, &cmd_cfgi_tct); } diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 23a233cbfa66..f7804811a5ed 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -31,8 +31,8 @@ static void ummu_build_nested_domain_tct(struct ummu_domain *u_domain, tcr_sel = (ummu->cap.features & UMMU_FEAT_E2H) ?
TECT_ENT0_TCR_EL2 : TECT_ENT0_TCR_NSEL1; target->data[0] |= cpu_to_le64( - FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | - (ummu->cap.support_mapt ? TECT_ENT0_MAPT_EN : 0)); + FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | + ((ummu->cap.features & UMMU_FEAT_MAPT) ? TECT_ENT0_MAPT_EN : 0)); } static void ummu_build_nested_domain_tecte( diff --git a/drivers/iommu/hisilicon/perm_queue.c b/drivers/iommu/hisilicon/perm_queue.c index a9eed282ebfc..2e80dec5eaf9 100644 --- a/drivers/iommu/hisilicon/perm_queue.c +++ b/drivers/iommu/hisilicon/perm_queue.c @@ -12,10 +12,7 @@ #include "regs.h" #include "perm_queue.h" -#define PCMDQ_ENT_BYTES 16U -#define PCPLQ_ENT_BYTES 4U #define PERMQ_CTXTBL_BYTES 64U - #define PERMQ_CTXTBL_STATUS GENMASK(1, 0) #define PERMQ_CTXTBL_RESET 0x0 #define PERMQ_CTXTBL_READY 0x1 @@ -44,8 +41,10 @@ void ummu_device_uninit_permqs(struct ummu_device *ummu) { - if (ummu->cap.support_mapt) - xa_destroy(&ummu->permq_ctx_cfg.permq_xa); + if (!(ummu->cap.features & UMMU_FEAT_MAPT)) + return; + + xa_destroy(&ummu->permq_ctx_cfg.permq_xa); mutex_destroy(&ummu->permq_ctx_cfg.permq_rel_mutex); } diff --git a/drivers/iommu/hisilicon/perm_queue.h b/drivers/iommu/hisilicon/perm_queue.h index d3e8c580e5b6..6f9ac919af29 100644 --- a/drivers/iommu/hisilicon/perm_queue.h +++ b/drivers/iommu/hisilicon/perm_queue.h @@ -10,6 +10,9 @@ #define UMMU_INVALID_QID ((u32)-1) +#define PCMDQ_ENT_BYTES 16U +#define PCPLQ_ENT_BYTES 4U + #define PQ_WRAP(idx, size) ((idx) & (size)) #define PQ_IDX(idx, size) ((idx) & ((size) - 1)) diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index f984ccd5a9e8..ec14575b3cba 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -203,7 +203,7 @@ static int ummu_alloc_mapt_mem_for_table(struct ummu_domain *ummu_domain, goto err_out; } - if (ummu->cap.prod_ver == NO_PROD_ID) { + if (ummu->cap.options & UMMU_OPT_CHK_MAPT_CONTINUITY) { ret = ummu_device_check_pa_continuity(ummu, virt_to_phys(alloc_ptr), PAGE_ORDER_TO_MAPT_ORDER(blk_para->block_size_order), diff --git a/drivers/iommu/hisilicon/queue.c b/drivers/iommu/hisilicon/queue.c index 1f23c54734c6..7d3640e6e9f3 100644 --- a/drivers/iommu/hisilicon/queue.c +++ b/drivers/iommu/hisilicon/queue.c @@ -251,7 +251,8 @@ static int ummu_mcmdq_init(struct ummu_device *ummu) int cpu, ret; ummu->nr_mcmdq = 1UL << ummu->cap.mcmdq_log2num; - ummu->nr_mcmdq -= 1; + if (ummu->cap.options & UMMU_OPT_MCMDQ_DECREASE) + ummu->nr_mcmdq -= 1; shift = order_base_2(num_possible_cpus() / ummu->nr_mcmdq); ummu->mcmdq = devm_alloc_percpu(ummu->dev, struct ummu_mcmdq *); diff --git a/drivers/iommu/hisilicon/sva.c b/drivers/iommu/hisilicon/sva.c index f9a63544ace0..192771e2304d 100644 --- a/drivers/iommu/hisilicon/sva.c +++ b/drivers/iommu/hisilicon/sva.c @@ -313,7 +313,7 @@ static int ummu_sva_collect_domain_cfg(struct ummu_domain *domain, ioasid_t id) domain->cfgs.sva_mode = UMMU_MODE_SVA; } - if (ummu->cap.support_mapt && + if ((ummu->cap.features & UMMU_FEAT_MAPT) && domain->cfgs.sva_mode != UMMU_MODE_SVA_DISABLE_PTB) { domain->cfgs.s1_cfg.io_pt_cfg.mode = mode; if (!ksva) { diff --git a/drivers/iommu/hisilicon/ummu.h b/drivers/iommu/hisilicon/ummu.h index a378e39ce93b..ea391498c97b 100644 --- a/drivers/iommu/hisilicon/ummu.h +++ b/drivers/iommu/hisilicon/ummu.h @@ -31,11 +31,6 @@ struct ummu_l1_tct_desc { phys_addr_t l2ptr_phys; }; -enum ummu_ver { - NO_PROD_ID = 0, - MAX_VER, -}; - enum ummu_device_msi_index { EVTQ_MSI_INDEX, GERROR_MSI_INDEX, @@ -172,7 +167,6 @@ struct 
ummu_capability { #define UMMU_FEAT_TOKEN_CHK BIT(26) #define UMMU_FEAT_PERMQ BIT(27) #define UMMU_FEAT_NESTING BIT(28) - u32 features; u32 deid_bits; u32 tid_bits; @@ -183,6 +177,9 @@ struct ummu_capability { #define UMMU_OPT_MSIPOLL (1UL << 0) #define UMMU_OPT_DOUBLE_PLBI (1UL << 1) #define UMMU_OPT_KCMD_PLBI (1UL << 2) +#define UMMU_OPT_CHK_MAPT_CONTINUITY (1UL << 3) +#define UMMU_OPT_MCMDQ_DECREASE (1UL << 4) +#define UMMU_OPT_SYNC_WITH_PLBI (1UL << 5) u32 options; #define UMMU_MAX_ASIDS (1UL << 16) @@ -190,7 +187,6 @@ struct ummu_capability { #define UMMU_MAX_VMIDS (1UL << 16) unsigned int vmid_bits; - bool support_mapt; u32 mcmdq_log2num; u32 mcmdq_log2size; u32 evtq_log2num; @@ -202,7 +198,6 @@ } permq_ent_num; u32 mtm_gp_max; u32 mtm_id_max; - u16 prod_ver; }; struct ummu_permq_addr { diff --git a/drivers/iommu/hisilicon/ummu_main.c b/drivers/iommu/hisilicon/ummu_main.c index 61e8f52a6a19..14d6e08b2c2c 100644 --- a/drivers/iommu/hisilicon/ummu_main.c +++ b/drivers/iommu/hisilicon/ummu_main.c @@ -30,7 +30,7 @@ #define UMMU_DRV_NAME "ummu" #define HISI_VENDOR_ID 0xCC08 -static bool ummu_special_identify; +static u16 ummu_chip_identifier; int ummu_write_reg_sync(struct ummu_device *ummu, u32 val, u32 reg_off, u32 ack_off) @@ -125,7 +125,7 @@ static int ummu_init_structures(struct ummu_device *ummu) if (ret) goto resource_release; - if (ummu->cap.support_mapt) { + if (ummu->cap.features & UMMU_FEAT_MAPT) { /* ctrl page is private for every ummu hardware */ ummu_device_init_permq_ctrl_page(ummu); /* ctx table is common for every ummu hardware */ @@ -141,21 +141,26 @@ return ret; } -static void ummu_device_hw_probe_ver(struct ummu_device *ummu) +static void ummu_device_hw_probe_iidr(struct ummu_device *ummu) { u32 reg = readl_relaxed(ummu->base + UMMU_IIDR); - ummu->cap.prod_ver = (u16)FIELD_GET(IIDR_PROD_ID, reg); /* - * On the hisi chip with IIDR_PROD_ID set to 0, - * ummu enables special_identify to perform some - * specialized operations. + * On the 1st generation hisi chip, IIDR_PROD_ID is set to 0, so + * ummu uses chip_identifier to perform some specialized operations.
*/ - if (ummu_special_identify && !ummu->cap.prod_ver) { + if ((ummu_chip_identifier == HISI_VENDOR_ID) && + !FIELD_GET(IIDR_PROD_ID, reg)) { ummu->cap.options |= UMMU_OPT_DOUBLE_PLBI; ummu->cap.options |= UMMU_OPT_KCMD_PLBI; + ummu->cap.options |= UMMU_OPT_CHK_MAPT_CONTINUITY; + ummu->cap.options |= UMMU_OPT_MCMDQ_DECREASE; + ummu->cap.options |= UMMU_OPT_SYNC_WITH_PLBI; ummu->cap.features &= ~UMMU_FEAT_STALLS; } + + dev_notice(ummu->dev, "features 0x%08x, options 0x%08x.\n", + ummu->cap.features, ummu->cap.options); } static void ummu_device_hw_probe_cap0(struct ummu_device *ummu) @@ -178,7 +183,8 @@ static void ummu_device_hw_probe_cap0(struct ummu_device *ummu) ubrt_pasids = ummu->core_dev.iommu.max_pasids; cap_pasids = 1 << ummu->cap.tid_bits; if (ubrt_pasids > cap_pasids) - pr_warn("ubrt max_pasids[%u] beyond capacity.\n", ubrt_pasids); + dev_warn(ummu->dev, "ubrt max_pasids[%u] beyond capacity.\n", + ubrt_pasids); pasids = min(cap_pasids, (1UL << UB_MAX_TID_BITS)); ummu->core_dev.iommu.max_pasids = min(ubrt_pasids, pasids); /* TECTE_TAG size */ @@ -434,10 +440,14 @@ static int ummu_device_hw_probe_cap4(struct ummu_device *ummu) int hw_permq_ent; hw_permq_ent = 1 << FIELD_GET(CAP4_UCMDQ_LOG2SIZE, reg); - ummu->cap.permq_ent_num.cmdq_num = hw_permq_ent; + ummu->cap.permq_ent_num.cmdq_num = + min_t(int, round_up(PAGE_SIZE / PCMDQ_ENT_BYTES, PCMDQ_ENT_BYTES), + hw_permq_ent); hw_permq_ent = 1 << FIELD_GET(CAP4_UCPLQ_LOG2SIZE, reg); - ummu->cap.permq_ent_num.cplq_num = hw_permq_ent; + ummu->cap.permq_ent_num.cplq_num = + min_t(int, round_up(PAGE_SIZE / PCPLQ_ENT_BYTES, PCPLQ_ENT_BYTES), + hw_permq_ent); if (ummu->impl_ops && ummu->impl_ops->hw_probe) return ummu->impl_ops->hw_probe(ummu); @@ -452,7 +462,7 @@ static void ummu_device_hw_probe_cap5(struct ummu_device *ummu) ummu->cap.features |= UMMU_FEAT_RANGE_PLBI; if (reg & CAP5_MAPT_SUPPORT) - ummu->cap.support_mapt = true; + ummu->cap.features |= UMMU_FEAT_MAPT; if (reg & CAP5_PT_GRAN4K_BIT) ummu->cap.ptsize_bitmap |= SZ_4K; @@ -472,8 +482,8 @@ static void ummu_device_hw_probe_cap5(struct ummu_device *ummu) if (ummu_sva_supported(ummu)) ummu->cap.features |= UMMU_FEAT_SVA; - dev_info(ummu->dev, "ias = %u-bit, oas = %u-bit, features = 0x%08x.\n", - ummu->cap.ias, ummu->cap.oas, ummu->cap.features); + dev_info(ummu->dev, "ias %u-bit, oas %u-bit.\n", + ummu->cap.ias, ummu->cap.oas); } static void ummu_device_hw_probe_cap6(struct ummu_device *ummu) @@ -510,7 +520,7 @@ static int ummu_device_hw_init(struct ummu_device *ummu) ummu_device_hw_probe_cap5(ummu); ummu_device_hw_probe_cap6(ummu); - ummu_device_hw_probe_ver(ummu); + ummu_device_hw_probe_iidr(ummu); return 0; } @@ -603,7 +613,7 @@ static int ummu_device_reset(struct ummu_device *ummu) if (ret) return ret; - if (ummu->cap.support_mapt) { + if (ummu->cap.features & UMMU_FEAT_MAPT) { ummu_device_set_permq_ctxtbl(ummu); ret = ummu_device_mapt_enable(ummu); if (ret) @@ -646,8 +656,7 @@ static int ummu_device_ubrt_probe(struct ummu_device *ummu) } node = (struct ummu_node *)fw->ubrt_node; - if (node->vendor_id == HISI_VENDOR_ID) - ummu_special_identify = true; + ummu_chip_identifier = node->vendor_id; ummu->core_dev.iommu.min_pasids = node->min_tid; ummu->core_dev.iommu.max_pasids = node->max_tid; -- Gitee From c3110b8097bd2b8f2839d3fc6dabb267704fc9a4 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:24:15 +0800 Subject: [PATCH 213/243] iommu/ummu: Fixing the issue of uninitialized resources in logic_ummu_viommu commit 30d93a98226eee1c07a8b40a78f45c75d8bd6c8e openEuler 
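Freeing a nested domain left the owning viommu with a stale nested pointer. Record the logic_ummu_viommu in the logic domain when the nested domain is allocated, and clear viommu->nested when that domain is freed.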
Fixes: 7876e979bbdb ("iommu/ummu: Implement domain and core ops in logic UMMU framework") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- .../iommu/hisilicon/logic_ummu/logic_ummu.c | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index b5cd7999e9ab..7dad1e2f4ca8 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -21,11 +21,6 @@ #include "../ummu_cfg_v1.h" #include "logic_ummu.h" -struct logic_ummu_domain { - struct ummu_base_domain base_domain; - struct ummu_base_domain *agent_domain; -}; - struct logic_ummu_device { struct ummu_core_device core_dev; struct ummu_device *agent_device; @@ -41,6 +36,12 @@ struct logic_ummu_viommu { struct iommu_domain *nested; }; +struct logic_ummu_domain { + struct ummu_base_domain base_domain; + struct ummu_base_domain *agent_domain; + struct logic_ummu_viommu *logic_viommu; +}; + struct eid_info { enum eid_type type; eid_t eid; @@ -514,10 +515,12 @@ static void logic_ummu_free(struct iommu_domain *domain) return; } - if (domain->type != IOMMU_DOMAIN_NESTED) + if (domain->type != IOMMU_DOMAIN_NESTED) { logic_domain_free(logic_domain, ops); - else + } else { logic_nested_domain_free(logic_domain, ops); + logic_domain->logic_viommu->nested = NULL; + } kfree(logic_domain); } @@ -1018,6 +1021,7 @@ logic_ummu_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, } } logic_vummu->nested = &logic_domain->base_domain.domain; + logic_domain->logic_viommu = logic_vummu; return &logic_domain->base_domain.domain; error_handle: list_for_each_entry_safe(nested_base_domain, iter, &logic_domain->base_domain.list, list) { @@ -1040,8 +1044,10 @@ logic_ummu_viommu_cache_invalidate(struct iommufd_viommu *viommu, u32 cmd_num, succ_cnt; int err, ret = 0; - if (!logic_vummu->nested || !array) + if (!logic_vummu->nested || !array) { + pr_debug("invalid viommu.\n"); return -EINVAL; + } if (!helper || !helper->cache_invalidate_user) return -EOPNOTSUPP; -- Gitee From 3626b88edc81d378703e283f0efe65e8ed016d34 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:45:06 +0800 Subject: [PATCH 214/243] iommu/ummu: Move tid_type attr to logic ummu commit 11e53fb0223606e325a54134bc3bdfc491d61bf3 openEuler 1. Move tid_type attr to logic ummu. 2. Modify the device IDs of the UMMU and PMU. Fixes: 1dc959aea0fd ("iommu/ummu: Support UMMU attribute show and store operations") Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- .../ABI/testing/sysfs-class-iommu-ummu-iommu | 2 +- drivers/iommu/hisilicon/attribute.c | 39 ------------- .../iommu/hisilicon/logic_ummu/logic_ummu.c | 57 ++++++++++++++++++- drivers/ub/ubfi/ummu.c | 9 ++- 4 files changed, 61 insertions(+), 46 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu index 48ba4d6d4c60..e5b576672af8 100644 --- a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu @@ -104,7 +104,7 @@ Contact: Jingbin Wu Description: Maximum TokenID bit width supported in non-secure state. 
-What: /sys/class/iommu/ummu./ummu-iommu/tid_type +What: /sys/class/iommu/logic_ummu/tid_type Date: Oct 2025 KernelVersion: 6.6 Contact: Jingbin Wu diff --git a/drivers/iommu/hisilicon/attribute.c b/drivers/iommu/hisilicon/attribute.c index adb360ea541c..3ee3c523bb41 100644 --- a/drivers/iommu/hisilicon/attribute.c +++ b/drivers/iommu/hisilicon/attribute.c @@ -187,44 +187,6 @@ static ssize_t eid_list_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(eid_list); -static const char *get_domain_type_str(u32 domain_type) -{ - switch (domain_type) { - case IOMMU_DOMAIN_DMA: - return "IOMMU_DOMAIN_DMA"; - case IOMMU_DOMAIN_SVA: - return "IOMMU_DOMAIN_SVA"; - default: - return "UNKNOWN DOMAIN TYPE"; - } -} - -static ssize_t tid_type_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ummu_core_device *ummu_core; - u32 tid = 0, tid_type; - int ret; - - ret = kstrtouint(buf, 0, &tid); - if (ret < 0 || tid >= UMMU_INVALID_TID) - return -EINVAL; - - ummu_core = to_ummu_core(dev_to_iommu_device(dev)); - ret = ummu_core_get_tid_type(ummu_core, tid, &tid_type); - if (ret) { - pr_err("Invalid tid = 0x%x, ret = %d.\n", tid, ret); - return ret; - } - - pr_info("tid = 0x%x, domain_type = %s.\n", tid, - get_domain_type_str(tid_type)); - - return (ssize_t)count; -} -static DEVICE_ATTR_WO(tid_type); - static struct attribute *ummu_iommu_attrs[] = { &dev_attr_features.attr, &dev_attr_tid_bits.attr, @@ -240,7 +202,6 @@ static struct attribute *ummu_iommu_attrs[] = { &dev_attr_permq_num.attr, &dev_attr_permq_ent_num.attr, &dev_attr_eid_list.attr, - &dev_attr_tid_type.attr, NULL, }; diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index 7dad1e2f4ca8..ef33014e48ce 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -2064,6 +2064,61 @@ static inline struct fwnode_handle *logic_ummu_alloc_fwnode_static(void) return handle; } +static const char *get_domain_type_str(u32 domain_type) +{ + switch (domain_type) { + case IOMMU_DOMAIN_DMA: + return "IOMMU_DOMAIN_DMA"; + case IOMMU_DOMAIN_IDENTITY: + return "IOMMU_DOMAIN_IDENTITY"; + case IOMMU_DOMAIN_SVA: + return "IOMMU_DOMAIN_SVA"; + default: + return "UNKNOWN DOMAIN TYPE"; + } +} + +static ssize_t tid_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ummu_core_device *ummu_core; + u32 tid = 0, tid_type; + int ret; + + ret = kstrtouint(buf, 0, &tid); + if (ret < 0 || tid >= UMMU_INVALID_TID) + return -EINVAL; + + ummu_core = to_ummu_core(dev_to_iommu_device(dev)); + ret = ummu_core_get_tid_type(ummu_core, tid, &tid_type); + if (ret) { + pr_err("Invalid tid = 0x%x, ret = %d.\n", tid, ret); + return ret; + } + + pr_info("tid = 0x%x, domain_type = %s.\n", tid, + get_domain_type_str(tid_type)); + + return (ssize_t)count; +} +static DEVICE_ATTR_WO(tid_type); + +static struct attribute *logic_ummu_attrs[] = { + &dev_attr_tid_type.attr, + NULL, +}; + +static struct attribute_group logic_ummu_group = { + .name = NULL, + .attrs = logic_ummu_attrs, +}; + +const struct attribute_group *logic_ummu_groups[] = { + &logic_ummu_group, + NULL, +}; + int logic_ummu_device_init(void) { int ret; @@ -2088,7 +2143,7 @@ int logic_ummu_device_init(void) pr_err("add logic ummu device failed\n"); goto out_free_fwnode; } - ret = iommu_device_sysfs_add(&logic_ummu.core_dev.iommu, NULL, NULL, + ret = 
iommu_device_sysfs_add(&logic_ummu.core_dev.iommu, NULL, logic_ummu_groups, "%s", "logic_ummu"); if (ret) { pr_err("register logic ummu to sysfs failed.\n"); diff --git a/drivers/ub/ubfi/ummu.c b/drivers/ub/ubfi/ummu.c index a1f6dd61c51c..b1e3618b8318 100644 --- a/drivers/ub/ubfi/ummu.c +++ b/drivers/ub/ubfi/ummu.c @@ -109,14 +109,15 @@ static int __init ummu_add_resources(struct platform_device *pdev, static int ummu_rename_device(struct platform_device *pdev, enum ubrt_node_type type) { - static int device_count; + static int device_ummu_count; + static int device_pmu_count; char new_name[32]; int ret; if (type == UBRT_UMMU) - ret = snprintf(new_name, sizeof(new_name), "ummu.%d", device_count); + ret = snprintf(new_name, sizeof(new_name), "ummu.%d", device_ummu_count++); else - ret = snprintf(new_name, sizeof(new_name), "ummu_pmu.%d", device_count); + ret = snprintf(new_name, sizeof(new_name), "ummu_pmu.%d", device_pmu_count++); if (ret < 0 || ret >= sizeof(new_name)) { dev_err(&pdev->dev, "failed to generate new device name\n"); @@ -130,8 +131,6 @@ static int ummu_rename_device(struct platform_device *pdev, enum ubrt_node_type } pdev->name = pdev->dev.kobj.name; - device_count++; - return 0; } -- Gitee From 9111296b6b9b719405f9e8e07429f290e5dab155 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:55:58 +0800 Subject: [PATCH 215/243] iommu/ummu-core: Duplicate EIDs are not allowed commit 304f42ff1e9f79bcc1bc06598a646ea1140ee014 openEuler A GUID is a globally unique identifier, while an EID is a dynamically assigned unique identifier that may be reused. In certain usage scenarios, if the HOST's GUID is all zeros, it will bind to two different EID values, both of type EID_NONE. For a Virtual Machine, the GUID will only bind to one unique EID, with the type being EID_BYPASS. Matching on the EID alone is therefore sufficient to reject duplicates. Fixes: 2778c6bb9286 ("iommu/ummu-core: add UMMU EID operation interfaces") Signed-off-by: Yanlong Zhu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/ummu-core/core_eid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/hisilicon/ummu-core/core_eid.c b/drivers/iommu/hisilicon/ummu-core/core_eid.c index fd3360d400d0..9877bb892904 100644 --- a/drivers/iommu/hisilicon/ummu-core/core_eid.c +++ b/drivers/iommu/hisilicon/ummu-core/core_eid.c @@ -55,7 +55,7 @@ int ummu_core_add_eid(guid_t *guid, eid_t eid, enum eid_type type) /* cached the eid */ spin_lock(&eid_func_lock); list_for_each_entry(info, &eid_pre_insmode, list) - if (guid_equal(guid, &info->guid) && info->eid == eid) { + if (info->eid == eid) { ret = -EEXIST; goto out_unlock_spin; } @@ -95,7 +95,7 @@ void ummu_core_del_eid(guid_t *guid, eid_t eid, enum eid_type type) /* uncache the eid */ spin_lock(&eid_func_lock); list_for_each_entry_safe(info, next, &eid_pre_insmode, list) - if (guid_equal(guid, &info->guid) && info->eid == eid) { + if (info->eid == eid) { list_del(&info->list); kfree(info); } -- Gitee From c5a70d8b12c0ab70653d41e572328d3f7032f7b2 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 17:52:16 +0800 Subject: [PATCH 216/243] iommu/ummu: Remove redundant CONFIG_UB_UBRT_PLAT_DEV and code commit fa49aa1b3104a1abe0a76d4f3c82994a33447382 openEuler With the ACPI method, when the UMMU requests interrupts, the interrupt ID can be obtained directly from the IORT table, eliminating the need to obtain it from the UBR table. 
Therefore, the CONFIG_UB_UBRT_PLAT_DEV and ubrt_pmsi_get_interrupt_id() functions, which were used to obtain the interrupt ID from the UBR table, have become redundant code. Fixes: 010c6364261c ("ub: ubfi: Parsing ummu node in the ubrt table") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- arch/arm64/configs/tencent.config | 1 - drivers/iommu/hisilicon/Kconfig | 2 +- drivers/irqchip/irq-gic-v3-its-platform-msi.c | 7 ++--- drivers/ub/ubfi/Kconfig | 12 -------- drivers/ub/ubfi/irq.c | 29 ------------------- include/ub/ubfi/ubfi.h | 7 ----- 6 files changed, 3 insertions(+), 55 deletions(-) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 7ca1fa88f877..3fe817902154 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1819,7 +1819,6 @@ CONFIG_UB_UBUS=y # UB Bus Core Driver CONFIG_UB_UBUS_BUS=m CONFIG_UB_UBFI=m -CONFIG_UB_UBRT_PLAT_DEV=y CONFIG_UB_UBUS_USI=y CONFIG_ARM_GIC_V3_ITS_UBUS=y CONFIG_VFIO_UB=m diff --git a/drivers/iommu/hisilicon/Kconfig b/drivers/iommu/hisilicon/Kconfig index e41f492a7ca6..60e612b57ca6 100644 --- a/drivers/iommu/hisilicon/Kconfig +++ b/drivers/iommu/hisilicon/Kconfig @@ -18,7 +18,7 @@ config UB_UMMU_BASE config UB_UMMU tristate "Hisilicon UB MMU Support" depends on ARM64 && ARCH_HISI - depends on UB_UBUS && UB_UBFI && UB_UBRT_PLAT_DEV + depends on UB_UBUS && UB_UBFI default n select IOMMU_API select IOMMU_IO_PGTABLE_LPAE diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 1ca7ef6186a2..884b088f4873 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -80,13 +80,10 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, } #endif - if (dev->of_node) { + if (dev->of_node) ret = of_pmsi_get_dev_id(domain, dev, &dev_id); - } else { + else ret = iort_pmsi_get_dev_id(dev, &dev_id); - if (ret) - ret = ubrt_pmsi_get_interrupt_id(dev, &dev_id); - } if (ret) return ret; diff --git a/drivers/ub/ubfi/Kconfig b/drivers/ub/ubfi/Kconfig index d3889afb2452..4cb8a264fe9a 100644 --- a/drivers/ub/ubfi/Kconfig +++ b/drivers/ub/ubfi/Kconfig @@ -15,16 +15,4 @@ config UB_UBFI within Linux. To compile this driver as a module, choose M here. Say 'M' here unless you know what you are doing -config UB_UBRT_PLAT_DEV - bool "Enable UBRT platform device support" - depends on UB_UBUS - default n - help - This option enables the configuration of platform devices related to - the ub ubrt table. - If enabled, the UBRT-related platform device will obtain the - interrupt ID from the ubrt table instead of the IORT table. - The obtained interrupt ID will be used for the MSI interrupt of the - UBRT-related platform device. 
- endif diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index 5835bc8421b3..3f449ff5aa0b 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -77,32 +77,3 @@ void ubrt_unregister_gsi(u32 hwirq) } EXPORT_SYMBOL_GPL(ubrt_unregister_gsi); -#if IS_ENABLED(CONFIG_UB_UBRT_PLAT_DEV) -int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id) -{ - struct ubrt_fwnode *fw; - struct ummu_node *node; - - if (!dev->fwnode) - return -EINVAL; - - fw = ubrt_fwnode_get(dev->fwnode); - if (!fw) - return -ENODEV; - - switch (fw->type) { - case UBRT_UMMU: - node = (struct ummu_node *)fw->ubrt_node; - *interrupt_id = node->intr_id; - break; - case UBRT_UMMU_PMU: - node = (struct ummu_node *)fw->ubrt_node; - *interrupt_id = node->pmu_intr_id; - break; - default: - return -ENODEV; - } - dev_info(dev, "ubct pmsi successfully obtained interrupt id[0x%x].\n", *interrupt_id); - return 0; -} -#endif diff --git a/include/ub/ubfi/ubfi.h b/include/ub/ubfi/ubfi.h index 5e9a3c2a287c..35d7b195ca16 100644 --- a/include/ub/ubfi/ubfi.h +++ b/include/ub/ubfi/ubfi.h @@ -219,11 +219,4 @@ extern u8 ubc_feature; void ubrt_iommu_get_resv_regions(struct device *dev, struct list_head *list); #endif /* CONFIG_UB_UBFI */ -#if IS_ENABLED(CONFIG_UB_UBRT_PLAT_DEV) -int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id); -#else -static inline int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id) -{ return -ENODEV; } -#endif /* CONFIG_UB_UBRT_PLAT_DEV */ - #endif /* _UB_UBFI_UBFI_H_ */ -- Gitee From 4d336e80664a8c8846dca1b76d050c425e8af3bf Mon Sep 17 00:00:00 2001 From: Yixi Shen Date: Wed, 3 Dec 2025 15:03:40 +0800 Subject: [PATCH 217/243] net: unic: Resolve the bandwidth jitter problem commit 728b60b6400ddf7bef6cfade3468a447b5142d1b openEuler On the IO path, frequent logging in the rx direction caused bandwidth jitter. The rate-limited warning was converted to a trace event. 
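Even a rate-limited printk on the rx softirq path is comparatively expensive, while a disabled tracepoint compiles down to a patched-out static branch, so the hot path stays cheap. A minimal sketch of the pattern; the demo_* names are illustrative (the real event is trace_unic_rq_ci_mismatch() in unic_trace.h), and the sketch assumes a TRACE_EVENT(demo_rq_ci_mismatch, ...) defined elsewhere:

    /* Hot path: keep the cheap comparison, move the reporting to a
     * tracepoint that costs nothing unless enabled via tracefs
     * (events/<system>/demo_rq_ci_mismatch/enable).
     */
    static void demo_check_rq_ci(struct demo_rq *rq, u32 hw_ci)
    {
            if (likely(rq->ci == hw_ci))
                    return;

            /* previously: net_ratelimit() + a driver warning printk */
            trace_demo_rq_ci_mismatch(rq, hw_ci);
    }
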
Fixes: d8164d3745d4 ("net: unic: add io basic Rx/Tx functionality for unic") Signed-off-by: Yixi Shen Signed-off-by: huwentao --- drivers/net/ub/unic/unic_event.c | 8 -------- drivers/net/ub/unic/unic_rx.c | 6 +----- drivers/net/ub/unic/unic_trace.h | 23 +++++++++++++++++++++++ drivers/net/ub/unic/unic_txrx.h | 1 - 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 5e0df058d067..c72614e70640 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -32,7 +32,6 @@ int unic_comp_handler(struct notifier_block *nb, unsigned long jfcn, void *data) struct auxiliary_device *adev = (struct auxiliary_device *)data; struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); struct unic_channels *channels = &unic_dev->channels; - struct unic_cq *cq; u32 index; if (test_bit(UNIC_STATE_CHANNEL_INVALID, &unic_dev->state)) @@ -42,13 +41,6 @@ int unic_comp_handler(struct notifier_block *nb, unsigned long jfcn, void *data) if (index >= channels->num) return -EINVAL; - if (jfcn > channels->num) - cq = channels->c[index].rq->cq; - else - cq = channels->c[index].sq->cq; - - cq->event_cnt++; - napi_schedule(&channels->c[index].napi); return 0; diff --git a/drivers/net/ub/unic/unic_rx.c b/drivers/net/ub/unic/unic_rx.c index 34095fbac40d..dfc108380186 100644 --- a/drivers/net/ub/unic/unic_rx.c +++ b/drivers/net/ub/unic/unic_rx.c @@ -1024,15 +1024,11 @@ static int unic_create_skb(struct unic_rq *rq, struct napi_struct *napi, static void unic_fix_rq_ci(struct unic_rq *rq, union unic_cqe *cqe) { - struct unic_dev *unic_dev = netdev_priv(rq->netdev); u32 start_rqe_idx = cqe->rx.start_rqe_idx; u32 put_page_num; if (unlikely(rq->ci != cqe->rx.start_rqe_idx)) { - if (net_ratelimit()) - unic_warn(unic_dev, - "queue_index(%u) sw_rq_ci(%hu) hw_rq_ci(%u) do not match.\n", - rq->queue_index, rq->ci, start_rqe_idx); + trace_unic_rq_ci_mismatch(rq, start_rqe_idx); if (start_rqe_idx < rq->ci) start_rqe_idx += UNIC_RQ_CI_REVERSE; put_page_num = start_rqe_idx - rq->ci; diff --git a/drivers/net/ub/unic/unic_trace.h b/drivers/net/ub/unic/unic_trace.h index 5b6bc7f19bd2..3ef2695635d4 100644 --- a/drivers/net/ub/unic/unic_trace.h +++ b/drivers/net/ub/unic/unic_trace.h @@ -146,6 +146,29 @@ TRACE_EVENT(unic_ip_req_skb, sizeof(u32)) ) ); + +TRACE_EVENT(unic_rq_ci_mismatch, + TP_PROTO(struct unic_rq *rq, const u32 start_rqe_idx), + TP_ARGS(rq, start_rqe_idx), + + TP_STRUCT__entry(__field(u16, queue_index) + __field(u16, ci) + __field(u32, start_rqe_idx) + __string(devname, rq->netdev->name) + ), + + TP_fast_assign(__entry->queue_index = rq->queue_index; + __entry->ci = rq->ci; + __entry->start_rqe_idx = start_rqe_idx; + __assign_str(devname, rq->netdev->name); + ), + + TP_printk("%s rq_ci: queue_index=%u sw_rq_ci=%u hw_rq_ci=%u", + __get_str(devname), __entry->queue_index, __entry->ci, + __entry->start_rqe_idx + ) +); + #endif /* _UNIC_TRACE_H_ */ /* This must be outside ifdef _UNIC_TRACE_H */ diff --git a/drivers/net/ub/unic/unic_txrx.h b/drivers/net/ub/unic/unic_txrx.h index b52354c8989f..b6229d1d0d04 100644 --- a/drivers/net/ub/unic/unic_txrx.h +++ b/drivers/net/ub/unic/unic_txrx.h @@ -238,7 +238,6 @@ struct unic_cq { struct unic_jfc_ctx jfc_ctx; u32 jfcn; u32 ci; /* the start of next to consume */ - u64 event_cnt; }; static inline u8 unic_get_cqe_size(void) -- Gitee From bbb7d7c0bc88fc2b8aafbb2491106748d029d2a1 Mon Sep 17 00:00:00 2001 From: Guangwei Zhang Date: Tue, 9 Dec 2025 17:53:23 +0800 Subject: [PATCH 218/243] 
ub: ubase: Remove non-cluster mode code. commit f568241d59838b2f9723db1c1d387ed530e8e42e openEuler This patch fixes the following issues: 1. Remove non-cluster mode code. 2. Provide an interface for accessing netdev to the UDMA auxiliary device. 3. Fixing the issue of debugfs querying TP VL as 0 in the uboe scenario Fixes: acb96f3cf8c8 ("ub: ubase: support querying sl information in initialization phase") Signed-off-by: Guangwei Zhang Signed-off-by: Haiqing Fang Signed-off-by: huwentao --- drivers/net/ub/unic/unic_dev.c | 2 +- drivers/net/ub/unic/unic_event.c | 8 +- drivers/net/ub/unic/unic_ip.c | 67 +++++----- drivers/net/ub/unic/unic_ip.h | 2 +- drivers/net/ub/unic/unic_reset.c | 2 +- drivers/ub/ubase/debugfs/ubase_debugfs.c | 38 +++--- drivers/ub/ubase/debugfs/ubase_qos_debugfs.c | 12 +- drivers/ub/ubase/debugfs/ubase_qos_debugfs.h | 2 +- drivers/ub/ubase/ubase_dev.c | 30 +++++ drivers/ub/ubase/ubase_dev.h | 1 - drivers/ub/ubase/ubase_hw.c | 8 +- drivers/ub/ubase/ubase_hw.h | 19 +-- drivers/ub/ubase/ubase_mailbox.c | 13 +- drivers/ub/ubase/ubase_qos_hw.c | 130 +++++++++---------- drivers/ub/ubase/ubase_tp.c | 29 +++-- drivers/ub/ubase/ubase_tp.h | 2 - include/ub/ubase/ubase_comm_dev.h | 26 +--- 17 files changed, 182 insertions(+), 209 deletions(-) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index c63ea8116ca8..7d0405e751b0 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -1096,7 +1096,7 @@ int unic_dev_init(struct auxiliary_device *adev) goto err_unregister_event; } - unic_query_ip_by_ctrlq(adev); + unic_query_ip_addr(adev); unic_start_dev_period_task(netdev); return 0; diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index c72614e70640..796966703751 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -151,7 +151,7 @@ static void unic_activate_handler(struct auxiliary_device *adev, bool activate) unic_deactivate_event_process(unic_dev); } -static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) +static void unic_ub_port_reset(struct unic_dev *unic_dev, bool link_up) { if (link_up) unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_UP; @@ -159,7 +159,7 @@ static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_DOWN; } -static void unic_port_reset(struct net_device *netdev, bool link_up) +static void unic_eth_port_reset(struct net_device *netdev, bool link_up) { rtnl_lock(); @@ -180,9 +180,9 @@ static void unic_port_handler(struct auxiliary_device *adev, bool link_up) return; if (unic_dev_ubl_supported(unic_dev)) - unic_rack_port_reset(unic_dev, link_up); + unic_ub_port_reset(unic_dev, link_up); else - unic_port_reset(netdev, link_up); + unic_eth_port_reset(netdev, link_up); } static struct ubase_ctrlq_event_nb unic_ctrlq_events[] = { diff --git a/drivers/net/ub/unic/unic_ip.c b/drivers/net/ub/unic/unic_ip.c index e83baddded87..dff8b2e87201 100644 --- a/drivers/net/ub/unic/unic_ip.c +++ b/drivers/net/ub/unic/unic_ip.c @@ -15,10 +15,10 @@ #include "unic_trace.h" #include "unic_ip.h" -static void unic_update_rack_addr_state(struct unic_vport *vport, - struct unic_comm_addr_node *addr_node, - enum UNIC_COMM_ADDR_STATE state, - const u8 *addr) +static void unic_update_addr_state(struct unic_vport *vport, + struct unic_comm_addr_node *addr_node, + enum UNIC_COMM_ADDR_STATE state, + const u8 *addr) { struct auxiliary_device *adev = vport->back->comdev.adev; struct unic_dev 
*unic_dev = dev_get_drvdata(&adev->dev); @@ -26,7 +26,7 @@ static void unic_update_rack_addr_state(struct unic_vport *vport, /* update the state of address node by stack in rack server. * if ip node exist in ip_list and receive the ack form stack, - * update_rack_addr_state and handle accidental deletion. + * update_addr_state and handle accidental deletion. */ switch (state) { case UNIC_COMM_ADDR_TO_ADD: @@ -68,7 +68,7 @@ static int unic_update_stack_ip_addr(struct unic_vport *vport, addr_node = unic_comm_find_addr_node(list, addr, ip_mask); if (addr_node) { - unic_update_rack_addr_state(vport, addr_node, state, addr); + unic_update_addr_state(vport, addr_node, state, addr); goto finish_update_state; } @@ -274,9 +274,8 @@ static int unic_sync_stack_ip(struct unic_vport *vport, return ret; } -static void unic_sync_rack_ip_list(struct unic_vport *vport, - struct list_head *list, - enum unic_ctrlq_ip_event state) +static void unic_sync_ip_list(struct unic_vport *vport, struct list_head *list, + enum unic_ctrlq_ip_event state) { struct unic_comm_addr_node *ip_node, *tmp; int ret; @@ -291,9 +290,9 @@ static void unic_sync_rack_ip_list(struct unic_vport *vport, } } -static void unic_rack_sync_addr_table(struct unic_vport *vport, - struct list_head *list, - spinlock_t *addr_list_lock) +static void unic_sync_addr_table(struct unic_vport *vport, + struct list_head *list, + spinlock_t *addr_list_lock) { struct auxiliary_device *adev = vport->back->comdev.adev; struct unic_comm_addr_node *addr_node, *tmp, *new_node; @@ -330,8 +329,8 @@ static void unic_rack_sync_addr_table(struct unic_vport *vport, stop_traverse: spin_unlock_bh(addr_list_lock); - unic_sync_rack_ip_list(vport, &tmp_del_list, UNIC_CTRLQ_DEL_IP); - unic_sync_rack_ip_list(vport, &tmp_add_list, UNIC_CTRLQ_ADD_IP); + unic_sync_ip_list(vport, &tmp_del_list, UNIC_CTRLQ_DEL_IP); + unic_sync_ip_list(vport, &tmp_add_list, UNIC_CTRLQ_ADD_IP); } void unic_sync_ip_table(struct unic_dev *unic_dev) @@ -341,8 +340,8 @@ void unic_sync_ip_table(struct unic_dev *unic_dev) if (!test_bit(UNIC_VPORT_STATE_IP_TBL_CHANGE, &vport->state)) return; - unic_rack_sync_addr_table(vport, &vport->addr_tbl.ip_list, - &vport->addr_tbl.ip_list_lock); + unic_sync_addr_table(vport, &vport->addr_tbl.ip_list, + &vport->addr_tbl.ip_list_lock); } static void unic_build_stack_ip_info(struct unic_ctrlq_ip_notify_req *req, @@ -358,10 +357,10 @@ static void unic_build_stack_ip_info(struct unic_ctrlq_ip_notify_req *req, st_ip->ip_addr[3] = le32_to_be32(req->ip_addr[0]); } -static int unic_update_rack_addr_list(struct list_head *list, - spinlock_t *addr_list_lock, - enum UNIC_COMM_ADDR_STATE state, - const u8 *addr, u16 ip_mask) +static int unic_update_addr_list(struct list_head *list, + spinlock_t *addr_list_lock, + enum UNIC_COMM_ADDR_STATE state, + const u8 *addr, u16 ip_mask) { struct unic_comm_addr_node *addr_node; @@ -489,17 +488,17 @@ int unic_handle_notify_ip_event(struct auxiliary_device *adev, u8 service_ver, } if (st_ip.ip_cmd == UNIC_CTRLQ_ADD_IP) { - ret = unic_update_rack_addr_list(&vport->addr_tbl.ip_list, - &vport->addr_tbl.ip_list_lock, - UNIC_COMM_ADDR_TO_ADD, - (u8 *)&st_ip.ip_addr, - st_ip.ip_mask); + ret = unic_update_addr_list(&vport->addr_tbl.ip_list, + &vport->addr_tbl.ip_list_lock, + UNIC_COMM_ADDR_TO_ADD, + (u8 *)&st_ip.ip_addr, + st_ip.ip_mask); } else if (st_ip.ip_cmd == UNIC_CTRLQ_DEL_IP) { - ret = unic_update_rack_addr_list(&vport->addr_tbl.ip_list, - &vport->addr_tbl.ip_list_lock, - UNIC_COMM_ADDR_TO_DEL, - (u8 *)&st_ip.ip_addr, - st_ip.ip_mask); + 
ret = unic_update_addr_list(&vport->addr_tbl.ip_list, + &vport->addr_tbl.ip_list_lock, + UNIC_COMM_ADDR_TO_DEL, + (u8 *)&st_ip.ip_addr, + st_ip.ip_mask); } else { ret = -EINVAL; unic_err(priv, "invalid ip cmd by ctrlq, cmd = %u.\n", st_ip.ip_cmd); @@ -618,8 +617,8 @@ static int unic_ctrlq_query_ip(struct auxiliary_device *adev, u16 *ip_index, return ret; } -static void unic_update_rack_ip_list(struct unic_vport *vport, - struct list_head *list) +static void unic_update_ip_list(struct unic_vport *vport, + struct list_head *list) { struct unic_comm_addr_node *ip_node, *tmp, *new_node; @@ -655,7 +654,7 @@ static void unic_update_rack_ip_list(struct unic_vport *vport, spin_unlock_bh(&vport->addr_tbl.ip_list_lock); } -void unic_query_ip_by_ctrlq(struct auxiliary_device *adev) +void unic_query_ip_addr(struct auxiliary_device *adev) { #define UNIC_LOOP_COUNT(total_size, size) ((total_size) / (size) + 1) @@ -690,7 +689,7 @@ void unic_query_ip_by_ctrlq(struct auxiliary_device *adev) spin_lock_bh(&vport->addr_tbl.tmp_ip_lock); - unic_update_rack_ip_list(vport, &tmp_list); + unic_update_ip_list(vport, &tmp_list); clear_bit(UNIC_VPORT_STATE_IP_QUERYING, &vport->state); list_for_each_entry_safe(ip_node, tmp, &vport->addr_tbl.tmp_ip_list, node) { diff --git a/drivers/net/ub/unic/unic_ip.h b/drivers/net/ub/unic/unic_ip.h index a73e8490536c..ac95a26c0df5 100644 --- a/drivers/net/ub/unic/unic_ip.h +++ b/drivers/net/ub/unic/unic_ip.h @@ -75,7 +75,7 @@ static inline void unic_format_masked_ip_addr(char *format_masked_ip_addr, void unic_sync_ip_table(struct unic_dev *unic_dev); int unic_handle_notify_ip_event(struct auxiliary_device *adev, u8 service_ver, void *data, u16 len, u16 seq); -void unic_query_ip_by_ctrlq(struct auxiliary_device *adev); +void unic_query_ip_addr(struct auxiliary_device *adev); void unic_uninit_ip_table(struct unic_dev *unic_dev); int unic_add_ip_addr(struct unic_dev *unic_dev, struct sockaddr *addr, u16 ip_mask); diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c index 69995449b2df..6946e8976da0 100644 --- a/drivers/net/ub/unic/unic_reset.c +++ b/drivers/net/ub/unic/unic_reset.c @@ -94,7 +94,7 @@ static void unic_reset_init(struct auxiliary_device *adev) if (ret) goto err_unic_resume; - unic_query_ip_by_ctrlq(adev); + unic_query_ip_addr(adev); unic_start_period_task(netdev); if_running = netif_running(netdev); diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 147bc8c0a3fe..4e6eec73abf9 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -36,24 +36,24 @@ static int ubase_dbg_dump_rst_info(struct seq_file *s, void *data) static void ubase_dbg_dump_caps_bits(struct seq_file *s, struct ubase_dev *udev) { #define CAP_FMT(name) "\tsupport_" #name ": %d\n" -#define PRTINT_CAP(name, func) seq_printf(s, CAP_FMT(name), func(udev)) - - PRTINT_CAP(ub_link, ubase_dev_ubl_supported); - PRTINT_CAP(ta_extdb_buffer_config, ubase_dev_ta_extdb_buf_supported); - PRTINT_CAP(ta_timer_buffer_config, ubase_dev_ta_timer_buf_supported); - PRTINT_CAP(err_handle, ubase_dev_err_handle_supported); - PRTINT_CAP(ctrlq, ubase_dev_ctrlq_supported); - PRTINT_CAP(eth_mac, ubase_dev_eth_mac_supported); - PRTINT_CAP(mac_stats, ubase_dev_mac_stats_supported); - PRTINT_CAP(prealloc, __ubase_dev_prealloc_supported); - PRTINT_CAP(udma, ubase_dev_udma_supported); - PRTINT_CAP(unic, ubase_dev_unic_supported); - PRTINT_CAP(uvb, ubase_dev_uvb_supported); - PRTINT_CAP(ip_over_urma, 
ubase_ip_over_urma_supported); +#define PRINT_CAP(name, func) seq_printf(s, CAP_FMT(name), func(udev)) + + PRINT_CAP(ub_link, ubase_dev_ubl_supported); + PRINT_CAP(ta_extdb_buffer_config, ubase_dev_ta_extdb_buf_supported); + PRINT_CAP(ta_timer_buffer_config, ubase_dev_ta_timer_buf_supported); + PRINT_CAP(err_handle, ubase_dev_err_handle_supported); + PRINT_CAP(ctrlq, ubase_dev_ctrlq_supported); + PRINT_CAP(eth_mac, ubase_dev_eth_mac_supported); + PRINT_CAP(mac_stats, ubase_dev_mac_stats_supported); + PRINT_CAP(prealloc, __ubase_dev_prealloc_supported); + PRINT_CAP(udma, ubase_dev_udma_supported); + PRINT_CAP(unic, ubase_dev_unic_supported); + PRINT_CAP(uvb, ubase_dev_uvb_supported); + PRINT_CAP(ip_over_urma, ubase_ip_over_urma_supported); if (ubase_ip_over_urma_supported(udev)) - PRTINT_CAP(ip_over_urma_utp, ubase_ip_over_urma_utp_supported); - PRTINT_CAP(activate_proxy, ubase_activate_proxy_supported); - PRTINT_CAP(utp, ubase_utp_supported); + PRINT_CAP(ip_over_urma_utp, ubase_ip_over_urma_utp_supported); + PRINT_CAP(activate_proxy, ubase_activate_proxy_supported); + PRINT_CAP(utp, ubase_utp_supported); } static void ubase_dbg_dump_caps_info(struct seq_file *s, struct ubase_dev *udev) @@ -612,12 +612,12 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .read_func = ubase_dbg_dump_perf_stats, }, { - .name = "rack_vl_bitmap", + .name = "vl_bitmap", .dentry_index = UBASE_DBG_DENTRY_QOS, .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_UBL, .support = __ubase_dbg_dentry_support, .init = __ubase_dbg_seq_file_init, - .read_func = ubase_dbg_dump_rack_vl_bitmap, + .read_func = ubase_dbg_dump_vl_bitmap, }, { .name = "adev_qos", diff --git a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c index 91e05df180bb..070b0d363f61 100644 --- a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.c @@ -156,7 +156,7 @@ int ubase_dbg_dump_ets_port_info(struct seq_file *s, void *data) return 0; } -int ubase_dbg_dump_rack_vl_bitmap(struct seq_file *s, void *data) +int ubase_dbg_dump_vl_bitmap(struct seq_file *s, void *data) { struct ubase_dev *udev = dev_get_drvdata(s->private); struct ubase_ctrlq_query_vl_resp resp = {0}; @@ -184,7 +184,7 @@ int ubase_dbg_dump_rack_vl_bitmap(struct seq_file *s, void *data) vl_bitmap = le16_to_cpu(resp.vl_bitmap); - seq_printf(s, "rack vl bitmap : 0x%x", vl_bitmap); + seq_printf(s, "vl bitmap : 0x%x", vl_bitmap); return 0; } @@ -192,9 +192,6 @@ int ubase_dbg_dump_rack_vl_bitmap(struct seq_file *s, void *data) static void ubase_dbg_dump_adev_vl_info(struct seq_file *s, struct ubase_adev_qos *qos) { - seq_puts(s, "vl:"); - ubase_dbg_dump_arr_info(s, qos->vl, qos->vl_num); - seq_puts(s, "tp_req_vl:"); ubase_dbg_dump_arr_info(s, qos->tp_req_vl, qos->tp_vl_num); @@ -208,9 +205,6 @@ static void ubase_dbg_dump_adev_vl_info(struct seq_file *s, static void ubase_dbg_dump_adev_sl_info(struct seq_file *s, struct ubase_adev_qos *qos) { - seq_puts(s, "sl:"); - ubase_dbg_dump_arr_info(s, qos->sl, qos->sl_num); - seq_puts(s, "tp_sl:"); ubase_dbg_dump_arr_info(s, qos->tp_sl, qos->tp_sl_num); @@ -229,12 +223,10 @@ int ubase_dbg_dump_adev_qos_info(struct seq_file *s, void *data) const char *format; u8 qos_info; } adev_qos_info[] = { - {"vl_num: %u\n", qos->vl_num}, {"tp_vl_num: %u\n", qos->tp_vl_num}, {"ctp_vl_num: %u\n", qos->ctp_vl_num}, {"tp_resp_vl_offset: %u\n", qos->tp_resp_vl_offset}, {"ctp_resp_vl_offset: %u\n", qos->ctp_resp_vl_offset}, - {"sl_num: %u\n", qos->sl_num}, {"tp_sl_num: %u\n", 
qos->tp_sl_num}, {"ctp_sl_num: %u\n", qos->ctp_sl_num}, {"nic_sl_num: %u\n", qos->nic_sl_num}, diff --git a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h index e44b4cacd21e..060e4b847283 100644 --- a/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h +++ b/drivers/ub/ubase/debugfs/ubase_qos_debugfs.h @@ -14,7 +14,7 @@ int ubase_dbg_dump_udma_dscp_vl_map(struct seq_file *s, void *data); int ubase_dbg_dump_ets_tc_info(struct seq_file *s, void *data); int ubase_dbg_dump_ets_tcg_info(struct seq_file *s, void *data); int ubase_dbg_dump_ets_port_info(struct seq_file *s, void *data); -int ubase_dbg_dump_rack_vl_bitmap(struct seq_file *s, void *data); +int ubase_dbg_dump_vl_bitmap(struct seq_file *s, void *data); int ubase_dbg_dump_adev_qos_info(struct seq_file *s, void *data); int ubase_dbg_dump_fsv_fvt_rqmt(struct seq_file *s, void *data); int ubase_dbg_dump_tm_queue_info(struct seq_file *s, void *data); diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 4765610b7f09..086e16cef4b6 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -1010,6 +1010,36 @@ struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_dev_caps); +/** + * ubase_get_mdrv_data() - get unic netdev + * @adev: auxiliary device + * + * The function is used to get unic netdev. + * + * Context: Any context. + * Return: NULL if the adev is empty or does not support the unic device, + * otherwise the pointer to struct ubase_adev_com + */ +const struct ubase_adev_com *ubase_get_mdrv_data(struct auxiliary_device *adev) +{ + struct auxiliary_device *unic_adev; + struct ubase_priv *priv; + struct ubase_dev *udev; + + if (!adev) + return NULL; + + udev = __ubase_get_udev_by_adev(adev); + if (!ubase_dev_unic_supported(udev)) + return NULL; + + priv = &udev->priv; + unic_adev = &priv->uadev[UBASE_DRV_UNIC]->adev; + + return dev_get_drvdata(&unic_adev->dev); +} +EXPORT_SYMBOL(ubase_get_mdrv_data); + /** * ubase_get_udma_caps() - get udma auxiliary device capabilities * @adev: udma auxiliary device pointer diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index ee7c5f605e65..5270246540e7 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -70,7 +70,6 @@ struct ubase_dev_caps { struct ubase_adev_caps udma_caps; struct ubase_adev_caps unic_caps; struct ubase_caps dev_caps; - struct ubase_ue_caps ue_caps; }; struct ubase_mbox_cmd { diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index c473d12ddf72..d58fd69cd25f 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -24,7 +24,7 @@ struct ubase_dma_buf_desc { bool (*is_supported)(struct ubase_dev *dev); }; -#define UBASE_DEFINE_DMA_BUFS(udev) \ +#define UBASE_DEFINE_TA_DMA_BUFS(udev) \ struct ubase_dma_buf_desc bufs[] = { \ { &(udev)->ta_ctx.extdb_buf, UBASE_OPC_TA_EXTDB_VA_CONFIG, \ &ubase_dev_ta_extdb_buf_supported }, \ @@ -107,7 +107,7 @@ static void ubase_check_dev_caps_comm(struct ubase_dev *udev) static int ubase_check_dev_caps_extdb(struct ubase_dev *udev) { - UBASE_DEFINE_DMA_BUFS(udev); + UBASE_DEFINE_TA_DMA_BUFS(udev); int i; for (i = 0; i < ARRAY_SIZE(bufs); i++) { @@ -586,7 +586,7 @@ static void ubase_uninit_dma_buf(struct ubase_dev *udev, static int ubase_init_ta_ext_buf(struct ubase_dev *udev) { - UBASE_DEFINE_DMA_BUFS(udev); + UBASE_DEFINE_TA_DMA_BUFS(udev); int i, ret; for (i = 0; i < ARRAY_SIZE(bufs); i++) { @@ -610,7 +610,7 @@ static int 
ubase_init_ta_ext_buf(struct ubase_dev *udev) static void ubase_uninit_ta_ext_buf(struct ubase_dev *udev) { - UBASE_DEFINE_DMA_BUFS(udev); + UBASE_DEFINE_TA_DMA_BUFS(udev); int i; for (i = 0; i < ARRAY_SIZE(bufs); i++) { diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index e292053905ec..1655331cb4d5 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -49,9 +49,7 @@ struct ubase_res_cmd_resp { __le32 udma_jfr_max_cnt; u8 rsvd3[4]; __le32 udma_jfr_depth; - u8 nic_vl_num; - u8 rsvd4[3]; - u8 nic_vl[UBASE_MAX_REQ_VL_NUM]; + u8 rsvd4[12]; __le32 udma_jfc_max_cnt; u8 rsvd5[4]; @@ -81,22 +79,19 @@ struct ubase_res_cmd_resp { __le32 ta_extdb_buf_size; __le32 ta_timer_buf_size; __le32 public_jetty_cnt; - u8 rsvd15[9]; - u8 udma_vl_num; + u8 rsvd15[10]; u8 udma_tp_resp_vl_offset; u8 ue_num; - __le32 port_bitmap; - u8 rsvd16[4]; + u8 rsvd16[8]; - /* include udma tp and ctp req vl */ - u8 udma_req_vl[UBASE_MAX_REQ_VL_NUM]; + u8 rsvd17[8]; __le32 udma_rc_depth; - u8 rsvd17[4]; + u8 rsvd18[4]; __le32 jtg_max_cnt; __le32 rc_max_cnt_per_vl; - u8 rsvd18[8]; + u8 rsvd19[8]; - u8 rsvd19[32]; + u8 rsvd20[32]; }; struct ubase_query_oor_resp { diff --git a/drivers/ub/ubase/ubase_mailbox.c b/drivers/ub/ubase/ubase_mailbox.c index a92993f5ac0b..b530d555af3b 100644 --- a/drivers/ub/ubase/ubase_mailbox.c +++ b/drivers/ub/ubase/ubase_mailbox.c @@ -401,8 +401,8 @@ static bool ubase_is_jfs_opcode(u8 op) } static struct ubase_ctx_buf_cap* -ubase_parse_ta_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, - enum ubase_mb_type *type) +ubase_parse_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, + enum ubase_mb_type *type) { struct mbx_op_match ta_matches[] = { {UBASE_MB_CREATE_JFS_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.jfs}, @@ -428,18 +428,11 @@ ubase_parse_ta_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, }; u32 size = ARRAY_SIZE(ta_matches); - return ubase_parse_common_buf(attr, ta_matches, type, size); -} - -static struct ubase_ctx_buf_cap* -ubase_parse_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, - enum ubase_mb_type *type) -{ if (ubase_is_jfs_opcode(attr->op) && test_bit(UBASE_STATE_PREALLOC_OK_B, &udev->state_bits)) return NULL; - return ubase_parse_ta_opcode_buf(udev, attr, type); + return ubase_parse_common_buf(attr, ta_matches, type, size); } static int ubase_check_buf_ctx_page(struct ubase_dev *udev, diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index ca5051ce4c44..e7737e98e1e5 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -414,8 +414,12 @@ static unsigned long ubase_get_sl_bitmap(struct ubase_dev *udev) for (i = 0; i < qos->nic_sl_num; i++) sl_bitmap |= 1 << qos->nic_sl[i]; - for (i = 0; i < qos->sl_num; i++) - sl_bitmap |= 1 << qos->sl[i]; + + for (i = 0; i < qos->tp_sl_num; i++) + sl_bitmap |= 1 << qos->tp_sl[i]; + + for (i = 0; i < qos->ctp_sl_num; i++) + sl_bitmap |= 1 << qos->ctp_sl[i]; return sl_bitmap; } @@ -594,14 +598,6 @@ static int ubase_query_ctp_vl_offset(struct ubase_dev *udev, u8 *ctp_vl_offset) return 0; } -static inline void ubase_parse_udma_req_vl_uboe(struct ubase_dev *udev) -{ - struct ubase_adev_qos *qos = &udev->qos; - - qos->tp_vl_num = qos->vl_num; - memcpy(qos->tp_req_vl, qos->vl, qos->vl_num); -} - static int ubase_check_ctp_resp_vl(struct ubase_dev *udev, u8 ctp_vl_offset) { struct ubase_adev_qos *qos = &udev->qos; @@ -639,24 +635,18 @@ static int ubase_parse_ctp_resp_vl(struct ubase_dev *udev) 
return 0; } -static bool ubase_get_vl_sl(struct ubase_dev *udev, u8 vl, u8 *sl, u8 *sl_num) +static void ubase_get_vl_sl(struct ubase_dev *udev, u8 vl, u8 *sl, u8 *sl_num) { - bool sl_exist = false; u8 i; for (i = 0; i < UBASE_MAX_SL_NUM; i++) { - if (udev->qos.ue_sl_vl[i] == vl) { + if (udev->qos.ue_sl_vl[i] == vl) sl[(*sl_num)++] = i; - sl_exist = true; - } } - - return sl_exist; } -static void ubase_gather_udma_req_resp_vl(struct ubase_dev *udev, - u8 *req_vl, u8 req_vl_num, - u8 resp_vl_off) +static void ubase_gather_udma_req_resp_vl(struct ubase_dev *udev, u8 *req_vl, + u8 req_vl_num, u8 resp_vl_off) { struct ubase_caps *dev_caps = &udev->caps.dev_caps; struct ubase_adev_qos *qos = &udev->qos; @@ -689,11 +679,9 @@ static void ubase_gather_urma_req_resp_vl(struct ubase_dev *udev) dev_caps->vl_num = qos->nic_vl_num; /* Restriction: The unic vl can't be used as the dma resp vl. */ - ubase_gather_udma_req_resp_vl(udev, qos->tp_req_vl, - qos->tp_vl_num, + ubase_gather_udma_req_resp_vl(udev, qos->tp_req_vl, qos->tp_vl_num, qos->tp_resp_vl_offset); - ubase_gather_udma_req_resp_vl(udev, qos->ctp_req_vl, - qos->ctp_vl_num, + ubase_gather_udma_req_resp_vl(udev, qos->ctp_req_vl, qos->ctp_vl_num, qos->ctp_resp_vl_offset); /* dev_caps->vl_num is used for DCB tool configuration. Therefore, @@ -751,7 +739,7 @@ static int ubase_parse_nic_vl(struct ubase_dev *udev) udev->qos.nic_vl, &udev->qos.nic_vl_num); } -static int ubase_parse_rack_udma_req_vl_ub(struct ubase_dev *udev) +static int ubase_parse_udma_req_vl(struct ubase_dev *udev) { struct ubase_adev_qos *qos = &udev->qos; int ret; @@ -761,51 +749,40 @@ static int ubase_parse_rack_udma_req_vl_ub(struct ubase_dev *udev) if (ret) return ret; - return ubase_assign_urma_vl(udev, qos->ctp_sl, qos->ctp_sl_num, - qos->ctp_req_vl, &qos->ctp_vl_num); -} - -static int ubase_parse_rack_udma_req_vl(struct ubase_dev *udev) -{ if (ubase_dev_ubl_supported(udev)) - return ubase_parse_rack_udma_req_vl_ub(udev); - - ubase_parse_udma_req_vl_uboe(udev); + return ubase_assign_urma_vl(udev, qos->ctp_sl, qos->ctp_sl_num, + qos->ctp_req_vl, &qos->ctp_vl_num); return 0; } -static int ubase_parse_rack_udma_vl(struct ubase_dev *udev) +static int ubase_parse_udma_vl(struct ubase_dev *udev) { int ret; - ret = ubase_parse_rack_udma_req_vl(udev); + ret = ubase_parse_udma_req_vl(udev); if (ret) return ret; return ubase_parse_udma_resp_vl(udev); } -static int ubase_parse_rack_cdma_resp_vl(struct ubase_dev *udev) +static int ubase_parse_cdma_resp_vl(struct ubase_dev *udev) { return ubase_parse_ctp_resp_vl(udev); } -static int ubase_parse_rack_cdma_req_sl_vl(struct ubase_dev *udev) +static int ubase_parse_cdma_sl(struct ubase_dev *udev) { struct ubase_adev_qos *qos = &udev->qos; - bool exist = false; u8 i; - for (i = 0; i < qos->vl_num; i++) { - exist = ubase_get_vl_sl(udev, qos->vl[i], qos->ctp_sl, - &qos->ctp_sl_num); - if (exist) - qos->ctp_req_vl[qos->ctp_vl_num++] = qos->vl[i]; - } + for (i = 0; i < qos->ctp_vl_num; i++) + ubase_get_vl_sl(udev, qos->ctp_req_vl[i], qos->ctp_sl, + &qos->ctp_sl_num); - if (!qos->ctp_vl_num) { - ubase_err(udev, "cdma doesn't have any req vl.\n"); + if (!qos->ctp_sl_num) { + ubase_err(udev, "cdma doesn't have any sl.\n"); return -EINVAL; } @@ -816,11 +793,11 @@ static int ubase_parse_cdma_sl_vl(struct ubase_dev *udev) { int ret; - ret = ubase_parse_rack_cdma_req_sl_vl(udev); + ret = ubase_parse_cdma_sl(udev); if (ret) return ret; - ret = ubase_parse_rack_cdma_resp_vl(udev); + ret = ubase_parse_cdma_resp_vl(udev); if (ret) return ret; @@ -828,21 
+805,16 @@ static int ubase_parse_cdma_sl_vl(struct ubase_dev *udev) return 0; } -static inline int ubase_parse_nic_sl_vl(struct ubase_dev *udev) -{ - return ubase_parse_nic_vl(udev); -} - static int ubase_parse_urma_sl_vl(struct ubase_dev *udev) { int ret; - ret = ubase_parse_nic_sl_vl(udev); + ret = ubase_parse_nic_vl(udev); if (ret) return ret; if (ubase_dev_udma_supported(udev)) { - ret = ubase_parse_rack_udma_vl(udev); + ret = ubase_parse_udma_vl(udev); if (ret) return ret; } @@ -875,23 +847,23 @@ static void ubase_parse_max_vl(struct ubase_dev *udev) { struct ubase_adev_caps *udma_caps = &udev->caps.udma_caps; struct ubase_adev_qos *qos = &udev->qos; - u8 i, max_vl = 0; + u8 i, ue_max_vl_id = 0; for (i = 0; i < qos->nic_vl_num; i++) - max_vl = max(qos->nic_vl[i], max_vl); + ue_max_vl_id = max(qos->nic_vl[i], ue_max_vl_id); for (i = 0; i < qos->tp_vl_num; i++) - max_vl = max(qos->tp_req_vl[i] + - qos->tp_resp_vl_offset, max_vl); + ue_max_vl_id = max(qos->tp_req_vl[i] + qos->tp_resp_vl_offset, + ue_max_vl_id); for (i = 0; i < qos->ctp_vl_num; i++) - max_vl = max(qos->ctp_req_vl[i] + - qos->ctp_resp_vl_offset, max_vl); + ue_max_vl_id = max(qos->ctp_req_vl[i] + qos->ctp_resp_vl_offset, + ue_max_vl_id); - qos->ue_max_vl_id = max_vl; + qos->ue_max_vl_id = ue_max_vl_id; if (ubase_dev_urma_supported(udev)) - udma_caps->rc_max_cnt *= (max_vl + 1); + udma_caps->rc_max_cnt *= (ue_max_vl_id + 1); } static int ubase_get_nic_max_vl(struct ubase_dev *udev) @@ -934,7 +906,7 @@ static int ubase_ctrlq_query_vl(struct ubase_dev *udev) struct ubase_ctrlq_query_vl_req req = {0}; struct ubase_ctrlq_msg msg = {0}; unsigned long vl_bitmap; - u8 i, vl_cnt = 0; + u8 i, cdma_vl_cnt = 0; int ret; msg.service_ver = UBASE_CTRLQ_SER_VER_01; @@ -958,18 +930,35 @@ static int ubase_ctrlq_query_vl(struct ubase_dev *udev) for (i = 0; i < UBASE_MAX_VL_NUM; i++) if (test_bit(i, &vl_bitmap)) - udev->qos.vl[vl_cnt++] = i; + udev->qos.ctp_req_vl[cdma_vl_cnt++] = i; - if (!vl_cnt) - return -EBUSY; + if (!cdma_vl_cnt) { + ubase_err(udev, "cdma doesn't have any vl.\n"); + return -EIO; + } - udev->qos.vl_num = vl_cnt; + udev->qos.ctp_vl_num = cdma_vl_cnt; ubase_dbg(udev, "ctrlq query vl_bitmap = %lx.\n", vl_bitmap); return 0; } +static bool ubase_check_udma_sl_valid(struct ubase_dev *udev, u8 udma_tp_sl_cnt, + u8 udma_ctp_sl_cnt) +{ + if (!ubase_dev_udma_supported(udev)) + return true; + + if (ubase_dev_ubl_supported(udev) && !(udma_tp_sl_cnt + udma_ctp_sl_cnt)) + return false; + + if (!ubase_dev_ubl_supported(udev) && !udma_tp_sl_cnt) + return false; + + return true; +} + static int ubase_ctrlq_query_sl(struct ubase_dev *udev) { unsigned long unic_sl_bitmap, udma_tp_sl_bitmap, udma_ctp_sl_bitmap; @@ -1015,8 +1004,7 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) return -EIO; } - if (ubase_dev_udma_supported(udev) && - !(udma_tp_sl_cnt + udma_ctp_sl_cnt)) { + if (!ubase_check_udma_sl_valid(udev, udma_tp_sl_cnt, udma_ctp_sl_cnt)) { ubase_err(udev, "udma doesn't have any sl.\n"); return -EIO; } diff --git a/drivers/ub/ubase/ubase_tp.c b/drivers/ub/ubase/ubase_tp.c index f18854fdc319..083ac656c457 100644 --- a/drivers/ub/ubase/ubase_tp.c +++ b/drivers/ub/ubase/ubase_tp.c @@ -7,10 +7,13 @@ #include #include "ubase_ctrlq.h" +#include "ubase_dev.h" #include "ubase_reset.h" #include "ubase_tp.h" -int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tpn) +#define UBASE_TRANS_TYPE_UM_TP 0x2 + +static int ubase_notify_tp_flush_done(struct ubase_dev *udev, u32 tpn) { struct ubase_ctrlq_tp_fd_req req = {0}; struct 
ubase_ctrlq_msg msg = {0}; @@ -74,7 +77,7 @@ int ubase_ae_tp_flush_done(struct notifier_block *nb, unsigned long event, tp_num = info->aeqe->event.queue_event.num; - return ubase_notify_tp_fd_by_ctrlq(udev, tp_num); + return ubase_notify_tp_flush_done(udev, tp_num); } int ubase_ae_tp_level_error(struct notifier_block *nb, unsigned long event, @@ -96,7 +99,7 @@ int ubase_ae_tp_level_error(struct notifier_block *nb, unsigned long event, return 0; } -static int ubase_create_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +static int ubase_create_tp_tpg(struct ubase_dev *udev, u32 vl) { struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; struct ubase_ctrlq_create_tp_resp resp = {0}; @@ -133,7 +136,7 @@ static int ubase_create_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) return 0; } -static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) +static void ubase_wait_tp_flush_done(struct ubase_dev *udev, u32 vl) { struct ubase_tpg *tpg = &udev->tp_ctx.tpg[vl]; int i; @@ -150,7 +153,7 @@ static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) vl, atomic_read(&tpg->tp_fd_cnt)); } -static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +static void ubase_destroy_tp_tpg(struct ubase_dev *udev, u32 vl) { struct ubase_ctrlq_destroy_tp_req req = {0}; struct ubase_ctrlq_msg msg = {0}; @@ -176,24 +179,24 @@ static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) return; } - ubase_wait_tp_flush_done_by_ctrlq(udev, vl); + ubase_wait_tp_flush_done(udev, vl); } -static void ubase_destroy_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 num) +static void ubase_destroy_multi_tp_tpg(struct ubase_dev *udev, u32 num) { u32 idx; for (idx = 0; idx < num; idx++) - ubase_destroy_tp_tpg_by_ctrlq(udev, idx); + ubase_destroy_tp_tpg(udev, idx); } -static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) +static int ubase_create_multi_tp_tpg(struct ubase_dev *udev) { int ret; u32 i; for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { - ret = ubase_create_tp_tpg_by_ctrlq(udev, i); + ret = ubase_create_tp_tpg(udev, i); if (ret) { ubase_err(udev, "failed to create tp tpg, tpgn = %u, ret = %d.\n", i, ret); @@ -204,7 +207,7 @@ static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) return 0; err_create_tp_tpg: - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, i); + ubase_destroy_multi_tp_tpg(udev, i); return ret; } @@ -227,7 +230,7 @@ int ubase_dev_init_tp_tpg(struct ubase_dev *udev) } spin_unlock(&tp_ctx->tpg_lock); - ret = ubase_create_multi_tp_tpg_by_ctrlq(udev); + ret = ubase_create_multi_tp_tpg(udev); if (ret) { spin_lock(&tp_ctx->tpg_lock); devm_kfree(udev->dev, tp_ctx->tpg); @@ -251,7 +254,7 @@ void ubase_dev_uninit_tp_tpg(struct ubase_dev *udev) return; if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, num); + ubase_destroy_multi_tp_tpg(udev, num); spin_lock(&tp_ctx->tpg_lock); devm_kfree(udev->dev, tp_ctx->tpg); diff --git a/drivers/ub/ubase/ubase_tp.h b/drivers/ub/ubase/ubase_tp.h index 42a3cb4eb8c5..63ec0e61a79e 100644 --- a/drivers/ub/ubase/ubase_tp.h +++ b/drivers/ub/ubase/ubase_tp.h @@ -13,8 +13,6 @@ #define UBASE_TRANS_TYPE_UM_TP 0x2 -#define UBASE_TP_PORT_BITMAP_STEP 2 - #define UBASE_WAIT_TP_FLUSH_TOTAL_STEPS 12 struct ubase_tp_ctx { diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 37950410345e..9d0f52ab75c1 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -18,7 +18,6 @@ 
struct iova_slot; #define UBASE_IOVA_COMM_PFN_CNT 1 #define UBASE_MAX_DSCP (64) #define UBASE_MAX_SL_NUM (16U) -#define UBASE_MAX_REQ_VL_NUM (8U) #define UBASE_MAX_VL_NUM (16U) #if UBASE_MAX_VL_NUM < IEEE_8021QAZ_MAX_TCS #error "UBASE_MAX_VL_NUM can't less than IEEE_8021QAZ_MAX_TCS" @@ -251,7 +250,6 @@ struct ubase_resource_space { /** * struct ubase_adev_qos - ubase auxiliary device qos information - * @rdma_vl_num: rdma vl number * @rdma_tp_vl_num: rdma tp vl number * @rdma_ctp_vl_num: rdma ctp vl number * @rdma_tp_resp_vl_offset: rdma tp response vl offset, @@ -260,15 +258,12 @@ struct ubase_resource_space { * rdma_ctp_resp_vl = rdma_ctp_resp_vl + rdma_ctp_resp_vl_offset * @max_vl: max vl number * @resv: reserved bits - * @rdma_sl_num: rdma sl number * @rdma_tp_sl_num: rdma tp sl number * @rdma_ctp_sl_num: rdma ctp sl number * @nic_sl_num: nic sl number * @nic_vl_num: nic vl number - * @rdma_vl: rdma vl * @rdma_tp_req_vl: rdma tp request vl * @rdma_ctp_req_vl: rdma ctp request vl - * @rdma_sl: rdma sl * @rdma_tp_sl: rdma tp sl * @rdma_ctp_sl: rdma ctp sl * @nic_sl: nic sl @@ -278,15 +273,11 @@ struct ubase_resource_space { */ struct ubase_adev_qos { /* udma/cdma resource */ - u8 sl_num; - u8 sl[UBASE_MAX_SL_NUM]; u8 tp_sl_num; u8 tp_sl[UBASE_MAX_SL_NUM]; u8 ctp_sl_num; u8 ctp_sl[UBASE_MAX_SL_NUM]; - u8 vl_num; - u8 vl[UBASE_MAX_VL_NUM]; u8 tp_vl_num; u8 tp_resp_vl_offset; u8 tp_req_vl[UBASE_MAX_VL_NUM]; @@ -318,22 +309,6 @@ struct ubase_ue_node { u16 bus_ue_id; }; -struct ubase_ue_caps { - u8 ceq_vector_num; - u8 aeq_vector_num; - u32 aeqe_depth; - u32 ceqe_depth; - u32 jfs_max_cnt; - u32 jfs_depth; - u32 jfr_max_cnt; - u32 jfr_depth; - u32 jfc_max_cnt; - u32 jfc_depth; - u32 rc_max_cnt; - u32 rc_depth; - u32 jtg_max_cnt; -}; - #define UBASE_BUS_EID_LEN 4 /** @@ -352,6 +327,7 @@ bool ubase_adev_prealloc_supported(struct auxiliary_device *aux_dev); struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev); +const struct ubase_adev_com *ubase_get_mdrv_data(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev); -- Gitee From 7236f574e3ffcda62e97254cd32500899948f7da Mon Sep 17 00:00:00 2001 From: Zhang Lei Date: Tue, 9 Dec 2025 19:29:52 +0800 Subject: [PATCH 219/243] ub: ubase: Fix the deadlock caused by excessive logging in the ctrlq crq task. commit e911001387ed828279af9d3d962c9389c93ecbfc openEuler To avoid a deadlock crash caused by excessive logging, this patch adds rate-limited logging and bounds the number of entries the ctrlq crq task handles from the ctrlq crq queue in one run. 
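Both halves of the fix follow standard kernel patterns. A minimal sketch of the rate-limited logging side, built on the generic <linux/ratelimit.h> API with the same 2-second/40-message window this patch picks; the demo_* names are illustrative, not the driver's:

    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    /* Allow at most 40 messages per 2-second window; count what is
     * suppressed so a summary line can be printed later.
     */
    static DEFINE_RATELIMIT_STATE(demo_rs, 2 * HZ, 40);
    static unsigned int demo_suppressed;

    static void demo_warn_rl(const char *msg)
    {
            if (__ratelimit(&demo_rs))
                    pr_warn("demo: %s\n", msg);
            else
                    demo_suppressed++;
    }

The other half is a NAPI-style budget: the crq handler processes at most UBASE_CTRLQ_CRQ_POLLING_BUDGET (256) base blocks per invocation and reschedules itself via ubase_ctrlq_task_schedule() if the queue is still non-empty, so a flooded queue yields instead of spinning in the service task.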
Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Zhang Lei Signed-off-by: huwentao --- drivers/ub/ubase/ubase_ctrlq.c | 39 ++++++++++++++++++++++++---------- drivers/ub/ubase/ubase_dev.c | 16 ++++++++++++++ drivers/ub/ubase/ubase_dev.h | 31 +++++++++++++++++++++++++++ drivers/ub/ubase/ubase_eq.c | 2 +- drivers/ub/ubase/ubase_eq.h | 2 +- 5 files changed, 77 insertions(+), 13 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 8827318f97a8..5f6b6cf2559e 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -987,15 +987,17 @@ void ubase_ctrlq_handle_crq_msg(struct ubase_dev *udev, spin_lock_bh(&csq->lock); ctx = &udev->ctrlq.msg_queue[seq]; if (!ctx->valid) { - ubase_warn(udev, - "seq is invalid, opcode = 0x%x, service_type = 0x%x, seq = %u.\n", - head->opcode, head->service_type, seq); - goto unlock; + spin_unlock_bh(&csq->lock); + ubase_warn_rl(udev, udev->log_rs.ctrlq_self_seq_invalid_log_cnt, + "seq is invalid, opcode = 0x%x, service_type = 0x%x, seq = %u.\n", + head->opcode, head->service_type, seq); + return; } if (ctx->is_sync) { ubase_ctrlq_notify_completed(udev, head, seq, msg_data, data_len); - goto unlock; + spin_unlock_bh(&csq->lock); + return; } ctx->valid = 0; spin_unlock_bh(&csq->lock); @@ -1003,9 +1005,6 @@ void ubase_ctrlq_handle_crq_msg(struct ubase_dev *udev, ubase_ctrlq_crq_event_callback(udev, head, msg_data, data_len, seq); return; - -unlock: - spin_unlock_bh(&csq->lock); } static void ubase_ctrlq_handle_self_msg(struct ubase_dev *udev, @@ -1049,9 +1048,10 @@ static void ubase_ctrlq_handle_other_msg(struct ubase_dev *udev, spin_lock_bh(&csq->lock); ctx = udev->ctrlq.msg_queue[seq]; if (!ctx.valid) { - ubase_warn(udev, "invalid seq = %u, opcode = 0x%x, service_type = 0x%x.\n", - seq, head->opcode, head->service_type); spin_unlock_bh(&csq->lock); + ubase_warn_rl(udev, udev->log_rs.ctrlq_other_seq_invalid_log_cnt, + "invalid seq = %u, opcode = 0x%x, service_type = 0x%x.\n", + seq, head->opcode, head->service_type); return; } if (!ctx.is_sync) @@ -1096,14 +1096,18 @@ static inline void ubase_ctrlq_reset_crq_ci(struct ubase_dev *udev) static void ubase_ctrlq_crq_handler(struct ubase_dev *udev) { +#define UBASE_CTRLQ_CRQ_POLLING_BUDGET 256 + struct ubase_ctrlq_ring *crq = &udev->ctrlq.crq; struct ub_entity *ue = to_ub_entity(udev->dev); struct ubase_ctrlq_base_block head = {0}; + u32 cnt = 0; u8 bb_num; u8 *addr; u16 seq; - while (!ubase_ctrlq_crq_is_empty(udev, &udev->hw)) { + while (cnt++ < UBASE_CTRLQ_CRQ_POLLING_BUDGET && + !ubase_ctrlq_crq_is_empty(udev, &udev->hw)) { if (!test_bit(UBASE_CTRLQ_STATE_ENABLE, &udev->ctrlq.state)) { ubase_warn(udev, "ctrlq is disabled in crq.\n"); return; @@ -1135,6 +1139,19 @@ static void ubase_ctrlq_crq_handler(struct ubase_dev *udev) ubase_ctrlq_update_crq_ci(udev, bb_num); } + + if (udev->log_rs.ctrlq_self_seq_invalid_log_cnt || + udev->log_rs.ctrlq_other_seq_invalid_log_cnt) { + ubase_warn(udev, + "ubase log rate is limited, ctrlq_self_seq_invalid_log_cnt = %u, ctrlq_other_seq_invalid_log_cnt = %u.\n", + udev->log_rs.ctrlq_self_seq_invalid_log_cnt, + udev->log_rs.ctrlq_other_seq_invalid_log_cnt); + udev->log_rs.ctrlq_self_seq_invalid_log_cnt = 0; + udev->log_rs.ctrlq_other_seq_invalid_log_cnt = 0; + } + + if (!ubase_ctrlq_crq_is_empty(udev, &udev->hw)) + ubase_ctrlq_task_schedule(udev); } void ubase_ctrlq_service_task(struct ubase_delay_work *ubase_work) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c 
index 086e16cef4b6..e9c289ee0e29 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -657,7 +657,23 @@ static int ubase_notify_drv_capbilities(struct ubase_dev *udev) return __ubase_cmd_send_in(udev, &in); } +static int ubase_log_rs_init(struct ubase_dev *udev) +{ +#define UBASE_RATELIMIT_INTERVAL (2 * HZ) +#define UBASE_RATELIMIT_BURST 40 + + raw_spin_lock_init(&udev->log_rs.rs.lock); + udev->log_rs.rs.interval = UBASE_RATELIMIT_INTERVAL; + udev->log_rs.rs.burst = UBASE_RATELIMIT_BURST; + + return 0; +} + static const struct ubase_init_function ubase_init_func_map[] = { + { + "init log rs", UBASE_SUP_ALL, 0, + ubase_log_rs_init, NULL + }, { "init work queue", UBASE_SUP_ALL, 0, ubase_wq_init, ubase_wq_uninit diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index 5270246540e7..ce2cac4b0917 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -42,6 +42,30 @@ dev_warn(_udev->dev, "(pid %d) " fmt, \ current->pid, ##__VA_ARGS__) +#define ubase_err_rl(_udev, log_cnt, fmt, ...) do { \ + if (__ratelimit(&(_udev->log_rs.rs))) \ + dev_err(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__); \ + else \ + (log_cnt)++; \ +} while (0) + +#define ubase_info_rl(_udev, log_cnt, fmt, ...) do { \ + if (__ratelimit(&(_udev->log_rs.rs))) \ + dev_info(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__); \ + else \ + (log_cnt)++; \ +} while (0) + +#define ubase_warn_rl(_udev, log_cnt, fmt, ...) do { \ + if (__ratelimit(&(_udev->log_rs.rs))) \ + dev_warn(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__); \ + else \ + (log_cnt)++; \ +} while (0) + struct ubase_adev { struct auxiliary_device adev; struct ubase_dev *udev; @@ -252,6 +276,12 @@ struct ubase_prealloc_mem_info { struct ubase_pmem_ctx udma; }; +struct ubase_log_rs { + struct ratelimit_state rs; + u16 ctrlq_self_seq_invalid_log_cnt; + u16 ctrlq_other_seq_invalid_log_cnt; +}; + struct ubase_dev { struct device *dev; int dev_id; @@ -292,6 +322,7 @@ struct ubase_dev { struct ubase_arq_msg_ring arq; struct ubase_prealloc_mem_info pmem_info; u8 dev_mac[ETH_ALEN]; + struct ubase_log_rs log_rs; }; #define UBASE_ERR_MSG_LEN 128 diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index db1dc72f2caf..2afe9c3bf7fc 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -210,7 +210,7 @@ static void ubase_errhandle_task_schedule(struct ubase_dev *udev) &udev->service_task.service_task, 0); } -static void ubase_ctrlq_task_schedule(struct ubase_dev *udev) +void ubase_ctrlq_task_schedule(struct ubase_dev *udev) { if (!test_and_set_bit(UBASE_STATE_CTRLQ_SERVICE_SCHED, &udev->service_task.state)) { diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h index a4a15c9144f4..59fa2c620720 100644 --- a/drivers/ub/ubase/ubase_eq.h +++ b/drivers/ub/ubase/ubase_eq.h @@ -204,5 +204,5 @@ void ubase_unregister_ae_event(struct ubase_dev *udev); void ubase_enable_misc_vector(struct ubase_dev *udev, bool enable); void ubase_disable_ce_irqs(struct ubase_dev *udev); int ubase_enable_ce_irqs(struct ubase_dev *udev); - +void ubase_ctrlq_task_schedule(struct ubase_dev *udev); #endif -- Gitee From 00a948240ca4ac157e475c5d23fac004a88af762 Mon Sep 17 00:00:00 2001 From: Chuan Wu Date: Tue, 9 Dec 2025 19:44:25 +0800 Subject: [PATCH 220/243] ub: ubase: optimize print format of fw_ver in debugfs. 
commit 51d1bd2812b6cc2e9d8d96631658706f5b54653f openEuler This patch optimizes the print format of fw_version in ubase debugfs, making it the same as the log format used in the initial printing and in 'ethtool -i'. Fixes: a15280279576 ("ub: ubase: support debugfs public interface.") Signed-off-by: Chuan Wu Signed-off-by: huwentao --- drivers/ub/ubase/debugfs/ubase_debugfs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 4e6eec73abf9..72684dda40ba 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -95,13 +95,17 @@ static void ubase_dbg_dump_caps_info(struct seq_file *s, struct ubase_dev *udev) {"\tdie_id: %u\n", dev_caps->die_id}, {"\tue_id: %u\n", dev_caps->ue_id}, {"\tnl_id: %u\n", dev_caps->nl_id}, - {"\tfw_version: %u\n", dev_caps->fw_version}, }; int i; for (i = 0; i < ARRAY_SIZE(ubase_common_caps_info); i++) seq_printf(s, ubase_common_caps_info[i].format, ubase_common_caps_info[i].caps_info); + seq_printf(s, "\tfw_version: %u.%u.%u.%u\n", + u32_get_bits(dev_caps->fw_version, UBASE_FW_VERSION_BYTE3_MASK), + u32_get_bits(dev_caps->fw_version, UBASE_FW_VERSION_BYTE2_MASK), + u32_get_bits(dev_caps->fw_version, UBASE_FW_VERSION_BYTE1_MASK), + u32_get_bits(dev_caps->fw_version, UBASE_FW_VERSION_BYTE0_MASK)); } static void ubase_dbg_dump_common_caps(struct seq_file *s, struct ubase_dev *udev) -- Gitee From f13c852a1a7d81f84ea3717f665527ebc9b64270 Mon Sep 17 00:00:00 2001 From: Chuan Wu Date: Tue, 9 Dec 2025 19:55:33 +0800 Subject: [PATCH 221/243] ub: ubase: add ip_over_urma API for udma. commit d39249c75421ee9f3f8a10c77ea88af049b79eb8 openEuler This patch adds two exported APIs to query whether the IP over URMA capability is supported for udma. Fixes: 313470c59d7f ("ub: ubase: add CMDQ&CTRLQ compatibility code") Signed-off-by: Chuan Wu Signed-off-by: huwentao --- drivers/ub/ubase/ubase_dev.c | 39 +++++++++++++++++++++++++++++++ include/ub/ubase/ubase_comm_dev.h | 8 +++---- 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index e9c289ee0e29..efa220bacdff 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -1403,6 +1403,45 @@ bool ubase_adev_mac_stats_supported(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_adev_mac_stats_supported); +/** + * ubase_adev_ip_over_urma_supported() - determine whether to support IP over + * urma + * @adev: auxiliary device + * + * This function is used to determine whether to support IP over urma. + * + * Context: Any context. + * Return: true or false + */ +bool ubase_adev_ip_over_urma_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_ip_over_urma_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_ip_over_urma_supported); + +/** + * ubase_adev_ip_over_urma_utp_supported() - determine whether to support utp + * when IP over urma is supported + * @adev: auxiliary device + * + * This function is used to determine whether to support utp when IP over urma + * is supported + * + * Context: Any context.
+ * Return: true or false + */ +bool ubase_adev_ip_over_urma_utp_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_ip_over_urma_utp_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_ip_over_urma_utp_supported); + static void ubase_activate_notify(struct ubase_dev *udev, struct auxiliary_device *adev, bool activate) { diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 9d0f52ab75c1..fbec3624c87b 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -143,7 +143,6 @@ struct ubase_caps { * struct ubase_res_caps - ubase resource capbilities * @max_cnt: the resource max count * @start_idx: start index - * @reserved_cnt: reserved count * @depth: the queue depth of the resource */ struct ubase_res_caps { @@ -172,10 +171,6 @@ struct ubase_pmem_caps { * @jtg_max_cnt: jetty group max count * @rc_max_cnt: rc max count * @rc_que_depth: rc queue depth - * @ccc_max_cnt: ccc max count - * @dest_addr_max_cnt: dest addr max count - * @seid_upi_max_cnt:seid upi max count - * @tpm_max_cnt: tpm max count * @cqe_size: cqe size */ struct ubase_adev_caps { @@ -324,6 +319,9 @@ bool ubase_adev_ctrlq_supported(struct auxiliary_device *adev); bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev); bool ubase_adev_mac_stats_supported(struct auxiliary_device *aux_dev); bool ubase_adev_prealloc_supported(struct auxiliary_device *aux_dev); +bool ubase_adev_ip_over_urma_supported(struct auxiliary_device *adev); +bool ubase_adev_ip_over_urma_utp_supported(struct auxiliary_device *adev); + struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev); -- Gitee From 1bcb9d261afb23cac67aab8af64a60a610462a13 Mon Sep 17 00:00:00 2001 From: Zhang Lei Date: Mon, 8 Dec 2025 10:56:12 +0800 Subject: [PATCH 222/243] ub: ubase: Fix the issue of mismatch between the ubase aeqe structure and the protocol. commit 4d49ef0166a7fd6c3bf6e329f1e73c94e9094aff openEuler For ubase_aeqe structure, the field of num is 32 bits, but the protocol specifies that this field should be 20 bits. This patch modifies the bit width of the num field to be consistent with the protocol. Fixes: 9cc0f22b5d69 ("ub: ubase: support for async event process") Signed-off-by: Zhang Lei Signed-off-by: huwentao --- include/ub/ubase/ubase_comm_eq.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/ub/ubase/ubase_comm_eq.h b/include/ub/ubase/ubase_comm_eq.h index d1efad0a79b3..0ed03d7e6140 100644 --- a/include/ub/ubase/ubase_comm_eq.h +++ b/include/ub/ubase/ubase_comm_eq.h @@ -86,9 +86,10 @@ struct ubase_aeqe { union { struct { - u32 num; - u32 rsv0; + u32 num : 20; + u32 rsv0 : 12; u32 rsv1; + u32 rsv2; } queue_event; #pragma pack(push, 1) -- Gitee From 989d92c33779cc19bb9304808a78686092afc134 Mon Sep 17 00:00:00 2001 From: Zhang Lei Date: Mon, 8 Dec 2025 14:55:49 +0800 Subject: [PATCH 223/243] net: unic: Restore default queue count per tc when tc number changes. commit ec8fa86dd06264e9a83181a68668f1bd43d48d75 openEuler This patch restores the number of queues for each tc to the default value of 1 when the number of tc changes. 
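A small compilable sketch of the restored behavior (the struct and function names are assumed stand-ins, not the driver's symbols): any change in the tc count resets the per-tc queue count to the default of 1 and recomputes the total:

  #include <stdio.h>

  #define DEFAULT_RSS_SIZE 1  /* queues per tc after a tc-count change */

  struct channels {
      unsigned int rss_vl_num; /* number of tcs (vls) carrying rss */
      unsigned int rss_size;   /* queues per tc */
      unsigned int num;        /* total queue count */
  };

  /* model of the update path: a tc-count change resets rss_size */
  static void update_channels(struct channels *ch, unsigned int new_vl_num)
  {
      ch->rss_vl_num = new_vl_num;
      ch->rss_size = DEFAULT_RSS_SIZE;
      ch->num = ch->rss_size * ch->rss_vl_num;
  }

  int main(void)
  {
      struct channels ch = { .rss_vl_num = 1, .rss_size = 8, .num = 8 };

      update_channels(&ch, 4); /* tc count changes: 4 tcs x 1 queue */
      printf("tcs=%u queues/tc=%u total=%u\n",
             ch.rss_vl_num, ch.rss_size, ch.num);
      return 0;
  }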
Fixes: c20baa942215 ("net: unic: support config/query the mapping between dscp and tc") Signed-off-by: Zhang Lei Signed-off-by: huwentao --- drivers/net/ub/unic/unic_dev.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 7d0405e751b0..24dd70339957 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -46,6 +46,7 @@ MODULE_PARM_DESC(debug, "enable unic debug log, 0:disable, others:enable, defaul #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) +#define DEFAULT_RSS_SIZE 1 static struct workqueue_struct *unic_wq; @@ -308,7 +309,7 @@ static int unic_init_channels_attr(struct unic_dev *unic_dev) channels->vl.vl_num = 1; channels->rss_vl_num = 1; - channels->rss_size = 1; + channels->rss_size = DEFAULT_RSS_SIZE; channels->num = channels->rss_size * channels->rss_vl_num; channels->sqebb_depth = unic_caps->jfs.depth; channels->rqe_depth = unic_caps->jfr.depth; @@ -994,9 +995,6 @@ int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, struct unic_channels *channels = &unic_dev->channels; int ret; - dev_info(unic_dev->comdev.adev->dev.parent, - "change rss_size from %u to %u.\n", org_rss_size, new_rss_size); - mutex_lock(&channels->mutex); __unic_uninit_channels(unic_dev); @@ -1017,17 +1015,12 @@ int unic_update_channels(struct unic_dev *unic_dev, u8 vl_num) { - struct auxiliary_device *adev = unic_dev->comdev.adev; struct unic_channels *channels = &unic_dev->channels; - u32 new_rss_size, old_rss_size = channels->rss_size; + u32 old_rss_size = channels->rss_size; channels->rss_vl_num = unic_get_rss_vl_num(unic_dev, vl_num); - if (old_rss_size * channels->rss_vl_num > unic_channels_max_num(adev)) - new_rss_size = unic_get_max_rss_size(unic_dev); - else - new_rss_size = old_rss_size; - return unic_change_rss_size(unic_dev, new_rss_size, old_rss_size); + return unic_change_rss_size(unic_dev, DEFAULT_RSS_SIZE, old_rss_size); } static struct net_device *unic_alloc_netdev(struct auxiliary_device *adev) -- Gitee From c5566aead51a4bf5e5cc7545e0ea9972072af25c Mon Sep 17 00:00:00 2001 From: Shi Long Date: Tue, 9 Dec 2025 15:18:06 +0800 Subject: [PATCH 224/243] ub: ubase: add opcode to query ub port_bitmap. commit faa70acc5d7f1eaa902276086916019d3c2602e2 openEuler This patch adds the 0x5105 opcode to query the ub port_bitmap.
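The fallback order can be sketched as a stand-alone compilable model (the 0x5101 value of the legacy opcode is an assumption for illustration; only 0x5105 comes from the patch): try the new opcode first, and fall back only when the firmware answers -EOPNOTSUPP:

  #include <errno.h>
  #include <stdio.h>

  enum {
      OPC_QUERY_UB_PORT_BITMAP = 0x5105, /* new opcode from the patch */
      OPC_QUERY_PORT_BITMAP    = 0x5101, /* legacy value assumed here */
  };

  /* stand-in for the firmware call: pretend only the old opcode works */
  static int send_cmd(int opcode)
  {
      return opcode == OPC_QUERY_PORT_BITMAP ? 0 : -EOPNOTSUPP;
  }

  static int query_port_bitmap(void)
  {
      const int opcodes[] = { OPC_QUERY_UB_PORT_BITMAP, OPC_QUERY_PORT_BITMAP };
      int ret = -EOPNOTSUPP;
      unsigned int i;

      for (i = 0; i < sizeof(opcodes) / sizeof(opcodes[0]); i++) {
          ret = send_cmd(opcodes[i]);
          if (ret != -EOPNOTSUPP)
              break; /* success or a real error: stop falling back */
      }
      return ret;
  }

  int main(void)
  {
      printf("query_port_bitmap -> %d\n", query_port_bitmap());
      return 0;
  }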
Fixes: 3fb8b7d41539 ("ub: ubase: add support of ubase driver for ub network") Signed-off-by: Shi Long Signed-off-by: huwentao --- drivers/ub/ubase/ubase_hw.c | 21 ++++++++++++++++----- include/ub/ubase/ubase_comm_cmd.h | 1 + 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index d58fd69cd25f..534441fe5d69 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -649,16 +649,27 @@ int ubase_query_hw_oor_caps(struct ubase_dev *udev) int ubase_query_port_bitmap(struct ubase_dev *udev) { +#define OPCODE_CNT 2 + struct ubase_caps *dev_caps = &udev->caps.dev_caps; struct ubase_query_port_bitmap_resp resp = {0}; + enum ubase_opcode_type opcode[OPCODE_CNT]; struct ubase_cmd_buf in, out; - int ret; + int ret, i; - ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_PORT_BITMAP, true, 0, NULL); - ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_PORT_BITMAP, true, - sizeof(resp), &resp); + opcode[0] = UBASE_OPC_QUERY_UB_PORT_BITMAP; + opcode[1] = UBASE_OPC_QUERY_PORT_BITMAP; - ret = __ubase_cmd_send_inout(udev, &in, &out); + for (i = 0; i < OPCODE_CNT; i++) { + ubase_fill_inout_buf(&in, opcode[i], true, 0, NULL); + ubase_fill_inout_buf(&out, opcode[i], true, sizeof(resp), &resp); + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret != -EOPNOTSUPP) + break; + + dev_warn(udev->dev, + "The function of querying real-time traffic in UBOE mode is not supported.\n"); + } if (ret && ret != -EPERM) { dev_err(udev->dev, "failed to query port bitmap, ret = %d.\n", ret); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 4efbf8402d9d..6a195bbdf699 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -87,6 +87,7 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_FLUSH_STATUS = 0x5102, UBASE_OPC_START_PERF_STATS = 0x5103, UBASE_OPC_STOP_PERF_STATS = 0x5104, + UBASE_OPC_QUERY_UB_PORT_BITMAP = 0x5105, /* PHY commands */ UBASE_OPC_CONFIG_SPEED_DUP = 0x6100, -- Gitee From 6c40bcf44a897ce2d43a4000ad62c68e6a901147 Mon Sep 17 00:00:00 2001 From: Xiaobo Zhang Date: Tue, 9 Dec 2025 20:55:07 +0800 Subject: [PATCH 225/243] ub: ubase: fix an issue where the udma device's eid is deleted when rx stream stop and reset run concurrently commit 8f6eb77901de59f314e3db4dfd66a2beeafad1df openEuler This patch fixes an issue where the udma device's eid is deleted when the rx stream stop and reset flows run concurrently. Currently, when udma deletes a tp/jetty, ubase sends the fe disabling message to the control plane twice. As a result, the management and control plane cannot obtain the device eid from the hardware for the second deactivate message. To avoid this problem, ubase sends the fe deactivate message only after the reset is complete in the rx flow stop interface, and the rx flow recovery interface returns -EBUSY directly while a reset is in progress.
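A compilable stand-alone model of the waiting logic in the deactivate path (the constants mirror the patch; the fake reset state machine is illustrative): poll the "reset waiting" state, and give up with -EBUSY after a bounded number of 10ms sleeps:

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_WAIT_CNT 1000 /* 1000 x 10ms = 10s, as in the patch */

  static bool reset_waiting = true;
  static unsigned int ticks_left = 3; /* pretend reset finishes soon */

  /* stand-in for msleep(10): advance the fake reset state machine */
  static void sleep_10ms(void)
  {
      if (ticks_left && --ticks_left == 0)
          reset_waiting = false;
  }

  /* deactivate path: block until reset completes or return -EBUSY */
  static int wait_reset_done(void)
  {
      unsigned int cnt = 0;

      while (reset_waiting) {
          sleep_10ms();
          if (++cnt >= MAX_WAIT_CNT)
              return -EBUSY;
      }
      return 0;
  }

  int main(void)
  {
      printf("wait_reset_done -> %d\n", wait_reset_done());
      return 0;
  }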
Fixes: 5515a4226467 ("ub: ubase: support for activate/deactivate dev interface") Signed-off-by: Xiaobo Zhang Signed-off-by: huwentao --- drivers/ub/ubase/ubase_arq.c | 4 --- drivers/ub/ubase/ubase_dev.c | 62 ++++++++++++++++++++++++++++++---- drivers/ub/ubase/ubase_dev.h | 1 + drivers/ub/ubase/ubase_reset.c | 3 ++ 4 files changed, 60 insertions(+), 10 deletions(-) diff --git a/drivers/ub/ubase/ubase_arq.c b/drivers/ub/ubase/ubase_arq.c index 10735a68d7aa..4ae77f3cccbc 100644 --- a/drivers/ub/ubase/ubase_arq.c +++ b/drivers/ub/ubase/ubase_arq.c @@ -52,8 +52,6 @@ static int ubase_activate_ue(struct ubase_dev *udev, struct ub_entity *ue, int ret; ret = ub_activate_entity(ue, bus_ue_id); - if (ret == -EBUSY) - ret = ubase_activate_handler(udev, bus_ue_id); if (ret) dev_err(udev->dev, "failed to activate ue dev, ue id = %u, msn = %u, ret = %d.\n", @@ -68,8 +66,6 @@ static int ubase_deactivate_ue(struct ubase_dev *udev, struct ub_entity *ue, int ret; ret = ub_deactivate_entity(ue, bus_ue_id); - if (ret == -EBUSY) - ret = ubase_deactivate_handler(udev, bus_ue_id); if (ret) dev_err(udev->dev, "failed to deactivate ue dev, ue id=%u, msn=%u, ret=%d.\n", diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index efa220bacdff..d63a045137e1 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -1626,26 +1626,39 @@ int ubase_activate_dev(struct auxiliary_device *adev) { struct ubase_dev *udev; struct ub_entity *ue; - int ret; + int ret = 0; if (!adev) return 0; udev = __ubase_get_udev_by_adev(adev); + ubase_info(udev, "ubase activate dev, state_bits = 0x%lx.\n", + udev->state_bits); + + if (test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { + ubase_info(udev, "skip activate dev while resetting.\n"); + goto skip_activate_dev; + } + ue = container_of(udev->dev, struct ub_entity, dev); - if (ubase_activate_proxy_supported(udev) && - !test_bit(UBASE_STATE_DISABLED_B, &udev->state_bits)) + if (ubase_activate_proxy_supported(udev)) ret = ub_activate_entity(ue, ue->entity_idx); else ret = ubase_activate_handler(udev, ue->entity_idx); if (ret) { + if (test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { + ubase_info(udev, "skip activate dev while resetting.\n"); + ret = 0; + goto skip_activate_dev; + } ubase_err(udev, "failed to activate ubase dev, ret = %d.\n", ret); goto activate_dev_err; } +skip_activate_dev: ubase_activate_notify(udev, adev, true); activate_dev_err: @@ -1655,6 +1668,27 @@ int ubase_activate_dev(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_activate_dev); +static int ubase_deactivate_wait_reset_done(struct ubase_dev *udev) +{ +#define UBASE_MAX_WAIT_RST_CNT 1000 +#define UBASE_WAIT_RST_TIME 10 + + u16 cnt = 0; + + while (test_bit(UBASE_STATE_RST_WAIT_DEACTIVE_B, &udev->state_bits)) { + if (!cnt) + ubase_info(udev, + "waitting for reset done in deactivate process.\n"); + msleep(UBASE_WAIT_RST_TIME); + if (++cnt >= UBASE_MAX_WAIT_RST_CNT) { + ubase_err(udev, "wait reset done timeout.\n"); + return -EBUSY; + } + } + + return 0; +} + /** * ubase_deactivate_dev() - deactivate device * @adev: auxiliary device @@ -1677,21 +1711,37 @@ int ubase_deactivate_dev(struct auxiliary_device *adev) udev = __ubase_get_udev_by_adev(adev); - ue = container_of(udev->dev, struct ub_entity, dev); + ubase_info(udev, "ubase deactivate dev, state_bits = 0x%lx.\n", + udev->state_bits); + + if (test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { + ret = ubase_deactivate_wait_reset_done(udev); + if (ret) { + ubase_update_activate_stats(udev, 
false, ret); + return ret; + } + ubase_activate_notify(udev, adev, false); + goto out; + } + ubase_activate_notify(udev, adev, false); - if (ubase_activate_proxy_supported(udev) && - !test_bit(UBASE_STATE_DISABLED_B, &udev->state_bits)) + ue = container_of(udev->dev, struct ub_entity, dev); + if (ubase_activate_proxy_supported(udev)) ret = ub_deactivate_entity(ue, ue->entity_idx); else ret = ubase_deactivate_handler(udev, ue->entity_idx); + if (ret && test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ret = ubase_deactivate_wait_reset_done(udev); + if (ret) { ubase_err(udev, "failed to deactivate ubase dev, ret = %d.\n", ret); ubase_activate_notify(udev, adev, true); } +out: ubase_update_activate_stats(udev, false, ret); return ret; diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index ce2cac4b0917..69809bf1838d 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -148,6 +148,7 @@ enum ubase_dev_state_bit { UBASE_STATE_HIMAC_RESETTING_B, UBASE_STATE_CTX_READY_B, UBASE_STATE_PREALLOC_OK_B, + UBASE_STATE_RST_WAIT_DEACTIVE_B, }; struct ubase_crq_event_nbs { diff --git a/drivers/ub/ubase/ubase_reset.c b/drivers/ub/ubase/ubase_reset.c index b2fe6d9ef559..05c3b7005e31 100644 --- a/drivers/ub/ubase/ubase_reset.c +++ b/drivers/ub/ubase/ubase_reset.c @@ -217,6 +217,7 @@ void ubase_suspend(struct ubase_dev *udev) } set_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits); + set_bit(UBASE_STATE_RST_WAIT_DEACTIVE_B, &udev->state_bits); if (ubase_dev_pmu_supported(udev)) { __ubase_cmd_disable(udev); @@ -257,11 +258,13 @@ void ubase_resume(struct ubase_dev *udev) __ubase_cmd_enable(udev); udev->reset_stat.reset_done_cnt++; udev->reset_stat.hw_reset_done_cnt++; + clear_bit(UBASE_STATE_RST_WAIT_DEACTIVE_B, &udev->state_bits); clear_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits); clear_bit(UBASE_STATE_DISABLED_B, &udev->state_bits); return; } + clear_bit(UBASE_STATE_RST_WAIT_DEACTIVE_B, &udev->state_bits); udev->reset_stat.hw_reset_done_cnt++; ubase_suspend_aux_devices(udev); ubase_dev_reset_uninit(udev); -- Gitee From 75bc13724e251938175e4741396e4dc8b24f6711 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Tue, 9 Dec 2025 22:57:46 +0800 Subject: [PATCH 226/243] ub: ubase: obtain rc_max_cnt from MAMI commit 8ab9cc45a8e9141f20d4a8151bc4d3ab6b103a70 openEuler A VL uses only two RCT queues, which cannot meet the bandwidth requirements. Therefore, each FE needs to be configured with 8 RCT queues. To meet this requirement, UBASE obtains the number of RCT queues from the MAMI instead of the IMP. 
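A compilable model of the override (struct and function names are assumptions for illustration): a nonzero count queried from the MAMI replaces the IMP value and also disables the later per-VL scaling that only applies to the legacy count:

  #include <stdint.h>
  #include <stdio.h>

  struct caps {
      uint16_t rc_max_cnt;   /* per-queue count from the IMP */
      int use_fixed_rc_num;  /* set when the MAMI supplies the count */
  };

  /* apply the queried value: a nonzero MAMI count wins */
  static void apply_mami_rc_cnt(struct caps *c, uint16_t mami_rc_cnt)
  {
      if (mami_rc_cnt != 0) {
          c->use_fixed_rc_num = 1;
          c->rc_max_cnt = mami_rc_cnt;
      }
  }

  /* the later scaling step only runs for the legacy (IMP) value */
  static void scale_by_vl(struct caps *c, uint16_t max_vl_id)
  {
      if (!c->use_fixed_rc_num)
          c->rc_max_cnt *= (uint16_t)(max_vl_id + 1);
  }

  int main(void)
  {
      struct caps c = { .rc_max_cnt = 2 };

      apply_mami_rc_cnt(&c, 8); /* MAMI says 8 RCT queues per FE */
      scale_by_vl(&c, 3);       /* skipped because the count is fixed */
      printf("rc_max_cnt=%u fixed=%d\n", c.rc_max_cnt, c.use_fixed_rc_num);
      return 0;
  }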
Fixes: 442b66e0c7b0 ("ub: ubase: Support to config and query the sl scheduling mode and weight in ETS and TM modes.") Signed-off-by: Xuanyu Pu Signed-off-by: Fengyan Mu Signed-off-by: huwentao --- drivers/ub/ubase/ubase_ctrlq.h | 2 +- drivers/ub/ubase/ubase_dev.h | 1 + drivers/ub/ubase/ubase_qos_hw.c | 9 ++++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index 881f3342393a..e56728e4baae 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ b/drivers/ub/ubase/ubase_ctrlq.h @@ -69,7 +69,7 @@ struct ubase_ctrlq_query_vl_req { struct ubase_ctrlq_query_sl_resp { __le16 unic_sl_bitmap; - u8 rsv0[2]; + __le16 rc_max_cnt; __le16 udma_tp_sl_bitmap; __le16 udma_ctp_sl_bitmap; u8 rsv1[12]; diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index 69809bf1838d..f1cba478510a 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -289,6 +289,7 @@ struct ubase_dev { struct ubase_priv priv; struct ubase_hw hw; + bool use_fixed_rc_num; struct ubase_dev_caps caps; struct ubase_adev_qos qos; struct ubase_dbgfs dbgfs; diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index e7737e98e1e5..d7ce78489532 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -862,7 +862,7 @@ static void ubase_parse_max_vl(struct ubase_dev *udev) qos->ue_max_vl_id = ue_max_vl_id; - if (ubase_dev_urma_supported(udev)) + if (ubase_dev_urma_supported(udev) && !udev->use_fixed_rc_num) udma_caps->rc_max_cnt *= (ue_max_vl_id + 1); } @@ -966,6 +966,7 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) struct ubase_ctrlq_query_sl_resp resp = {0}; struct ubase_ctrlq_query_sl_req req = {0}; struct ubase_ctrlq_msg msg = {0}; + u16 rc_max_cnt; int ret; u8 i; @@ -986,6 +987,12 @@ static int ubase_ctrlq_query_sl(struct ubase_dev *udev) return ret; } + rc_max_cnt = le16_to_cpu(resp.rc_max_cnt); + if (rc_max_cnt != 0) { + udev->use_fixed_rc_num = true; + udev->caps.udma_caps.rc_max_cnt = rc_max_cnt; + } + unic_sl_bitmap = le16_to_cpu(resp.unic_sl_bitmap); udma_tp_sl_bitmap = le16_to_cpu(resp.udma_tp_sl_bitmap); udma_ctp_sl_bitmap = le16_to_cpu(resp.udma_ctp_sl_bitmap); -- Gitee From e3cf32b6824bc418a3a3aa1394a71e3a3e2873ea Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 10 Dec 2025 10:01:36 +0800 Subject: [PATCH 227/243] net: unic: Restore tx and rx stats when setting coalesce parameters commit fe48921c10e2ecb67a88061b9b1f28b2750bc1be openEuler Restore tx and rx stats when setting coalesce parameters. The driver destroys and rebuilds the channels when it receives a set-coalesce command; if we don't restore the driver's tx and rx stats, the stats information is lost when the channels are destroyed.
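The save/restore pattern, as a stand-alone compilable sketch (types and names are illustrative, not the driver's): snapshot the per-queue counters before teardown and copy them back into the rebuilt queues:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct q_stats { unsigned long pkts, bytes; };

  /* snapshot per-queue stats before the queues are torn down */
  static struct q_stats *backup_stats(const struct q_stats *live, unsigned int n)
  {
      struct q_stats *save = calloc(n, sizeof(*save));

      if (save)
          memcpy(save, live, n * sizeof(*save));
      return save;
  }

  /* copy the snapshot back into the freshly rebuilt queues */
  static void restore_stats(struct q_stats *live, const struct q_stats *save,
                            unsigned int n)
  {
      memcpy(live, save, n * sizeof(*live));
  }

  int main(void)
  {
      struct q_stats live[2] = { { 10, 1000 }, { 20, 2000 } };
      struct q_stats *save = backup_stats(live, 2);

      if (!save)
          return 1;
      memset(live, 0, sizeof(live)); /* teardown + rebuild zeroes them */
      restore_stats(live, save, 2);
      printf("q0 pkts=%lu q1 pkts=%lu\n", live[0].pkts, live[1].pkts);
      free(save);
      return 0;
  }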
Fixes: e0ccc63cc72e ("net: unic: support querying and configuring coalesce parameters.") Signed-off-by: Fengyan Mu Signed-off-by: huwentao --- drivers/net/ub/unic/unic_ethtool.c | 62 +++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 886f6d691733..337d77b16ff4 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -426,6 +426,49 @@ unic_check_coalesce_para(struct net_device *netdev, return ret; } +static int unic_backup_stats(struct unic_dev *unic_dev, + struct unic_sq_stats **sq_stats, + struct unic_rq_stats **rq_stats) +{ + u32 i; + + *sq_stats = kcalloc(unic_dev->channels.num, sizeof(**sq_stats), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(*sq_stats)) + return -ENOMEM; + + *rq_stats = kcalloc(unic_dev->channels.num, sizeof(**rq_stats), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(*rq_stats)) { + if (unic_tx_changed(unic_dev)) + kfree(*sq_stats); + return -ENOMEM; + } + + for (i = 0; i < unic_dev->channels.num; i++) { + memcpy(sq_stats[i], &unic_dev->channels.c[i].sq->stats, + sizeof(struct unic_sq_stats)); + memcpy(rq_stats[i], &unic_dev->channels.c[i].rq->stats, + sizeof(struct unic_rq_stats)); + } + + return 0; +} + +static void unic_restore_stats(struct unic_dev *unic_dev, + struct unic_sq_stats *sq_stats, + struct unic_rq_stats *rq_stats) +{ + u32 i; + + for (i = 0; i < unic_dev->channels.num; i++) { + memcpy(&unic_dev->channels.c[i].rq->stats, &rq_stats[i], + sizeof(struct unic_rq_stats)); + memcpy(&unic_dev->channels.c[i].sq->stats, &sq_stats[i], + sizeof(struct unic_sq_stats)); + } +} + static int unic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *cmd, struct kernel_ethtool_coalesce *kernel_coal, @@ -436,6 +479,8 @@ static int unic_set_coalesce(struct net_device *netdev, struct unic_coalesce *tx_coal = &unic_coal->tx_coal; struct unic_coalesce *rx_coal = &unic_coal->rx_coal; struct unic_coalesce old_tx_coal, old_rx_coal; + struct unic_sq_stats *sq_stats; + struct unic_rq_stats *rq_stats; int ret, ret1; if (netif_running(netdev)) { @@ -451,6 +496,13 @@ static int unic_set_coalesce(struct net_device *netdev, if (ret) return ret; + ret = unic_backup_stats(unic_dev, &sq_stats, &rq_stats); + if (ret) { + unic_err(unic_dev, "failed to backup txrx stats, ret = %d.\n", + ret); + return ret; + } + memcpy(&old_tx_coal, tx_coal, sizeof(struct unic_coalesce)); memcpy(&old_rx_coal, rx_coal, sizeof(struct unic_coalesce)); @@ -468,12 +520,20 @@ static int unic_set_coalesce(struct net_device *netdev, memcpy(tx_coal, &old_tx_coal, sizeof(struct unic_coalesce)); memcpy(rx_coal, &old_rx_coal, sizeof(struct unic_coalesce)); ret1 = unic_init_channels(unic_dev, unic_dev->channels.num); - if (ret1) + if (ret1) { unic_err(unic_dev, "failed to recover old channels, ret = %d.\n", ret1); + goto err_recover_channels; + } } + unic_restore_stats(unic_dev, sq_stats, rq_stats); + +err_recover_channels: + kfree(sq_stats); + kfree(rq_stats); + return ret; } -- Gitee From caa341e17471e2a1e07117e316343fabfae01585 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Wed, 10 Dec 2025 17:18:46 +0800 Subject: [PATCH 228/243] net: unic: Fixed the call trace issue triggered internally by the completion event interrupt commit 81d95911c812a24d4daa342a11f077b9316eba89 openEuler Solve the problem that in the scenario of modifying queue parameters at the same time during multi-network port MUE streaming, the unic completion event is interrupted when queue resources such 
as channels are destroyed. Acquiring queue resources such as the rq and sq then triggers a call trace in this scenario. This patch fixes the problem by adding a channel_invalid state bit. Fixes: d8164d3745d4 ("net: unic: add io basic Rx/Tx functionality for unic") Signed-off-by: Yaoyao Tu Signed-off-by: Fengyan Mu Signed-off-by: huwentao --- drivers/net/ub/unic/unic_channel.c | 3 +++ drivers/net/ub/unic/unic_dev.c | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/drivers/net/ub/unic/unic_channel.c b/drivers/net/ub/unic/unic_channel.c index eb2d838035c7..66f70eebcfb9 100644 --- a/drivers/net/ub/unic/unic_channel.c +++ b/drivers/net/ub/unic/unic_channel.c @@ -235,11 +235,14 @@ static int unic_modify_channels(struct unic_dev *unic_dev, int ret; mutex_lock(&channels->mutex); + set_bit(UNIC_STATE_CHANNEL_INVALID, &unic_dev->state); unic_uninit_changed_channels(unic_dev); unic_dev_change_channels_param(unic_dev, new_param); ret = unic_init_changed_channels(unic_dev); + if (!ret) + clear_bit(UNIC_STATE_CHANNEL_INVALID, &unic_dev->state); mutex_unlock(&channels->mutex); return ret; } diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index 24dd70339957..f99e0bef5642 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -996,6 +996,8 @@ int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, int ret; mutex_lock(&channels->mutex); + + set_bit(UNIC_STATE_CHANNEL_INVALID, &unic_dev->state); __unic_uninit_channels(unic_dev); channels->rss_size = new_rss_size; @@ -1007,6 +1009,8 @@ int unic_change_rss_size(struct unic_dev *unic_dev, u32 new_rss_size, if (ret) dev_err(unic_dev->comdev.adev->dev.parent, "failed to change rss_size, ret = %d.\n", ret); + else + clear_bit(UNIC_STATE_CHANNEL_INVALID, &unic_dev->state); mutex_unlock(&channels->mutex); -- Gitee From 314dacc95de41fb0e35b3ea284c85e00c44a3f97 Mon Sep 17 00:00:00 2001 From: Yaoyao Tu Date: Mon, 15 Dec 2025 21:06:30 +0800 Subject: [PATCH 229/243] net: unic: Fix the calltrace caused by modifying queue parameters in the MUE scenario commit 7df248c3f03d95decd61865b82bc76ef81354dc0 openEuler This issue occurs in the multi-network-port MUE streaming scenario when queue parameters are modified concurrently. In this scenario, the unic completion event interrupt arrives while queue resources such as the channels are being destroyed, and obtaining queue resources like the rq and sq then hangs with a call trace.
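A compilable model of the guard (a plain int flag stands in for the driver's state bit; in the kernel this would be set_bit/clear_bit/test_bit): event handlers bail out while the channels are marked invalid, and the flag is cleared only after a successful rebuild:

  #include <stdbool.h>
  #include <stdio.h>

  static volatile int channel_invalid; /* models the state bit */

  /* reconfiguration path: mark channels unusable around the rebuild */
  static int modify_channels(bool rebuild_fails)
  {
      channel_invalid = 1;
      /* ... uninit channels, change params, init channels ... */
      if (rebuild_fails)
          return -1;         /* bit stays set: handlers keep bailing */
      channel_invalid = 0;
      return 0;
  }

  /* completion-event path: bail out instead of touching freed queues */
  static void completion_event(void)
  {
      if (channel_invalid) {
          printf("event ignored: channels being rebuilt\n");
          return;
      }
      printf("event handled\n");
  }

  int main(void)
  {
      completion_event();
      modify_channels(false);
      completion_event();
      return 0;
  }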
Fixes: d8164d3745d4 ("net: unic: add io basic Rx/Tx functionality for unic") Signed-off-by: Yaoyao Tu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_event.c | 12 +++++++++--- drivers/net/ub/unic/unic_reset.c | 8 ++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 796966703751..9c4c8d9b80d3 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -153,6 +153,11 @@ static void unic_activate_handler(struct auxiliary_device *adev, bool activate) static void unic_ub_port_reset(struct unic_dev *unic_dev, bool link_up) { + struct net_device *netdev = unic_dev->comdev.netdev; + + if (!netif_running(netdev)) + return; + if (link_up) unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_UP; else @@ -163,11 +168,15 @@ static void unic_eth_port_reset(struct net_device *netdev, bool link_up) { rtnl_lock(); + if (!netif_running(netdev)) + goto unlock; + if (link_up) unic_net_open(netdev); else unic_net_stop(netdev); +unlock: rtnl_unlock(); } @@ -176,9 +185,6 @@ static void unic_port_handler(struct auxiliary_device *adev, bool link_up) struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); struct net_device *netdev = unic_dev->comdev.netdev; - if (!netif_running(netdev)) - return; - if (unic_dev_ubl_supported(unic_dev)) unic_ub_port_reset(unic_dev, link_up); else diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c index 6946e8976da0..a0a8bf625993 100644 --- a/drivers/net/ub/unic/unic_reset.c +++ b/drivers/net/ub/unic/unic_reset.c @@ -23,7 +23,6 @@ static void unic_reset_down(struct auxiliary_device *adev) { struct unic_dev *priv = (struct unic_dev *)dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->comdev.netdev; - bool if_running; int ret; if (!test_bit(UNIC_STATE_INITED, &priv->state) || @@ -33,7 +32,6 @@ static void unic_reset_down(struct auxiliary_device *adev) } set_bit(UNIC_STATE_RESETTING, &priv->state); - if_running = netif_running(netdev); unic_info(priv, "unic reset start.\n"); @@ -53,7 +51,7 @@ static void unic_reset_down(struct auxiliary_device *adev) set_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &priv->vport.state); rtnl_lock(); - ret = if_running ? unic_net_stop(netdev) : 0; + ret = netif_running(netdev) ? unic_net_stop(netdev) : 0; rtnl_unlock(); if (ret) unic_err(priv, "failed to stop unic net, ret = %d.\n", ret); @@ -84,7 +82,6 @@ static void unic_reset_init(struct auxiliary_device *adev) { struct unic_dev *priv = (struct unic_dev *)dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->comdev.netdev; - bool if_running; int ret; if (!test_bit(UNIC_STATE_RESETTING, &priv->state)) @@ -97,11 +94,10 @@ static void unic_reset_init(struct auxiliary_device *adev) unic_query_ip_addr(adev); unic_start_period_task(netdev); - if_running = netif_running(netdev); clear_bit(UNIC_STATE_RESETTING, &priv->state); clear_bit(UNIC_STATE_DISABLED, &priv->state); rtnl_lock(); - ret = if_running ? unic_net_open(netdev) : 0; + ret = netif_running(netdev) ? 
unic_net_open(netdev) : 0; rtnl_unlock(); if (ret) unic_err(priv, "failed to up net, ret = %d.\n", ret); -- Gitee From 416d1b399327968866c17ae496856fdf42146b46 Mon Sep 17 00:00:00 2001 From: Guangwei Zhang Date: Fri, 12 Dec 2025 17:41:40 +0800 Subject: [PATCH 230/243] ub: ubase: CtrlQ retry message uses the same seq commit f9f7e0c614630b5b2fa86e18fd7a52cd1e2a99f9 openEuler This patch changes the sequence number for retransmission upon ctrlq timeout to be the same as the original sequence number. Fixes: d7ce08663cc5 ("ub: ubase: Supports for ctrl queue management.") Signed-off-by: Guangwei Zhang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_cmd.h | 2 +- drivers/ub/ubase/ubase_ctrlq.c | 124 ++++++++++++++++-------------- drivers/ub/ubase/ubase_ctrlq.h | 2 + include/ub/ubase/ubase_comm_dev.h | 34 ++++---- 4 files changed, 85 insertions(+), 77 deletions(-) diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index ae34dccfdd01..d6f2def22117 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -14,7 +14,7 @@ #define UBASE_CMDQ_DESC_NUM_S 3 #define UBASE_CMDQ_DESC_NUM 1024 #define UBASE_CMDQ_TX_TIMEOUT 300000 -#define UBASE_CMDQ_MBX_TX_TIMEOUT 50 +#define UBASE_CMDQ_MBX_TX_TIMEOUT 500 #define UBASE_CMDQ_CLEAR_WAIT_TIME 200 #define UBASE_CMDQ_WAIT_TIME 10 diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 5f6b6cf2559e..34a36843a5f6 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -554,11 +554,31 @@ static void ubase_ctrlq_send_to_csq(struct ubase_dev *udev, ubase_ctrlq_csq_report_irq(udev); } +static int ubase_ctrlq_check_csq_enough(struct ubase_dev *udev, u16 num) +{ + struct ubase_ctrlq_ring *csq = &udev->ctrlq.csq; + + csq->ci = (u16)ubase_read_dev(&udev->hw, UBASE_CTRLQ_CSQ_HEAD_REG); + if (num > ubase_ctrlq_remain_space(udev)) { + ubase_warn(udev, + "no enough space in ctrlq, ci = %u, num = %u.\n", + csq->ci, num); + return -EBUSY; + } + + return 0; +} + static int ubase_ctrlq_send_msg_to_sq(struct ubase_dev *udev, struct ubase_ctrlq_base_block *head, struct ubase_ctrlq_msg *msg, u8 num) { + int ret; + if (ubase_dev_ctrlq_supported(udev)) { + ret = ubase_ctrlq_check_csq_enough(udev, num); + if (ret) + return ret; ubase_ctrlq_send_to_csq(udev, head, msg, num); return 0; } @@ -710,19 +730,19 @@ static int ubase_ctrlq_msg_check(struct ubase_dev *udev, return -EINVAL; } -static int ubase_ctrlq_check_csq_enough(struct ubase_dev *udev, u16 num) +static int ubase_ctrlq_check_send_state(struct ubase_dev *udev, + struct ubase_ctrlq_msg *msg) { - struct ubase_ctrlq_ring *csq = &udev->ctrlq.csq; - - if (!ubase_dev_ctrlq_supported(udev)) - return 0; + if (udev->reset_stage == UBASE_RESET_STAGE_UNINIT && + !(msg->opcode == UBASE_CTRLQ_OPC_CTRLQ_CTRL && + msg->service_type == UBASE_CTRLQ_SER_TYPE_DEV_REGISTER)) { + ubase_dbg(udev, "ctrlq send is disabled.\n"); + return -EAGAIN; + } - csq->ci = (u16)ubase_read_dev(&udev->hw, UBASE_CTRLQ_CSQ_HEAD_REG); - if (num > ubase_ctrlq_remain_space(udev)) { - ubase_warn(udev, - "no enough space in ctrlq, ci = %u, num = %u.\n", - csq->ci, num); - return -EBUSY; + if (!test_bit(UBASE_CTRLQ_STATE_ENABLE, &udev->ctrlq.state)) { + ubase_warn(udev, "ctrlq is disabled in csq.\n"); + return -EAGAIN; } return 0; @@ -730,26 +750,22 @@ static int ubase_ctrlq_check_csq_enough(struct ubase_dev *udev, u16 num) static int ubase_ctrlq_send_real(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, + u16 num, struct ubase_ctrlq_ue_info *ue_info) 
{ struct ubase_ctrlq_ring *csq = &udev->ctrlq.csq; struct ubase_ctrlq_base_block head = {0}; - u16 seq, num; + u16 seq, retry = 0; int ret; - num = ubase_ctrlq_calc_bb_num(msg->in_size); - spin_lock_bh(&csq->lock); - ret = ubase_ctrlq_check_csq_enough(udev, num); - if (ret) - goto unlock; - if (!ubase_ctrlq_msg_is_resp(msg)) { ret = ubase_ctrlq_alloc_seq(udev, msg, &seq); if (ret) { ubase_warn(udev, "no enough seq in ctrlq.\n"); - goto unlock; + spin_unlock_bh(&csq->lock); + return ret; } } else { seq = msg->resp_seq; @@ -757,21 +773,32 @@ static int ubase_ctrlq_send_real(struct ubase_dev *udev, ubase_ctrlq_addto_msg_queue(udev, seq, msg, ue_info); + spin_unlock_bh(&csq->lock); + head.bb_num = num; head.seq = cpu_to_le16(seq); ubase_ctrlq_fill_first_bb(udev, &head, msg, ue_info); - ret = ubase_ctrlq_send_msg_to_sq(udev, &head, msg, num); - if (ret) { - spin_unlock_bh(&csq->lock); - if (!ubase_ctrlq_msg_is_resp(msg)) - ubase_ctrlq_free_seq(udev, seq); - return ret; - } - spin_unlock_bh(&csq->lock); + do { + if (retry) { + msleep(UBASE_CTRLQ_RETRY_INTERVAL); + ubase_info(udev, "Ctrlq send msg retry = %u.\n", retry); + } - if (ubase_ctrlq_msg_is_sync_req(msg)) - ret = ubase_ctrlq_wait_completed(udev, seq, msg); + ret = ubase_ctrlq_check_send_state(udev, msg); + if (ret) + goto free_seq; + spin_lock_bh(&csq->lock); + ret = ubase_ctrlq_send_msg_to_sq(udev, &head, msg, num); + spin_unlock_bh(&csq->lock); + if (ret == -ETIMEDOUT) + continue; + else if (ret) + goto free_seq; + + if (ubase_ctrlq_msg_is_sync_req(msg)) + ret = ubase_ctrlq_wait_completed(udev, seq, msg); + } while (ret == -ETIMEDOUT && retry++ < UBASE_CTRLQ_RETRY_TIMES); if (ubase_ctrlq_msg_is_sync_req(msg) || ubase_ctrlq_msg_is_notify_req(msg)) @@ -779,48 +806,27 @@ static int ubase_ctrlq_send_real(struct ubase_dev *udev, return ret; -unlock: - spin_unlock_bh(&csq->lock); +free_seq: + if (!ubase_ctrlq_msg_is_resp(msg)) + ubase_ctrlq_free_seq(udev, seq); return ret; } int __ubase_ctrlq_send(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, struct ubase_ctrlq_ue_info *ue_info) { -#define UBASE_CTRLQ_RETRY_TIMES 3 -#define UBASE_RETRY_INTERVAL 100 - - int ret, retry_cnt = 0; + int ret; + u16 num; ret = ubase_ctrlq_msg_check(udev, msg); if (ret) return ret; - while (retry_cnt++ <= UBASE_CTRLQ_RETRY_TIMES) { - if (udev->reset_stage == UBASE_RESET_STAGE_UNINIT && - !(msg->opcode == UBASE_CTRLQ_OPC_CTRLQ_CTRL && - msg->service_type == UBASE_CTRLQ_SER_TYPE_DEV_REGISTER)) { - ubase_dbg(udev, "ctrlq send is disabled.\n"); - return -EAGAIN; - } - - if (!test_bit(UBASE_CTRLQ_STATE_ENABLE, &udev->ctrlq.state)) { - ubase_warn(udev, "ctrlq is disabled in csq.\n"); - return -EAGAIN; - } + num = ubase_ctrlq_calc_bb_num(msg->in_size); - atomic_inc(&udev->ctrlq.req_cnt); - ret = ubase_ctrlq_send_real(udev, msg, ue_info); - atomic_dec(&udev->ctrlq.req_cnt); - if (ret == -ETIMEDOUT && retry_cnt <= UBASE_CTRLQ_RETRY_TIMES) { - ubase_info(udev, - "Ctrlq send msg retry, retry cnt = %d.\n", - retry_cnt); - msleep(UBASE_RETRY_INTERVAL); - } else { - break; - } - } + atomic_inc(&udev->ctrlq.req_cnt); + ret = ubase_ctrlq_send_real(udev, msg, num, ue_info); + atomic_dec(&udev->ctrlq.req_cnt); return ret; } diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index e56728e4baae..93960c181e3b 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ b/drivers/ub/ubase/ubase_ctrlq.h @@ -21,6 +21,8 @@ #define UBASE_CTRLQ_DEAD_TIME 40000 #define UBASE_CTRLQ_CHAN_DISABLE_OPC 0x1 #define UBASE_CTRL_PLANE_INIT_RES BIT(0) +#define UBASE_CTRLQ_RETRY_TIMES 
3 +#define UBASE_CTRLQ_RETRY_INTERVAL 100 enum ubase_ctrlq_state { UBASE_CTRLQ_STATE_ENABLE, diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index fbec3624c87b..6006019d4b8c 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -245,26 +245,26 @@ struct ubase_resource_space { /** * struct ubase_adev_qos - ubase auxiliary device qos information - * @rdma_tp_vl_num: rdma tp vl number - * @rdma_ctp_vl_num: rdma ctp vl number - * @rdma_tp_resp_vl_offset: rdma tp response vl offset, - * rdma_tp_resp_vl = rdma_ctp_resp_vl + rdma_tp_resp_vl_offset - * @rdma_ctp_resp_vl_offset: rdma ctp response vl offset, - * rdma_ctp_resp_vl = rdma_ctp_resp_vl + rdma_ctp_resp_vl_offset - * @max_vl: max vl number - * @resv: reserved bits - * @rdma_tp_sl_num: rdma tp sl number - * @rdma_ctp_sl_num: rdma ctp sl number + * @tp_sl_num: tp sl number + * @tp_sl: tp sl + * @ctp_sl_num: ctp sl number + * @ctp_sl: ctp sl + * @tp_vl_num: tp vl number + * + * @tp_resp_vl_offset: tp response vl offset, + * tp_resp_vl = tp_resp_vl + tp_resp_vl_offset + * @tp_req_vl: tp request number + * @ctp_vl_num: ctp vl number + * @ctp_resp_vl_offset: ctp response vl offset, + * ctp_resp_vl = ctp_resp_vl + ctp_resp_vl_offset + * @ctp_req_vl: ctp request vl + * @dscp_vl: dscp to vl mapping * @nic_sl_num: nic sl number - * @nic_vl_num: nic vl number - * @rdma_tp_req_vl: rdma tp request vl - * @rdma_ctp_req_vl: rdma ctp request vl - * @rdma_tp_sl: rdma tp sl - * @rdma_ctp_sl: rdma ctp sl * @nic_sl: nic sl + * @nic_vl_num: nic vl number * @nic_vl: nic vl - * @sl_vl: sl to vl mapping - * @rdma_dscp_vl: rdma dscp to vl mapping + * @ue_max_vl_id: ue max vl index + * @ue_sl_vl: ue sl to vl mapping */ struct ubase_adev_qos { /* udma/cdma resource */ -- Gitee From 7f25d7788728c7d19be99f24cb8bc32926f915a9 Mon Sep 17 00:00:00 2001 From: Yixi Shen Date: Mon, 22 Dec 2025 13:43:27 +0800 Subject: [PATCH 231/243] ub: ubase: Resolve mailbox timeout issue. commit 16affa21f48797ba971641b9be8f6636fc528a39 openEuler In the current mailbox processing, after a task completes, the hardware reports an EQ event and the driver schedules delayed work to process it. However, when the CPU load is high, scheduling delays may occur, resulting in timeouts. Modify the EQ event handling so that hardware mailbox EQ events are handled in the interrupt top half.
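The dispatch change, modeled as stand-alone C (the event names and helpers are illustrative): mailbox completions are finished inline in the event handler instead of being queued behind other deferred work:

  #include <stdio.h>

  enum { EVENT_TYPE_MB = 1, EVENT_TYPE_OTHER = 2 };

  static void complete_mailbox(void)    { printf("mbx completed inline\n"); }
  static void queue_deferred_work(void) { printf("work queued\n"); }

  /* top-half dispatch: mailbox completions no longer wait on a workqueue */
  static void aeq_handler(int event_type)
  {
      if (event_type == EVENT_TYPE_MB)
          complete_mailbox();    /* signal the waiter immediately */
      else
          queue_deferred_work(); /* everything else stays deferred */
  }

  int main(void)
  {
      aeq_handler(EVENT_TYPE_MB);
      aeq_handler(EVENT_TYPE_OTHER);
      return 0;
  }

Completing the waiter directly in the handler removes the workqueue scheduling latency from the mailbox timeout budget.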
Fixes: 8d68017f37fa ("ub: ubase: support for command process") Signed-off-by: Yixi Shen Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_cmd.c | 25 ------------------------- drivers/ub/ubase/ubase_cmd.h | 5 +---- drivers/ub/ubase/ubase_eq.c | 28 +++++++++++++++++----------- drivers/ub/ubase/ubase_eq.h | 2 +- 4 files changed, 19 insertions(+), 41 deletions(-) diff --git a/drivers/ub/ubase/ubase_cmd.c b/drivers/ub/ubase/ubase_cmd.c index ab554343136f..47695e5c7ef7 100644 --- a/drivers/ub/ubase/ubase_cmd.c +++ b/drivers/ub/ubase/ubase_cmd.c @@ -666,31 +666,6 @@ static bool ubase_cmd_is_mbx_avail(struct ubase_dev *udev) return true; } -int ubase_cmd_mbx_event_cb(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct ubase_event_nb *ev_nb = container_of(nb, struct ubase_event_nb, nb); - struct ubase_aeq_notify_info *info = data; - struct ubase_aeqe *aeqe = info->aeqe; - struct ubase_dev *udev = ev_nb->back; - struct ubase_mbx_event_context *ctx; - - ctx = &udev->mb_cmd.ctx; - if (aeqe->event.cmd.seq_num != ctx->seq_num) { - ubase_err(udev, - "mbx seq num is different, cmd seq_num = %u, ctx seq_num = %u.\n", - aeqe->event.cmd.seq_num, ctx->seq_num); - return NOTIFY_DONE; - } - - ctx->result = aeqe->event.cmd.status == 0 ? 0 : -EIO; - ctx->out_param = aeqe->event.cmd.out_param; - - complete(&ctx->done); - - return NOTIFY_OK; -} - static int ubase_cmd_wait_mbx_completed(struct ubase_dev *udev, union ubase_mbox *mbx) { diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index d6f2def22117..c31c334d0a3f 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -14,7 +14,7 @@ #define UBASE_CMDQ_DESC_NUM_S 3 #define UBASE_CMDQ_DESC_NUM 1024 #define UBASE_CMDQ_TX_TIMEOUT 300000 -#define UBASE_CMDQ_MBX_TX_TIMEOUT 500 +#define UBASE_CMDQ_MBX_TX_TIMEOUT 50 #define UBASE_CMDQ_CLEAR_WAIT_TIME 200 #define UBASE_CMDQ_WAIT_TIME 10 @@ -311,9 +311,6 @@ int __ubase_cmd_send_in(struct ubase_dev *udev, struct ubase_cmd_buf *in); int __ubase_cmd_send_inout(struct ubase_dev *udev, struct ubase_cmd_buf *in, struct ubase_cmd_buf *out); -int ubase_cmd_mbx_event_cb(struct notifier_block *nb, unsigned long action, - void *data); - int __ubase_register_crq_event(struct ubase_dev *udev, struct ubase_crq_event_nb *nb); void __ubase_unregister_crq_event(struct ubase_dev *udev, u16 opcode); diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 2afe9c3bf7fc..ff2a2e3ea721 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -402,6 +402,19 @@ static void ubase_init_aeq_work(struct ubase_dev *udev, struct ubase_aeqe *aeqe) queue_work(udev->ubase_async_wq, &aeq_work->work); } +static void ubase_mbx_complete(struct ubase_dev *udev, struct ubase_aeqe *aeqe) +{ + struct ubase_mbx_event_context *ctx = &udev->mb_cmd.ctx; + + if (aeqe->event.cmd.seq_num != ctx->seq_num) + return; + + ctx->result = aeqe->event.cmd.status == 0 ? 
0 : -EIO; + ctx->out_param = aeqe->event.cmd.out_param; + + complete(&ctx->done); +} + static int ubase_async_event_handler(struct ubase_dev *udev) { struct ubase_aeq *aeq = &udev->irq_table.aeq; @@ -415,14 +428,12 @@ static int ubase_async_event_handler(struct ubase_dev *udev) trace_ubase_aeqe(udev->dev, aeqe, eq); - ubase_dbg(udev, - "event_type = 0x%x, sub_type = 0x%x, owner = %u, seq_num = %u, cons_index = %u.\n", - aeqe->event_type, aeqe->sub_type, aeqe->owner, - aeqe->event.cmd.seq_num, eq->cons_index); - ret = IRQ_HANDLED; - ubase_init_aeq_work(udev, aeqe); + if (aeqe->event_type == UBASE_EVENT_TYPE_MB) + ubase_mbx_complete(udev, aeqe); + else + ubase_init_aeq_work(udev, aeqe); ++aeq->eq.cons_index; aeqe = ubase_next_aeqe(udev, aeq); @@ -1170,11 +1181,6 @@ int ubase_register_ae_event(struct ubase_dev *udev) { struct ubase_event_nb ubase_ae_nbs[UBASE_AE_LEVEL_NUM] = { { - UBASE_DRV_UNIC, - UBASE_EVENT_TYPE_MB, - { ubase_cmd_mbx_event_cb }, - udev - }, { UBASE_DRV_UNIC, UBASE_EVENT_TYPE_TP_FLUSH_DONE, { ubase_ae_tp_flush_done }, diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h index 59fa2c620720..18d18f026f0c 100644 --- a/drivers/ub/ubase/ubase_eq.h +++ b/drivers/ub/ubase/ubase_eq.h @@ -34,7 +34,7 @@ #define UBASE_INT_NAME_LEN 32 -#define UBASE_AE_LEVEL_NUM 4 +#define UBASE_AE_LEVEL_NUM 3 /* Vector0 interrupt CMDQ event source register(RW) */ #define UBASE_VECTOR0_CMDQ_SRC_REG 0x18004 -- Gitee From 1ac7be3c8252c43b7c5b1ea76b30aaa1fe9a6144 Mon Sep 17 00:00:00 2001 From: Yixi Shen Date: Mon, 22 Dec 2025 14:06:41 +0800 Subject: [PATCH 232/243] net: unic: Fix ethtool configuration error issue. commit 951ede65b3eb5d79da7753db915002a625c06543 openEuler Because the double pointer passed in points to an uninitialized value, writing through it during assignment accesses an invalid address. Change the allocation scheme to allocate the memory into local variables, and hand it back through the double pointer only after the copy is complete.
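A stand-alone sketch of the bug class and the fix (names are illustrative): build the buffer in a local variable and publish it through the double pointer once it is complete, instead of indexing the caller's uninitialized pointer slot as if it were an array:

  #include <stdio.h>
  #include <stdlib.h>

  /*
   * Buggy shape: writing to out[i] treats the caller's uninitialized
   * pointer slot as an array and corrupts memory.  Safe shape below:
   * fill a local buffer, then assign *out exactly once.
   */
  static int backup_safe(unsigned long **out, unsigned int n)
  {
      unsigned long *buf = calloc(n, sizeof(*buf));

      if (!buf)
          return -1;
      for (unsigned int i = 0; i < n; i++)
          buf[i] = i;  /* stand-in for copying the live stats */
      *out = buf;      /* publish only after it is fully built */
      return 0;
  }

  int main(void)
  {
      unsigned long *stats; /* intentionally uninitialized, like the bug */

      if (backup_safe(&stats, 4) == 0) {
          printf("stats[3]=%lu\n", stats[3]);
          free(stats);
      }
      return 0;
  }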
Fixes: e0ccc63cc72e ("net: unic: support querying and configuring coalesce parameters.") Signed-off-by: Yixi Shen Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/net/ub/unic/unic_ethtool.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index 337d77b16ff4..c027989ac8e8 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -430,28 +430,32 @@ static int unic_backup_stats(struct unic_dev *unic_dev, struct unic_sq_stats **sq_stats, struct unic_rq_stats **rq_stats) { + struct unic_sq_stats *tx_stats; + struct unic_rq_stats *rx_stats; u32 i; - *sq_stats = kcalloc(unic_dev->channels.num, sizeof(**sq_stats), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(*sq_stats)) + tx_stats = kcalloc(unic_dev->channels.num, sizeof(*tx_stats), + GFP_KERNEL); + if (!tx_stats) return -ENOMEM; - *rq_stats = kcalloc(unic_dev->channels.num, sizeof(**rq_stats), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(*rq_stats)) { - if (unic_tx_changed(unic_dev)) - kfree(*sq_stats); + rx_stats = kcalloc(unic_dev->channels.num, sizeof(*rx_stats), + GFP_KERNEL); + if (!rx_stats) { + kfree(tx_stats); return -ENOMEM; } for (i = 0; i < unic_dev->channels.num; i++) { - memcpy(sq_stats[i], &unic_dev->channels.c[i].sq->stats, + memcpy(&tx_stats[i], &unic_dev->channels.c[i].sq->stats, sizeof(struct unic_sq_stats)); - memcpy(rq_stats[i], &unic_dev->channels.c[i].rq->stats, + memcpy(&rx_stats[i], &unic_dev->channels.c[i].rq->stats, sizeof(struct unic_rq_stats)); } + *sq_stats = tx_stats; + *rq_stats = rx_stats; + return 0; } -- Gitee From 1eeb756ca58e1fbf67e6512a8b627206b40f5e96 Mon Sep 17 00:00:00 2001 From: Zhang Lei Date: Wed, 24 Dec 2025 21:21:07 +0800 Subject: [PATCH 233/243] ub: ubase: fix ubase activate/deactivate resp timeout commit 91eb723fc386949fa48850e2a0d79a932f294578 openEuler Currently, the cmdq crq task and the ctrlq crq task use the same workqueue. When the ctrlq crq task takes too long to execute, it blocks the cmdq crq task from responding to the ubase activate/deactivate message. This patch uses two separate workqueues to run the cmdq crq task and the ctrlq crq task.
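The design split, as a tiny compilable model (the queue names loosely mirror the patch and are otherwise illustrative): each task class gets its own queue, so a slow ctrlq callback can no longer starve the cmdq response path:

  #include <stdio.h>

  /* two independent queues: slow ctrlq work no longer blocks cmdq work */
  struct workqueue { const char *name; };

  static struct workqueue wq_main  = { "ubase_service" };
  static struct workqueue wq_ctrlq = { "ubase_ctrlq_service" };

  static void queue_work(struct workqueue *wq, const char *task)
  {
      printf("queue '%s' on %s\n", task, wq->name);
  }

  int main(void)
  {
      /* before: both ran on wq_main, so a long crq callback delayed
       * the cmdq response path; now each has its own queue */
      queue_work(&wq_main, "cmdq crq task");
      queue_work(&wq_ctrlq, "ctrlq crq task");
      return 0;
  }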
Signed-off-by: Zhang Lei Signed-off-by: Fengyan Mu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 25 ++++++++++++++++--------- drivers/ub/ubase/ubase_ctrlq.h | 4 ++-- drivers/ub/ubase/ubase_dev.c | 26 ++++++++++++++++++++++++-- drivers/ub/ubase/ubase_dev.h | 2 ++ drivers/ub/ubase/ubase_eq.c | 6 +++--- 5 files changed, 47 insertions(+), 16 deletions(-) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 34a36843a5f6..18f72cc68235 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -921,7 +921,10 @@ static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, u16 seq) { #define EDRVNOEXIST 255 +#define TIME_COST_THRESHOLD 200 + struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; + unsigned long start_jiffies, time_cost = 0; int ret = -ENOENT; u32 i; @@ -937,16 +940,22 @@ static void ubase_ctrlq_crq_event_callback(struct ubase_dev *udev, ret = -EDRVNOEXIST; break; } + start_jiffies = jiffies; ret = crq_tab->crq_nbs[i].crq_handler(crq_tab->crq_nbs[i].back, head->service_ver, msg_data, msg_data_len, seq); + time_cost = jiffies_to_msecs(jiffies - start_jiffies); break; } } mutex_unlock(&crq_tab->lock); + if (time_cost > TIME_COST_THRESHOLD) + ubase_warn(udev, "ctrlq crq callback executed in %lums.\n", + time_cost); + if (ret == -ENOENT) { ubase_info(udev, "this notice is not supported."); ubase_ctrlq_send_unsupported_resp(udev, head, seq, EOPNOTSUPP); @@ -1149,7 +1158,7 @@ static void ubase_ctrlq_crq_handler(struct ubase_dev *udev) if (udev->log_rs.ctrlq_self_seq_invalid_log_cnt || udev->log_rs.ctrlq_other_seq_invalid_log_cnt) { ubase_warn(udev, - "ubase log rate is limited, ctrlq_self_seq_invalid_log_cnt = %u, ctrlq_other_seq_invalid_log_cnt = %u.\n", + "rate limited log: ctrlq_self_seq_invalid_log_cnt = %u, ctrlq_other_seq_invalid_log_cnt = %u.\n", udev->log_rs.ctrlq_self_seq_invalid_log_cnt, udev->log_rs.ctrlq_other_seq_invalid_log_cnt); udev->log_rs.ctrlq_self_seq_invalid_log_cnt = 0; @@ -1160,16 +1169,16 @@ static void ubase_ctrlq_crq_handler(struct ubase_dev *udev) ubase_ctrlq_task_schedule(udev); } -void ubase_ctrlq_service_task(struct ubase_delay_work *ubase_work) +void ubase_ctrlq_crq_service_task(struct ubase_delay_work *ubase_work) { struct ubase_dev *udev = container_of(ubase_work, struct ubase_dev, - service_task); + ctrlq_service_task); struct ubase_ctrlq_crq_table *crq_tab = &udev->ctrlq.crq_table; if (!test_and_clear_bit(UBASE_STATE_CTRLQ_SERVICE_SCHED, - &udev->service_task.state) || + &udev->ctrlq_service_task.state) || test_and_set_bit(UBASE_STATE_CTRLQ_HANDLING, - &udev->service_task.state)) + &udev->ctrlq_service_task.state)) return; if (time_is_before_eq_jiffies(crq_tab->last_crq_scheduled + @@ -1181,13 +1190,11 @@ void ubase_ctrlq_service_task(struct ubase_delay_work *ubase_work) ubase_ctrlq_crq_handler(udev); - clear_bit(UBASE_STATE_CTRLQ_HANDLING, &udev->service_task.state); + clear_bit(UBASE_STATE_CTRLQ_HANDLING, &udev->ctrlq_service_task.state); } -void ubase_ctrlq_clean_service_task(struct ubase_delay_work *ubase_work) +void ubase_ctrlq_clean_service_task(struct ubase_dev *udev) { - struct ubase_dev *udev = container_of(ubase_work, struct ubase_dev, - service_task); struct ubase_ctrlq_ring *csq = &udev->ctrlq.csq; u16 i, max_seq = ubase_ctrlq_max_seq(udev); struct ubase_ctrlq_msg_ctx *ctx; diff --git a/drivers/ub/ubase/ubase_ctrlq.h b/drivers/ub/ubase/ubase_ctrlq.h index 93960c181e3b..b4ea535642d3 100644 --- a/drivers/ub/ubase/ubase_ctrlq.h +++ 
b/drivers/ub/ubase/ubase_ctrlq.h @@ -119,11 +119,11 @@ int __ubase_ctrlq_send(struct ubase_dev *udev, struct ubase_ctrlq_msg *msg, struct ubase_ctrlq_ue_info *ue_info); bool ubase_ctrlq_check_seq(struct ubase_dev *udev, u16 seq); -void ubase_ctrlq_service_task(struct ubase_delay_work *ubase_work); +void ubase_ctrlq_crq_service_task(struct ubase_delay_work *ubase_work); void ubase_ctrlq_handle_crq_msg(struct ubase_dev *udev, struct ubase_ctrlq_base_block *head, u16 seq, void *msg, u16 data_len); -void ubase_ctrlq_clean_service_task(struct ubase_delay_work *ubase_work); +void ubase_ctrlq_clean_service_task(struct ubase_dev *udev); void ubase_ctrlq_disable_remote(struct ubase_dev *udev); #endif diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index d63a045137e1..11f7e3cffd1a 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -337,6 +337,7 @@ static int ubase_enable_period_service_task(struct ubase_dev *udev) static void ubase_period_service_task(struct work_struct *work) { #define UBASE_STATS_TIMER_INTERVAL (300000 / (UBASE_PERIOD_100MS)) +#define UBASE_CTRLQ_TIMER_INTERVAL (3000 / (UBASE_PERIOD_100MS)) struct ubase_delay_work *ubase_work = container_of(work, struct ubase_delay_work, service_task.work); @@ -352,6 +353,10 @@ static void ubase_period_service_task(struct work_struct *work) !(udev->serv_proc_cnt % UBASE_STATS_TIMER_INTERVAL)) ubase_update_stats_for_all(udev); + if (test_bit(UBASE_STATE_INITED_B, &udev->state_bits) && + !(udev->serv_proc_cnt % UBASE_CTRLQ_TIMER_INTERVAL)) + ubase_ctrlq_clean_service_task(udev); + udev->serv_proc_cnt++; ubase_enable_period_service_task(udev); } @@ -379,13 +384,21 @@ static void ubase_service_task(struct work_struct *work) ubase_crq_service_task(ubase_work); ubase_errhandle_service_task(ubase_work); - ubase_ctrlq_service_task(ubase_work); - ubase_ctrlq_clean_service_task(ubase_work); +} + +static void ubase_ctrlq_service_task(struct work_struct *work) +{ + struct ubase_delay_work *ubase_work = + container_of(work, struct ubase_delay_work, service_task.work); + + ubase_ctrlq_crq_service_task(ubase_work); } static void ubase_init_delayed_work(struct ubase_dev *udev) { INIT_DELAYED_WORK(&udev->service_task.service_task, ubase_service_task); + INIT_DELAYED_WORK(&udev->ctrlq_service_task.service_task, + ubase_ctrlq_service_task); INIT_DELAYED_WORK(&udev->reset_service_task.service_task, ubase_reset_service_task); INIT_DELAYED_WORK(&udev->period_service_task.service_task, @@ -404,6 +417,12 @@ static int ubase_wq_init(struct ubase_dev *udev) goto err_alloc_ubase_wq; } + udev->ubase_ctrlq_wq = UBASE_ALLOC_WQ("ubase_ctrlq_service"); + if (!udev->ubase_ctrlq_wq) { + ubase_err(udev, "failed to alloc ubase ctrlq workqueue.\n"); + goto err_alloc_ubase_ctrlq_wq; + } + udev->ubase_async_wq = UBASE_ALLOC_WQ("ubase_async_service"); if (!udev->ubase_async_wq) { ubase_err(udev, "failed to alloc ubase async workqueue.\n"); @@ -438,6 +457,8 @@ static int ubase_wq_init(struct ubase_dev *udev) err_alloc_ubase_reset_wq: destroy_workqueue(udev->ubase_async_wq); err_alloc_ubase_async_wq: + destroy_workqueue(udev->ubase_ctrlq_wq); +err_alloc_ubase_ctrlq_wq: destroy_workqueue(udev->ubase_wq); err_alloc_ubase_wq: return -ENOMEM; @@ -449,6 +470,7 @@ static void ubase_wq_uninit(struct ubase_dev *udev) destroy_workqueue(udev->ubase_period_wq); destroy_workqueue(udev->ubase_reset_wq); destroy_workqueue(udev->ubase_async_wq); + destroy_workqueue(udev->ubase_ctrlq_wq); destroy_workqueue(udev->ubase_wq); } diff --git 
a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index f1cba478510a..093d1374c805 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -300,12 +300,14 @@ struct ubase_dev { struct ubase_irq_table irq_table; struct ubase_mbox_cmd mb_cmd; struct workqueue_struct *ubase_wq; + struct workqueue_struct *ubase_ctrlq_wq; struct workqueue_struct *ubase_async_wq; struct workqueue_struct *ubase_reset_wq; struct workqueue_struct *ubase_period_wq; struct workqueue_struct *ubase_arq_wq; unsigned long serv_proc_cnt; struct ubase_delay_work service_task; + struct ubase_delay_work ctrlq_service_task; struct ubase_delay_work reset_service_task; struct ubase_delay_work period_service_task; struct ubase_delay_work arq_service_task; diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index ff2a2e3ea721..18414018c16c 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -213,10 +213,10 @@ static void ubase_errhandle_task_schedule(struct ubase_dev *udev) void ubase_ctrlq_task_schedule(struct ubase_dev *udev) { if (!test_and_set_bit(UBASE_STATE_CTRLQ_SERVICE_SCHED, - &udev->service_task.state)) { + &udev->ctrlq_service_task.state)) { udev->ctrlq.crq_table.last_crq_scheduled = jiffies; - mod_delayed_work(udev->ubase_wq, - &udev->service_task.service_task, 0); + mod_delayed_work(udev->ubase_ctrlq_wq, + &udev->ctrlq_service_task.service_task, 0); } } -- Gitee From 8acda11730e25d5dd51a1c49ff2a0797d9e265ad Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Wed, 24 Dec 2025 13:14:28 +0800 Subject: [PATCH 234/243] ub: cdma: add ioctl logs and error codes commit 9aa302d3264cab6ff26c2df119f1b7f30bdc7edb openEuler Add logs to the cdma ioctl handlers and return more precise error codes: unknown commands now return -ENOIOCTLCMD instead of -EINVAL. Fixes: 34c67ed8f4c1 ("ub: cdma: support for cdma kernelspace north-south compatibility requirements") Signed-off-by: Zhipeng Lu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_event.c | 26 ++++++++++++++++++-------- drivers/ub/cdma/cdma_ioctl.c | 2 +- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index e8ecb7f8c4f6..240934a733ed 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -181,22 +181,25 @@ static long cdma_jfce_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cdma_jfce *jfce = (struct cdma_jfce *)filp->private_data; - unsigned int nr; - int ret; + unsigned int nr = (unsigned int)_IOC_NR(cmd); + long ret = -ENOIOCTLCMD; - if (!arg || !jfce || _IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { - pr_err("invalid parameter, cmd = %u.\n", cmd); + if (!arg || !jfce) { + pr_err("jfce ioctl invalid parameter.\n"); return -EINVAL; } - nr = (unsigned int)_IOC_NR(cmd); + if (_IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { + pr_err("jfce ioctl invalid cmd type, cmd = %u.\n", cmd); + return ret; + } + switch (nr) { case JFCE_CMD_WAIT_EVENT: ret = cdma_jfce_wait(jfce, filp, arg); break; default: - ret = -ENOIOCTLCMD; - break; + pr_err("jfce ioctl wrong nr = %u.\n", nr); } return ret; @@ -588,8 +591,15 @@ static long cdma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long a unsigned int nr = (unsigned int)_IOC_NR(cmd); long ret = -ENOIOCTLCMD; - if (!jfae) + if (!jfae) { + pr_err("jfae ioctl invalid parameter.\n"); return -EINVAL; + } + + if (_IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { + pr_err("jfae ioctl invalid cmd type, cmd = %u.\n", cmd); + return ret; + } switch (nr) { case JFAE_CMD_GET_ASYNC_EVENT: diff --git a/drivers/ub/cdma/cdma_ioctl.c 
b/drivers/ub/cdma/cdma_ioctl.c index 4a30cbbd383f..70c3e0d3b4f2 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -811,7 +811,7 @@ int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) dev_err(cdev->dev, "invalid cdma user command or no handler, command = %u\n", hdr->command); - return -EINVAL; + return -ENOIOCTLCMD; } mutex_lock(&cfile->ctx_mutex); -- Gitee From 8eb1be0bfe4ade2f343e00995de01a14bc30acdf Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Thu, 25 Dec 2025 11:07:01 +0800 Subject: [PATCH 235/243] ub: ubfi: skip address of subtable 0 in ubrt commit 89c7290fa351a4f062cb6508570e4a388d19a6b9 openEuler In the ubios method, the address of subtable 0 is an interval address, so it is skipped during processing. Fixes: 312a6b7fabe9 ("ub:ubfi: ubfi driver parse ubc information from ubrt") Signed-off-by: Yuhao Xiang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubfi/ubrt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/ub/ubfi/ubrt.c b/drivers/ub/ubfi/ubrt.c index ecf975526e72..463707f4bcc2 100644 --- a/drivers/ub/ubfi/ubrt.c +++ b/drivers/ub/ubfi/ubrt.c @@ -143,6 +143,8 @@ int handle_dts_ubrt(void) pr_info("ubios sub table count is %u\n", ubios_table->count); for (i = 0; i < ubios_table->count; i++) { + if (ubios_table->sub_tables[i] == 0) + continue; memset(name, 0, UB_TABLE_HEADER_NAME_LEN); ret = get_ubrt_table_name(name, ubios_table->sub_tables[i]); if (ret) -- Gitee From 825e0cecedfa179cf4cc70beb23726df17b210bb Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 17:01:31 +0800 Subject: [PATCH 236/243] ub: udma: Support retrieving the migr field. commit 1685c9263f82b70a4f53a3719baf0de8847f9cec openEuler This patch adds support for retrieving the migr field. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index bdd4617cb4c4..9baf7d1c8495 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -51,7 +51,9 @@ struct udma_ctrlq_tpid { uint32_t tpid : 24; uint32_t tpn_cnt : 8; uint32_t tpn_start : 24; - uint32_t rsv : 8; + uint32_t rsv0 : 4; + uint32_t migr : 1; + uint32_t rsv1 : 3; }; struct udma_ctrlq_tpid_list_rsp { -- Gitee From d7982c58cc9a984fd3b4562806777f8a36801967 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 10 Dec 2025 21:01:41 +0800 Subject: [PATCH 237/243] ub: udma: bugfix related to print location. commit 4650a8abb5f2656fbe01b9bc9c0898f3f369fb53 openEuler This patch fixes misplaced log prints: error messages are moved outside of spinlock and xarray-lock critical sections, and teardown list walks that delete entries now use the _safe iterators.
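For illustration, the relocation pattern applied throughout the diff below is sketched here; this is a minimal sketch only, and the table/device names are simplified stand-ins rather than the driver's real types:

    /* Before: the console print runs inside the critical section. */
    spin_lock(&table->lock);
    id = ida_alloc_range(&table->ida, table->min, table->max, GFP_ATOMIC);
    if (id < 0) {
        dev_err(dev, "failed to alloc id, ret = %d.\n", id);
        spin_unlock(&table->lock);
        return id;
    }

    /* After: drop the lock first, then log the failure. */
    spin_lock(&table->lock);
    id = ida_alloc_range(&table->ida, table->min, table->max, GFP_ATOMIC);
    if (id < 0) {
        spin_unlock(&table->lock);
        dev_err(dev, "failed to alloc id, ret = %d.\n", id);
        return id;
    }

Keeping the print outside the lock shortens the critical section and avoids slow console I/O while other CPUs spin on the lock.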
Fixes: d0c38b53548d ("ub: udma: Support query ub memory info.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 12 +++++++----- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 1 - drivers/ub/urma/hw/udma/udma_ctx.c | 8 +++++--- drivers/ub/urma/hw/udma/udma_eq.c | 10 +++++----- drivers/ub/urma/hw/udma/udma_jfr.c | 2 +- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 07d57a5ce96b..830c22f732a1 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -249,8 +249,8 @@ int udma_id_alloc_auto_grow(struct udma_dev *udma_dev, struct udma_ida *ida_tabl id = ida_alloc_range(&ida_table->ida, ida_table->min, ida_table->max, GFP_ATOMIC); if (id < 0) { - dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); spin_unlock(&ida_table->lock); + dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); return id; } } @@ -291,9 +291,9 @@ int udma_specify_adv_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bit spin_lock(&bitmap_table->lock); if ((bit[block] & (1U << bit_idx)) == 0) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "user specify id %u been used.\n", user_id); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } @@ -347,10 +347,10 @@ int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitma ; if (i == bitmap_cnt) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "all bitmaps have been used, bitmap_cnt = %u.\n", bitmap_cnt); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } @@ -370,9 +370,9 @@ int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitma ; if (i == bitmap_cnt || (i + 1) * NUM_JETTY_PER_GROUP > bitmap_table->n_bits) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "no completely bitmap for Jetty group.\n"); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } @@ -858,9 +858,11 @@ void udma_init_hugepage(struct udma_dev *dev) void udma_destroy_hugepage(struct udma_dev *dev) { struct udma_hugepage_priv *priv; + struct udma_hugepage_priv *tmp; mutex_lock(&dev->hugepage_lock); - list_for_each_entry(priv, &dev->hugepage_list, list) { + list_for_each_entry_safe(priv, tmp, &dev->hugepage_list, list) { + list_del(&priv->list); dev_info(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", priv->va_len >> UDMA_HUGEPAGE_SHIFT); udma_unpin_k_addr(priv->umem); diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 86d68ace7000..2782fbe92907 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -73,7 +73,6 @@ static struct udma_ue_idx_table *udma_find_ue_idx_by_tpn(struct udma_dev *udev, xa_lock(&udev->tpn_ue_idx_table); tp_ue_idx_info = xa_load(&udev->tpn_ue_idx_table, tpn); if (!tp_ue_idx_info) { - dev_warn(udev->dev, "ue idx info not exist, tpn %u.\n", tpn); xa_unlock(&udev->tpn_ue_idx_table); return NULL; diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index ccc3b4905af9..8caa0765b420 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -96,6 +96,7 @@ int udma_free_ucontext(struct ubcore_ucontext *ucontext) { struct udma_dev *udma_dev = to_udma_dev(ucontext->ub_dev); struct udma_hugepage_priv *priv; + struct udma_hugepage_priv *tmp; struct vm_area_struct *vma; struct udma_context *ctx; int ret; @@ -111,7 +112,8 @@ int 
udma_free_ucontext(struct ubcore_ucontext *ucontext) ummu_sva_unbind_device(ctx->sva); mutex_lock(&ctx->hugepage_lock); - list_for_each_entry(priv, &ctx->hugepage_list, list) { + list_for_each_entry_safe(priv, tmp, &ctx->hugepage_list, list) { + list_del(&priv->list); if (current->mm) { mmap_write_lock(current->mm); vma = find_vma(current->mm, (unsigned long)priv->va_base); @@ -148,9 +150,9 @@ static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct ubcore_ucontext *uc xa_lock(&dev->jetty_table.xa); sq = xa_load(&dev->jetty_table.xa, j_id); if (!sq) { + xa_unlock(&dev->jetty_table.xa); dev_err(dev->dev, "mmap failed, j_id: %llu not exist\n", j_id); - xa_unlock(&dev->jetty_table.xa); return -EINVAL; } @@ -160,9 +162,9 @@ static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct ubcore_ucontext *uc jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; if (jetty_uctx != uctx) { + xa_unlock(&dev->jetty_table.xa); dev_err(dev->dev, "mmap failed, j_id: %llu, uctx invalid\n", j_id); - xa_unlock(&dev->jetty_table.xa); return -EINVAL; } xa_unlock(&dev->jetty_table.xa); diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index d3b6813b1d55..dab9130df761 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -84,9 +84,9 @@ static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock(&udma_dev->jetty_table.xa); udma_sq = (struct udma_jetty_queue *)xa_load(&udma_dev->jetty_table.xa, queue_num); if (!udma_sq) { + xa_unlock(&udma_dev->jetty_table.xa); dev_warn(udma_dev->dev, "async event for bogus queue number = %u.\n", queue_num); - xa_unlock(&udma_dev->jetty_table.xa); return -EINVAL; } @@ -138,9 +138,9 @@ static int udma_ae_jfr_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock(&udma_dev->jfr_table.xa); udma_jfr = (struct udma_jfr *)xa_load(&udma_dev->jfr_table.xa, queue_num); if (!udma_jfr) { + xa_unlock(&udma_dev->jfr_table.xa); dev_warn(udma_dev->dev, "async event for bogus jfr number = %u.\n", queue_num); - xa_unlock(&udma_dev->jfr_table.xa); return -EINVAL; } @@ -172,9 +172,9 @@ static int udma_ae_jfc_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock_irqsave(&udma_dev->jfc_table.xa, flags); udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, queue_num); if (!udma_jfc) { + xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); dev_warn(udma_dev->dev, "async event for bogus jfc number = %u.\n", queue_num); - xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); return -EINVAL; } @@ -206,9 +206,9 @@ static int udma_ae_jetty_group_check_err(struct auxiliary_device *adev, uint32_t xa_lock(&udma_dev->jetty_grp_table.xa); udma_jetty_grp = (struct udma_jetty_grp *)xa_load(&udma_dev->jetty_grp_table.xa, queue_num); if (!udma_jetty_grp) { + xa_unlock(&udma_dev->jetty_grp_table.xa); dev_warn(udma_dev->dev, "async event for bogus jetty group number = %u.\n", queue_num); - xa_unlock(&udma_dev->jetty_grp_table.xa); return -EINVAL; } @@ -373,9 +373,9 @@ static int udma_save_tpn_ue_idx_info(struct udma_dev *udma_dev, uint8_t ue_idx, tp_ue_idx_info = xa_load(&udma_dev->tpn_ue_idx_table, tpn); if (tp_ue_idx_info) { if (tp_ue_idx_info->num >= UDMA_UE_NUM) { + xa_unlock(&udma_dev->tpn_ue_idx_table); dev_err(udma_dev->dev, "num exceeds the maximum value.\n"); - xa_unlock(&udma_dev->tpn_ue_idx_table); return -EINVAL; } diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 8e98319715e0..b4ba66f934ec 100644 --- 
a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -393,10 +393,10 @@ static int udma_alloc_jfr_id(struct udma_dev *udma_dev, uint32_t cfg_id, uint32_ id = ida_alloc_range(&ida_table->ida, min = ida_table->min, max, GFP_ATOMIC); if (id < 0) { + spin_unlock(&ida_table->lock); dev_err(udma_dev->dev, "alloc jfr id range (%u - %u) failed, ret = %d.\n", min, max, id); - spin_unlock(&ida_table->lock); return id; } -- Gitee From fddee22ba6a8cff67c21e423859e37febbb07668 Mon Sep 17 00:00:00 2001 From: qinwei0930 Date: Mon, 22 Dec 2025 18:51:35 +0800 Subject: [PATCH 238/243] ub: udma: Bugfix related to 2M hugepage. commit 5be85f78760f3a4c849e63126199f8cf09b35ca6 openEuler This patch fixes a 2M hugepage bug in the uAPI header: the mmap command/index encoding masks and shift are corrected, and UDMA_MMAP_HUGEPAGE is moved to the front of the db_mmap_type enum. Fixes: 1ae22d037be8 ("ub: udma: Support 2M hugepage function") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- include/uapi/ub/urma/udma/udma_abi.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/uapi/ub/urma/udma/udma_abi.h b/include/uapi/ub/urma/udma/udma_abi.h index 5859f5254b5e..257962765d5a 100644 --- a/include/uapi/ub/urma/udma/udma_abi.h +++ b/include/uapi/ub/urma/udma/udma_abi.h @@ -6,9 +6,9 @@ #include -#define MAP_COMMAND_MASK 0xff -#define MAP_INDEX_MASK 0xffffff -#define MAP_INDEX_SHIFT 8 +#define MAP_COMMAND_MASK 0xf +#define MAP_INDEX_MASK 0xfffffff +#define MAP_INDEX_SHIFT 4 #define UDMA_SEGMENT_ACCESS_GUARD (1UL << 5) @@ -112,9 +112,9 @@ struct udma_create_jfr_resp { }; enum db_mmap_type { + UDMA_MMAP_HUGEPAGE, UDMA_MMAP_JFC_PAGE, UDMA_MMAP_JETTY_DSQE, - UDMA_MMAP_HUGEPAGE, }; enum { -- Gitee From 5efc9f99a120065a14bc654bfb992b51f65538b8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Mon, 29 Dec 2025 16:51:06 +0800 Subject: [PATCH 239/243] ub: udma: bugfix related to spin lock. commit 858a66525c60e5396298e1a8122359f70f4f5578 openEuler This patch fixes a spinlock bug in clean jfc: the plain spin_lock()/spin_unlock() pair is replaced with the irqsave/irqrestore variants. Fixes: d72435589dce ("ub: udma: Support poll jfc.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 50ef624629df..961dbfa28107 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -1068,6 +1068,7 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * struct udma_jfc *udma_jfc = to_udma_jfc(jfc); struct udma_jfc_cqe *dest; struct udma_jfc_cqe *cqe; + unsigned long flags; struct ubcore_cr cr; uint32_t nfreed = 0; uint32_t local_id; @@ -1078,7 +1079,7 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * return; if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_lock(&udma_jfc->lock); + spin_lock_irqsave(&udma_jfc->lock, flags); for (pi = udma_jfc->ci; get_next_cqe(udma_jfc, pi) != NULL; ++pi) { if (pi > udma_jfc->ci + udma_jfc->buf.entry_cnt) @@ -1113,5 +1114,5 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * } if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_unlock(&udma_jfc->lock); + spin_unlock_irqrestore(&udma_jfc->lock, flags); } -- Gitee From 48704ad88f06471ce310280bafeaf4ed23144803 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 5 Dec 2025 15:06:48 +0800 Subject: [PATCH 240/243] ub: udma: bugfix for rx close. commit 82ba2bc6121dd1485447223291fd68cfbfaa1544 openEuler This patch fixes a bug in the rx close path on device removal: instead of aborting the teardown after a single failed udma_close_ue_rx() attempt, the close is retried with exponential backoff until it succeeds.
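In outline, the removal path changes from a single close attempt that aborted the teardown on failure to a bounded exponential backoff that retries until the rx path is closed. A minimal sketch of the new loop (the constants and calls match the diff below; surrounding context is elided):

    uint32_t wait_time = MIN_SLEEP_TIME;        /* 100 ms */

    while (udma_close_ue_rx(udma_dev, false, false, false, 0)) {
        msleep(wait_time);
        if (wait_time < MAX_SLEEP_TIME)         /* cap at 800 ms */
            wait_time *= TIME_SLEEP_RATE;       /* double each round */
        dev_err_ratelimited(&adev->dev,
                            "udma close ue rx failed in remove process.\n");
    }

The rate-limited print keeps the log readable if the hardware needs many rounds to quiesce.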
Fixes: 534649e2be8e ("ub: udma: Support get tp list.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 44a93fd000b0..99c9fa2fe219 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1123,6 +1123,10 @@ int udma_probe(struct auxiliary_device *adev, void udma_remove(struct auxiliary_device *adev) { +#define MIN_SLEEP_TIME 100 +#define MAX_SLEEP_TIME 800 +#define TIME_SLEEP_RATE 2 + uint32_t wait_time = MIN_SLEEP_TIME; struct udma_dev *udma_dev; ubase_reset_unregister(adev); @@ -1135,12 +1139,14 @@ void udma_remove(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - if (udma_close_ue_rx(udma_dev, false, false, false, 0)) { - mutex_unlock(&udma_reset_mutex); - dev_err(&adev->dev, "udma close ue rx failed in remove process.\n"); - return; + while (true) { + if (!udma_close_ue_rx(udma_dev, false, false, false, 0)) + break; + msleep(wait_time); + if (wait_time < MAX_SLEEP_TIME) + wait_time *= TIME_SLEEP_RATE; + dev_err_ratelimited(&adev->dev, "udma close ue rx failed in remove process.\n"); } - udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); @@ -1150,7 +1156,8 @@ void udma_remove(struct auxiliary_device *adev) udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); - (void)ubase_activate_dev(adev); + if (is_rmmod) + (void)ubase_activate_dev(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); dev_info(&adev->dev, "udma device remove success.\n"); -- Gitee From f1a8f737ee14a9957bafec4cbd48eeccc030862c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 5 Dec 2025 16:04:10 +0800 Subject: [PATCH 241/243] ub: udma: bugfix related to init xa flags. commit 430e4e841b46b8d9e07514cdbfe5ee3544e2ac02 openEuler This patch fixes the xarray initialization flags: tables whose locks are also taken from IRQ context are now initialized with XA_FLAGS_LOCK_IRQ.
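The core of the change: an xarray whose lock may also be taken from IRQ context must be initialized with XA_FLAGS_LOCK_IRQ so that its internal spinlock is acquired with interrupts disabled. A minimal sketch of the pattern (the helper name is illustrative only; the bool parameter mirrors the irq_lock argument added to the init helpers in the diff below):

    #include <linux/xarray.h>

    /* Illustrative helper, not the driver's real function name. */
    static void example_table_xa_init(struct xarray *xa, bool irq_lock)
    {
        if (irq_lock)
            xa_init_flags(xa, XA_FLAGS_LOCK_IRQ); /* for xa_lock_irqsave() users */
        else
            xa_init(xa); /* process-context-only users keep the plain lock */
    }

Without the flag, a lock taken in both process and IRQ context on the same CPU can deadlock: the interrupt handler spins on the lock its own CPU already holds.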
Fixes: d0c38b53548d ("ub: udma: Support query ub memory info.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 15 +++++++++++---- drivers/ub/urma/hw/udma/udma_common.h | 4 ++-- drivers/ub/urma/hw/udma/udma_ctl.c | 2 ++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 12 +++++------- drivers/ub/urma/hw/udma/udma_ctx.c | 3 ++- drivers/ub/urma/hw/udma/udma_db.c | 1 + drivers/ub/urma/hw/udma/udma_jfr.c | 2 +- drivers/ub/urma/hw/udma/udma_jfs.c | 1 + drivers/ub/urma/hw/udma/udma_main.c | 12 ++++++------ 9 files changed, 31 insertions(+), 21 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 830c22f732a1..8d5336622a2c 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -418,15 +418,21 @@ static void udma_init_ida_table(struct udma_ida *ida_table, uint32_t max, uint32 ida_table->next = min; } -void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min) +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min, bool irq_lock) { udma_init_ida_table(&table->ida_table, max, min); - xa_init(&table->xa); + if (irq_lock) + xa_init_flags(&table->xa, XA_FLAGS_LOCK_IRQ); + else + xa_init(&table->xa); } -void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex) +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex, bool irq_lock) { - xa_init(table); + if (irq_lock) + xa_init_flags(table, XA_FLAGS_LOCK_IRQ); + else + xa_init(table); mutex_init(udma_mutex); } @@ -582,6 +588,7 @@ int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, if (IS_ERR(buf->umem)) { ret = PTR_ERR(buf->umem); vfree(buf->aligned_va); + buf->aligned_va = NULL; dev_err(udma_dev->dev, "pin kernel buf failed, ret = %d.\n", ret); return ret; } diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index dee92a4186d3..22992e94e2e1 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -316,8 +316,8 @@ struct udma_tp_ctx { struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); -void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); -void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min, bool irq_lock); +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex, bool irq_lock); void udma_destroy_npu_cb_table(struct udma_dev *dev); void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index af0568f3ce74..f2451c25fb02 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1008,6 +1008,7 @@ static int copy_out_cqe_data_from_user(struct udma_dev *udma_dev, sizeof(uint32_t), GFP_KERNEL); if (!aux_info_out->aux_info_value) { kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; return -ENOMEM; } } @@ -1206,6 +1207,7 @@ static int copy_out_ae_data_from_user(struct udma_dev *udma_dev, sizeof(uint32_t), GFP_KERNEL); if (!aux_info_out->aux_info_value) { kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; return -ENOMEM; } } diff --git 
a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 2782fbe92907..ae9549de1e22 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -331,7 +331,8 @@ static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq int ret; if (debug_switch) - dev_info(udev->dev, "udma ctrlq store one tpid start. tpid %u\n", tpid->tpid); + dev_info_ratelimited(udev->dev, "udma ctrlq store one tpid start. tpid %u\n", + tpid->tpid); if (xa_load(ctrlq_tpid_table, tpid->tpid)) { dev_warn(udev->dev, @@ -418,7 +419,7 @@ static int udma_ctrlq_store_tpid_list(struct udma_dev *udev, int i; if (debug_switch) - dev_info(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", + dev_info_ratelimited(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", tpid_list_resp->tp_list_cnt); for (i = 0; i < (int)tpid_list_resp->tp_list_cnt; i++) { @@ -775,10 +776,6 @@ int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *activ struct udma_dev *udma_dev = to_udma_dev(dev); int ret; - if (debug_switch) - udma_dfx_ctx_print(udma_dev, "udma active tp ex", active_cfg->tp_handle.bs.tpid, - sizeof(struct ubcore_active_tp_cfg) / sizeof(uint32_t), - (uint32_t *)active_cfg); ret = udma_ctrlq_set_active_tp_ex(udma_dev, active_cfg); if (ret) dev_err(udma_dev->dev, "Failed to set active tp msg, ret %d.\n", ret); @@ -792,7 +789,8 @@ int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle struct udma_dev *udma_dev = to_udma_dev(dev); if (debug_switch) - dev_info(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", tp_handle.bs.tpid); + dev_info_ratelimited(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", + tp_handle.bs.tpid); return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata); } diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 8caa0765b420..e842b523ab33 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -200,7 +200,8 @@ static int udma_mmap_hugepage(struct udma_dev *dev, struct ubcore_ucontext *uctx return -EINVAL; } - vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY); + vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | + VM_WIPEONFORK); vma->vm_page_prot = __pgprot(((~PTE_ATTRINDX_MASK) & vma->vm_page_prot.pgprot) | PTE_ATTRINDX(MT_NORMAL)); if (udma_alloc_u_hugepage(to_udma_context(uctx), vma)) { diff --git a/drivers/ub/urma/hw/udma/udma_db.c b/drivers/ub/urma/hw/udma/udma_db.c index c66d6b23b2e8..ba8c3ffd265d 100644 --- a/drivers/ub/urma/hw/udma/udma_db.c +++ b/drivers/ub/urma/hw/udma/udma_db.c @@ -69,6 +69,7 @@ void udma_unpin_sw_db(struct udma_context *ctx, struct udma_sw_db *db) list_del(&db->page->list); udma_umem_release(db->page->umem, false); kfree(db->page); + db->page = NULL; } mutex_unlock(&ctx->pgdir_mutex); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index b4ba66f934ec..a80f2cc0f1aa 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -81,7 +81,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) goto err_alloc_db; } - udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0); + udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0, false); jfr->rq.tid = dev->tid; diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 
5d520a0cea00..5875e7e0ff80 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -102,6 +102,7 @@ void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) if (sq->buf.kva) { udma_k_free_buf(dev, &sq->buf); kfree(sq->wrid); + sq->wrid = NULL; return; } diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 99c9fa2fe219..686e4a02026c 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -365,15 +365,15 @@ int udma_init_tables(struct udma_dev *udma_dev) } udma_init_udma_table(&udma_dev->jfr_table, udma_dev->caps.jfr.max_cnt + - udma_dev->caps.jfr.start_idx - 1, udma_dev->caps.jfr.start_idx); + udma_dev->caps.jfr.start_idx - 1, udma_dev->caps.jfr.start_idx, false); udma_init_udma_table(&udma_dev->jfc_table, udma_dev->caps.jfc.max_cnt + - udma_dev->caps.jfc.start_idx - 1, udma_dev->caps.jfc.start_idx); + udma_dev->caps.jfc.start_idx - 1, udma_dev->caps.jfc.start_idx, true); udma_init_udma_table(&udma_dev->jetty_grp_table, udma_dev->caps.jetty_grp.max_cnt + udma_dev->caps.jetty_grp.start_idx - 1, - udma_dev->caps.jetty_grp.start_idx); - udma_init_udma_table_mutex(&udma_dev->ksva_table, &udma_dev->ksva_mutex); - udma_init_udma_table_mutex(&udma_dev->npu_nb_table, &udma_dev->npu_nb_mutex); - xa_init(&udma_dev->tpn_ue_idx_table); + udma_dev->caps.jetty_grp.start_idx, true); + udma_init_udma_table_mutex(&udma_dev->ksva_table, &udma_dev->ksva_mutex, false); + udma_init_udma_table_mutex(&udma_dev->npu_nb_table, &udma_dev->npu_nb_mutex, true); + xa_init_flags(&udma_dev->tpn_ue_idx_table, XA_FLAGS_LOCK_IRQ); xa_init(&udma_dev->crq_nb_table); ida_init(&udma_dev->rsvd_jetty_ida_table.ida); mutex_init(&udma_dev->disable_ue_rx_mutex); -- Gitee From 9cddc30615b47ffa385e7fa759218f18837cc49b Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Mon, 15 Dec 2025 21:21:43 +0800 Subject: [PATCH 242/243] ub: udma: Bugfix related to crq event unregister. commit e584b00d3d0f88496dca0256097c9a62f062354e openEuler This patch fixes the crq event unregistration order: the crq event is now unregistered separately, just before the device is destroyed and after the ubcore device has been unset, instead of together with the other events. Fixes: 1ae22d037be8 ("ub: udma: Support 2M hugepage function") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 686e4a02026c..6b4059cadc96 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -891,11 +891,10 @@ static int udma_register_event(struct auxiliary_device *adev) return ret; } -static void udma_unregister_event(struct auxiliary_device *adev) +static void udma_unregister_none_crq_event(struct auxiliary_device *adev) { ubase_port_unregister(adev); udma_unregister_ctrlq_event(adev); - udma_unregister_crq_event(adev); udma_unregister_ce_event(adev); udma_unregister_ae_event(adev); } @@ -1021,7 +1020,8 @@ static int udma_init_dev(struct auxiliary_device *adev) err_set_ubcore_dev: udma_unregister_activate_workqueue(udma_dev); err_register_act_init: - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); + udma_unregister_crq_event(adev); err_event_register: udma_destroy_dev(udma_dev); err_create: @@ -1096,12 +1096,12 @@ void udma_reset_uninit(struct auxiliary_device *adev) return; } - /* Event should unregister before unset ubcore dev. 
*/ - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); udma_open_ue_rx(udma_dev, false, false, true, 0); + udma_unregister_crq_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); } @@ -1149,15 +1149,14 @@ void udma_remove(struct auxiliary_device *adev) } udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); - - /* Event should unregister before unset ubcore dev. */ - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); if (is_rmmod) (void)ubase_activate_dev(adev); + udma_unregister_crq_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); dev_info(&adev->dev, "udma device remove success.\n"); -- Gitee From e708560c6aa04c52fb88aa876a39af5c0aad7e31 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 25 Dec 2025 10:38:21 +0800 Subject: [PATCH 243/243] ub: udma: bugfix related to rx close. commit 69f805cd610bc634e8439a9bd3b939b7b35d8266 openEuler This patch fixes a bug in the rx close handling on remove: the unbounded backoff retry loop is reverted to a single udma_close_ue_rx() attempt with an error log, so device removal can no longer block indefinitely. Fixes: 534649e2be8e ("ub: udma: Support get tp list.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 6b4059cadc96..70999af9962d 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1123,10 +1123,6 @@ int udma_probe(struct auxiliary_device *adev, void udma_remove(struct auxiliary_device *adev) { -#define MIN_SLEEP_TIME 100 -#define MAX_SLEEP_TIME 800 -#define TIME_SLEEP_RATE 2 - uint32_t wait_time = MIN_SLEEP_TIME; struct udma_dev *udma_dev; ubase_reset_unregister(adev); @@ -1139,14 +1135,9 @@ void udma_remove(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - while (true) { - if (!udma_close_ue_rx(udma_dev, false, false, false, 0)) - break; - msleep(wait_time); - if (wait_time < MAX_SLEEP_TIME) - wait_time *= TIME_SLEEP_RATE; - dev_err_ratelimited(&adev->dev, "udma close ue rx failed in remove process.\n"); - } + if (udma_close_ue_rx(udma_dev, false, false, false, 0)) + dev_err(&adev->dev, "udma close ue rx failed in remove process.\n"); + udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); udma_unregister_none_crq_event(adev); -- Gitee