From 67ec17d5e41c166bc7b5bc8232ebf75b17ed78c0 Mon Sep 17 00:00:00 2001 From: chenyutao Date: Wed, 31 Dec 2025 16:56:49 +0800 Subject: [PATCH] ubcore: bugfix for RC RTP connect category: bugfix bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------- The implementation includes: 1. Bugfix for RC RTP connect implementation. Fixes: c78127c4b6eb ("ubcore: bugfix for RC RTP connect.") Signed-off-by: chenyutao --- .../ub/urma/ubcore/ubcore_connect_adapter.c | 128 +++++++++++++++++- .../ub/urma/ubcore/ubcore_connect_adapter.h | 3 + drivers/ub/urma/ubcore/ubcore_jetty.c | 61 ++++++++- drivers/ub/urma/ubcore/ubcore_priv.h | 11 ++ drivers/ub/urma/uburma/uburma_cmd.c | 3 +- drivers/ub/urma/uburma/uburma_cmd.h | 3 + drivers/ub/urma/uburma/uburma_cmd_tlv.c | 4 + drivers/ub/urma/uburma/uburma_cmd_tlv.h | 2 + include/ub/urma/ubcore_uapi.h | 5 +- 9 files changed, 209 insertions(+), 11 deletions(-) diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c index dbb8dccd68ef..0f73fdee6996 100644 --- a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c +++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c @@ -11,6 +11,8 @@ #include #include +#include + #include "ubcore_log.h" #include "net/ubcore_net.h" #include "net/ubcore_session.h" @@ -22,7 +24,8 @@ enum msg_create_conn_result { CREATE_CONN_SUCCESS = 0, GET_TP_LIST_ERROR, ACTIVE_TP_ERROR, - CREATE_CONN_FAIL + CREATE_CONN_FAIL, + CHECK_JETTY_FAIL }; struct session_data_create_conn { @@ -35,6 +38,9 @@ struct msg_create_conn_req { struct ubcore_get_tp_cfg get_tp_cfg; uint64_t tp_handle; uint32_t tx_psn; + /* Only for RC + RTP */ + uint32_t src_jetty_id; + uint32_t dst_jetty_id; }; struct msg_create_conn_resp { @@ -45,6 +51,9 @@ struct msg_destroy_conn_req { union ubcore_tp_handle tp_handle; + /* Only for RC + RTP */ + uint32_t src_jetty_id; + uint32_t dst_jetty_id; }; static int ubcore_active_tp(struct 
ubcore_device *dev, @@ -265,7 +274,8 @@ static void ubcore_free_local_tpid(struct ubcore_device *dev, int ubcore_exchange_tp_info(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, uint64_t tp_handle, uint32_t tx_psn, uint64_t *peer_tp_handle, - uint32_t *rx_psn, struct ubcore_udata *udata) + uint32_t *rx_psn, uint32_t src_jetty_id, + uint32_t dst_jetty_id, struct ubcore_udata *udata) { struct session_data_create_conn *session_data; struct msg_create_conn_req req = { 0 }; @@ -293,6 +303,10 @@ int ubcore_exchange_tp_info(struct ubcore_device *dev, req.get_tp_cfg = *cfg; req.tp_handle = tp_handle; req.tx_psn = tx_psn; + if (cfg->trans_mode == UBCORE_TP_RC) { + req.src_jetty_id = src_jetty_id; + req.dst_jetty_id = dst_jetty_id; + } ret = send_create_req(dev, ubcore_session_get_id(session), &req); if (ret != 0) { ubcore_log_err("Failed to send create req message"); @@ -327,6 +341,48 @@ int ubcore_exchange_tp_info(struct ubcore_device *dev, } EXPORT_SYMBOL(ubcore_exchange_tp_info); +static int ubcore_record_jetty(struct ubcore_device *dev, + struct msg_create_conn_req *req) +{ + struct ubcore_jetty_ctx *ctx; + struct ubcore_jetty *jetty; + + jetty = ubcore_find_get_jetty(dev, req->dst_jetty_id); + if (IS_ERR_OR_NULL(jetty)) { + ubcore_log_warn("Do not find jetty, jetty_id: %u.\n", + req->dst_jetty_id); + return 0; + } + + if (IS_ERR_OR_NULL(jetty->jetty_cfg.jetty_context)) { + ubcore_log_warn("Do not find jetty context.\n"); + ubcore_put_jetty(jetty); + return 0; + } + + ctx = jetty->jetty_cfg.jetty_context; + if (ctx->targ_valid && ctx->targ_rjetty_id != req->src_jetty_id) { + ubcore_log_err( + "Invalid target, rjetty_id: %u, src_id: %u.\n", + ctx->targ_rjetty_id, req->src_jetty_id); + ubcore_put_jetty(jetty); + return -1; + } + + if (ctx->init_valid && ctx->init_rjetty_id != req->src_jetty_id) { + ubcore_log_err( + "Invalid init, rjetty_id: %u, src_id: %u.\n", + ctx->init_rjetty_id, req->src_jetty_id); + ubcore_put_jetty(jetty); + return -1; + } + + 
ctx->targ_valid = true; + ctx->targ_rjetty_id = req->src_jetty_id; + ubcore_put_jetty(jetty); + return 0; +} + static void handle_create_req(struct ubcore_device *dev, struct ubcore_net_msg *msg, void *conn) { @@ -373,6 +429,14 @@ static void handle_create_req(struct ubcore_device *dev, goto send_resp; } + if (get_tp_cfg.trans_mode == UBCORE_TP_RC && + ubcore_record_jetty(dev, req) != 0) { + ret = CHECK_JETTY_FAIL; + (void)ubcore_deactive_tp(dev, + (union ubcore_tp_handle)tp_handle, NULL); + goto send_resp; + } + resp.tp_handle = tp_handle; resp.tx_psn = tx_psn; ret = CREATE_CONN_SUCCESS; @@ -411,13 +475,16 @@ static void handle_create_resp(struct ubcore_device *dev, } static int send_destroy_req(struct ubcore_device *dev, union ubcore_eid addr, - union ubcore_tp_handle tp_handle) + union ubcore_tp_handle tp_handle, uint32_t src_jetty_id, + uint32_t dst_jetty_id) { struct ubcore_net_msg msg = { 0 }; struct msg_destroy_conn_req req = { 0 }; int ret; req.tp_handle = tp_handle; + req.src_jetty_id = src_jetty_id; + req.dst_jetty_id = dst_jetty_id; msg.type = UBCORE_NET_DESTROY_REQ; msg.len = (uint16_t)sizeof(struct msg_destroy_conn_req); @@ -469,7 +536,8 @@ int ubcore_adapter_layer_disconnect(struct ubcore_vtpn *vtpn) if ((vtpn->trans_mode == UBCORE_TP_RM || vtpn->trans_mode == UBCORE_TP_RC) && !ctp && ubcore_check_ctrlplane_compat(dev->ops->import_jetty)) { - ret = send_destroy_req(dev, peer_eid, peer_tp_handle); + ret = send_destroy_req(dev, peer_eid, peer_tp_handle, + vtpn->local_jetty, vtpn->peer_jetty); if (ret != 0) ubcore_log_err("Failed to send destroy req message"); } @@ -482,12 +550,26 @@ static void handle_destroy_req(struct ubcore_device *dev, { struct msg_destroy_conn_req *req = (struct msg_destroy_conn_req *)msg->data; + struct ubcore_jetty_ctx *ctx; + struct ubcore_jetty *jetty; int ret; /* Target tp_handle get from kernel space */ ret = ubcore_deactive_tp(dev, req->tp_handle, NULL); if (ret != 0) ubcore_log_err("Failed to deactivate tp"); + + jetty = 
ubcore_find_get_jetty(dev, req->dst_jetty_id); + if (IS_ERR_OR_NULL(jetty)) { + ubcore_log_warn("Do not find jetty, jetty_id: %u.\n", + req->dst_jetty_id); + return; + } + + ctx = jetty->jetty_cfg.jetty_context; + if (!IS_ERR_OR_NULL(ctx)) + ctx->targ_valid = false; + ubcore_put_jetty(jetty); } /* Only for impoprt_jetty/jfr, thus only for RM/UM */ @@ -559,7 +641,7 @@ struct ubcore_tjetty *ubcore_import_jfr_compat(struct ubcore_device *dev, dev, &get_tp_cfg, tp_list.tp_handle.value, active_tp_cfg.tp_attr.tx_psn, &active_tp_cfg.peer_tp_handle.value, - &active_tp_cfg.tp_attr.rx_psn, udata); + &active_tp_cfg.tp_attr.rx_psn, 0, 0, udata); if (ret != 0) { ubcore_log_err("Failed to exchange tp info, ret: %d.\n", ret); @@ -612,7 +694,7 @@ struct ubcore_tjetty *ubcore_import_jetty_compat(struct ubcore_device *dev, dev, &get_tp_cfg, tp_list.tp_handle.value, active_tp_cfg.tp_attr.tx_psn, &active_tp_cfg.peer_tp_handle.value, - &active_tp_cfg.tp_attr.rx_psn, udata); + &active_tp_cfg.tp_attr.rx_psn, 0, 0, udata); if (ret != 0) { ubcore_log_err( "Failed to exchange tp info, ret: %d.\n", @@ -634,6 +716,33 @@ struct ubcore_tjetty *ubcore_import_jetty_compat(struct ubcore_device *dev, return tjetty; } +int ubcore_check_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) +{ + struct ubcore_jetty_ctx *ctx = jetty->jetty_cfg.jetty_context; + + if (IS_ERR_OR_NULL(ctx)) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ctx->init_valid && ctx->init_rjetty_id != tjetty->cfg.id.id) { + ubcore_log_err( + "Failed to check init, expect: %d, read: %u.\n", + ctx->init_rjetty_id, tjetty->cfg.id.id); + return -EINVAL; + } + + if (ctx->targ_valid && ctx->targ_rjetty_id != tjetty->cfg.id.id) { + ubcore_log_err( + "Failed to check target, expect: %d, read: %u.\n", + ctx->targ_rjetty_id, tjetty->cfg.id.id); + return -EINVAL; + } + + return 0; +} + int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, struct ubcore_udata *udata) @@ 
-645,6 +754,10 @@ int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty, uint32_t tp_cnt = 1; int ret; + ret = ubcore_check_jetty(jetty, tjetty); + if (ret != 0) + return ret; + ret = ubcore_fill_get_tp_cfg(dev, &get_tp_cfg, &tjetty->cfg); if (ret != 0) return ret; @@ -664,7 +777,8 @@ int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty, tp_list.tp_handle.value, active_tp_cfg.tp_attr.tx_psn, &active_tp_cfg.peer_tp_handle.value, - &active_tp_cfg.tp_attr.rx_psn, udata); + &active_tp_cfg.tp_attr.rx_psn, jetty->jetty_id.id, + tjetty->cfg.id.id, udata); if (ret != 0) { ubcore_log_err("Failed to exchange tp info, ret: %d.\n", ret); return ret; diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.h b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h index 66df8cc1d272..e595ab9a8ec7 100644 --- a/drivers/ub/urma/ubcore/ubcore_connect_adapter.h +++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h @@ -41,4 +41,7 @@ static inline bool ubcore_check_ctrlplane_compat(void *op_ptr) return (op_ptr == NULL); } +int ubcore_check_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty); + #endif diff --git a/drivers/ub/urma/ubcore/ubcore_jetty.c b/drivers/ub/urma/ubcore/ubcore_jetty.c index 3d630a5d30cf..db4a951b69b5 100644 --- a/drivers/ub/urma/ubcore/ubcore_jetty.c +++ b/drivers/ub/urma/ubcore/ubcore_jetty.c @@ -1183,6 +1183,7 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) { + struct ubcore_jetty_ctx *ctx; struct ubcore_jetty *jetty; int ret; @@ -1234,11 +1235,16 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, kref_init(&jetty->ref_cnt); init_completion(&jetty->comp); + ctx = kzalloc(sizeof(struct ubcore_jetty_ctx), GFP_KERNEL); + if (IS_ERR_OR_NULL(ctx)) + goto destroy_tptable; + jetty->jetty_cfg.jetty_context = ctx; + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode, jetty->jetty_id.id); if (ret != 0) { ubcore_log_err("Failed 
to add jetty.\n"); - goto destroy_tptable; + goto free_ctx; } atomic_inc(&cfg->send_jfc->use_cnt); @@ -1248,6 +1254,9 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, atomic_inc(&cfg->jfr->use_cnt); return jetty; +free_ctx: + kfree(ctx); + jetty->jetty_cfg.jetty_context = NULL; destroy_tptable: ubcore_destroy_tptable(&jetty->tptable); delete_jetty_to_grp: @@ -1324,6 +1333,14 @@ static int ubcore_check_jetty_attr(struct ubcore_jetty *jetty) return 0; } +static void ubcore_free_jetty_ctx(struct ubcore_jetty *jetty) +{ + if (jetty->jetty_cfg.jetty_context) { + kfree(jetty->jetty_cfg.jetty_context); + jetty->jetty_cfg.jetty_context = NULL; + } +} + int ubcore_delete_jetty(struct ubcore_jetty *jetty) { struct ubcore_jetty_group *jetty_grp; @@ -1346,6 +1363,7 @@ int ubcore_delete_jetty(struct ubcore_jetty *jetty) (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode); + ubcore_free_jetty_ctx(jetty); ubcore_destroy_tptable(&jetty->tptable); if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && @@ -1443,6 +1461,7 @@ int ubcore_delete_jetty_batch(struct ubcore_jetty **jetty_arr, int jetty_num, (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode); + ubcore_free_jetty_ctx(jetty); ubcore_destroy_tptable(&jetty->tptable); if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && @@ -1938,6 +1957,9 @@ int ubcore_bind_jetty_ex(struct ubcore_jetty *jetty, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata) { + struct ubcore_jetty_ctx *ctx; + int ret; + if (!jetty || !tjetty || !jetty->ub_dev || !jetty->ub_dev->ops || !active_tp_cfg) { ubcore_log_err("Invalid parameter.\n"); @@ -1959,6 +1981,12 @@ int ubcore_bind_jetty_ex(struct ubcore_jetty *jetty, return -EINVAL; } + ret = ubcore_check_jetty(jetty, tjetty); + if (ret != 0) { + ubcore_log_err("Failed to check jetty, ret: %d.\n", ret); + return ret; + } + if (tjetty->vtpn && (!is_create_rc_shared_tp(tjetty->cfg.trans_mode, 
tjetty->cfg.flag.bs.order_type, @@ -1968,14 +1996,25 @@ int ubcore_bind_jetty_ex(struct ubcore_jetty *jetty, return -EINVAL; } - return ubcore_inner_bind_jetty_ctrlplane(jetty, tjetty, active_tp_cfg, + ctx = jetty->jetty_cfg.jetty_context; + if (!IS_ERR_OR_NULL(ctx)) { + ctx->init_valid = true; + ctx->init_rjetty_id = tjetty->cfg.id.id; + } + + ret = ubcore_inner_bind_jetty_ctrlplane(jetty, tjetty, active_tp_cfg, udata); + if (ret && !IS_ERR_OR_NULL(ctx)) + ctx->init_valid = false; + + return ret; } EXPORT_SYMBOL(ubcore_bind_jetty_ex); static int ubcore_inner_unbind_ub_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty) { + struct ubcore_jetty_ctx *ctx; int ret; if (tjetty->vtpn) { @@ -1992,6 +2031,12 @@ static int ubcore_inner_unbind_ub_jetty(struct ubcore_jetty *jetty, tjetty->vtpn = NULL; mutex_unlock(&tjetty->lock); } + + if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) { + ctx = jetty->jetty_cfg.jetty_context; + if (!IS_ERR_OR_NULL(ctx)) + ctx->init_valid = false; + } } return 0; } @@ -2057,6 +2102,18 @@ struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, } EXPORT_SYMBOL(ubcore_find_jetty); +struct ubcore_jetty *ubcore_find_get_jetty(struct ubcore_device *dev, + uint32_t jetty_id) +{ + if (!dev) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_JETTY], jetty_id, + &jetty_id); +} + struct ubcore_jetty_group *ubcore_create_jetty_grp( struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h index 5f187be31d77..9772b7af458e 100644 --- a/drivers/ub/urma/ubcore/ubcore_priv.h +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -88,6 +88,14 @@ struct ubcore_global_file { struct ubcore_uvs_instance *uvs; }; +struct ubcore_jetty_ctx { + /* Only for RC connection */ + uint32_t init_rjetty_id; + bool init_valid; + uint32_t 
targ_rjetty_id; + bool targ_valid; +}; + static inline struct ubcore_ucontext * ubcore_get_uctx(struct ubcore_udata *udata) { @@ -165,6 +173,9 @@ static inline uint32_t ubcore_get_jetty_hash(struct ubcore_jetty_id *jetty_id) return jhash(jetty_id, sizeof(struct ubcore_jetty_id), 0); } +struct ubcore_jetty *ubcore_find_get_jetty(struct ubcore_device *dev, + uint32_t jetty_id); + static inline uint32_t ubcore_get_tseg_hash(struct ubcore_ubva *ubva) { return jhash(ubva, sizeof(struct ubcore_ubva), 0); diff --git a/drivers/ub/urma/uburma/uburma_cmd.c b/drivers/ub/urma/uburma/uburma_cmd.c index 4bf939a8fc3b..9809cb237cc7 100644 --- a/drivers/ub/urma/uburma/uburma_cmd.c +++ b/drivers/ub/urma/uburma/uburma_cmd.c @@ -3068,7 +3068,8 @@ static int uburma_cmd_exchange_tp_info(struct ubcore_device *ubc_dev, return ret; get_tcp_cfg = arg.in.get_tp_cfg; ret = ubcore_exchange_tp_info(ubc_dev, &get_tcp_cfg, arg.in.tp_handle, - arg.in.tx_psn, &peer_tp_handle, &rx_psn, &udata); + arg.in.tx_psn, &peer_tp_handle, &rx_psn, arg.in.src_jetty_id, + arg.in.dst_jetty_id, &udata); if (ret != 0) { uburma_log_err("Failed to exchange tp info, ret: %d.\n", ret); return ret; diff --git a/drivers/ub/urma/uburma/uburma_cmd.h b/drivers/ub/urma/uburma/uburma_cmd.h index ef5dc2db5e52..773fee9b0274 100644 --- a/drivers/ub/urma/uburma/uburma_cmd.h +++ b/drivers/ub/urma/uburma/uburma_cmd.h @@ -971,6 +971,9 @@ struct uburma_cmd_exchange_tp_info { struct ubcore_get_tp_cfg get_tp_cfg; uint64_t tp_handle; uint32_t tx_psn; + /* Only for RC trans_mode */ + uint32_t src_jetty_id; + uint32_t dst_jetty_id; } in; struct { uint64_t peer_tp_handle; diff --git a/drivers/ub/urma/uburma/uburma_cmd_tlv.c b/drivers/ub/urma/uburma/uburma_cmd_tlv.c index 028be3acc6a7..b28c7aa518ae 100644 --- a/drivers/ub/urma/uburma/uburma_cmd_tlv.c +++ b/drivers/ub/urma/uburma/uburma_cmd_tlv.c @@ -1442,6 +1442,10 @@ uburma_exchange_tp_info_fill_spec_in(void *arg_addr, arg->in.get_tp_cfg.peer_eid); SPEC(s++, EXCHANGE_TP_INFO_IN_TP_HANDLE, 
arg->in.tp_handle); SPEC(s++, EXCHANGE_TP_INFO_IN_TX_PSN, arg->in.tx_psn); + SPEC(s++, EXCHANGE_TP_INFO_IN_SRC_JETTY_ID, + arg->in.src_jetty_id); + SPEC(s++, EXCHANGE_TP_INFO_IN_DST_JETTY_ID, + arg->in.dst_jetty_id); } static void diff --git a/drivers/ub/urma/uburma/uburma_cmd_tlv.h b/drivers/ub/urma/uburma/uburma_cmd_tlv.h index 7dc179c70ded..a6e0fc764919 100644 --- a/drivers/ub/urma/uburma/uburma_cmd_tlv.h +++ b/drivers/ub/urma/uburma/uburma_cmd_tlv.h @@ -928,6 +928,8 @@ enum uburma_cmd_exchange_tp_info_type { EXCHANGE_TP_INFO_IN_PEER_EID, EXCHANGE_TP_INFO_IN_TP_HANDLE, EXCHANGE_TP_INFO_IN_TX_PSN, + EXCHANGE_TP_INFO_IN_SRC_JETTY_ID, + EXCHANGE_TP_INFO_IN_DST_JETTY_ID, EXCHANGE_TP_INFO_IN_NUM, /* Only for calculating number of types */ /* Out type */ EXCHANGE_TP_INFO_OUT_PEER_TP_HANDLE = UBURMA_CMD_OUT_TYPE_INIT, diff --git a/include/ub/urma/ubcore_uapi.h b/include/ub/urma/ubcore_uapi.h index 666ac3be990b..819e2a5a5174 100644 --- a/include/ub/urma/ubcore_uapi.h +++ b/include/ub/urma/ubcore_uapi.h @@ -628,6 +628,8 @@ int ubcore_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, * @param[in] tx_psn: local packet sequence number; * @param[out] peer_tp_handle: tp_handle got by ubcore_exchange_tp_info; * @param[out] rx_psn: remote packet sequence number; + * @param[in] src_jetty_id: source jetty id; + * @param[in] dst_jetty_id: dest jetty id; * @param[in] udata: [Optional] udata should be NULL when called * by kernel application and be valid when called * by user space application @@ -636,7 +638,8 @@ int ubcore_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, int ubcore_exchange_tp_info(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, uint64_t tp_handle, uint32_t tx_psn, uint64_t *peer_tp_handle, - uint32_t *rx_psn, struct ubcore_udata *udata); + uint32_t *rx_psn, uint32_t src_jetty_id, + uint32_t dst_jetty_id, struct ubcore_udata *udata); /** * operation of user ioctl cmd. -- Gitee