#include "ksz9477.h"
#include "ksz9477_reg.h"
#include "ksz_common.h"
#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
#define KSZ9477_MAX_TC 7
/* Parse the L2 match keys (EtherType, source/destination MAC) of a flower
 * rule and, if they fit into the port's ACL budget, hand them off to the
 * ACL match builder. Only full masks are offloadable.
 *
 * Returns 0 on success, -EINVAL for a partial EtherType mask and
 * -EOPNOTSUPP for partial MAC masks or when the entry limit is hit.
 */
static int ksz9477_flower_parse_key_l2(struct ksz_device *dev, int port,
				       struct netlink_ext_ack *extack,
				       struct flow_rule *rule,
				       unsigned long cookie, u32 prio)
{
	struct ksz9477_acl_priv *priv = dev->ports[port].acl_priv;
	struct ksz9477_acl_entries *list = &priv->acles;
	struct flow_match_eth_addrs eth;
	u8 *smac = NULL;
	u8 *dmac = NULL;
	u16 etype = 0;
	int needed;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic basic;

		flow_rule_match_basic(rule, &basic);

		if (basic.key->n_proto) {
			if (basic.mask->n_proto != ETHER_TYPE_FULL_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "ethernet type mask must be a full mask");
				return -EINVAL;
			}

			etype = be16_to_cpu(basic.key->n_proto);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		flow_rule_match_eth_addrs(rule, &eth);

		if (!is_zero_ether_addr(eth.key->src)) {
			if (!is_broadcast_ether_addr(eth.mask->src))
				goto not_full_mask_err;
			smac = eth.key->src;
		}

		if (!is_zero_ether_addr(eth.key->dst)) {
			if (!is_broadcast_ether_addr(eth.mask->dst))
				goto not_full_mask_err;
			dmac = eth.key->dst;
		}
	}

	/* One hardware entry matches a single MAC, so a rule keying on both
	 * source and destination needs two entries.
	 */
	needed = (smac && dmac) ? 2 : 1;
	if (list->entries_count + needed > KSZ9477_ACL_MAX_ENTRIES) {
		NL_SET_ERR_MSG_MOD(extack, "ACL entry limit reached");
		return -EOPNOTSUPP;
	}

	ksz9477_acl_match_process_l2(dev, port, etype, smac, dmac,
				     cookie, prio);

	return 0;

not_full_mask_err:
	NL_SET_ERR_MSG_MOD(extack, "MAC address mask must be a full mask");
	return -EOPNOTSUPP;
}
/* Validate the dissector keys of a flower rule and dispatch the L2 keys
 * to the L2 parser. Rules using anything beyond BASIC/ETH_ADDRS/CONTROL,
 * or carrying control flags, are rejected as not offloadable.
 */
static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
				    struct netlink_ext_ack *extack,
				    struct flow_rule *rule,
				    unsigned long cookie, u32 prio)
{
	const u64 supported_keys = BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
				   BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
				   BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
	bool has_l2_keys;

	if (rule->match.dissector->used_keys & ~supported_keys) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	has_l2_keys = flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
		      flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS);
	if (has_l2_keys)
		return ksz9477_flower_parse_key_l2(dev, port, extack, rule,
						   cookie, prio);

	return 0;
}
/* Parse the action list of a flower rule and program it into the ACL
 * entry at @entry_idx. Only FLOW_ACTION_PRIORITY ("skbedit prio") with a
 * value in [0, KSZ9477_MAX_TC] is supported; hw_tc classids are rejected.
 */
static int ksz9477_flower_parse_action(struct ksz_device *dev, int port,
				       struct netlink_ext_ack *extack,
				       struct flow_cls_offload *cls,
				       int entry_idx)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct ksz9477_acl_priv *priv = dev->ports[port].acl_priv;
	const struct flow_action_entry *act_entry;
	struct ksz9477_acl_entry *acl_entry;
	bool force_prio = false;
	u8 queue_prio = 0;
	int idx;

	if (TC_H_MIN(cls->classid)) {
		NL_SET_ERR_MSG_MOD(extack, "hw_tc is not supported. Use: action skbedit prio");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(idx, act_entry, &rule->action) {
		if (act_entry->id != FLOW_ACTION_PRIORITY) {
			NL_SET_ERR_MSG_MOD(extack, "action not supported");
			return -EOPNOTSUPP;
		}

		if (act_entry->priority > KSZ9477_MAX_TC) {
			NL_SET_ERR_MSG_MOD(extack, "Priority value is too high");
			return -EOPNOTSUPP;
		}

		force_prio = true;
		queue_prio = act_entry->priority;
	}

	acl_entry = &priv->acles.entries[entry_idx];
	ksz9477_acl_action_rule_cfg(acl_entry->entry, force_prio, queue_prio);
	ksz9477_acl_processing_rule_set_action(acl_entry->entry, entry_idx);

	return 0;
}
/* Offload a tc flower rule to the port's ACL.
 *
 * Fix: ksz9477_flower_parse_key() adds entries to the software ACL list
 * (via ksz9477_acl_match_process_l2()). The original code returned
 * straight away when the subsequent action-parse or sort step failed,
 * leaving those freshly added entries in the list; they would then be
 * programmed to hardware on the next successful add, with no way to
 * remove them (the failed rule was never reported as installed, so no
 * matching del arrives). Clean up by cookie on every post-parse failure,
 * mirroring ksz9477_cls_flower_del().
 *
 * Returns 0 on success or a negative error code.
 */
int ksz9477_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ksz_device *dev = ds->priv;
	struct ksz9477_acl_priv *acl;
	int action_entry_idx;
	int ret;

	acl = dev->ports[port].acl_priv;
	if (!acl) {
		NL_SET_ERR_MSG_MOD(extack, "ACL offloading is not supported");
		return -EOPNOTSUPP;
	}

	/* A rule keying on both MACs occupies multiple entries; the action
	 * is attached to the first entry added for this rule.
	 */
	action_entry_idx = acl->acles.entries_count;

	ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie,
				       cls->common.prio);
	if (ret)
		goto err;

	ret = ksz9477_flower_parse_action(dev, port, extack, cls,
					  action_entry_idx);
	if (ret)
		goto err;

	ret = ksz9477_sort_acl_entries(dev, port);
	if (ret)
		goto err;

	return ksz9477_acl_write_list(dev, port);

err:
	/* Drop any entries already queued for this rule's cookie so the
	 * software list stays consistent with what was actually offloaded.
	 */
	ksz9477_acl_remove_entries(dev, port, &acl->acles, cls->cookie);

	return ret;
}
/* Remove a previously offloaded flower rule: purge every software ACL
 * entry carrying the rule's cookie, then rewrite the hardware list.
 */
int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct ksz_device *dev = ds->priv;
	struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;

	if (!acl)
		return -EOPNOTSUPP;

	ksz9477_acl_remove_entries(dev, port, &acl->acles, cls->cookie);

	return ksz9477_acl_write_list(dev, port);
}