Diffstat (limited to 'drivers/net/dsa/ocelot')
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig             |    8
-rw-r--r--  drivers/net/dsa/ocelot/Makefile            |    4
-rw-r--r--  drivers/net/dsa/ocelot/felix.c             |  931
-rw-r--r--  drivers/net/dsa/ocelot/felix.h             |   12
-rw-r--r--  drivers/net/dsa/ocelot/felix_tsn.c         | 1759
-rw-r--r--  drivers/net/dsa/ocelot/felix_tsn.h         |   17
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c     | 1030
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c   |   31
8 files changed, 3394 insertions, 398 deletions
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 9948544ba1c4..aa588116be5f 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -28,3 +28,11 @@ config NET_DSA_MSCC_SEVILLE
help
This driver supports the VSC9953 (Seville) switch, which is embedded
as a platform device on the NXP T1040 SoC.
+
+config MSCC_FELIX_SWITCH_TSN
+ bool "TSN support on Felix switch"
+ depends on NET_DSA_MSCC_FELIX
+ depends on TSN
+ help
+ This enables Time-Sensitive Networking (TSN) support on the Felix
+ switch, configured through the tsntool netlink infrastructure.
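As a usage illustration (a sketch, not part of the patch): once the dependencies above are satisfied, the new option can be enabled through a kernel configuration fragment along the lines of the following; whether NET_DSA_MSCC_FELIX and TSN end up built-in or modular depends on the target tree.

CONFIG_NET_DSA_MSCC_FELIX=y
CONFIG_TSN=y
CONFIG_MSCC_FELIX_SWITCH_TSN=y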
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index f6dd131e7491..86243f4a17a4 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -9,3 +9,7 @@ mscc_felix-objs := \
mscc_seville-objs := \
felix.o \
seville_vsc9953.o
+
+ifdef CONFIG_MSCC_FELIX_SWITCH_TSN
+mscc_felix-objs += felix_tsn.o
+endif
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 1513dfb523de..e90d38902dbf 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -21,26 +21,155 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
-#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
+{
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+/* We are called before felix_npi_port_init(), so ocelot->npi is -1. */
+static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev);
+}
+
+static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev);
+}
+
+static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
+ int pgid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool on;
+ u32 val;
+
+ val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
+ on = !!(val & BIT(from));
+ val &= ~BIT(from);
+ if (on)
+ val |= BIT(to);
+ else
+ val &= ~BIT(to);
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid);
+}
+
+static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC);
+}
+
+static void
+felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
+}
+
+/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so
+ * ocelot_fdb_add() will not redirect FDB entries towards the
+ * CPU port module here, which is what we want.
+ */
+static int
+felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int
+felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev);
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int key_length, upstream, err;
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
-
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
upstream = dsa_upstream_port(ds, port);
@@ -51,7 +180,7 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -72,21 +201,32 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = &felix->ocelot;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ port, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int upstream, err;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
return -ENOMEM;
@@ -104,7 +244,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -125,7 +265,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
redirect_rule->ingress_port_mask = BIT(upstream);
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -143,49 +283,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
@@ -193,16 +291,13 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
struct ocelot *ocelot = &felix->ocelot;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
port, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
@@ -211,22 +306,54 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
port, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
+
+ return 0;
+}
+
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
return 0;
}
@@ -240,168 +367,113 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
*/
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot_port_set_dsa_8021q_cpu(ocelot, port);
/* Overwrite PGID_CPU with the non-tagging port */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot_port_unset_dsa_8021q_cpu(ocelot, port);
/* Restore PGID_CPU */
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
-
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
-
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
- }
-
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
- }
- }
-
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
-
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ struct dsa_port *dp;
+ bool cpu_copy_ena;
+ int cpu = -1, err;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
- }
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ /* Figure out the current CPU port */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ cpu = dp->index;
+ break;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
*/
- ocelot_drain_cpu_queue(ocelot, 0);
-
- return 0;
-}
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
-static int felix_teardown_mmio_filtering(struct felix *felix)
-{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(cpu);
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
+ }
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
+ }
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
+ return 0;
}
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
felix_8021q_cpu_port_init(ocelot, cpu);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -414,28 +486,43 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
- err = felix_setup_mmio_filtering(felix);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
if (err)
goto out_tag_8021q_unregister;
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_tag_8021q_port(ds, cpu);
+
+ err = felix_update_trapping_destinations(ds, true);
+ if (err)
+ goto out_migrate_flood;
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
return 0;
+out_migrate_flood:
+ felix_migrate_flood_to_npi_port(ds, cpu);
+ dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
out_tag_8021q_unregister:
dsa_tag_8021q_unregister(ds);
return err;
@@ -444,27 +531,24 @@ out_tag_8021q_unregister:
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
+ struct dsa_port *dp;
+ int err;
- err = felix_teardown_mmio_filtering(felix);
+ err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
+ dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
@@ -516,27 +600,26 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
+ int err;
- felix_npi_port_init(ocelot, cpu);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+ if (err)
+ return err;
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_npi_port(ds, cpu);
+
+ felix_npi_port_init(ocelot, cpu);
return 0;
+
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
+
+ return err;
}
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
@@ -592,6 +675,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
enum dsa_tag_protocol old_proto = felix->tag_proto;
+ bool cpu_port_active = false;
+ struct dsa_port *dp;
int err;
if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -599,6 +684,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
proto != DSA_TAG_PROTO_OCELOT_8021Q)
return -EPROTONOSUPPORT;
+ /* We don't support multiple CPU ports, yet the DT blob may have
+ * multiple CPU ports defined. The first CPU port is the active one,
+ * the others are inactive. In this case, DSA will call
+ * ->change_tag_protocol() multiple times, once per CPU port.
+ * Since we implement the tagging protocol change towards "ocelot" or
+ * "seville" as effectively initializing the NPI port, what we are
+ * doing is effectively changing who the NPI port is to the last @cpu
+ * argument passed, which is an unused DSA CPU port and not the one
+ * that should actively pass traffic.
+ * Suppress DSA's calls on CPU ports that are inactive.
+ */
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dp->cpu_dp->index == cpu) {
+ cpu_port_active = true;
+ break;
+ }
+ }
+
+ if (!cpu_port_active)
+ return 0;
+
felix_del_tag_protocol(ds, cpu, old_proto);
err = felix_set_tag_protocol(ds, cpu, proto);
@@ -632,6 +738,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
+static void felix_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_mact_flush(ocelot, port);
+ if (err)
+ dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
+ port, ERR_PTR(err));
+}
+
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -641,35 +758,97 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -701,38 +880,38 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, br);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_leave(ocelot, port, br);
+ ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}
@@ -824,7 +1003,7 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
struct dsa_port *dp = dsa_to_port(ds, port);
if (felix->pcs[port])
- phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
+ phylink_set_pcs(dp->pl, felix->pcs[port]);
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -961,8 +1140,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
- dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
@@ -990,6 +1171,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
@@ -1017,7 +1202,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res.start += felix->switch_base;
res.end += felix->switch_base;
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix->info->init_regmap(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
@@ -1054,7 +1239,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res.start += felix->switch_base;
res.end += felix->switch_base;
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix->info->init_regmap(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map memory space for port %d\n",
@@ -1066,6 +1251,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
+ /* Enable cut-through forwarding on all traffic classes by
+ * default, to be compatible with the upstream kernel.
+ */
+ ocelot_port->cut_thru = GENMASK(7, 0);
ocelot->ports[port] = ocelot_port;
}
@@ -1141,38 +1330,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
kfree(xmit_work);
}
-static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+static int felix_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- struct felix_port *felix_port;
+ struct ocelot_8021q_tagger_data *tagger_data;
- if (!dsa_port_is_user(dp))
+ switch (proto) {
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ tagger_data = ocelot_8021q_tagger_data(ds);
+ tagger_data->xmit_work_fn = felix_port_deferred_xmit;
return 0;
-
- felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
- if (!felix_port)
- return -ENOMEM;
-
- felix_port->xmit_worker = felix->xmit_worker;
- felix_port->xmit_work_fn = felix_port_deferred_xmit;
-
- dp->priv = felix_port;
-
- return 0;
-}
-
-static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct felix_port *felix_port = dp->priv;
-
- if (!felix_port)
- return;
-
- dp->priv = NULL;
- kfree(felix_port);
+ case DSA_TAG_PROTO_OCELOT:
+ case DSA_TAG_PROTO_SEVILLE:
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
}
/* Hardware initialization done here so that we can allocate structures with
@@ -1184,7 +1357,9 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ unsigned long cpu_flood;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@@ -1203,64 +1378,46 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
- if (IS_ERR(felix->xmit_worker)) {
- err = PTR_ERR(felix->xmit_worker);
- goto out_deinit_timestamp;
- }
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_init_port(ocelot, port);
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
-
- err = felix_port_setup_tagger_data(ds, port);
- if (err) {
- dev_err(ds->dev,
- "port %d failed to set up tagger data: %pe\n",
- port, ERR_PTR(err));
- goto out_deinit_ports;
- }
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
+ dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
- felix_set_tag_protocol(ds, port, felix->tag_proto);
+ felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
+
+ /* Start off with flooding disabled towards the NPI port
+ * (actually CPU port module).
+ */
+ cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+
break;
}
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
-
- kthread_destroy_worker(felix->xmit_worker);
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
-out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1275,25 +1432,15 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_del_tag_protocol(ds, port, felix->tag_proto);
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
-
- kthread_destroy_worker(felix->xmit_worker);
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1315,14 +1462,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1330,9 +1486,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
-
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1362,8 +1515,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
return true;
}
@@ -1383,7 +1540,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1443,8 +1600,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1482,6 +1648,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1631,9 +1815,48 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
+ .connect_tag_protocol = felix_connect_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
@@ -1645,9 +1868,12 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1669,6 +1895,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1689,6 +1917,11 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be3e42e135c0..b50240f8c88e 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -21,8 +21,10 @@ struct felix_info {
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
- int switch_pci_bar;
- int imdio_pci_bar;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
/* Some Ocelot switches are integrated into the SoC without the
@@ -47,7 +49,9 @@ struct felix_info {
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
- u32 speed);
+ int speed);
+ struct regmap *(*init_regmap)(struct ocelot *ocelot,
+ struct resource *res);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -58,7 +62,7 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct lynx_pcs **pcs;
+ struct phylink_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
diff --git a/drivers/net/dsa/ocelot/felix_tsn.c b/drivers/net/dsa/ocelot/felix_tsn.c
new file mode 100644
index 000000000000..eee7c726a3aa
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_tsn.c
@@ -0,0 +1,1759 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Felix Switch TSN driver
+ *
+ * Copyright 2020-2022 NXP
+ */
+
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pcs-lynx.h>
+#include <linux/io.h>
+#include <net/dsa.h>
+#include <net/tsn.h>
+#include "felix_tsn.h"
+#include "felix.h"
+
+#define ETH_P_8021CB 0x2345
+#define FELIX_QSYS_HSCH_NUM 72
+/* MSCC TSN parameter limits */
+#define FELIX_PSFP_SFID_NUM 176
+#define FELIX_FRER_SSID_NUM 128
+#define FELIX_STREAM_NUM 2048
+
+struct felix_switch_capa {
+ u8 num_tas_gcl;
+ u32 tas_ct_min;
+ u32 tas_ct_max;
+ u32 tas_cte_max;
+ u32 tas_it_max;
+ u32 tas_it_min;
+ u8 num_hsch;
+ u8 num_psfp_sfid;
+ u8 num_frer_ssid;
+ u8 num_psfp_sgid;
+ u16 psfp_fmi_max;
+ u16 psfp_fmi_min;
+ u8 num_sgi_gcl;
+ u32 sgi_ct_min;
+ u32 sgi_ct_max;
+ u32 sgi_cte_max;
+ u16 qos_pol_max;
+ u8 pol_cbs_max;
+ u8 pol_pbs_max;
+ u8 frer_seq_len_min;
+ u8 frer_seq_len_max;
+ u8 frer_his_len_min;
+ u8 frer_his_len_max;
+ u8 qos_dscp_max;
+ u8 qos_cos_max;
+ u8 qos_dp_max;
+};
+
+struct stream_filter {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u32 index;
+ u8 handle;
+ u8 dst_idx;
+};
+
+static struct list_head streamtable;
+static int hsch_bw[FELIX_QSYS_HSCH_NUM] = {0};
+
+static const struct felix_switch_capa capa = {
+ .num_tas_gcl = 64,
+ .tas_ct_min = 100,
+ .tas_ct_max = 1000000000,
+ .tas_cte_max = 999999999,
+ .tas_it_max = 999999999,
+ .tas_it_min = 1000,
+ .num_hsch = 72,
+ .num_psfp_sfid = FELIX_PSFP_SFID_NUM,
+ .num_psfp_sgid = 184,
+ .psfp_fmi_max = 246,
+ .psfp_fmi_min = 63,
+ .num_sgi_gcl = 4,
+ .sgi_ct_min = 5000,
+ .sgi_ct_max = 1000000000,
+ .sgi_cte_max = 999999999,
+ .qos_pol_max = 383,
+ /* Maximum allowed value of committed burst size (CBS) is 240 KB */
+ .pol_cbs_max = 60,
+ /* Maximum allowed value of excess burst size (EBS) is 240 KB */
+ .pol_pbs_max = 60,
+ .num_frer_ssid = FELIX_FRER_SSID_NUM,
+ .frer_seq_len_min = 1,
+ .frer_seq_len_max = 28,
+ .frer_his_len_min = 1,
+ .frer_his_len_max = 32,
+ .qos_dscp_max = 63,
+ .qos_cos_max = OCELOT_NUM_TC - 1,
+ .qos_dp_max = 1,
+};
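+
+/* Note: assuming the 4 KiB policer burst granularity used elsewhere in the
+ * ocelot library (an assumption, not stated in this file), pol_cbs_max and
+ * pol_pbs_max of 60 correspond to 60 * 4096 B = 240 KiB, matching the
+ * CBS/EBS limits described above.
+ */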
+
+static u32 felix_tsn_get_cap(struct net_device *ndev)
+{
+ return TSN_CAP_QBV | TSN_CAP_QCI | TSN_CAP_QBU | TSN_CAP_CBS |
+ TSN_CAP_CB | TSN_CAP_TBS | TSN_CAP_CTH;
+}
+
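+/* Advance @basetime by whole multiples of @cycle_time until the result lies
+ * in the future relative to the current PTP time, and return it as a
+ * timespec64 in @ts_base.
+ */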
+static void felix_get_basetime(struct ocelot *ocelot, ptptime_t basetime,
+ u64 cycle_time, struct timespec64 *ts_base)
+{
+ ptptime_t new_basetime;
+ ptptime_t cur_time;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, ts_base);
+ cur_time = ts_base->tv_sec * NSEC_PER_SEC + ts_base->tv_nsec;
+
+ new_basetime = basetime;
+ if (cur_time > basetime) {
+ u64 nr_of_cycles = cur_time - basetime;
+
+ do_div(nr_of_cycles, cycle_time);
+ new_basetime += cycle_time * (nr_of_cycles + 1);
+ }
+
+ ts_base->tv_sec = new_basetime / NSEC_PER_SEC;
+ ts_base->tv_nsec = new_basetime % NSEC_PER_SEC;
+}
+
+static int felix_tas_gcl_set(struct ocelot *ocelot, const u8 gcl_ix,
+ struct tsn_qbv_entry *control_list)
+{
+ if (gcl_ix >= capa.num_tas_gcl) {
+ dev_err(ocelot->dev, "Invalid gcl ix %u\n", gcl_ix);
+ return -EINVAL;
+ }
+ if (control_list->time_interval < capa.tas_it_min ||
+ control_list->time_interval > capa.tas_it_max) {
+ dev_err(ocelot->dev, "Invalid time_interval %u\n",
+ control_list->time_interval);
+
+ return -EINVAL;
+ }
+
+ ocelot_write(ocelot,
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) |
+ QSYS_GCL_CFG_REG_1_GATE_STATE(control_list->gate_state),
+ QSYS_GCL_CFG_REG_1);
+
+ ocelot_write(ocelot,
+ control_list->time_interval,
+ QSYS_GCL_CFG_REG_2);
+
+ return 0;
+}
+
+static u32 felix_tas_read_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL);
+}
+
+static int felix_qbv_set(struct net_device *ndev,
+ struct tsn_qbv_conf *shaper_config)
+{
+ struct tsn_qbv_basic *admin_basic = &shaper_config->admin;
+ struct tsn_qbv_entry *control_list = admin_basic->control_list;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 ts_base;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int ret = 0;
+ int i, port;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ if (admin_basic->control_list_length > capa.num_tas_gcl) {
+ dev_err(ocelot->dev,
+ "Invalid admin_control_list_length %u\n",
+ admin_basic->control_list_length);
+ return -EINVAL;
+ }
+
+ if ((admin_basic->cycle_time < capa.tas_ct_min ||
+ admin_basic->cycle_time > capa.tas_ct_max) &&
+ shaper_config->gate_enabled) {
+ dev_err(ocelot->dev, "Invalid admin_cycle_time %u ns\n",
+ admin_basic->cycle_time);
+ return -EINVAL;
+ }
+ if (admin_basic->cycle_time_extension > capa.tas_cte_max) {
+ dev_err(ocelot->dev,
+ "Invalid admin_cycle_time_extension %u\n",
+ admin_basic->cycle_time_extension);
+ return -EINVAL;
+ }
+
+ ocelot_port = ocelot->ports[port];
+ ocelot_port->base_time = admin_basic->base_time;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ felix_get_basetime(ocelot, admin_basic->base_time,
+ admin_basic->cycle_time, &ts_base);
+
+ /* Select port */
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q |
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+
+ if (!shaper_config->gate_enabled)
+ admin_basic->gate_states = 0xff;
+
+ ocelot_rmw_rix(ocelot,
+ (shaper_config->gate_enabled ? QSYS_TAG_CONFIG_ENABLE : 0) |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(admin_basic->gate_states) |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xff),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
+ QSYS_TAG_CONFIG,
+ port);
+
+ if (shaper_config->maxsdu) {
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_0, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_1, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_2, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_3, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_4, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_5, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_6, port);
+ ocelot_write_rix(ocelot, shaper_config->maxsdu,
+ QSYS_QMAXSDU_CFG_7, port);
+ }
+
+ if (shaper_config->gate_enabled) {
+ ocelot_write(ocelot, ts_base.tv_nsec,
+ QSYS_PARAM_CFG_REG_1);
+
+ ocelot_write(ocelot, lower_32_bits(ts_base.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+
+ val = upper_32_bits(ts_base.tv_sec);
+ ocelot_write(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) |
+ QSYS_PARAM_CFG_REG_3_LIST_LENGTH(admin_basic->control_list_length),
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_write(ocelot, admin_basic->cycle_time,
+ QSYS_PARAM_CFG_REG_4);
+
+ ocelot_write(ocelot, admin_basic->cycle_time_extension,
+ QSYS_PARAM_CFG_REG_5);
+
+ for (i = 0; i < admin_basic->control_list_length; i++) {
+ felix_tas_gcl_set(ocelot, i, control_list);
+ control_list++;
+ }
+
+ /* Start configuration change */
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ret = readx_poll_timeout(felix_tas_read_status, ocelot, val,
+ !(QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE
+ & val), 10, 100000);
+ }
+
+ mutex_unlock(&ocelot->tas_lock);
+
+ return ret;
+}
+
+static int felix_qbv_get(struct net_device *ndev, struct tsn_qbv_conf *shaper_config)
+{
+ struct tsn_qbv_basic *admin = &shaper_config->admin;
+ struct tsn_qbv_entry *list;
+ u32 base_timel, base_timeh;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 val, reg;
+ int i, port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ shaper_config->maxsdu = ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ shaper_config->gate_enabled = (val & QSYS_TAG_CONFIG_ENABLE);
+ admin->gate_states = QSYS_TAG_CONFIG_INIT_GATE_STATE_X(val);
+
+ base_timel = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_1);
+ base_timeh = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_2);
+ reg = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_3);
+ admin->base_time = base_timeh |
+ (((u64)QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(reg)) << 32);
+
+ admin->base_time = (admin->base_time * 1000000000) + base_timel;
+
+ admin->control_list_length =
+ QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(reg);
+
+ admin->cycle_time = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_4);
+ admin->cycle_time_extension =
+ ocelot_read(ocelot, QSYS_PARAM_CFG_REG_5);
+
+ list = kmalloc_array(admin->control_list_length,
+ sizeof(struct tsn_qbv_entry), GFP_KERNEL);
+ if (!list) {
+ mutex_unlock(&ocelot->tas_lock);
+ return -ENOMEM;
+ }
+
+ admin->control_list = list;
+
+ for (i = 0; i < admin->control_list_length; i++) {
+ ocelot_rmw(ocelot,
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(i),
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M,
+ QSYS_GCL_CFG_REG_1);
+
+ list->time_interval =
+ ocelot_read(ocelot, QSYS_GCL_CFG_REG_2);
+
+ reg = ocelot_read(ocelot, QSYS_GCL_CFG_REG_1);
+ list->gate_state = QSYS_GCL_CFG_REG_1_GATE_STATE_X(reg);
+
+ list++;
+ }
+
+ mutex_unlock(&ocelot->tas_lock);
+
+ return 0;
+}
+
+static int felix_qbv_get_gatelist(struct ocelot *ocelot,
+ struct tsn_qbv_basic *oper)
+{
+ u32 base_timel;
+ u32 base_timeh;
+ u32 val;
+ struct tsn_qbv_entry *glist;
+ int i;
+
+ base_timel = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_1);
+ base_timeh = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_2);
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_3);
+ oper->base_time = base_timeh;
+ oper->base_time +=
+ ((u64)QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(val)) <<
+ 32;
+ oper->base_time = (oper->base_time * 1000000000) + base_timel;
+
+ oper->control_list_length =
+ QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(val);
+
+ oper->cycle_time = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_4);
+ oper->cycle_time_extension = ocelot_read(ocelot,
+ QSYS_PARAM_STATUS_REG_5);
+
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ oper->gate_states = QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(val);
+
+ glist = kmalloc_array(oper->control_list_length,
+ sizeof(struct tsn_qbv_entry), GFP_KERNEL);
+ if (!glist)
+ return -ENOMEM;
+
+ oper->control_list = glist;
+
+ for (i = 0; i < oper->control_list_length; i++) {
+ ocelot_rmw(ocelot,
+ QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(i),
+ QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M,
+ QSYS_GCL_STATUS_REG_1);
+
+ val = ocelot_read(ocelot, QSYS_GCL_STATUS_REG_2);
+ glist->time_interval = val;
+ val = ocelot_read(ocelot, QSYS_GCL_STATUS_REG_1);
+ glist->gate_state =
+ QSYS_GCL_STATUS_REG_1_GATE_STATE_X(val);
+
+ glist++;
+ }
+
+ return 0;
+}
+
+static int felix_qbv_get_status(struct net_device *ndev,
+ struct tsn_qbv_status *qbvstatus)
+{
+ struct tsn_qbv_basic *oper = &qbvstatus->oper;
+ struct ocelot *ocelot;
+ struct timespec64 ts;
+ struct dsa_port *dp;
+ ptptime_t cur_time;
+ int port;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ qbvstatus->supported_list_max = capa.num_tas_gcl;
+
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ qbvstatus->config_pending =
+ (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) ? 1 : 0;
+
+ qbvstatus->config_change_time =
+ ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_7);
+
+ qbvstatus->config_change_time +=
+ ((u64)QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(val)) <<
+ 32;
+
+ qbvstatus->config_change_time =
+ (qbvstatus->config_change_time * 1000000000) +
+ ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_6);
+
+ qbvstatus->config_change_error =
+ ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_9);
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ cur_time = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+
+ qbvstatus->current_time = cur_time;
+ felix_qbv_get_gatelist(ocelot, oper);
+
+ mutex_unlock(&ocelot->tas_lock);
+
+ return 0;
+}
+
+static int felix_qbu_set(struct net_device *ndev, u8 preemptible)
+{
+ struct ocelot_port *ocelot_port;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ ocelot_port = ocelot->ports[port];
+
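+	/* Enable the MAC merge layer in both directions, then mark the
+	 * queues selected in 'preemptible' as preemptable.
+	 */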
+ ocelot_port_rmwl(ocelot_port,
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA |
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA,
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA |
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA,
+ DEV_MM_ENABLE_CONFIG);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_PREEMPTION_CFG_P_QUEUES(preemptible),
+ QSYS_PREEMPTION_CFG_P_QUEUES_M,
+ QSYS_PREEMPTION_CFG,
+ port);
+
+ return 0;
+}
+
+static int felix_qbu_get(struct net_device *ndev, struct tsn_preempt_status *c)
+{
+ struct ocelot_port *ocelot_port;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int port;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ ocelot_port = ocelot->ports[port];
+
+ val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
+
+ c->admin_state = QSYS_PREEMPTION_CFG_P_QUEUES(val);
+ c->hold_advance = QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(val);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
+ c->preemption_active =
+ DEV_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS & val;
+
+ return 0;
+}
+
+static int felix_stream_table_add(u32 index, const unsigned char mac[ETH_ALEN],
+ int vid, u8 dst_idx, u8 handle)
+{
+ struct stream_filter *stream, *tmp;
+ struct list_head *pos, *q;
+
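+	/* The stream table is kept sorted by index: update an existing entry
+	 * in place, otherwise remember where the new entry must be inserted.
+	 */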
+ list_for_each_safe(pos, q, &streamtable) {
+ tmp = list_entry(pos, struct stream_filter, list);
+ if (tmp->index == index) {
+ ether_addr_copy(tmp->mac, mac);
+ tmp->vid = vid;
+ tmp->dst_idx = dst_idx;
+ tmp->handle = handle;
+ return 0;
+ }
+ if (tmp->index > index)
+ break;
+ }
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->index = index;
+ ether_addr_copy(stream->mac, mac);
+ stream->vid = vid;
+ stream->dst_idx = dst_idx;
+ stream->handle = handle;
+ list_add(&stream->list, pos->prev);
+
+ return 0;
+}
+
+static void felix_stream_table_del(u32 index)
+{
+ struct stream_filter *tmp;
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &streamtable) {
+ tmp = list_entry(pos, struct stream_filter, list);
+ if (tmp->index == index) {
+ list_del(pos);
+ kfree(tmp);
+ break;
+ }
+ }
+}
+
+static struct stream_filter *felix_stream_table_get(u32 index)
+{
+ struct stream_filter *tmp;
+
+ list_for_each_entry(tmp, &streamtable, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static int felix_streamid_force_forward_clear(struct ocelot *ocelot, u8 port)
+{
+ struct stream_filter *tmp;
+
+ if (port >= ocelot->num_phys_ports)
+ return 0;
+
+ list_for_each_entry(tmp, &streamtable, list)
+ if (tmp->dst_idx == port)
+ return 0;
+
+ ocelot_bridge_force_forward_port(ocelot, port, false);
+
+ return 0;
+}
+
+static int felix_cb_streamid_set(struct net_device *ndev, u32 index, bool enable,
+ struct tsn_cb_streamid *streamid)
+{
+ enum macaccess_entry_type type;
+ struct stream_filter *stream;
+ unsigned char mac[ETH_ALEN];
+ struct ocelot *ocelot;
+ int sfid, ssid, port;
+ struct dsa_port *dp;
+ u32 dst_idx;
+ u16 vid;
+ int ret;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ if (index >= FELIX_STREAM_NUM) {
+ dev_err(ocelot->dev, "Invalid index %u, maximum:%u\n",
+ index, FELIX_STREAM_NUM - 1);
+ return -EINVAL;
+ }
+
+	if (!enable) {
+		stream = felix_stream_table_get(index);
+		if (!stream)
+			return -EINVAL;
+
+		dst_idx = stream->dst_idx;
+
+		ocelot_mact_forget(ocelot, stream->mac, stream->vid);
+		felix_stream_table_del(index);
+
+		/* The stream entry is freed by now, so use the saved copy of
+		 * its destination port.
+		 */
+		felix_streamid_force_forward_clear(ocelot, dst_idx);
+
+		return 0;
+	}
+
+ if (streamid->type != 1) {
+ dev_err(ocelot->dev, "Invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (streamid->handle >= FELIX_PSFP_SFID_NUM) {
+ dev_err(ocelot->dev,
+ "Invalid stream handle %u, maximum:%u\n",
+ streamid->handle, FELIX_PSFP_SFID_NUM - 1);
+ return -EINVAL;
+ }
+
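+	/* The stream handle is used directly as SFID; the SSID is clamped to
+	 * the last available FRER SSID (FELIX_FRER_SSID_NUM - 1).
+	 */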
+ sfid = streamid->handle;
+ ssid = (streamid->handle < FELIX_FRER_SSID_NUM ?
+ streamid->handle : (FELIX_FRER_SSID_NUM - 1));
+
+ u64_to_ether_addr(streamid->para.nid.dmac, mac);
+ vid = streamid->para.nid.vid;
+
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ if (ret == -ENOENT) {
+		/* The MAC table doesn't contain this entry, learn it as static
+		 * and annotate it with an SSID and an SFID.
+		 */
+ ret = ocelot_mact_learn_streamdata(ocelot, port, mac, vid,
+ ENTRYTYPE_LOCKED, sfid,
+ ssid);
+ if (ret)
+ return ret;
+
+ return felix_stream_table_add(index, mac, vid, port,
+ streamid->handle);
+ }
+
+ if (type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+ if (ret)
+ return ret;
+
+ return felix_stream_table_add(index, mac, vid, dst_idx,
+ streamid->handle);
+}
+
+static int felix_cb_streamid_get(struct net_device *ndev, u32 index,
+ struct tsn_cb_streamid *streamid)
+{
+ enum macaccess_entry_type type;
+ struct stream_filter *stream;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 dst, fwdmask;
+ int ret;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= FELIX_STREAM_NUM) {
+ dev_err(ocelot->dev,
+ "Invalid stream handle %u, maximum:%u\n",
+ index, FELIX_STREAM_NUM - 1);
+ return -EINVAL;
+ }
+
+ stream = felix_stream_table_get(index);
+ if (!stream)
+ return -EINVAL;
+
+ ret = ocelot_mact_lookup(ocelot, &dst, stream->mac, stream->vid, &type);
+ if (ret)
+ return ret;
+
+ streamid->type = type;
+
+ fwdmask = ocelot_read_rix(ocelot, ANA_PGID_PGID, dst);
+ streamid->ofac_oport = ANA_PGID_PGID_PGID(fwdmask);
+
+ streamid->para.nid.dmac = ether_addr_to_u64(stream->mac);
+ streamid->para.nid.vid = stream->vid;
+
+ streamid->handle = stream->handle;
+
+ return 0;
+}
+
+static int felix_cb_streamid_counters_get(struct net_device *ndev, u32 index,
+ struct tsn_cb_streamid_counters *sc)
+{
+ return 0;
+}
+
+static int felix_qci_sfi_set(struct net_device *ndev, u32 index, bool enable,
+ struct tsn_qci_psfp_sfi_conf *sfi)
+{
+ int igr_prio = sfi->priority_spec;
+ u16 sgid = sfi->stream_gate_instance_id;
+ int fmid = sfi->stream_filter.flow_meter_instance_id;
+ u16 max_sdu_len = sfi->stream_filter.maximum_sdu_size;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int sfid = index;
+ int i, port;
+ u16 pol_idx;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ if (fmid == -1)
+ pol_idx = capa.psfp_fmi_max;
+ else
+ pol_idx = (u16)fmid;
+
+ if (!enable) {
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+ return 0;
+ }
+
+ /* Port default SFID set */
+ if (sfi->stream_handle_spec < 0) {
+ val = ANA_PORT_SFID_CFG_SFID_VALID |
+ ANA_PORT_SFID_CFG_SFID(sfid);
+ if (igr_prio < 0) {
+ for (i = 0; i < OCELOT_NUM_TC; i++)
+ ocelot_write_ix(ocelot, val, ANA_PORT_SFID_CFG,
+ port, i);
+ } else {
+ ocelot_write_ix(ocelot, val, ANA_PORT_SFID_CFG,
+ port, igr_prio & 0x7);
+ }
+ } else if (index != sfi->stream_handle_spec) {
+		dev_err(ocelot->dev, "Index must be equal to streamHandle\n");
+ return -EINVAL;
+ }
+
+ if (sgid >= capa.num_psfp_sgid) {
+ dev_err(ocelot->dev, "Invalid sgid %u, maximum:%u\n",
+ sgid, capa.num_psfp_sgid);
+ return -EINVAL;
+ }
+ if (pol_idx > capa.psfp_fmi_max || pol_idx < capa.psfp_fmi_min) {
+ dev_err(ocelot->dev, "Invalid pol_idx %u, range:%d~%d\n",
+ pol_idx, capa.psfp_fmi_min, capa.psfp_fmi_max);
+ return -EINVAL;
+ }
+
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SGID_VALID |
+ ANA_TABLES_SFIDTIDX_SGID(sgid) |
+ ((fmid != -1) ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(pol_idx) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ((igr_prio >= 0) ?
+ ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(igr_prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(max_sdu_len) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return 0;
+}
+
+static int felix_qci_sfi_get(struct net_device *ndev, u32 index,
+ struct tsn_qci_psfp_sfi_conf *sfi)
+{
+ u32 val, reg, fmeter_id, max_sdu;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 sfid = index;
+ int enable = 1;
+ int i, port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ if (sfid >= capa.num_psfp_sfid) {
+ dev_err(ocelot->dev, "Invalid index %u, maximum:%u\n",
+ sfid, capa.num_psfp_sfid);
+ return -EINVAL;
+ }
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_READ),
+ ANA_TABLES_SFIDACCESS);
+
+ val = ocelot_read(ocelot, ANA_TABLES_SFIDTIDX);
+ if (!(val & ANA_TABLES_SFIDTIDX_SGID_VALID)) {
+ enable = 0;
+ return enable;
+ }
+
+ sfi->stream_gate_instance_id = ANA_TABLES_SFIDTIDX_SGID_X(val);
+ fmeter_id = ANA_TABLES_SFIDTIDX_POL_IDX_X(val);
+ sfi->stream_filter.flow_meter_instance_id = fmeter_id;
+
+ reg = ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+ max_sdu = ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(reg);
+ sfi->stream_filter.maximum_sdu_size = max_sdu;
+
+ if (reg & ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA)
+ sfi->priority_spec = ANA_TABLES_SFIDACCESS_IGR_PRIO_X(reg);
+ else
+		dev_err(ocelot->dev, "priority not enabled\n");
+
+ for (i = 0; i < OCELOT_NUM_TC; i++) {
+ val = ocelot_read_ix(ocelot, ANA_PORT_SFID_CFG, port, i);
+ if ((val & ANA_PORT_SFID_CFG_SFID_VALID) &&
+ sfid == ANA_PORT_SFID_CFG_SFID(val)) {
+ sfi->stream_handle_spec = -1;
+ return enable;
+ }
+ }
+
+ sfi->stream_handle_spec = sfid;
+ return enable;
+}
+
+static int felix_qci_sfi_counters_get(struct net_device *ndev, u32 index,
+ struct tsn_qci_psfp_sfi_counters *sfi_cnt)
+{
+ u32 match, not_pass, not_pass_sdu, red;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 sfid = index;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (sfid >= capa.num_psfp_sfid) {
+ dev_err(ocelot->dev, "Invalid index %u, maximum:%u\n",
+ sfid, capa.num_psfp_sfid);
+ return -EINVAL;
+ }
+
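+	/* Select which filter's counters are visible through the SYS_CNT
+	 * window, then read the four PSFP counters at their fixed offsets.
+	 */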
+ ocelot_rmw(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
+ not_pass = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
+ not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
+ red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
+
+ sfi_cnt->matching_frames_count = match;
+ sfi_cnt->not_passing_frames_count = not_pass;
+ sfi_cnt->not_passing_sdu_count = not_pass_sdu;
+ sfi_cnt->red_frames_count = red;
+
+ sfi_cnt->passing_frames_count = match - not_pass;
+ sfi_cnt->passing_sdu_count = match - not_pass - not_pass_sdu;
+
+ return 0;
+}
+
+static int felix_qci_max_cap_get(struct net_device *ndev,
+ struct tsn_qci_psfp_stream_param *stream_para)
+{
+ /* MaxStreamFilterInstances */
+ stream_para->max_sf_instance = capa.num_psfp_sfid;
+ /* MaxStreamGateInstances */
+ stream_para->max_sg_instance = capa.num_psfp_sgid;
+ /* MaxFlowMeterInstances */
+ stream_para->max_fm_instance = capa.psfp_fmi_max -
+ capa.psfp_fmi_min + 1;
+ /* SupportedListMax */
+ stream_para->supported_list_max = capa.num_sgi_gcl;
+
+ return 0;
+}
+
+static int felix_sgi_set_glist(struct ocelot *ocelot,
+ struct tsn_qci_psfp_gcl *gcl, uint32_t num)
+{
+ u32 time_sum = 0;
+ int i;
+
+ if (num > capa.num_sgi_gcl)
+ return -EINVAL;
+
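+	/* The gate control list is programmed with cumulative time offsets
+	 * from the start of the cycle, hence the running time_sum.
+	 */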
+ for (i = 0; i < num; i++) {
+ u32 val = ANA_SG_GCL_GS_CONFIG_IPS((gcl->ipv < 0) ?
+ 0 : gcl->ipv + 8);
+ val |= (gcl->gate_state ? ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0);
+ ocelot_write_rix(ocelot, val, ANA_SG_GCL_GS_CONFIG, i);
+
+ time_sum += gcl->time_interval;
+ ocelot_write_rix(ocelot, time_sum, ANA_SG_GCL_TI_CONFIG, i);
+
+ gcl++;
+ }
+
+ return 0;
+}
+
+static u32 felix_sgi_read_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int felix_qci_sgi_set(struct net_device *ndev, u32 index,
+ struct tsn_qci_psfp_sgi_conf *sgi_conf)
+{
+ struct tsn_qci_sg_control *admin_list = &sgi_conf->admin;
+ u32 list_length = sgi_conf->admin.control_list_length;
+ u32 cycle_time = sgi_conf->admin.cycle_time;
+ u32 cycle_time_ex = sgi_conf->admin.cycle_time_extension;
+ struct timespec64 ts_base;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 sgid = index;
+ int ret;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (sgid >= capa.num_psfp_sgid) {
+ dev_err(ocelot->dev, "Invalid sgid %u, maximum:%u\n",
+ sgid, capa.num_psfp_sgid);
+ return -EINVAL;
+ }
+ if ((cycle_time < capa.sgi_ct_min ||
+ cycle_time > capa.sgi_ct_max) &&
+ sgi_conf->gate_enabled) {
+ dev_err(ocelot->dev, "Invalid cycle_time %u ns\n",
+ cycle_time);
+ return -EINVAL;
+ }
+ if (cycle_time_ex > capa.sgi_cte_max) {
+ dev_err(ocelot->dev,
+ "Invalid cycle_time_extension %u\n",
+ cycle_time_ex);
+ return -EINVAL;
+ }
+ if (list_length > capa.num_sgi_gcl) {
+ dev_err(ocelot->dev,
+ "Invalid sgi_gcl len %u, maximum:%u\n",
+ list_length, capa.num_sgi_gcl);
+ return -EINVAL;
+ }
+
+ /* configure SGID */
+ ocelot_rmw(ocelot,
+ ANA_SG_ACCESS_CTRL_SGID(sgid),
+ ANA_SG_ACCESS_CTRL_SGID_M,
+ ANA_SG_ACCESS_CTRL);
+
+ /* Disable SG */
+ if (!sgi_conf->gate_enabled) {
+ ocelot_rmw(ocelot,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+ return 0;
+ }
+
+ felix_get_basetime(ocelot, sgi_conf->admin.base_time,
+ sgi_conf->admin.cycle_time, &ts_base);
+
+ ocelot_write(ocelot, ts_base.tv_nsec, ANA_SG_CONFIG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(ts_base.tv_sec),
+ ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(ts_base.tv_sec);
+ ocelot_write(ocelot,
+ (sgi_conf->admin.init_ipv < 0 ?
+ 0 : ANA_SG_CONFIG_REG_3_IPV_VALID) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi_conf->admin.init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(list_length) |
+ (sgi_conf->admin.gate_states > 0 ?
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE : 0) |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, cycle_time, ANA_SG_CONFIG_REG_4);
+ ocelot_write(ocelot, cycle_time_ex, ANA_SG_CONFIG_REG_5);
+
+ ret = felix_sgi_set_glist(ocelot, admin_list->gcl, list_length);
+ if (ret < 0)
+ return ret;
+
+ /* Start configuration change */
+ ocelot_rmw(ocelot,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ ret = readx_poll_timeout(felix_sgi_read_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+
+ return ret;
+}
+
+static int felix_sgi_get_glist(struct ocelot *ocelot,
+ struct tsn_qci_psfp_gcl *gcl,
+ uint32_t num)
+{
+ u32 time = 0;
+ u32 reg;
+ u16 val;
+ int i;
+
+ if (num > capa.num_sgi_gcl)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ val = ocelot_read_rix(ocelot, ANA_SG_GCL_GS_CONFIG, i);
+ gcl->gate_state = (val & ANA_SG_GCL_GS_CONFIG_GATE_STATE);
+
+ if (val & ANA_SG_GCL_GS_CONFIG_IPV_VALID)
+ gcl->ipv = ANA_SG_GCL_GS_CONFIG_IPV(val);
+ else
+ gcl->ipv = -1;
+
+ reg = ocelot_read_rix(ocelot, ANA_SG_GCL_TI_CONFIG, i);
+ gcl->time_interval = (reg - time);
+ time = reg;
+
+ gcl++;
+ }
+
+ return 0;
+}
+
+static int felix_qci_sgi_get(struct net_device *ndev, u32 index,
+ struct tsn_qci_psfp_sgi_conf *sgi_conf)
+{
+ struct tsn_qci_sg_control *admin = &sgi_conf->admin;
+ struct tsn_qci_psfp_gcl *glist;
+ u32 val, reg, list_num;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= capa.num_psfp_sgid) {
+ dev_err(ocelot->dev, "Invalid sgid %u, maximum:%u\n",
+ index, capa.num_psfp_sgid);
+ return -EINVAL;
+ }
+
+ ocelot_rmw(ocelot,
+ ANA_SG_ACCESS_CTRL_SGID(index),
+ ANA_SG_ACCESS_CTRL_SGID_M,
+ ANA_SG_ACCESS_CTRL);
+
+ admin->cycle_time = ocelot_read(ocelot, ANA_SG_CONFIG_REG_4);
+ admin->cycle_time_extension =
+ ocelot_read(ocelot, ANA_SG_CONFIG_REG_5);
+
+ val = ocelot_read(ocelot, ANA_SG_CONFIG_REG_2);
+ admin->base_time = val;
+
+ reg = ocelot_read(ocelot, ANA_SG_CONFIG_REG_1);
+ val = ocelot_read(ocelot, ANA_SG_CONFIG_REG_3);
+
+	admin->base_time +=
+		((u64)ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val)) << 32;
+
+ admin->base_time = admin->base_time * 1000000000 + reg;
+
+ if (val & ANA_SG_CONFIG_REG_3_IPV_VALID)
+ admin->init_ipv = ANA_SG_CONFIG_REG_3_INIT_IPV_X(val);
+ else
+ admin->init_ipv = -1;
+
+ if (val & ANA_SG_CONFIG_REG_3_GATE_ENABLE)
+ sgi_conf->gate_enabled = 1;
+
+ admin->control_list_length = ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(val);
+
+ list_num = admin->control_list_length;
+
+ glist = kmalloc_array(list_num, sizeof(struct tsn_qci_psfp_gcl),
+ GFP_KERNEL);
+ if (!glist)
+ return -ENOMEM;
+
+ admin->gcl = glist;
+
+ return felix_sgi_get_glist(ocelot, glist, list_num);
+}
+
+static int felix_qci_sgi_status_get(struct net_device *ndev, u16 index,
+ struct tsn_psfp_sgi_status *sgi_status)
+{
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 val, reg;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= capa.num_psfp_sgid) {
+ dev_err(ocelot->dev, "Invalid sgid %u, maximum:%u\n",
+ index, capa.num_psfp_sgid);
+ return -EINVAL;
+ }
+
+ ocelot_rmw(ocelot,
+ ANA_SG_ACCESS_CTRL_SGID(index),
+ ANA_SG_ACCESS_CTRL_SGID_M,
+ ANA_SG_ACCESS_CTRL);
+
+ val = ocelot_read(ocelot, ANA_SG_STATUS_REG_2);
+ sgi_status->config_change_time = val;
+
+ reg = ocelot_read(ocelot, ANA_SG_STATUS_REG_1);
+ val = ocelot_read(ocelot, ANA_SG_STATUS_REG_3);
+	sgi_status->config_change_time +=
+		((u64)ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(val)) << 32;
+ sgi_status->config_change_time =
+ sgi_status->config_change_time * 1000000000 + reg;
+
+ if (val & ANA_SG_STATUS_REG_3_CONFIG_PENDING)
+ sgi_status->config_pending = 1;
+ else
+ sgi_status->config_pending = 0;
+
+ if (val & ANA_SG_STATUS_REG_3_GATE_STATE)
+ sgi_status->oper.gate_states = 1;
+ else
+ sgi_status->oper.gate_states = 0;
+	/* Bit 3 encoding: 0 means IPV[0:2] is invalid, 1 means it is valid */
+ if (val & ANA_SG_STATUS_REG_3_IPV_VALID)
+ sgi_status->oper.init_ipv = ANA_SG_STATUS_REG_3_IPV_X(val);
+ else
+ sgi_status->oper.init_ipv = -1;
+
+ return 0;
+}
+
+static int felix_qci_fmi_set(struct net_device *ndev, u32 index,
+ bool enable, struct tsn_qci_psfp_fmi *fmi)
+{
+ u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
+ bool cir_discard = 0, pir_discard = 0;
+ u32 pbs_max = 0, cbs_max = 0;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 cir_ena = 0;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index > capa.qos_pol_max) {
+ dev_err(ocelot->dev, "Invalid pol_idx %u, maximum: %u\n",
+ index, capa.qos_pol_max);
+ return -EINVAL;
+ }
+
+ if (fmi->mark_red_enable && fmi->mark_red) {
+ fmi->eir = 0;
+ fmi->ebs = 0;
+ fmi->cir = 0;
+ fmi->cbs = 0;
+ }
+
+ pir = fmi->eir;
+ pbs = fmi->ebs;
+
+ if (!fmi->drop_on_yellow)
+ cir_ena = 1;
+
+ if (cir_ena) {
+ cir = fmi->cir;
+ cbs = fmi->cbs;
+ if (cir == 0 && cbs == 0) {
+ cir_discard = 1;
+ } else {
+ cir = DIV_ROUND_UP(cir, 100);
+ cir *= 3; /* Rate unit is 33 1/3 kbps */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ cbs = (cbs ? cbs : 1);
+ cbs_max = capa.pol_cbs_max;
+ if (fmi->cf)
+ pir += fmi->cir;
+ }
+ }
+
+ if (pir == 0 && pbs == 0) {
+ pir_discard = 1;
+ } else {
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* Rate unit is 33 1/3 kbps */
+ pbs = DIV_ROUND_UP(pbs, 4096);
+ pbs = (pbs ? pbs : 1);
+ pbs_max = capa.pol_pbs_max;
+ }
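+	/* Example: a requested rate of 100000 kbps becomes
+	 * DIV_ROUND_UP(100000, 100) * 3 = 3000 units of 33 1/3 kbps, and a
+	 * burst of 16384 bytes becomes 4 units of 4096 bytes.
+	 */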
+ pir = min_t(u32, GENMASK(15, 0), pir);
+ cir = min_t(u32, GENMASK(15, 0), cir);
+ pbs = min(pbs_max, pbs);
+ cbs = min(cbs_max, cbs);
+
+ ocelot_write_gix(ocelot, (ANA_POL_MODE_CFG_IPG_SIZE(20) |
+ ANA_POL_MODE_CFG_FRM_MODE(1) |
+ (fmi->cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) |
+ (cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) |
+ ANA_POL_MODE_CFG_OVERSHOOT_ENA),
+ ANA_POL_MODE_CFG, index);
+
+ ocelot_write_gix(ocelot, ANA_POL_PIR_CFG_PIR_RATE(pir) |
+ ANA_POL_PIR_CFG_PIR_BURST(pbs),
+ ANA_POL_PIR_CFG, index);
+
+ ocelot_write_gix(ocelot,
+ (pir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_PIR_STATE, index);
+
+ ocelot_write_gix(ocelot, ANA_POL_CIR_CFG_CIR_RATE(cir) |
+ ANA_POL_CIR_CFG_CIR_BURST(cbs),
+ ANA_POL_CIR_CFG, index);
+
+ ocelot_write_gix(ocelot,
+ (cir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_CIR_STATE, index);
+
+ return 0;
+}
+
+static int felix_qci_fmi_get(struct net_device *ndev, u32 index,
+ struct tsn_qci_psfp_fmi *fmi,
+ struct tsn_qci_psfp_fmi_counters *counters)
+{
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 val, reg;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index > capa.qos_pol_max) {
+ dev_err(ocelot->dev, "Invalid pol_idx %u, maximum: %u\n",
+ index, capa.qos_pol_max);
+ return -EINVAL;
+ }
+
+ val = ocelot_read_gix(ocelot, ANA_POL_PIR_CFG, index);
+ reg = ocelot_read_gix(ocelot, ANA_POL_CIR_CFG, index);
+
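+	/* Convert back from hardware units: the rate is in steps of
+	 * 33 1/3 kbps and the burst in steps of 4096 bytes.
+	 */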
+ fmi->eir = ANA_POL_PIR_CFG_PIR_RATE_X(val);
+ fmi->eir = fmi->eir * 100 / 3;
+ fmi->ebs = ANA_POL_PIR_CFG_PIR_BURST(val);
+ fmi->ebs *= 4096;
+ fmi->cir = ANA_POL_CIR_CFG_CIR_RATE_X(reg);
+ fmi->cir = fmi->cir * 100 / 3;
+ fmi->cbs = ANA_POL_CIR_CFG_CIR_BURST(reg);
+ fmi->cbs *= 4096;
+ if (!(fmi->eir | fmi->ebs | fmi->cir | fmi->cbs))
+ fmi->mark_red = 1;
+ else
+ fmi->mark_red = 0;
+
+ val = ocelot_read_gix(ocelot, ANA_POL_MODE_CFG, index);
+ if (val & ANA_POL_MODE_CFG_DLB_COUPLED)
+ fmi->cf = 1;
+ else
+ fmi->cf = 0;
+ if (val & ANA_POL_MODE_CFG_CIR_ENA)
+ fmi->drop_on_yellow = 0;
+ else
+ fmi->drop_on_yellow = 1;
+
+ return 0;
+}
+
+static int felix_qos_shaper_conf_set(struct ocelot *ocelot, int idx,
+ u8 percent, int speed)
+{
+ u32 cbs = 0;
+ u32 cir = 0;
+
+ if (percent > 100) {
+ dev_err(ocelot->dev, "percentage %d larger than 100\n",
+ percent);
+ return -EINVAL;
+ }
+ if (idx >= capa.num_hsch) {
+		dev_err(ocelot->dev,
+			"CIR_CFG: id %d exceeds the number of HSCH instances\n",
+			idx);
+ return -EINVAL;
+ }
+
+ switch (speed) {
+ case SPEED_10:
+ cir = 10000;
+ break;
+ case SPEED_100:
+ cir = 100000;
+ break;
+ case SPEED_1000:
+ cir = 1000000;
+ break;
+ case SPEED_2500:
+ cir = 2500000;
+ break;
+ }
+
+ cir = cir * percent / 100;
+ cir = DIV_ROUND_UP(cir, 100); /* Rate unit is 100 kbps */
+ cir = (cir ? cir : 1); /* Avoid using zero rate */
+ cbs = DIV_ROUND_UP(cbs, 4096); /* Burst unit is 4kB */
+ cbs = (cbs ? cbs : 1); /* Avoid using zero burst size */
+ cir = min_t(u32, GENMASK(15, 0), cir);
+ cbs = min_t(u32, GENMASK(6, 0), cbs);
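+	/* Example: 50% of a 1G link gives cir = 1000000 * 50 / 100
+	 * = 500000 kbps, i.e. 5000 rate units of 100 kbps.
+	 */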
+
+ if (!percent) {
+ cir = 0;
+ cbs = 0;
+ }
+
+ ocelot_write_gix(ocelot,
+ QSYS_CIR_CFG_CIR_RATE(cir) |
+ QSYS_CIR_CFG_CIR_BURST(cbs),
+ QSYS_CIR_CFG,
+ idx);
+
+ return 0;
+}
+
+static int felix_cbs_set(struct net_device *ndev, u8 tc, u8 bw)
+{
+ struct ocelot_port *ocelot_port;
+ struct phylink_link_state state;
+ struct phylink_pcs *pcs;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ struct dsa_port *dp;
+ int port, speed;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+ ocelot_port = ocelot->ports[port];
+
+ if (tc > capa.qos_cos_max) {
+ dev_err(ocelot->dev, "Invalid tc: %u\n", tc);
+ return -EINVAL;
+ }
+
+ memset(&state, 0, sizeof(state));
+ state.interface = ocelot_port->phy_mode;
+
+ felix = ocelot_to_felix(ocelot);
+ pcs = felix->pcs[port];
+ pcs->ops->pcs_get_state(pcs, &state);
+
+ speed = state.speed;
+
+ felix_qos_shaper_conf_set(ocelot, port * 8 + tc, bw, speed);
+
+ ocelot_rmw_gix(ocelot,
+ QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG,
+ port * 8 + tc);
+
+ hsch_bw[port * 8 + tc] = bw;
+
+ return 0;
+}
+
+void felix_cbs_reset(struct ocelot *ocelot, int port, int speed)
+{
+ int i, idx;
+
+ for (i = 0; i < OCELOT_NUM_TC; i++) {
+ idx = port * 8 + i;
+ if (hsch_bw[idx] > 0)
+ felix_qos_shaper_conf_set(ocelot, idx, hsch_bw[idx],
+ speed);
+ }
+}
+
+static int felix_cbs_get(struct net_device *ndev, u8 tc)
+{
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ if (tc > capa.qos_cos_max) {
+ dev_err(ocelot->dev, "Invalid tc: %u\n", tc);
+ return -EINVAL;
+ }
+
+ return hsch_bw[port * 8 + tc];
+}
+
+static int felix_cut_thru_set(struct net_device *ndev, u8 cut_thru)
+{
+ struct ocelot_port *ocelot_port;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+ ocelot_port = ocelot->ports[port];
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot_port->cut_thru = cut_thru;
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return 0;
+}
+
+static int felix_rtag_parse_enable(struct ocelot *ocelot, u8 port)
+{
+ ocelot_rmw_rix(ocelot,
+ ANA_PORT_MODE_REDTAG_PARSE_CFG,
+ ANA_PORT_MODE_REDTAG_PARSE_CFG,
+ ANA_PORT_MODE,
+ port);
+
+ ocelot_write(ocelot, ETH_P_8021CB, SYS_SR_ETYPE_CFG);
+
+	/* No harm, no foul: we are telling the switch to adjust the maximum
+	 * frame length for double-tagged VLANs, lying that the EtherType for
+	 * S-Tags is the one for 802.1CB. This is not an issue because with
+	 * 802.1CB traffic, the switch will not parse more than 2 tags anyway,
+	 * so it will see either the 802.1CB R-TAG or a second VLAN tag, but
+	 * not both.
+	 */
+ ocelot_port_writel(ocelot->ports[port],
+ DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021CB) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ return 0;
+}
+
+static int felix_seq_gen_set(struct net_device *ndev, u32 index,
+ struct tsn_seq_gen_conf *sg_conf)
+{
+ u8 iport_mask = sg_conf->iport_mask;
+ u8 split_mask = sg_conf->split_mask;
+ u8 seq_len = sg_conf->seq_len;
+ u32 seq_num = sg_conf->seq_num;
+ struct stream_filter *tmp;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= capa.num_frer_ssid) {
+ dev_err(ocelot->dev, "Invalid SSID %u, maximum:%u\n",
+ index, capa.num_frer_ssid - 1);
+ return -EINVAL;
+ }
+ if (seq_len < capa.frer_seq_len_min ||
+ seq_len > capa.frer_seq_len_max) {
+ dev_err(ocelot->dev,
+			"Invalid seq_space_bits num %u, range: %d~%d\n",
+ seq_len,
+ capa.frer_seq_len_min,
+ capa.frer_seq_len_max);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp, &streamtable, list)
+ if (tmp->handle == index &&
+ tmp->dst_idx < ocelot->num_phys_ports)
+ ocelot_bridge_force_forward_port(ocelot, tmp->dst_idx, true);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SEQ_MASK_SPLIT_MASK(split_mask) |
+ ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(iport_mask),
+ ANA_TABLES_SEQ_MASK);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMTIDX_S_INDEX(index) |
+ ANA_TABLES_STREAMTIDX_STREAM_SPLIT |
+ ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(seq_len),
+ ANA_TABLES_STREAMTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(seq_num) |
+ ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA |
+ ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_STREAMACCESS);
+
+ return 0;
+}
+
+static int felix_seq_rec_set(struct net_device *ndev, u32 index,
+ struct tsn_seq_rec_conf *sr_conf)
+{
+ u8 seq_len = sr_conf->seq_len;
+ u8 hislen = sr_conf->his_len;
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ int i;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= capa.num_frer_ssid) {
+ dev_err(ocelot->dev, "Invalid SSID %u, maximum:%u\n",
+ index, capa.num_frer_ssid - 1);
+ return -EINVAL;
+ }
+ if (seq_len < capa.frer_seq_len_min ||
+ seq_len > capa.frer_seq_len_max) {
+ dev_err(ocelot->dev,
+			"Invalid seq_space_bits num %u, range: %d~%d\n",
+ seq_len,
+ capa.frer_seq_len_min,
+ capa.frer_seq_len_max);
+ return -EINVAL;
+ }
+ if (hislen < capa.frer_his_len_min ||
+ hislen > capa.frer_his_len_max) {
+ dev_err(ocelot->dev,
+			"Invalid history_bits num %u, range: %d~%d\n",
+ hislen,
+ capa.frer_his_len_min,
+ capa.frer_his_len_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ felix_rtag_parse_enable(ocelot, i);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMTIDX_S_INDEX(index) |
+ ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR |
+ ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(hislen) |
+ ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE |
+ (sr_conf->rtag_pop_en ?
+ ANA_TABLES_STREAMTIDX_REDTAG_POP : 0) |
+ ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(seq_len),
+ ANA_TABLES_STREAMTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA |
+ ANA_TABLES_STREAMACCESS_GEN_REC_TYPE |
+ ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_STREAMACCESS);
+
+ return 0;
+}
+
+static int felix_cb_get(struct net_device *ndev, u32 index,
+ struct tsn_cb_status *c)
+{
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 val;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+
+ if (index >= capa.num_frer_ssid) {
+ dev_err(ocelot->dev, "Invalid SSID %u, maximum:%u\n",
+ index, capa.num_frer_ssid - 1);
+ return -EINVAL;
+ }
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMTIDX_S_INDEX(index),
+ ANA_TABLES_STREAMTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(SFIDACCESS_CMD_READ),
+ ANA_TABLES_STREAMACCESS);
+
+ val = ocelot_read(ocelot, ANA_TABLES_STREAMACCESS);
+ c->gen_rec = (ANA_TABLES_STREAMACCESS_GEN_REC_TYPE & val) >> 2;
+ c->seq_num = ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(val);
+
+ val = ocelot_read(ocelot, ANA_TABLES_STREAMTIDX);
+ c->err = ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(val);
+ c->his_len = ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(val);
+ c->seq_len = ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(val);
+
+ val = ocelot_read(ocelot, ANA_TABLES_SEQ_MASK);
+ c->split_mask = ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(val);
+ c->iport_mask = ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(val);
+
+ c->seq_his = ocelot_read(ocelot, ANA_TABLES_SEQ_HISTORY);
+
+ return 0;
+}
+
+static int felix_dscp_set(struct net_device *ndev, bool enable, const u8 dscp_ix,
+ struct tsn_qos_switch_dscp_conf *c)
+{
+ struct ocelot *ocelot;
+ struct dsa_port *dp;
+ u32 ri = dscp_ix;
+ u32 val;
+ int port;
+
+ dp = dsa_port_from_netdev(ndev);
+ ocelot = dp->ds->priv;
+ port = dp->index;
+
+ c->dscp = 0;
+ c->trust = 1;
+ c->remark = 0;
+
+ if (dscp_ix > capa.qos_dscp_max) {
+ dev_err(ocelot->dev, "Invalid dscp_ix %u\n", dscp_ix);
+ return -EINVAL;
+ }
+ if (c->cos > capa.qos_cos_max) {
+ dev_err(ocelot->dev, "Invalid cos %d\n", c->cos);
+ return -EINVAL;
+ }
+ if (c->dpl > capa.qos_dp_max) {
+ dev_err(ocelot->dev, "Invalid dpl %d\n", c->dpl);
+ return -EINVAL;
+ }
+
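+	/* Enable (or disable) DSCP-based classification on the ingress port,
+	 * then program the per-DSCP entry with the requested CoS, drop
+	 * precedence and trust/rewrite flags.
+	 */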
+ ocelot_rmw_gix(ocelot,
+ (enable ? ANA_PORT_QOS_CFG_QOS_DSCP_ENA : 0) |
+ (c->dscp ? ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA : 0),
+ ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ val = (c->dpl ? ANA_DSCP_CFG_DP_DSCP_VAL : 0) |
+ ANA_DSCP_CFG_QOS_DSCP_VAL(c->cos) |
+ ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(c->dscp) |
+ (c->trust ? ANA_DSCP_CFG_DSCP_TRUST_ENA : 0) |
+ (c->remark ? ANA_DSCP_CFG_DSCP_REWR_ENA : 0);
+
+ ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, ri);
+
+ return 0;
+}
+
+void felix_preempt_irq_clean(struct ocelot *ocelot)
+{
+ struct ocelot_port *ocelot_port;
+ int port;
+ u32 val;
+
+ val = DEV_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY;
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ ocelot_port_rmwl(ocelot_port, val, val, DEV_MM_STATUS);
+ }
+}
+
+static const struct tsn_ops felix_tsn_ops = {
+ .get_capability = felix_tsn_get_cap,
+ .qbv_set = felix_qbv_set,
+ .qbv_get = felix_qbv_get,
+ .qbv_get_status = felix_qbv_get_status,
+ .qbu_set = felix_qbu_set,
+ .qbu_get = felix_qbu_get,
+ .cb_streamid_set = felix_cb_streamid_set,
+ .cb_streamid_get = felix_cb_streamid_get,
+ .cb_streamid_counters_get = felix_cb_streamid_counters_get,
+ .qci_sfi_set = felix_qci_sfi_set,
+ .qci_sfi_get = felix_qci_sfi_get,
+ .qci_sfi_counters_get = felix_qci_sfi_counters_get,
+ .qci_get_maxcap = felix_qci_max_cap_get,
+ .qci_sgi_set = felix_qci_sgi_set,
+ .qci_sgi_get = felix_qci_sgi_get,
+ .qci_sgi_status_get = felix_qci_sgi_status_get,
+ .qci_fmi_set = felix_qci_fmi_set,
+ .qci_fmi_get = felix_qci_fmi_get,
+ .cbs_set = felix_cbs_set,
+ .cbs_get = felix_cbs_get,
+ .ct_set = felix_cut_thru_set,
+ .cbgen_set = felix_seq_gen_set,
+ .cbrec_set = felix_seq_rec_set,
+ .cb_get = felix_cb_get,
+ .dscp_set = felix_dscp_set,
+};
+
+int felix_tsn_enable(struct dsa_switch *ds)
+{
+ struct net_device *dev;
+ struct dsa_port *dp;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ dp = dsa_to_port(ds, port);
+ if (dp->type == DSA_PORT_TYPE_USER) {
+ dev = dp->slave;
+ tsn_port_register(dev, (struct tsn_ops *)&felix_tsn_ops,
+ GROUP_OFFSET_SWITCH);
+ }
+ }
+
+ INIT_LIST_HEAD(&streamtable);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/ocelot/felix_tsn.h b/drivers/net/dsa/ocelot/felix_tsn.h
new file mode 100644
index 000000000000..9895b85de8d9
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_tsn.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ *
+ * Felix Switch TSN driver
+ *
+ * Copyright 2020-2021 NXP Semiconductors
+ */
+
+#ifndef _MSCC_FELIX_SWITCH_TSN_H_
+#define _MSCC_FELIX_SWITCH_TSN_H_
+
+#include <soc/mscc/ocelot.h>
+#include <net/dsa.h>
+
+void felix_preempt_irq_clean(struct ocelot *ocelot);
+void felix_cbs_reset(struct ocelot *ocelot, int port, int speed);
+int felix_tsn_enable(struct dsa_switch *ds);
+#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index e53ad283e259..9bda4a135946 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -5,8 +5,10 @@
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
@@ -14,9 +16,14 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
+#include "felix_tsn.h"
#include "felix.h"
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
+#define VSC9959_SWITCH_PCI_BAR 4
+#define VSC9959_IMDIO_PCI_BAR 0
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
@@ -292,7 +299,7 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
+ REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -340,6 +347,9 @@ static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
+ REG(DEV_MM_ENABLE_CONFIG, 0x48),
+ REG(DEV_MM_VERIF_CONFIG, 0x4C),
+ REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
@@ -1022,15 +1032,6 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
*maxuse = val & GENMASK(11, 0);
}
-static const struct ocelot_ops vsc9959_ops = {
- .reset = vsc9959_reset,
- .wm_enc = vsc9959_wm_enc,
- .wm_dec = vsc9959_wm_dec,
- .wm_stat = vsc9959_wm_stat,
- .port_to_netdev = felix_port_to_netdev,
- .netdev_to_port = felix_netdev_to_port,
-};
-
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
@@ -1044,7 +1045,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct lynx_pcs *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1094,8 +1095,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
struct mdio_device *pcs;
- struct lynx_pcs *lynx;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1107,13 +1108,13 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
if (IS_ERR(pcs))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
+ phylink_pcs = lynx_pcs_create(pcs);
+ if (!phylink_pcs) {
mdio_device_free(pcs);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1127,20 +1128,22 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
- u32 speed)
+ int speed)
{
u8 tas_speed;
@@ -1166,6 +1169,10 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+#ifdef CONFIG_MSCC_FELIX_SWITCH_TSN
+ felix_cbs_reset(ocelot, port, speed);
+#endif
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1208,26 +1215,33 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
+ mutex_lock(&ocelot->tas_lock);
+
if (!taprio->enable) {
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
QSYS_TAG_CONFIG_ENABLE |
QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
QSYS_TAG_CONFIG, port);
-
+ mutex_unlock(&ocelot->tas_lock);
return 0;
}
if (taprio->cycle_time > NSEC_PER_SEC ||
- taprio->cycle_time_extension >= NSEC_PER_SEC)
- return -EINVAL;
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
- return -ERANGE;
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err;
+ }
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
@@ -1248,8 +1262,10 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
* config is pending, need reset the TAS module
*/
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
- return -EBUSY;
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err;
+ }
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
@@ -1260,6 +1276,8 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
QSYS_TAG_CONFIG, port);
+ ocelot_port->base_time = taprio->base_time;
+
vsc9959_new_base_time(ocelot, taprio->base_time,
taprio->cycle_time, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
@@ -1283,9 +1301,66 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+err:
+ mutex_unlock(&ocelot->tas_lock);
+
return ret;
}
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ u64 cycletime;
+ int port;
+ u32 val;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
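+	/* The clock has been adjusted: re-derive a base time in the future
+	 * for every port that has TAS enabled, and restart its gate control
+	 * list.
+	 */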
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (!(val & QSYS_TAG_CONFIG_ENABLE))
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ QSYS_TAG_CONFIG, port);
+
+ cycletime = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_4);
+ ocelot_port = ocelot->ports[port];
+
+ vsc9959_new_base_time(ocelot, ocelot_port->base_time,
+ cycletime, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ QSYS_TAG_CONFIG, port);
+ }
+}
+
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
@@ -1348,6 +1423,883 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_filter_counters {
+ u32 match;
+ u32 not_pass_gate;
+ u32 not_pass_sdu;
+ u32 red;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+	struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
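+	/* Streams are identified by destination MAC address and VLAN ID; a
+	 * VLAN priority, if given, narrows the match. Anything else is
+	 * rejected.
+	 */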
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+	/* Stream identification doesn't support adding a stream whose MAC
+	 * address has not already been learned in the MAC table.
+	 */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kzalloc(sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
+
+ memcpy(stream_entry, stream, sizeof(*stream_entry));
+
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
+ }
+
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kzalloc(sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ memcpy(sfi_entry, sfi, sizeof(*sfi_entry));
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+		/* The list is kept sorted; find the lowest unused index. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
+ }
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
+
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
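+	/* Find the first gap of two consecutive free indices in the SFI
+	 * list, so that the two filters can be programmed back to back.
+	 */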
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+		/* The list is kept sorted by increasing index. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
+ }
+ sfi->index = insert;
+
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
+
+ sfi2->index = insert + 1;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->hw_index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
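+	/* The gate control list time offsets are cumulative from the start
+	 * of the cycle, hence the running interval_sum.
+	 */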
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
+ struct felix_stream_filter_counters *counters)
+{
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
+ counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
+ counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
+ counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(index) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+		NL_SET_ERR_MSG_MOD(extack, "Can only match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+			sgi = kzalloc(size, GFP_KERNEL);
+			if (!sgi) {
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+			ret = -EOPNOTSUPP;
+			goto err;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if stream is set. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+					   "The stream is already added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+					   "The stream is already added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ return ret;
+}
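In the FLOW_ACTION_POLICE branch above, tc hands the driver a rate in bytes per second and a burst that this code treats as a duration (it is run through PSCHED_NS2TICKS), and the conversion targets the units the ocelot policer uses: a rate in kbit/s and a burst in bytes. The PSCHED tick shift cancels out, so the burst ends up as roughly rate * burst_time / NSEC_PER_SEC. A userspace sketch of that arithmetic, assuming PSCHED_SHIFT is 6 as in current kernels and using made-up input values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define PSCHED_SHIFT		6
#define PSCHED_NS2TICKS(ns)	((ns) >> PSCHED_SHIFT)
#define PSCHED_TICKS_PER_SEC	PSCHED_NS2TICKS(NSEC_PER_SEC)

int main(void)
{
	/* hypothetical tc police parameters */
	uint64_t rate_bytes_ps = 12500000;	/* 100 Mbit/s */
	uint64_t burst_ns = 10000;		/* 10 us worth of traffic */

	/* same arithmetic as the driver: the 2^6 tick shift cancels out */
	uint64_t burst_ticks = rate_bytes_ps * PSCHED_NS2TICKS(burst_ns);
	unsigned int pol_burst = burst_ticks / PSCHED_TICKS_PER_SEC; /* bytes */
	unsigned int pol_rate = (rate_bytes_ps / 1000) * 8;          /* kbit/s */

	printf("policer rate %u kbit/s, burst %u bytes\n", pol_rate, pol_burst);

	return 0;
}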
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+	struct felix_stream_filter *sfi;
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOMEM;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -ENOMEM;
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ return 0;
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct felix_stream_filter_counters counters;
+ struct ocelot_psfp_list *psfp;
+ struct felix_stream *stream;
+
+ psfp = &ocelot->psfp;
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOMEM;
+
+ vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+
+ stats->pkts = counters.match;
+ stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
+ counters.red;
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of it being in the NPI port's
+ * forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for the traffic classes
+ * selected by tsntool.
+ */
+ if (ocelot_port->speed == min_speed)
+ val = ocelot_port->cut_thru;
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling");
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
+ }
+}
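Put differently, the rule implemented by vsc9959_cut_through_fwd() is that a port may only keep cut-through enabled if no live port in its forwarding domain is slower than it, since a faster egress would otherwise drain a frame quicker than the slower ingress can supply it. A standalone sketch of that decision, with made-up port speeds and forwarding masks:

#include <stdio.h>

#define NUM_PORTS 4

int main(void)
{
	/* hypothetical link speeds in Mbps; -1 means the link is down */
	int speed[NUM_PORTS] = { 1000, 1000, 100, -1 };
	/* hypothetical forwarding domains: which other ports each port
	 * forwards to (the port itself is excluded, as in the driver)
	 */
	unsigned long fwd_mask[NUM_PORTS] = { 0x6, 0x5, 0x3, 0x0 };
	int port, other;

	for (port = 0; port < NUM_PORTS; port++) {
		int min_speed = speed[port];

		if (speed[port] <= 0) {
			printf("port %d: down, cut-through disabled\n", port);
			continue;
		}

		/* minimum speed among the live ports of the domain */
		for (other = 0; other < NUM_PORTS; other++) {
			if (!(fwd_mask[port] & (1UL << other)))
				continue;
			if (speed[other] > 0 && speed[other] < min_speed)
				min_speed = speed[other];
		}

		printf("port %d: speed %d, domain min %d => cut-through %s\n",
		       port, speed[port], min_speed,
		       speed[port] == min_speed ? "allowed" : "blocked");
	}

	return 0;
}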
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
+};
+
static const struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1358,11 +2310,13 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = 6,
.num_tx_queues = OCELOT_NUM_TC,
- .switch_pci_bar = 4,
- .imdio_pci_bar = 0,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -1371,6 +2325,7 @@ static const struct felix_info felix_info_vsc9959 = {
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .init_regmap = ocelot_regmap_init,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -1381,10 +2336,13 @@ static irqreturn_t felix_irq_handler(int irq, void *data)
* and preemption status change interrupt on each port.
*
* - Get txtstamp if have
- * - TODO: handle preemption. Without handling it, driver may get
- * interrupt storm.
+	 * - Handle preemption if this is a preemption IRQ; without handling
+	 *   it, the driver may get an interrupt storm.
*/
+#ifdef CONFIG_MSCC_FELIX_SWITCH_TSN
+ felix_preempt_irq_clean(ocelot);
+#endif
ocelot_get_txtstamp(ocelot);
return IRQ_HANDLED;
@@ -1421,10 +2379,8 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev,
- felix->info->switch_pci_bar);
- felix->imdio_base = pci_resource_start(pdev,
- felix->info->imdio_pci_bar);
+ felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
+ felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
@@ -1459,6 +2415,10 @@ static int felix_pci_probe(struct pci_dev *pdev,
goto err_register_ds;
}
+#ifdef CONFIG_MSCC_FELIX_SWITCH_TSN
+ felix_tsn_enable(ds);
+#endif
+
return 0;
err_register_ds:
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 05e4e75c0107..66e56c26afb1 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -19,6 +19,10 @@
#define MSCC_MIIM_CMD_REGAD_SHIFT 20
#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
#define MSCC_MIIM_CMD_VLD BIT(31)
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -1092,7 +1096,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1121,9 +1125,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int addr = port + 4;
+ struct phylink_pcs *phylink_pcs;
struct mdio_device *pcs;
- struct lynx_pcs *lynx;
+ int addr = port + 4;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1135,13 +1139,13 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
if (IS_ERR(pcs))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
+ phylink_pcs = lynx_pcs_create(pcs);
+ if (!phylink_pcs) {
mdio_device_free(pcs);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -1155,13 +1159,15 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
/* mdiobus_unregister and mdiobus_free handled by devres */
@@ -1176,6 +1182,10 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
.num_ports = 10,
.num_tx_queues = OCELOT_NUM_TC,
@@ -1183,6 +1193,7 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .init_regmap = ocelot_regmap_init,
};
static int seville_probe(struct platform_device *pdev)