mirror of git://git.openwrt.org/openwrt/openwrt.git (synced 2025-10-31 14:04:26 -04:00)
Refreshed all patches.

This bump contains upstream commits which seem to avoid (not properly
fix) the errors as seen in FS#2305 and FS#2297

Altered patches:
- 403-net-mvneta-convert-to-phylink.patch
- 410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch

Compile-tested on: ar71xx, cns3xxx, imx6, mvebu, x86_64
Runtime-tested on: ar71xx, cns3xxx, imx6, x86_64

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
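The file shown below is one of the layerscape target patches: it adds a hardware-offloaded "ceetm" (Customer Edge Egress Traffic Management) qdisc to the DPAA2 Ethernet driver, with root classes mapping to shaped CEETM channels and prio classes mapping to strict-priority/weighted queues. As a rough usage sketch only — the tc syntax here is inferred from the CEETM_ROOT/CEETM_PRIO class types and netlink attributes in the patch, assumes a matching NXP iproute2 ceetm addition is installed, and the exact option names are not verified:

	# illustrative only: shape the channel (root class), then add a prio level under it
	tc qdisc add dev eth0 root handle 1: ceetm type root
	tc class add dev eth0 parent 1: classid 1:1 ceetm type root cir 1000mbit
	tc qdisc add dev eth0 parent 1:1 handle 2: ceetm type prio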
| From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
 | |
| From: Biwen Li <biwen.li@nxp.com>
 | |
| Date: Wed, 17 Apr 2019 18:58:28 +0800
 | |
| Subject: [PATCH] dpaa2-ethernet: support layerscape
 | |
| MIME-Version: 1.0
 | |
| Content-Type: text/plain; charset=UTF-8
 | |
| Content-Transfer-Encoding: 8bit
 | |
| 
 | |
| This is an integrated patch of dpaa2-ethernet for layerscape
 | |
| 
 | |
| Signed-off-by: Biwen Li <biwen.li@nxp.com>
 | |
| Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
 | |
| Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
 | |
| Signed-off-by: David S. Miller <davem@davemloft.net>
 | |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 | |
| Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
 | |
| Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
 | |
| Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
 | |
| Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
 | |
| Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
 | |
| Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
 | |
| Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 | |
| ---
 | |
|  drivers/staging/fsl-dpaa2/Kconfig             |    7 +
 | |
|  drivers/staging/fsl-dpaa2/ethernet/Makefile   |    3 +
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c      | 1187 ++++++++
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h      |  183 ++
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c    |  356 +++
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h    |   60 +
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h      |   29 +-
 | |
|  .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c    | 2509 +++++++++++++----
 | |
|  .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h    |  394 ++-
 | |
|  .../fsl-dpaa2/ethernet/dpaa2-ethtool.c        |  716 ++++-
 | |
|  drivers/staging/fsl-dpaa2/ethernet/dpkg.h     |  380 ++-
 | |
|  drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |  255 +-
 | |
|  drivers/staging/fsl-dpaa2/ethernet/dpni.c     |  704 ++++-
 | |
|  drivers/staging/fsl-dpaa2/ethernet/dpni.h     |  401 ++-
 | |
|  drivers/staging/fsl-dpaa2/ethernet/net.h      |   30 +-
 | |
|  15 files changed, 6315 insertions(+), 899 deletions(-)
 | |
|  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
 | |
|  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
 | |
|  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
 | |
|  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
 | |
| 
 | |
| --- a/drivers/staging/fsl-dpaa2/Kconfig
 | |
| +++ b/drivers/staging/fsl-dpaa2/Kconfig
 | |
| @@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
 | |
|  	  Ethernet driver for Freescale DPAA2 SoCs, using the
 | |
|  	  Freescale MC bus driver
 | |
|  
 | |
| +config FSL_DPAA2_ETH_CEETM
 | |
| +	depends on NET_SCHED
 | |
| +	bool "DPAA2 Ethernet CEETM QoS"
 | |
| +	default n
 | |
| +	---help---
 | |
| +	  Enable QoS offloading support through the CEETM hardware block.
 | |
| +
 | |
|  if FSL_DPAA2_ETH
 | |
|  config FSL_DPAA2_ETH_USE_ERR_QUEUE
 | |
|  	bool "Enable Rx error queue"
 | |
| --- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
 | |
| +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
 | |
| @@ -1,3 +1,4 @@
 | |
| +# SPDX-License-Identifier: GPL-2.0
 | |
|  #
 | |
|  # Makefile for the Freescale DPAA2 Ethernet controller
 | |
|  #
 | |
| @@ -5,6 +6,8 @@
 | |
|  obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
 | |
|  
 | |
|  fsl-dpaa2-eth-objs    := dpaa2-eth.o dpaa2-ethtool.o dpni.o
 | |
| +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
 | |
| +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
 | |
|  
 | |
|  # Needed by the tracing framework
 | |
|  CFLAGS_dpaa2-eth.o := -I$(src)
 | |
| --- /dev/null
 | |
| +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
 | |
| @@ -0,0 +1,1187 @@
 | |
| +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 | |
| +/*
 | |
| + * Copyright 2017-2019 NXP
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/module.h>
 | |
| +
 | |
| +#include "dpaa2-eth-ceetm.h"
 | |
| +#include "dpaa2-eth.h"
 | |
| +
 | |
| +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
 | |
| +/* Conversion formula from userspace passed Bps to expected Mbit */
 | |
| +#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
 | |
| +
 | |
| +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
 | |
| +	[DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
 | |
| +	[DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
 | |
| +};
 | |
| +
 | |
| +struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
 | |
| +
 | |
| +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
 | |
| +					   struct dpni_tx_shaping_cfg *scfg,
 | |
| +					   struct dpni_tx_shaping_cfg *ecfg,
 | |
| +					   int coupled, int ch_id)
 | |
| +{
 | |
| +	int err = 0;
 | |
| +
 | |
| +	netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
 | |
| +		   ch_id, scfg->rate_limit);
 | |
| +	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
 | |
| +				  ecfg, coupled);
 | |
| +	if (err)
 | |
| +		netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
 | |
| +					     int ch_id)
 | |
| +{
 | |
| +	struct dpni_tx_shaping_cfg cfg = { 0 };
 | |
| +
 | |
| +	return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
 | |
| +}
 | |
| +
 | |
| +static inline int
 | |
| +dpaa2_eth_update_shaping_cfg(struct net_device *dev,
 | |
| +			     struct dpaa2_ceetm_shaping_cfg cfg,
 | |
| +			     struct dpni_tx_shaping_cfg *scfg,
 | |
| +			     struct dpni_tx_shaping_cfg *ecfg)
 | |
| +{
 | |
| +	scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
 | |
| +	ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
 | |
| +
 | |
| +	if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
 | |
| +		netdev_err(dev, "Committed burst size must be under %d\n",
 | |
| +			   DPAA2_ETH_MAX_BURST_SIZE);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	scfg->max_burst_size = cfg.cbs;
 | |
| +
 | |
| +	if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
 | |
| +		netdev_err(dev, "Excess burst size must be under %d\n",
 | |
| +			   DPAA2_ETH_MAX_BURST_SIZE);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	ecfg->max_burst_size = cfg.ebs;
 | |
| +
 | |
| +	if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
 | |
| +		netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +enum update_tx_prio {
 | |
| +	DPAA2_ETH_ADD_CQ,
 | |
| +	DPAA2_ETH_DEL_CQ,
 | |
| +};
 | |
| +
 | |
| +/* Normalize weights based on max passed value */
 | |
| +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
 | |
| +{
 | |
| +	struct dpni_tx_schedule_cfg *sched_cfg;
 | |
| +	struct dpaa2_ceetm_class *cl;
 | |
| +	u32 qpri;
 | |
| +	u16 weight_max = 0, increment;
 | |
| +	int i;
 | |
| +
 | |
| +	/* Check the boundaries of the provided values */
 | |
| +	for (i = 0; i < priv->clhash.hashsize; i++)
 | |
| +		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
 | |
| +			weight_max = (weight_max == 0 ? cl->prio.weight :
 | |
| +				     (weight_max < cl->prio.weight ?
 | |
| +				      cl->prio.weight : weight_max));
 | |
| +
 | |
| +	/* If there are no elements, there's nothing to do */
 | |
| +	if (weight_max == 0)
 | |
| +		return 0;
 | |
| +
 | |
| +	increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
 | |
| +		    weight_max;
 | |
| +
 | |
| +	for (i = 0; i < priv->clhash.hashsize; i++) {
 | |
| +		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
 | |
| +			if (cl->prio.mode == STRICT_PRIORITY)
 | |
| +				continue;
 | |
| +
 | |
| +			qpri = cl->prio.qpri;
 | |
| +			sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
 | |
| +
 | |
| +			sched_cfg->delta_bandwidth =
 | |
| +				DPAA2_CEETM_MIN_WEIGHT +
 | |
| +				(cl->prio.weight * increment);
 | |
| +
 | |
| +			pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
 | |
| +				 __func__, qpri, sched_cfg->delta_bandwidth);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
 | |
| +					   struct dpaa2_ceetm_class *cl,
 | |
| +					   enum update_tx_prio type)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
 | |
| +	struct dpni_tx_schedule_cfg *sched_cfg;
 | |
| +	struct dpni_taildrop td = {0};
 | |
| +	u8 ch_id = 0, tc_id = 0;
 | |
| +	u32 qpri = 0;
 | |
| +	int err = 0;
 | |
| +
 | |
| +	qpri = cl->prio.qpri;
 | |
| +	tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
 | |
| +
 | |
| +	switch (type) {
 | |
| +	case DPAA2_ETH_ADD_CQ:
 | |
| +		/* Enable taildrop */
 | |
| +		td.enable = 1;
 | |
| +		td.units = DPNI_CONGESTION_UNIT_FRAMES;
 | |
| +		td.threshold = DPAA2_CEETM_TD_THRESHOLD;
 | |
| +		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
 | |
| +					DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
 | |
| +					0, &td);
 | |
| +		if (err) {
 | |
| +			netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
 | |
| +				   err);
 | |
| +			return err;
 | |
| +		}
 | |
| +		break;
 | |
| +	case DPAA2_ETH_DEL_CQ:
 | |
| +		/* Disable taildrop */
 | |
| +		td.enable = 0;
 | |
| +		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
 | |
| +					DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
 | |
| +					0, &td);
 | |
| +		if (err) {
 | |
| +			netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
 | |
| +				   err);
 | |
| +			return err;
 | |
| +		}
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	/* We can zero out the structure in the tx_prio_conf array */
 | |
| +	if (type == DPAA2_ETH_DEL_CQ) {
 | |
| +		sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
 | |
| +		memset(sched_cfg, 0, sizeof(*sched_cfg));
 | |
| +	}
 | |
| +
 | |
| +	/* Normalize priorities */
 | |
| +	err = dpaa2_eth_normalize_tx_prio(sch);
 | |
| +
 | |
| +	/* Debug print goes here */
 | |
| +	print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
 | |
| +			     &sch->prio.tx_prio_cfg,
 | |
| +			     sizeof(sch->prio.tx_prio_cfg), 0);
 | |
| +
 | |
| +	/* Call dpni_set_tx_priorities for the entire prio qdisc */
 | |
| +	err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
 | |
| +				     &sch->prio.tx_prio_cfg);
 | |
| +	if (err)
 | |
| +		netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
 | |
| +			   err);
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	priv->ceetm_en = true;
 | |
| +}
 | |
| +
 | |
| +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	priv->ceetm_en = false;
 | |
| +}
 | |
| +
 | |
| +/* Find class in qdisc hash table using given handle */
 | |
| +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
 | |
| +							 struct Qdisc *sch)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct Qdisc_class_common *clc;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
 | |
| +		 __func__, handle, sch->handle);
 | |
| +
 | |
| +	clc = qdisc_class_find(&priv->clhash, handle);
 | |
| +	return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
 | |
| +}
 | |
| +
 | |
| +/* Insert a class in the qdisc's class hash */
 | |
| +static void dpaa2_ceetm_link_class(struct Qdisc *sch,
 | |
| +				   struct Qdisc_class_hash *clhash,
 | |
| +				   struct Qdisc_class_common *common)
 | |
| +{
 | |
| +	sch_tree_lock(sch);
 | |
| +	qdisc_class_hash_insert(clhash, common);
 | |
| +	sch_tree_unlock(sch);
 | |
| +	qdisc_class_hash_grow(sch, clhash);
 | |
| +}
 | |
| +
 | |
| +/* Destroy a ceetm class */
 | |
| +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
 | |
| +				    struct dpaa2_ceetm_class *cl)
 | |
| +{
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(dev);
 | |
| +
 | |
| +	if (!cl)
 | |
| +		return;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
 | |
| +		 __func__, cl->common.classid, sch->handle);
 | |
| +
 | |
| +	/* Recurse into child first */
 | |
| +	if (cl->child) {
 | |
| +		qdisc_destroy(cl->child);
 | |
| +		cl->child = NULL;
 | |
| +	}
 | |
| +
 | |
| +	switch (cl->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
 | |
| +			netdev_err(dev, "Error resetting channel shaping\n");
 | |
| +
 | |
| +		break;
 | |
| +
 | |
| +	case CEETM_PRIO:
 | |
| +		if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
 | |
| +			netdev_err(dev, "Error resetting tx_priorities\n");
 | |
| +
 | |
| +		if (cl->prio.cstats)
 | |
| +			free_percpu(cl->prio.cstats);
 | |
| +
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	tcf_block_put(cl->block);
 | |
| +	kfree(cl);
 | |
| +}
 | |
| +
 | |
| +/* Destroy a ceetm qdisc */
 | |
| +static void dpaa2_ceetm_destroy(struct Qdisc *sch)
 | |
| +{
 | |
| +	unsigned int i;
 | |
| +	struct hlist_node *next;
 | |
| +	struct dpaa2_ceetm_class *cl;
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
 | |
| +		 __func__, sch->handle);
 | |
| +
 | |
| +	/* All filters need to be removed before destroying the classes */
 | |
| +	tcf_block_put(priv->block);
 | |
| +
 | |
| +	for (i = 0; i < priv->clhash.hashsize; i++) {
 | |
| +		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
 | |
| +			tcf_block_put(cl->block);
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < priv->clhash.hashsize; i++) {
 | |
| +		hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
 | |
| +					  common.hnode)
 | |
| +			dpaa2_ceetm_cls_destroy(sch, cl);
 | |
| +	}
 | |
| +
 | |
| +	qdisc_class_hash_destroy(&priv->clhash);
 | |
| +
 | |
| +	switch (priv->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		dpaa2_eth_ceetm_disable(priv_eth);
 | |
| +
 | |
| +		if (priv->root.qstats)
 | |
| +			free_percpu(priv->root.qstats);
 | |
| +
 | |
| +		if (!priv->root.qdiscs)
 | |
| +			break;
 | |
| +
 | |
| +		/* Destroy the pfifo qdiscs in case they haven't been attached
 | |
| +		 * to the netdev queues yet.
 | |
| +		 */
 | |
| +		for (i = 0; i < dev->num_tx_queues; i++)
 | |
| +			if (priv->root.qdiscs[i])
 | |
| +				qdisc_destroy(priv->root.qdiscs[i]);
 | |
| +
 | |
| +		kfree(priv->root.qdiscs);
 | |
| +		break;
 | |
| +
 | |
| +	case CEETM_PRIO:
 | |
| +		if (priv->prio.parent)
 | |
| +			priv->prio.parent->child = NULL;
 | |
| +		break;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
 | |
| +{
 | |
| +	struct Qdisc *qdisc;
 | |
| +	unsigned int ntx, i;
 | |
| +	struct nlattr *nest;
 | |
| +	struct dpaa2_ceetm_tc_qopt qopt;
 | |
| +	struct dpaa2_ceetm_qdisc_stats *qstats;
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	sch_tree_lock(sch);
 | |
| +	memset(&qopt, 0, sizeof(qopt));
 | |
| +	qopt.type = priv->type;
 | |
| +	qopt.shaped = priv->shaped;
 | |
| +
 | |
| +	switch (priv->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		/* Gather statistics from the underlying pfifo qdiscs */
 | |
| +		sch->q.qlen = 0;
 | |
| +		memset(&sch->bstats, 0, sizeof(sch->bstats));
 | |
| +		memset(&sch->qstats, 0, sizeof(sch->qstats));
 | |
| +
 | |
| +		for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
 | |
| +			qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 | |
| +			sch->q.qlen		+= qdisc->q.qlen;
 | |
| +			sch->bstats.bytes	+= qdisc->bstats.bytes;
 | |
| +			sch->bstats.packets	+= qdisc->bstats.packets;
 | |
| +			sch->qstats.qlen	+= qdisc->qstats.qlen;
 | |
| +			sch->qstats.backlog	+= qdisc->qstats.backlog;
 | |
| +			sch->qstats.drops	+= qdisc->qstats.drops;
 | |
| +			sch->qstats.requeues	+= qdisc->qstats.requeues;
 | |
| +			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
 | |
| +		}
 | |
| +
 | |
| +		for_each_online_cpu(i) {
 | |
| +			qstats = per_cpu_ptr(priv->root.qstats, i);
 | |
| +			sch->qstats.drops += qstats->drops;
 | |
| +		}
 | |
| +
 | |
| +		break;
 | |
| +
 | |
| +	case CEETM_PRIO:
 | |
| +		qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
 | |
| +		qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
 | |
| +		qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
 | |
| +		break;
 | |
| +
 | |
| +	default:
 | |
| +		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
 | |
| +		sch_tree_unlock(sch);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	nest = nla_nest_start(skb, TCA_OPTIONS);
 | |
| +	if (!nest)
 | |
| +		goto nla_put_failure;
 | |
| +	if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
 | |
| +		goto nla_put_failure;
 | |
| +	nla_nest_end(skb, nest);
 | |
| +
 | |
| +	sch_tree_unlock(sch);
 | |
| +	return skb->len;
 | |
| +
 | |
| +nla_put_failure:
 | |
| +	sch_tree_unlock(sch);
 | |
| +	nla_nest_cancel(skb, nest);
 | |
| +	return -EMSGSIZE;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
 | |
| +				   struct dpaa2_ceetm_qdisc *priv,
 | |
| +				   struct dpaa2_ceetm_tc_qopt *qopt)
 | |
| +{
 | |
| +	/* TODO: Once LX2 support is added */
 | |
| +	/* priv->shaped = parent_cl->shaped; */
 | |
| +	priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
 | |
| +	priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
 | |
| +	priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/* Edit a ceetm qdisc */
 | |
| +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
 | |
| +	struct dpaa2_ceetm_tc_qopt *qopt;
 | |
| +	int err;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
 | |
| +			       dpaa2_ceetm_policy, NULL);
 | |
| +	if (err < 0) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "nla_parse_nested");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	if (!tb[DPAA2_CEETM_TCA_QOPS]) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "tb");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (TC_H_MIN(sch->handle)) {
 | |
| +		pr_err("CEETM: a qdisc should not have a minor\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
 | |
| +
 | |
| +	if (priv->type != qopt->type) {
 | |
| +		pr_err("CEETM: qdisc %X is not of the provided type\n",
 | |
| +		       sch->handle);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	switch (priv->type) {
 | |
| +	case CEETM_PRIO:
 | |
| +		err = dpaa2_ceetm_change_prio(sch, priv, qopt);
 | |
| +		break;
 | |
| +	default:
 | |
| +		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
 | |
| +		err = -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Configure a root ceetm qdisc */
 | |
| +static int dpaa2_ceetm_init_root(struct Qdisc *sch,
 | |
| +				 struct dpaa2_ceetm_qdisc *priv,
 | |
| +				 struct dpaa2_ceetm_tc_qopt *qopt)
 | |
| +{
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
 | |
| +	struct netdev_queue *dev_queue;
 | |
| +	unsigned int i, parent_id;
 | |
| +	struct Qdisc *qdisc;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	/* Validate inputs */
 | |
| +	if (sch->parent != TC_H_ROOT) {
 | |
| +		pr_err("CEETM: a root ceetm qdisc must be root\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* Pre-allocate underlying pfifo qdiscs.
 | |
| +	 *
 | |
| +	 * We want to offload shaping and scheduling decisions to the hardware.
 | |
| +	 * The pfifo qdiscs will be attached to the netdev queues and will
 | |
| +	 * guide the traffic from the IP stack down to the driver with minimum
 | |
| +	 * interference.
 | |
| +	 *
 | |
| +	 * The CEETM qdiscs and classes will be crossed when the traffic
 | |
| +	 * reaches the driver.
 | |
| +	 */
 | |
| +	priv->root.qdiscs = kcalloc(dev->num_tx_queues,
 | |
| +				    sizeof(priv->root.qdiscs[0]),
 | |
| +				    GFP_KERNEL);
 | |
| +	if (!priv->root.qdiscs)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	for (i = 0; i < dev->num_tx_queues; i++) {
 | |
| +		dev_queue = netdev_get_tx_queue(dev, i);
 | |
| +		parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
 | |
| +				      TC_H_MIN(i + PFIFO_MIN_OFFSET));
 | |
| +
 | |
| +		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
 | |
| +					  parent_id);
 | |
| +		if (!qdisc)
 | |
| +			return -ENOMEM;
 | |
| +
 | |
| +		priv->root.qdiscs[i] = qdisc;
 | |
| +		qdisc->flags |= TCQ_F_ONETXQUEUE;
 | |
| +	}
 | |
| +
 | |
| +	sch->flags |= TCQ_F_MQROOT;
 | |
| +
 | |
| +	priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
 | |
| +	if (!priv->root.qstats) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
 | |
| +		       __func__);
 | |
| +		return -ENOMEM;
 | |
| +	}
 | |
| +
 | |
| +	dpaa2_eth_ceetm_enable(priv_eth);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/* Configure a prio ceetm qdisc */
 | |
| +static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
 | |
| +				 struct dpaa2_ceetm_qdisc *priv,
 | |
| +				 struct dpaa2_ceetm_tc_qopt *qopt)
 | |
| +{
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_ceetm_class *parent_cl;
 | |
| +	struct Qdisc *parent_qdisc;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	if (sch->parent == TC_H_ROOT) {
 | |
| +		pr_err("CEETM: a prio ceetm qdisc can not be root\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
 | |
| +	if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
 | |
| +		pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* Obtain the parent root ceetm_class */
 | |
| +	parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
 | |
| +
 | |
| +	if (!parent_cl || parent_cl->type != CEETM_ROOT) {
 | |
| +		pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	priv->prio.parent = parent_cl;
 | |
| +	parent_cl->child = sch;
 | |
| +
 | |
| +	return dpaa2_ceetm_change_prio(sch, priv, qopt);
 | |
| +}
 | |
| +
 | |
| +/* Configure a generic ceetm qdisc */
 | |
| +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
 | |
| +	struct dpaa2_ceetm_tc_qopt *qopt;
 | |
| +	int err;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	if (!netif_is_multiqueue(dev))
 | |
| +		return -EOPNOTSUPP;
 | |
| +
 | |
| +	err = tcf_block_get(&priv->block, &priv->filter_list);
 | |
| +	if (err) {
 | |
| +		pr_err("CEETM: unable to get tcf_block\n");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	if (!opt) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
 | |
| +		       __func__);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
 | |
| +			       dpaa2_ceetm_policy, NULL);
 | |
| +	if (err < 0) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "nla_parse_nested");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	if (!tb[DPAA2_CEETM_TCA_QOPS]) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "tb");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (TC_H_MIN(sch->handle)) {
 | |
| +		pr_err("CEETM: a qdisc should not have a minor\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
 | |
| +
 | |
| +	/* Initialize the class hash list. Each qdisc has its own class hash */
 | |
| +	err = qdisc_class_hash_init(&priv->clhash);
 | |
| +	if (err < 0) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
 | |
| +		       __func__);
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	priv->type = qopt->type;
 | |
| +	priv->shaped = qopt->shaped;
 | |
| +
 | |
| +	switch (priv->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		err = dpaa2_ceetm_init_root(sch, priv, qopt);
 | |
| +		break;
 | |
| +	case CEETM_PRIO:
 | |
| +		err = dpaa2_ceetm_init_prio(sch, priv, qopt);
 | |
| +		break;
 | |
| +	default:
 | |
| +		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
 | |
| +		/* Note: dpaa2_ceetm_destroy() will be called by our caller */
 | |
| +		err = -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Attach the underlying pfifo qdiscs */
 | |
| +static void dpaa2_ceetm_attach(struct Qdisc *sch)
 | |
| +{
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct Qdisc *qdisc, *old_qdisc;
 | |
| +	unsigned int i;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	for (i = 0; i < dev->num_tx_queues; i++) {
 | |
| +		qdisc = priv->root.qdiscs[i];
 | |
| +		old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
 | |
| +		if (old_qdisc)
 | |
| +			qdisc_destroy(old_qdisc);
 | |
| +	}
 | |
| +
 | |
| +	/* Remove the references to the pfifo qdiscs since the kernel will
 | |
| +	 * destroy them when needed. No cleanup from our part is required from
 | |
| +	 * this point on.
 | |
| +	 */
 | |
| +	kfree(priv->root.qdiscs);
 | |
| +	priv->root.qdiscs = NULL;
 | |
| +}
 | |
| +
 | |
| +static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
 | |
| +		 __func__, classid, sch->handle);
 | |
| +	cl = dpaa2_ceetm_find(classid, sch);
 | |
| +
 | |
| +	return (unsigned long)cl;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
 | |
| +				       struct dpaa2_ceetm_tc_copt *copt,
 | |
| +				       struct net_device *dev)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(dev);
 | |
| +	struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
 | |
| +	int err = 0;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
 | |
| +		 cl->common.classid);
 | |
| +
 | |
| +	if (!cl->shaped)
 | |
| +		return 0;
 | |
| +
 | |
| +	if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
 | |
| +					 &scfg, &ecfg))
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
 | |
| +				       copt->shaping_cfg.coupled,
 | |
| +				       cl->root.ch_id);
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
 | |
| +	       sizeof(struct dpaa2_ceetm_shaping_cfg));
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
 | |
| +				       struct dpaa2_ceetm_tc_copt *copt,
 | |
| +				       struct net_device *dev)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
 | |
| +	struct dpni_tx_schedule_cfg *sched_cfg;
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(dev);
 | |
| +	int err;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
 | |
| +		 __func__, cl->common.classid, copt->mode, copt->weight);
 | |
| +
 | |
| +	if (!cl->prio.cstats) {
 | |
| +		cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
 | |
| +		if (!cl->prio.cstats) {
 | |
| +			pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
 | |
| +			       __func__);
 | |
| +			return -ENOMEM;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	cl->prio.mode = copt->mode;
 | |
| +	cl->prio.weight = copt->weight;
 | |
| +
 | |
| +	sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
 | |
| +
 | |
| +	switch (copt->mode) {
 | |
| +	case STRICT_PRIORITY:
 | |
| +		sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
 | |
| +		break;
 | |
| +	case WEIGHTED_A:
 | |
| +		sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
 | |
| +		break;
 | |
| +	case WEIGHTED_B:
 | |
| +		sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Add a new ceetm class */
 | |
| +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
 | |
| +			       struct dpaa2_ceetm_tc_copt *copt,
 | |
| +			       unsigned long *arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
 | |
| +	struct dpaa2_ceetm_class *cl;
 | |
| +	int err;
 | |
| +
 | |
| +	if (copt->type == CEETM_ROOT &&
 | |
| +	    priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
 | |
| +		pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
 | |
| +		       dpaa2_eth_ch_count(priv_eth),
 | |
| +		       dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (copt->type == CEETM_PRIO &&
 | |
| +	    priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
 | |
| +		pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
 | |
| +		       dpaa2_eth_tc_count(priv_eth),
 | |
| +		       dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
 | |
| +	if (!cl)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	err = tcf_block_get(&cl->block, &cl->filter_list);
 | |
| +	if (err) {
 | |
| +		pr_err("%s: Unable to set new root class\n", __func__);
 | |
| +		goto out_free;
 | |
| +	}
 | |
| +
 | |
| +	cl->common.classid = classid;
 | |
| +	cl->parent = sch;
 | |
| +	cl->child = NULL;
 | |
| +
 | |
| +	/* Add class handle in Qdisc */
 | |
| +	dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
 | |
| +
 | |
| +	cl->shaped = copt->shaped;
 | |
| +	cl->type = copt->type;
 | |
| +
 | |
| +	/* Claim a CEETM channel / tc - DPAA2. will assume transition from
 | |
| +	 * classid to qdid/qpri, starting from qdid / qpri 0
 | |
| +	 */
 | |
| +	switch (copt->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		cl->root.ch_id = classid - sch->handle - 1;
 | |
| +		err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
 | |
| +		break;
 | |
| +	case CEETM_PRIO:
 | |
| +		cl->prio.qpri = classid - sch->handle - 1;
 | |
| +		err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	if (err) {
 | |
| +		pr_err("%s: Unable to set new %s class\n", __func__,
 | |
| +		       (copt->type == CEETM_ROOT ? "root" : "prio"));
 | |
| +		goto out_free;
 | |
| +	}
 | |
| +
 | |
| +	switch (copt->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
 | |
| +			 __func__, classid, cl->root.ch_id);
 | |
| +		break;
 | |
| +	case CEETM_PRIO:
 | |
| +		pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
 | |
| +			 __func__, classid, cl->prio.qpri);
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	*arg = (unsigned long)cl;
 | |
| +	return 0;
 | |
| +
 | |
| +out_free:
 | |
| +	kfree(cl);
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Add or configure a ceetm class */
 | |
| +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
 | |
| +				  struct nlattr **tca, unsigned long *arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv;
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
 | |
| +	struct nlattr *opt = tca[TCA_OPTIONS];
 | |
| +	struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
 | |
| +	struct dpaa2_ceetm_tc_copt *copt;
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	int err;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
 | |
| +		 __func__, classid, sch->handle);
 | |
| +
 | |
| +	if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
 | |
| +		pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	priv = qdisc_priv(sch);
 | |
| +
 | |
| +	if (!opt) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
 | |
| +			       dpaa2_ceetm_policy, NULL);
 | |
| +	if (err < 0) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "nla_parse_nested");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (!tb[DPAA2_CEETM_TCA_COPT]) {
 | |
| +		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
 | |
| +		       "tb");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
 | |
| +
 | |
| +	/* Configure an existing ceetm class */
 | |
| +	if (cl) {
 | |
| +		if (copt->type != cl->type) {
 | |
| +			pr_err("CEETM: class %X is not of the provided type\n",
 | |
| +			       cl->common.classid);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +
 | |
| +		switch (copt->type) {
 | |
| +		case CEETM_ROOT:
 | |
| +			return dpaa2_ceetm_cls_change_root(cl, copt, dev);
 | |
| +		case CEETM_PRIO:
 | |
| +			return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
 | |
| +
 | |
| +		default:
 | |
| +			pr_err(KBUILD_BASENAME " : %s : invalid class\n",
 | |
| +			       __func__);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
 | |
| +}
 | |
| +
 | |
| +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct dpaa2_ceetm_class *cl;
 | |
| +	unsigned int i;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
 | |
| +
 | |
| +	if (arg->stop)
 | |
| +		return;
 | |
| +
 | |
| +	for (i = 0; i < priv->clhash.hashsize; i++) {
 | |
| +		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
 | |
| +			if (arg->count < arg->skip) {
 | |
| +				arg->count++;
 | |
| +				continue;
 | |
| +			}
 | |
| +			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
 | |
| +				arg->stop = 1;
 | |
| +				return;
 | |
| +			}
 | |
| +			arg->count++;
 | |
| +		}
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
 | |
| +				struct sk_buff *skb, struct tcmsg *tcm)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +	struct nlattr *nest;
 | |
| +	struct dpaa2_ceetm_tc_copt copt;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
 | |
| +		 __func__, cl->common.classid, sch->handle);
 | |
| +
 | |
| +	sch_tree_lock(sch);
 | |
| +
 | |
| +	tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
 | |
| +	tcm->tcm_handle = cl->common.classid;
 | |
| +
 | |
| +	memset(&copt, 0, sizeof(copt));
 | |
| +
 | |
| +	copt.shaped = cl->shaped;
 | |
| +	copt.type = cl->type;
 | |
| +
 | |
| +	switch (cl->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		if (cl->child)
 | |
| +			tcm->tcm_info = cl->child->handle;
 | |
| +
 | |
| +		memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
 | |
| +		       sizeof(struct dpaa2_ceetm_shaping_cfg));
 | |
| +
 | |
| +		break;
 | |
| +
 | |
| +	case CEETM_PRIO:
 | |
| +		if (cl->child)
 | |
| +			tcm->tcm_info = cl->child->handle;
 | |
| +
 | |
| +		copt.mode = cl->prio.mode;
 | |
| +		copt.weight = cl->prio.weight;
 | |
| +
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	nest = nla_nest_start(skb, TCA_OPTIONS);
 | |
| +	if (!nest)
 | |
| +		goto nla_put_failure;
 | |
| +	if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
 | |
| +		goto nla_put_failure;
 | |
| +	nla_nest_end(skb, nest);
 | |
| +	sch_tree_unlock(sch);
 | |
| +	return skb->len;
 | |
| +
 | |
| +nla_put_failure:
 | |
| +	sch_tree_unlock(sch);
 | |
| +	nla_nest_cancel(skb, nest);
 | |
| +	return -EMSGSIZE;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
 | |
| +		 __func__, cl->common.classid, sch->handle);
 | |
| +
 | |
| +	sch_tree_lock(sch);
 | |
| +	qdisc_class_hash_remove(&priv->clhash, &cl->common);
 | |
| +	sch_tree_unlock(sch);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/* Get the class' child qdisc, if any */
 | |
| +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
 | |
| +		 __func__, cl->common.classid, sch->handle);
 | |
| +
 | |
| +	switch (cl->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +	case CEETM_PRIO:
 | |
| +		return cl->child;
 | |
| +	}
 | |
| +
 | |
| +	return NULL;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
 | |
| +				 struct Qdisc *new, struct Qdisc **old)
 | |
| +{
 | |
| +	if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
 | |
| +		pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
 | |
| +		return -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
 | |
| +				      struct gnet_dump *d)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +	struct gnet_stats_basic_packed tmp_bstats;
 | |
| +	struct dpaa2_ceetm_tc_xstats xstats;
 | |
| +	union dpni_statistics dpni_stats;
 | |
| +	struct net_device *dev = qdisc_dev(sch);
 | |
| +	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
 | |
| +	u8 ch_id = 0;
 | |
| +	int err;
 | |
| +
 | |
| +	memset(&xstats, 0, sizeof(xstats));
 | |
| +	memset(&tmp_bstats, 0, sizeof(tmp_bstats));
 | |
| +
 | |
| +	if (cl->type == CEETM_ROOT)
 | |
| +		return 0;
 | |
| +
 | |
| +	err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
 | |
| +				  DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
 | |
| +				  &dpni_stats);
 | |
| +	if (err)
 | |
| +		netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
 | |
| +
 | |
| +	xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
 | |
| +	xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
 | |
| +	xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
 | |
| +	xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
 | |
| +
 | |
| +	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
 | |
| +}
 | |
| +
 | |
| +static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
 | |
| +					       unsigned long arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
 | |
| +		 cl ? cl->common.classid : 0, sch->handle);
 | |
| +	return cl ? cl->block : priv->block;
 | |
| +}
 | |
| +
 | |
| +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
 | |
| +					  unsigned long parent,
 | |
| +					  u32 classid)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
 | |
| +		 cl ? cl->common.classid : 0, sch->handle);
 | |
| +	return (unsigned long)cl;
 | |
| +}
 | |
| +
 | |
| +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
 | |
| +
 | |
| +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
 | |
| +		 cl ? cl->common.classid : 0, sch->handle);
 | |
| +}
 | |
| +
 | |
| +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
 | |
| +	.graft		=	dpaa2_ceetm_cls_graft,
 | |
| +	.leaf		=	dpaa2_ceetm_cls_leaf,
 | |
| +	.find		=	dpaa2_ceetm_cls_find,
 | |
| +	.change		=	dpaa2_ceetm_cls_change,
 | |
| +	.delete		=	dpaa2_ceetm_cls_delete,
 | |
| +	.walk		=	dpaa2_ceetm_cls_walk,
 | |
| +	.tcf_block	=	dpaa2_ceetm_tcf_block,
 | |
| +	.bind_tcf	=	dpaa2_ceetm_tcf_bind,
 | |
| +	.unbind_tcf	=	dpaa2_ceetm_tcf_unbind,
 | |
| +	.dump		=	dpaa2_ceetm_cls_dump,
 | |
| +	.dump_stats	=	dpaa2_ceetm_cls_dump_stats,
 | |
| +};
 | |
| +
 | |
| +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
 | |
| +	.id		=	"ceetm",
 | |
| +	.priv_size	=	sizeof(struct dpaa2_ceetm_qdisc),
 | |
| +	.cl_ops		=	&dpaa2_ceetm_cls_ops,
 | |
| +	.init		=	dpaa2_ceetm_init,
 | |
| +	.destroy	=	dpaa2_ceetm_destroy,
 | |
| +	.change		=	dpaa2_ceetm_change,
 | |
| +	.dump		=	dpaa2_ceetm_dump,
 | |
| +	.attach		=	dpaa2_ceetm_attach,
 | |
| +	.owner		=	THIS_MODULE,
 | |
| +};
 | |
| +
 | |
| +/* Run the filters and classifiers attached to the qdisc on the provided skb */
 | |
| +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
 | |
| +			 int *qdid, u8 *qpri)
 | |
| +{
 | |
| +	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
 | |
| +	struct dpaa2_ceetm_class *cl = NULL;
 | |
| +	struct tcf_result res;
 | |
| +	struct tcf_proto *tcf;
 | |
| +	int result;
 | |
| +
 | |
| +	tcf = rcu_dereference_bh(priv->filter_list);
 | |
| +	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
 | |
| +#ifdef CONFIG_NET_CLS_ACT
 | |
| +		switch (result) {
 | |
| +		case TC_ACT_QUEUED:
 | |
| +		case TC_ACT_STOLEN:
 | |
| +		case TC_ACT_SHOT:
 | |
| +			/* No valid class found due to action */
 | |
| +			return -1;
 | |
| +		}
 | |
| +#endif
 | |
| +		cl = (void *)res.class;
 | |
| +		if (!cl) {
 | |
| +			/* The filter leads to the qdisc */
 | |
| +			if (res.classid == sch->handle)
 | |
| +				return 0;
 | |
| +
 | |
| +			cl = dpaa2_ceetm_find(res.classid, sch);
 | |
| +			/* The filter leads to an invalid class */
 | |
| +			if (!cl)
 | |
| +				break;
 | |
| +		}
 | |
| +
 | |
| +		/* The class might have its own filters attached */
 | |
| +		tcf = rcu_dereference_bh(cl->filter_list);
 | |
| +	}
 | |
| +
 | |
| +	/* No valid class found */
 | |
| +	if (!cl)
 | |
| +		return 0;
 | |
| +
 | |
| +	switch (cl->type) {
 | |
| +	case CEETM_ROOT:
 | |
| +		*qdid = cl->root.ch_id;
 | |
| +
 | |
| +		/* The root class does not have a child prio qdisc */
 | |
| +		if (!cl->child)
 | |
| +			return 0;
 | |
| +
 | |
| +		/* Run the prio qdisc classifiers */
 | |
| +		return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
 | |
| +
 | |
| +	case CEETM_PRIO:
 | |
| +		*qpri = cl->prio.qpri;
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +int __init dpaa2_ceetm_register(void)
 | |
| +{
 | |
| +	int err = 0;
 | |
| +
 | |
| +	pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
 | |
| +
 | |
| +	err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
 | |
| +	if (unlikely(err))
 | |
| +		pr_err(KBUILD_MODNAME
 | |
| +		       ": %s:%hu:%s(): register_qdisc() = %d\n",
 | |
| +		       KBUILD_BASENAME ".c", __LINE__, __func__, err);
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +void __exit dpaa2_ceetm_unregister(void)
 | |
| +{
 | |
| +	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
 | |
| +		 KBUILD_BASENAME ".c", __func__);
 | |
| +
 | |
| +	unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
 | |
| +}
 | |
| --- /dev/null
 | |
| +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
 | |
| @@ -0,0 +1,183 @@
 | |
| +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 | |
| +/*
 | |
| + * Copyright 2017 NXP
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#ifndef __DPAA2_ETH_CEETM_H
 | |
| +#define __DPAA2_ETH_CEETM_H
 | |
| +
 | |
| +#include <net/pkt_sched.h>
 | |
| +#include <net/pkt_cls.h>
 | |
| +#include <net/netlink.h>
 | |
| +
 | |
| +#include "dpaa2-eth.h"
 | |
| +
 | |
| +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
 | |
| + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
 | |
| + * are reserved for the maximum 32 CEETM channels (majors and minors are in
 | |
| + * hex).
 | |
| + */
 | |
| +#define PFIFO_MIN_OFFSET		0x21
 | |
| +
 | |
| +#define DPAA2_CEETM_MIN_WEIGHT		100
 | |
| +#define DPAA2_CEETM_MAX_WEIGHT		24800
 | |
| +
 | |
| +#define DPAA2_CEETM_TD_THRESHOLD	1000
 | |
| +
 | |
| +enum wbfs_group_type {
 | |
| +	WBFS_GRP_A,
 | |
| +	WBFS_GRP_B,
 | |
| +	WBFS_GRP_LARGE
 | |
| +};
 | |
| +
 | |
| +enum {
 | |
| +	DPAA2_CEETM_TCA_UNSPEC,
 | |
| +	DPAA2_CEETM_TCA_COPT,
 | |
| +	DPAA2_CEETM_TCA_QOPS,
 | |
| +	DPAA2_CEETM_TCA_MAX,
 | |
| +};
 | |
| +
 | |
| +/* CEETM configuration types */
 | |
| +enum dpaa2_ceetm_type {
 | |
| +	CEETM_ROOT = 1,
 | |
| +	CEETM_PRIO,
 | |
| +};
 | |
| +
 | |
| +enum {
 | |
| +	STRICT_PRIORITY = 0,
 | |
| +	WEIGHTED_A,
 | |
| +	WEIGHTED_B,
 | |
| +};
 | |
| +
 | |
| +struct dpaa2_ceetm_shaping_cfg {
 | |
| +	__u64 cir; /* committed information rate */
 | |
| +	__u64 eir; /* excess information rate */
 | |
| +	__u16 cbs; /* committed burst size */
 | |
| +	__u16 ebs; /* excess burst size */
 | |
| +	__u8 coupled; /* shaper coupling */
 | |
| +};
 | |
| +
 | |
| +extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
 | |
| +
 | |
| +struct dpaa2_ceetm_class;
 | |
| +struct dpaa2_ceetm_qdisc_stats;
 | |
| +struct dpaa2_ceetm_class_stats;
 | |
| +
 | |
| +/* corresponds to CEETM shaping at LNI level */
 | |
| +struct dpaa2_root_q {
 | |
| +	struct Qdisc **qdiscs;
 | |
| +	struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
 | |
| +};
 | |
| +
 | |
| +/* corresponds to the number of priorities a channel serves */
 | |
| +struct dpaa2_prio_q {
 | |
| +	struct dpaa2_ceetm_class *parent;
 | |
| +	struct dpni_tx_priorities_cfg tx_prio_cfg;
 | |
| +};
 | |
| +
 | |
| +struct dpaa2_ceetm_qdisc {
 | |
| +	struct Qdisc_class_hash clhash;
 | |
| +	struct tcf_proto *filter_list; /* qdisc attached filters */
 | |
| +	struct tcf_block *block;
 | |
| +
 | |
| +	enum dpaa2_ceetm_type type; /* ROOT/PRIO */
 | |
| +	bool shaped;
 | |
| +	union {
 | |
| +		struct dpaa2_root_q root;
 | |
| +		struct dpaa2_prio_q prio;
 | |
| +	};
 | |
| +};
 | |
| +
 | |
| +/* CEETM Qdisc configuration parameters */
 | |
| +struct dpaa2_ceetm_tc_qopt {
 | |
| +	enum dpaa2_ceetm_type type;
 | |
| +	__u16 shaped;
 | |
| +	__u8 prio_group_A;
 | |
| +	__u8 prio_group_B;
 | |
| +	__u8 separate_groups;
 | |
| +};
 | |
| +
 | |
| +/* root class - corresponds to a channel */
 | |
| +struct dpaa2_root_c {
 | |
| +	struct dpaa2_ceetm_shaping_cfg shaping_cfg;
 | |
| +	u32 ch_id;
 | |
| +};
 | |
| +
 | |
| +/* prio class - corresponds to a strict priority queue (group) */
 | |
| +struct dpaa2_prio_c {
 | |
| +	struct dpaa2_ceetm_class_stats __percpu *cstats;
 | |
| +	u32 qpri;
 | |
| +	u8 mode;
 | |
| +	u16 weight;
 | |
| +};
 | |
| +
 | |
| +struct dpaa2_ceetm_class {
 | |
| +	struct Qdisc_class_common common;
 | |
| +	struct tcf_proto *filter_list; /* class attached filters */
 | |
| +	struct tcf_block *block;
 | |
| +	struct Qdisc *parent;
 | |
| +	struct Qdisc *child;
 | |
| +
 | |
| +	enum dpaa2_ceetm_type type; /* ROOT/PRIO */
 | |
| +	bool shaped;
 | |
| +	union {
 | |
| +		struct dpaa2_root_c root;
 | |
| +		struct dpaa2_prio_c prio;
 | |
| +	};
 | |
| +};
 | |
| +
 | |
| +/* CEETM Class configuration parameters */
 | |
| +struct dpaa2_ceetm_tc_copt {
 | |
| +	enum dpaa2_ceetm_type type;
 | |
| +	struct dpaa2_ceetm_shaping_cfg shaping_cfg;
 | |
| +	__u16 shaped;
 | |
| +	__u8 mode;
 | |
| +	__u16 weight;
 | |
| +};
 | |
| +
 | |
| +/* CEETM stats */
 | |
| +struct dpaa2_ceetm_qdisc_stats {
 | |
| +	__u32 drops;
 | |
| +};
 | |
| +
 | |
| +struct dpaa2_ceetm_class_stats {
 | |
| +	/* Software counters */
 | |
| +	struct gnet_stats_basic_packed bstats;
 | |
| +	__u32 ern_drop_count;
 | |
| +	__u32 congested_count;
 | |
| +};
 | |
| +
 | |
| +struct dpaa2_ceetm_tc_xstats {
 | |
| +	__u64 ceetm_dequeue_bytes;
 | |
| +	__u64 ceetm_dequeue_frames;
 | |
| +	__u64 ceetm_reject_bytes;
 | |
| +	__u64 ceetm_reject_frames;
 | |
| +};
 | |
| +
 | |
| +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
 | |
| +int __init dpaa2_ceetm_register(void);
 | |
| +void __exit dpaa2_ceetm_unregister(void);
 | |
| +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
 | |
| +			 int *qdid, u8 *qpri);
 | |
| +#else
 | |
| +static inline int dpaa2_ceetm_register(void)
 | |
| +{
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static inline void dpaa2_ceetm_unregister(void) {}
 | |
| +
 | |
| +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
 | |
| +				       int *qdid, u8 *qpri)
 | |
| +{
 | |
| +	return 0;
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	return priv->ceetm_en;
 | |
| +}
 | |
| +
 | |
| +#endif
 | |
| --- /dev/null
 | |
| +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
 | |
| @@ -0,0 +1,356 @@
 | |
| +
 | |
| +/* Copyright 2015 Freescale Semiconductor Inc.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions are met:
 | |
| + *     * Redistributions of source code must retain the above copyright
 | |
| + *	 notice, this list of conditions and the following disclaimer.
 | |
| + *     * Redistributions in binary form must reproduce the above copyright
 | |
| + *	 notice, this list of conditions and the following disclaimer in the
 | |
| + *	 documentation and/or other materials provided with the distribution.
 | |
| + *     * Neither the name of Freescale Semiconductor nor the
 | |
| + *	 names of its contributors may be used to endorse or promote products
 | |
| + *	 derived from this software without specific prior written permission.
 | |
| + *
 | |
| + *
 | |
| + * ALTERNATIVELY, this software may be distributed under the terms of the
 | |
| + * GNU General Public License ("GPL") as published by the Free Software
 | |
| + * Foundation, either version 2 of that License or (at your option) any
 | |
| + * later version.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 | |
| + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 | |
| + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 | |
| + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 | |
| + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 | |
| + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 | |
| + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 | |
| + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 | |
| + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/debugfs.h>
 | |
| +#include "dpaa2-eth.h"
 | |
| +#include "dpaa2-eth-debugfs.h"
 | |
| +
 | |
| +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
 | |
| +
 | |
| +static struct dentry *dpaa2_dbg_root;
 | |
| +
 | |
| +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
 | |
| +	struct rtnl_link_stats64 *stats;
 | |
| +	struct dpaa2_eth_drv_stats *extras;
 | |
| +	int i;
 | |
| +
 | |
| +	seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+		   "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+		   "Tx SG", "Tx realloc", "Enq busy");
+
+	for_each_online_cpu(i) {
+		stats = per_cpu_ptr(priv->percpu_stats, i);
+		extras = per_cpu_ptr(priv->percpu_extras, i);
+		seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+			   i,
+			   stats->rx_packets,
+			   stats->rx_errors,
+			   extras->rx_sg_frames,
+			   stats->tx_packets,
+			   stats->tx_errors,
+			   extras->tx_conf_frames,
+			   extras->tx_sg_frames,
+			   extras->tx_reallocs,
+			   extras->tx_portal_busy);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_cpu_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_cpu_ops = {
+	.open = dpaa2_dbg_cpu_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+	switch (fq->type) {
+	case DPAA2_RX_FQ:
+		return "Rx";
+	case DPAA2_TX_CONF_FQ:
+		return "Tx conf";
+	case DPAA2_RX_ERR_FQ:
+		return "Rx err";
+	default:
+		return "N/A";
+	}
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct dpaa2_eth_fq *fq;
+	u32 fcnt, bcnt;
+	int i, err;
+
+	seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+		   "VFQID", "CPU", "Traffic Class", "Type", "Frames",
+		   "Pending frames");
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+		if (err)
+			fcnt = 0;
+
+		/* A lot of queues, no use displaying zero traffic ones */
+		if (!fq->stats.frames && !fcnt)
+			continue;
+
+		seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
+			   fq->fqid,
+			   fq->target_cpu,
+			   fq->tc,
+			   fq_type_to_str(fq),
+			   fq->stats.frames,
+			   fcnt);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_fqs_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_fq_ops = {
+	.open = dpaa2_dbg_fqs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+		   "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+		   "Avg frm/CDAN", "Buf count");
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
+			   ch->ch_id,
+			   ch->nctx.desired_cpu,
+			   ch->stats.dequeue_portal_busy,
+			   ch->stats.frames,
+			   ch->stats.cdan,
+			   ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
+			   ch->buf_count);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_ch_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_ch_ops = {
+	.open = dpaa2_dbg_ch_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
+				     size_t count, loff_t *offset)
+{
+	struct dpaa2_eth_priv *priv = file->private_data;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	for_each_online_cpu(i) {
+		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+		memset(percpu_stats, 0, sizeof(*percpu_stats));
+
+		percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
+		memset(percpu_extras, 0, sizeof(*percpu_extras));
+	}
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		memset(&fq->stats, 0, sizeof(fq->stats));
+	}
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		memset(&ch->stats, 0, sizeof(ch->stats));
+	}
+
+	return count;
+}
+
+static const struct file_operations dpaa2_dbg_reset_ops = {
+	.open = simple_open,
+	.write = dpaa2_dbg_reset_write,
+};
+
+static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
+					const char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct dpaa2_eth_priv *priv = file->private_data;
+	int err;
+
+	err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
+	if (err)
+		netdev_err(priv->net_dev,
+			   "dpni_reset_statistics() failed %d\n", err);
+
+	return count;
+}
+
+static const struct file_operations dpaa2_dbg_reset_mc_ops = {
+	.open = simple_open,
+	.write = dpaa2_dbg_reset_mc_write,
+};
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+	if (!dpaa2_dbg_root)
+		return;
+
+	/* Create a directory for the interface */
+	priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
+					   dpaa2_dbg_root);
+	if (!priv->dbg.dir) {
+		netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
+		return;
+	}
+
+	/* per-cpu stats file */
+	priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
+						  priv->dbg.dir, priv,
+						  &dpaa2_dbg_cpu_ops);
+	if (!priv->dbg.cpu_stats) {
+		netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+		goto err_cpu_stats;
+	}
+
+	/* per-fq stats file */
+	priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
+						 priv->dbg.dir, priv,
+						 &dpaa2_dbg_fq_ops);
+	if (!priv->dbg.fq_stats) {
+		netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+		goto err_fq_stats;
+	}
+
+	/* per-channel stats file */
+	priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
+						 priv->dbg.dir, priv,
+						 &dpaa2_dbg_ch_ops);
+	if (!priv->dbg.ch_stats) {
+		netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+		goto err_ch_stats;
+	}
+
+	/* reset stats */
+	priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
+						    priv->dbg.dir, priv,
+						    &dpaa2_dbg_reset_ops);
+	if (!priv->dbg.reset_stats) {
+		netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+		goto err_reset_stats;
+	}
+
+	/* reset MC stats */
+	priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
+						0222, priv->dbg.dir, priv,
+						&dpaa2_dbg_reset_mc_ops);
+	if (!priv->dbg.reset_mc_stats) {
+		netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+		goto err_reset_mc_stats;
+	}
+
+	return;
+
+err_reset_mc_stats:
+	debugfs_remove(priv->dbg.reset_stats);
+err_reset_stats:
+	debugfs_remove(priv->dbg.ch_stats);
+err_ch_stats:
+	debugfs_remove(priv->dbg.fq_stats);
+err_fq_stats:
+	debugfs_remove(priv->dbg.cpu_stats);
+err_cpu_stats:
+	debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+	debugfs_remove(priv->dbg.reset_mc_stats);
+	debugfs_remove(priv->dbg.reset_stats);
+	debugfs_remove(priv->dbg.fq_stats);
+	debugfs_remove(priv->dbg.ch_stats);
+	debugfs_remove(priv->dbg.cpu_stats);
+	debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+	dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+	if (!dpaa2_dbg_root) {
+		pr_err("DPAA2-ETH: debugfs create failed\n");
+		return;
+	}
+
+	pr_info("DPAA2-ETH: debugfs created\n");
+}
+
+void __exit dpaa2_eth_dbg_exit(void)
+{
+	debugfs_remove(dpaa2_dbg_root);
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
@@ -0,0 +1,60 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+	struct dentry *dir;
+	struct dentry *fq_stats;
+	struct dentry *ch_stats;
+	struct dentry *cpu_stats;
+	struct dentry *reset_stats;
+	struct dentry *reset_mc_stats;
+};
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
@@ -1,32 +1,5 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2014-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *	 names of its contributors may be used to endorse or promote products
- *	 derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #undef TRACE_SYSTEM
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *	 names of its contributors may be used to endorse or promote products
- *	 derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <linux/init.h>
 #include <linux/module.h>
@@ -38,9 +11,14 @@
 #include <linux/msi.h>
 #include <linux/kthread.h>
 #include <linux/iommu.h>
-
+#include <linux/net_tstamp.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/atomic.h>
+#include <net/sock.h>
 #include "../../fsl-mc/include/mc.h"
 #include "dpaa2-eth.h"
+#include "dpaa2-eth-ceetm.h"
 
 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
  * using trace events only need to #include <trace/events/sched.h>
@@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
 
-const char dpaa2_eth_drv_version[] = "0.1";
-
 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
 				dma_addr_t iova_addr)
 {
@@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
 		/* We don't support any other format */
 		return;
 
-	/* For S/G frames, we first need to free all SG entries */
+	/* For S/G frames, we first need to free all SG entries
+	 * except the first one, which was taken care of already
+	 */
 	sgt = vaddr + dpaa2_fd_get_offset(fd);
-	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		if (dpaa2_sg_is_final(&sgt[i]))
			break;
 	}
 
 free_buf:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 }
 
 /* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
-					struct dpaa2_eth_channel *ch,
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
 					const struct dpaa2_fd *fd,
 					void *fd_vaddr)
 {
@@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
 
 	ch->buf_count--;
 
-	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
 
 		if (i == 0) {
 			/* We build the skb around the first data buffer */
-			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 			if (unlikely(!skb)) {
+				/* Free the first SG entry now, since we already
+				 * unmapped it and obtained the virtual address
+				 */
+				free_pages((unsigned long)sg_vaddr, 0);
+
 				/* We still need to subtract the buffers used
 				 * by this FD from our software counter
 				 */
@@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
 			break;
 	}
 
+	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
 	/* Count all data buffers + SG table buffer */
 	ch->buf_count -= i + 2;
 
 	return skb;
 }
 
+static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
+			    struct dpaa2_fd *fd,
+			    void *buf_start,
+			    u16 queue_id)
+{
+	struct dpaa2_eth_fq *fq;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	struct dpaa2_faead *faead;
+	u32 ctrl, frc;
+	int i, err;
+
+	/* Mark the egress frame annotation area as valid */
+	frc = dpaa2_fd_get_frc(fd);
+	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+	faead = dpaa2_get_faead(buf_start, false);
+	faead->ctrl = cpu_to_le32(ctrl);
+	faead->conf_fqid = 0;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	fq = &priv->fq[queue_id];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+
+	percpu_extras->tx_portal_busy += i;
+	if (unlikely(err)) {
+		percpu_stats->tx_errors++;
+	} else {
+		percpu_stats->tx_packets++;
+		percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+	}
+
+	return err;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *vaddr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vaddr, 0);
+	}
+}
+
+static void release_fd_buf(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   dma_addr_t addr)
+{
+	int err;
+
+	ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
+	if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
+		return;
+
+	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+					       ch->rel_buf_array,
+					       ch->rel_buf_cnt)) == -EBUSY)
+		cpu_relax();
+
+	if (err)
+		free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
+
+	ch->rel_buf_cnt = 0;
+}
+
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_channel *ch,
+			     struct dpaa2_fd *fd,
+			     u16 queue_id,
+			     void *vaddr)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	struct rtnl_link_stats64 *percpu_stats;
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
+	u32 xdp_act = XDP_PASS;
+
+	xdp_prog = READ_ONCE(ch->xdp_prog);
+	if (!xdp_prog)
+		return xdp_act;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+	/* Allow the XDP program to use the specially reserved headroom */
+	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+	rcu_read_lock();
+	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	/* xdp.data pointer may have changed */
+	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+	switch (xdp_act) {
+	case XDP_PASS:
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(xdp_act);	/* fall through */
+	case XDP_ABORTED:
+	case XDP_DROP:
+		/* This is our buffer, so we can release it back to hardware */
+		release_fd_buf(priv, ch, addr);
+		percpu_stats->rx_dropped++;
+		break;
+	case XDP_TX:
+		if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
+			dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+					 DMA_BIDIRECTIONAL);
+			free_rx_fd(priv, fd, vaddr);
+			ch->buf_count--;
+		}
+		break;
+	case XDP_REDIRECT:
+		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_BIDIRECTIONAL);
+		ch->buf_count--;
+		ch->flush = true;
+		/* Mark the actual start of the data buffer */
+		xdp.data_hard_start = vaddr;
+		if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
+			free_rx_fd(priv, fd, vaddr);
+		break;
+	}
+
+	if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
+		percpu_stats->rx_packets++;
+		percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+	}
+
+	rcu_read_unlock();
+
+	return xdp_act;
+}
+
 /* Main Rx frame processing routine */
 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			 struct dpaa2_eth_channel *ch,
 			 const struct dpaa2_fd *fd,
-			 struct napi_struct *napi)
+			 struct dpaa2_eth_fq *fq)
 {
 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
 	u8 fd_format = dpaa2_fd_get_format(fd);
@@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
 	struct dpaa2_fas *fas;
 	void *buf_data;
 	u32 status = 0;
+	u32 xdp_act;
 
 	/* Tracing point */
 	trace_dpaa2_rx_fd(priv->net_dev, fd);
 
 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				DMA_BIDIRECTIONAL);
 
-	fas = dpaa2_get_fas(vaddr);
+	fas = dpaa2_get_fas(vaddr, false);
 	prefetch(fas);
 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
 	prefetch(buf_data);
@@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
 	if (fd_format == dpaa2_fd_single) {
-		skb = build_linear_skb(priv, ch, fd, vaddr);
+		xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
+					    fq->flowid, vaddr);
+		if (xdp_act != XDP_PASS)
+			return;
+
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
-		skb_free_frag(vaddr);
+		free_pages((unsigned long)vaddr, 0);
 		percpu_extras->rx_sg_frames++;
 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 	} else {
 		/* We don't support any other format */
-		goto err_frame_format;
+		goto drop_cnt;
 	}
 
 	if (unlikely(!skb))
-		goto err_build_skb;
+		goto drop_fd;
 
 	prefetch(skb->data);
 
+	/* Get the timestamp value */
+	if (priv->ts_rx_en) {
+		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+		__le64 *ts = dpaa2_get_ts(vaddr, false);
+		u64 ns;
+
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+		ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
+		shhwtstamps->hwtstamp = ns_to_ktime(ns);
+	}
+
 	/* Check if we need to validate the L4 csum */
 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
 		status = le32_to_cpu(fas->status);
@@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
 	}
 
 	skb->protocol = eth_type_trans(skb, priv->net_dev);
+	skb_record_rx_queue(skb, fq->flowid);
 
 	percpu_stats->rx_packets++;
 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 
-	napi_gro_receive(napi, skb);
+	napi_gro_receive(&ch->napi, skb);
 
 	return;
 
-err_build_skb:
+drop_fd:
 	free_rx_fd(priv, fd, vaddr);
-err_frame_format:
+drop_cnt:
 	percpu_stats->rx_dropped++;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+/* Processing of Rx frames received on the error FQ
+ * We check and print the error bits and then free the frame
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_channel *ch,
+			     const struct dpaa2_fd *fd,
+			     struct napi_struct *napi __always_unused,
+			     u16 queue_id __always_unused)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	void *vaddr;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_fas *fas;
+	u32 status = 0;
+	u32 fd_errors;
+	bool has_fas_errors = false;
+
+	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
+	/* check frame errors in the FD field */
+	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
+	if (likely(fd_errors)) {
+		has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
+				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
+				   fd_errors);
+	}
+
+	/* check frame errors in the FAS field */
+	if (has_fas_errors) {
+		fas = dpaa2_get_fas(vaddr, false);
+		status = le32_to_cpu(fas->status);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
+				   status & DPAA2_FAS_RX_ERR_MASK);
+	}
+	free_rx_fd(priv, fd, vaddr);
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_stats->rx_errors++;
+	ch->buf_count--;
+}
+#endif
+
 /* Consume all frames pull-dequeued into the store. This is the simplest way to
  * make sure we don't accidentally issue another volatile dequeue which would
  * overwrite (leak) frames already in the store.
  *
  * Observance of NAPI budget is not our concern, leaving that to the caller.
  */
-static int consume_frames(struct dpaa2_eth_channel *ch)
+static int consume_frames(struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq **src)
 {
 	struct dpaa2_eth_priv *priv = ch->priv;
-	struct dpaa2_eth_fq *fq;
+	struct dpaa2_eth_fq *fq = NULL;
 	struct dpaa2_dq *dq;
 	const struct dpaa2_fd *fd;
 	int cleaned = 0;
@@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
 		}
 
 		fd = dpaa2_dq_fd(dq);
+		prefetch(fd);
+
 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-		fq->stats.frames++;
 
-		fq->consume(priv, ch, fd, &ch->napi);
+		fq->consume(priv, ch, fd, fq);
 		cleaned++;
 	} while (!is_last);
 
+	if (!cleaned)
+		return 0;
+
+	fq->stats.frames += cleaned;
+	ch->stats.frames += cleaned;
+
+	/* A dequeue operation only pulls frames from a single queue
+	 * into the store. Return the frame queue as an out param.
+	 */
+	if (src)
+		*src = fq;
+
 	return cleaned;
 }
 
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+	struct dpaa2_faead *faead;
+	u32 ctrl, frc;
+
+	/* Mark the egress frame annotation area as valid */
+	frc = dpaa2_fd_get_frc(fd);
+	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+	/* Set hardware annotation size */
+	ctrl = dpaa2_fd_get_ctrl(fd);
+	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+	/* enable UPD (update prepended data) bit in FAEAD field of
+	 * hardware frame annotation area
+	 */
+	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+	faead = dpaa2_get_faead(buf_start, true);
+	faead->ctrl = cpu_to_le32(ctrl);
+}
+
 /* Create a frame descriptor based on a fragmented skb */
 static int build_sg_fd(struct dpaa2_eth_priv *priv,
 		       struct sk_buff *skb,
@@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
 	int num_sg;
 	int num_dma_bufs;
 	struct dpaa2_eth_swa *swa;
-	struct dpaa2_fas *fas;
 
 	/* Create and map scatterlist.
 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
@@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
 
 	/* Prepare the HW SGT structure */
 	sgt_buf_size = priv->tx_data_offset +
-		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-	sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
 	if (unlikely(!sgt_buf)) {
 		err = -ENOMEM;
 		goto sgt_buf_alloc_failed;
 	}
 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
-
-	/* PTA from egress side is passed as is to the confirmation side so
-	 * we need to clear some fields here in order to find consistent values
-	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-	 * field from the hardware annotation area
-	 */
-	fas = dpaa2_get_fas(sgt_buf);
-	memset(fas, 0, DPAA2_FAS_SIZE);
+	memset(sgt_buf, 0, sgt_buf_size);
 
 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 
@@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
 	 * all of them on Tx Conf.
 	 */
 	swa = (struct dpaa2_eth_swa *)sgt_buf;
-	swa->skb = skb;
-	swa->scl = scl;
-	swa->num_sg = num_sg;
-	swa->num_dma_bufs = num_dma_bufs;
+	swa->type = DPAA2_ETH_SWA_SG;
+	swa->sg.skb = skb;
+	swa->sg.scl = scl;
+	swa->sg.num_sg = num_sg;
+	swa->sg.sgt_size = sgt_buf_size;
 
 	/* Separately map the SGT buffer */
 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
@@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
 	dpaa2_fd_set_addr(fd, addr);
 	dpaa2_fd_set_len(fd, skb->len);
-	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-			  DPAA2_FD_CTRL_PTV1);
+	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+	if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		enable_tx_tstamp(fd, sgt_buf);
 
 	return 0;
 
 dma_map_single_failed:
-	kfree(sgt_buf);
+	skb_free_frag(sgt_buf);
 sgt_buf_alloc_failed:
 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 dma_map_sg_failed:
@@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
 			   struct dpaa2_fd *fd)
 {
 	struct device *dev = priv->net_dev->dev.parent;
-	u8 *buffer_start;
-	struct dpaa2_fas *fas;
-	struct sk_buff **skbh;
+	u8 *buffer_start, *aligned_start;
+	struct dpaa2_eth_swa *swa;
 	dma_addr_t addr;
 
-	buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-				 DPAA2_ETH_TX_BUF_ALIGN,
-				 DPAA2_ETH_TX_BUF_ALIGN);
-
-	/* PTA from egress side is passed as is to the confirmation side so
-	 * we need to clear some fields here in order to find consistent values
-	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-	 * field from the hardware annotation area
+	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
+	/* If there's enough room to align the FD address, do it.
+	 * It will help hardware optimize accesses.
 	 */
-	fas = dpaa2_get_fas(buffer_start);
-	memset(fas, 0, DPAA2_FAS_SIZE);
+	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+				  DPAA2_ETH_TX_BUF_ALIGN);
+	if (aligned_start >= skb->head)
+		buffer_start = aligned_start;
 
 	/* Store a backpointer to the skb at the beginning of the buffer
 	 * (in the private data area) such that we can release it
 	 * on Tx confirm
 	 */
-	skbh = (struct sk_buff **)buffer_start;
-	*skbh = skb;
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	swa->type = DPAA2_ETH_SWA_SINGLE;
+	swa->single.skb = skb;
 
 	addr = dma_map_single(dev, buffer_start,
 			      skb_tail_pointer(skb) - buffer_start,
@@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
 	dpaa2_fd_set_len(fd, skb->len);
 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
-	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-			  DPAA2_FD_CTRL_PTV1);
+	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+	if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		enable_tx_tstamp(fd, buffer_start);
 
 	return 0;
 }
@@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
  * back-pointed to is also freed.
  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
  * dpaa2_eth_tx().
- * Optionally, return the frame annotation status word (FAS), which needs
- * to be checked if we're on the confirmation path.
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-		       const struct dpaa2_fd *fd,
-		       u32 *status)
+		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
-	struct sk_buff **skbh, *skb;
+	struct sk_buff *skb = NULL;
 	unsigned char *buffer_start;
-	int unmap_size;
-	struct scatterlist *scl;
-	int num_sg, num_dma_bufs;
 	struct dpaa2_eth_swa *swa;
 	u8 fd_format = dpaa2_fd_get_format(fd);
-	struct dpaa2_fas *fas;
 
 	fd_addr = dpaa2_fd_get_addr(fd);
-	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
-	fas = dpaa2_get_fas(skbh);
+	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+	swa = (struct dpaa2_eth_swa *)buffer_start;
 
 	if (fd_format == dpaa2_fd_single) {
-		skb = *skbh;
-		buffer_start = (unsigned char *)skbh;
-		/* Accessing the skb buffer is safe before dma unmap, because
-		 * we didn't map the actual skb shell.
-		 */
-		dma_unmap_single(dev, fd_addr,
-				 skb_tail_pointer(skb) - buffer_start,
-				 DMA_BIDIRECTIONAL);
+		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+			skb = swa->single.skb;
+			/* Accessing the skb buffer is safe before dma unmap,
+			 * because we didn't map the actual skb shell.
+			 */
+			dma_unmap_single(dev, fd_addr,
+					 skb_tail_pointer(skb) - buffer_start,
+					 DMA_BIDIRECTIONAL);
+		} else {
+			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
+				  "Wrong SWA type");
+			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+					 DMA_BIDIRECTIONAL);
+		}
 	} else if (fd_format == dpaa2_fd_sg) {
-		swa = (struct dpaa2_eth_swa *)skbh;
-		skb = swa->skb;
-		scl = swa->scl;
-		num_sg = swa->num_sg;
-		num_dma_bufs = swa->num_dma_bufs;
+		skb = swa->sg.skb;
 
 		/* Unmap the scatterlist */
-		dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
-		kfree(scl);
+		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
+		kfree(swa->sg.scl);
 
 		/* Unmap the SGT buffer */
-		unmap_size = priv->tx_data_offset +
-		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-		dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
+		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+				 DMA_BIDIRECTIONAL);
 	} else {
-		/* Unsupported format, mark it as errored and give up */
-		if (status)
-			*status = ~0;
+		netdev_dbg(priv->net_dev, "Invalid FD format\n");
 		return;
 	}
 
-	/* Read the status from the Frame Annotation after we unmap the first
-	 * buffer but before we free it. The caller function is responsible
-	 * for checking the status value.
-	 */
-	if (status)
-		*status = le32_to_cpu(fas->status);
+	if (swa->type == DPAA2_ETH_SWA_XDP) {
+		page_frag_free(buffer_start);
+		return;
+	}
+
+	/* Get the timestamp value */
+	if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		struct skb_shared_hwtstamps shhwtstamps;
+		__le64 *ts = dpaa2_get_ts(buffer_start, true);
+		u64 ns;
+
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+		ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
+		shhwtstamps.hwtstamp = ns_to_ktime(ns);
+		skb_tstamp_tx(skb, &shhwtstamps);
+	}
 
-	/* Free SGT buffer kmalloc'ed on tx */
+	/* Free SGT buffer allocated on tx */
 	if (fd_format != dpaa2_fd_single)
-		kfree(skbh);
+		skb_free_frag(buffer_start);
 
 	/* Move on with skb release */
-	dev_kfree_skb(skb);
+	napi_consume_skb(skb, in_napi);
 }
 
 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
@@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	struct dpaa2_eth_fq *fq;
+	struct netdev_queue *nq;
 	u16 queue_mapping;
-	int err, i;
+	unsigned int needed_headroom;
+	u32 fd_len;
+	u8 prio;
+	int err, i, ch_id = 0;
+
+	queue_mapping = skb_get_queue_mapping(skb);
+	prio = netdev_txq_to_tc(net_dev, queue_mapping);
+	/* Hardware interprets priority level 0 as being the highest,
+	 * so we need to do a reverse mapping to the netdev tc index
+	 */
+	if (net_dev->num_tc)
+		prio = net_dev->num_tc - prio - 1;
+
+	queue_mapping %= dpaa2_eth_queue_count(priv);
+	fq = &priv->fq[queue_mapping];
 
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
-	if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+	if (skb_headroom(skb) < needed_headroom) {
 		struct sk_buff *ns;
 
-		ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+		ns = skb_realloc_headroom(skb, needed_headroom);
 		if (unlikely(!ns)) {
 			percpu_stats->tx_dropped++;
 			goto err_alloc_headroom;
 		}
+		percpu_extras->tx_reallocs++;
+
+		if (skb->sk)
+			skb_set_owner_w(ns, skb->sk);
+
 		dev_kfree_skb(skb);
 		skb = ns;
 	}
@@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
 		goto err_build_fd;
 	}
 
+	if (dpaa2_eth_ceetm_is_enabled(priv)) {
+		err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
+		if (err)
+			goto err_ceetm_classify;
+	}
+
 	/* Tracing point */
 	trace_dpaa2_tx_fd(net_dev, &fd);
 
-	/* TxConf FQ selection primarily based on cpu affinity; this is
-	 * non-migratable context, so it's safe to call smp_processor_id().
+	fd_len = dpaa2_fd_get_len(&fd);
+	nq = netdev_get_tx_queue(net_dev, queue_mapping);
+	netdev_tx_sent_queue(nq, fd_len);
+
+	/* Everything that happens after this enqueue might race with
+	 * the Tx confirmation callback for this frame
 	 */
-	queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
-	fq = &priv->fq[queue_mapping];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-						  fq->tx_qdbin, &fd);
+		err = priv->enqueue(priv, fq, &fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
@@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd, NULL);
+		free_tx_fd(priv, &fd, false);
+		netdev_tx_completed_queue(nq, 1, fd_len);
 	} else {
 		percpu_stats->tx_packets++;
-		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+		percpu_stats->tx_bytes += fd_len;
 	}
 
 	return NETDEV_TX_OK;
 
+err_ceetm_classify:
+	free_tx_fd(priv, &fd, false);
 err_build_fd:
 err_alloc_headroom:
 	dev_kfree_skb(skb);
@@ -637,48 +909,39 @@ err_alloc_headroom:
 
 /* Tx confirmation frame processing routine */
 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
-			      struct dpaa2_eth_channel *ch,
+			      struct dpaa2_eth_channel *ch __always_unused,
 			      const struct dpaa2_fd *fd,
-			      struct napi_struct *napi __always_unused)
+			      struct dpaa2_eth_fq *fq)
 {
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
-	u32 status = 0;
+	u32 fd_len = dpaa2_fd_get_len(fd);
 	u32 fd_errors;
-	bool has_fas_errors = false;
 
 	/* Tracing point */
 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
 
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 	percpu_extras->tx_conf_frames++;
-	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+	percpu_extras->tx_conf_bytes += fd_len;
+
+	fq->dq_frames++;
+	fq->dq_bytes += fd_len;
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	if (unlikely(fd_errors)) {
-		/* We only check error bits in the FAS field if corresponding
-		 * FAERR bit is set in FD and the FAS field is marked as valid
-		 */
-		has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
-				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-		if (net_ratelimit())
-			netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
-				   fd_errors);
-	}
-
-	free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
+	free_tx_fd(priv, fd, true);
 
 	if (likely(!fd_errors))
 		return;
 
+	if (net_ratelimit())
+		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+			   fd_errors);
+
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
 	/* Tx-conf logically pertains to the egress path. */
 	percpu_stats->tx_errors++;
-
-	if (has_fas_errors && net_ratelimit())
-		netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
-			   status & DPAA2_FAS_TX_ERR_MASK);
 }
 
 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
@@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
 /* Perform a single release command to add buffers
  * to the specified buffer pool
  */
-static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+static int add_bufs(struct dpaa2_eth_priv *priv,
+		    struct dpaa2_eth_channel *ch, u16 bpid)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *buf;
+	struct page *page;
 	dma_addr_t addr;
-	int i;
+	int i, err;
 
 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
 		/* Allocate buffer visible to WRIOP + skb shared info +
 		 * alignment padding
 		 */
-		buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
-		if (unlikely(!buf))
+		/* allocate one page for each Rx buffer. WRIOP sees
+		 * the entire page except for a tailroom reserved for
+		 * skb shared info
+		 */
+		page = dev_alloc_pages(0);
+		if (!page)
 			goto err_alloc;
 
-		buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
-
-		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-				      DMA_FROM_DEVICE);
+		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+				    DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
 
@@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
 
 		/* tracing point */
 		trace_dpaa2_eth_buf_seed(priv->net_dev,
-					 buf, DPAA2_ETH_BUF_RAW_SIZE,
+					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
 					 addr, DPAA2_ETH_RX_BUF_SIZE,
 					 bpid);
 	}
 
 release_bufs:
-	/* In case the portal is busy, retry until successful.
-	 * The buffer release function would only fail if the QBMan portal
-	 * was busy, which implies portal contention (i.e. more CPUs than
-	 * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
-	 * there is little we can realistically do, short of giving up -
-	 * in which case we'd risk depleting the buffer pool and never again
-	 * receiving the Rx interrupt which would kick-start the refill logic.
-	 * So just keep retrying, at the risk of being moved to ksoftirqd.
-	 */
-	while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+	/* In case the portal is busy, retry until successful */
+	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+					       buf_array, i)) == -EBUSY)
 		cpu_relax();
+
+	/* If release command failed, clean up and bail out;
+	 * not much else we can do about it
+	 */
+	if (err) {
+		free_bufs(priv, buf_array, i);
+		return 0;
+	}
+
 	return i;
 
 err_map:
-	skb_free_frag(buf);
+	__free_pages(page, 0);
 err_alloc:
+	/* If we managed to allocate at least some buffers,
+	 * release them to hardware
+	 */
 	if (i)
 		goto release_bufs;
 
@@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
 	 */
 	preempt_disable();
 	for (j = 0; j < priv->num_channels; j++) {
-		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+		priv->channel[j]->buf_count = 0;
+		for (i = 0; i < priv->max_bufs_per_ch;
 		     i += DPAA2_ETH_BUFS_PER_CMD) {
-			new_count = add_bufs(priv, bpid);
+			new_count = add_bufs(priv, priv->channel[j], bpid);
 			priv->channel[j]->buf_count += new_count;
 
 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
  */
 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 {
-	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *vaddr;
-	int ret, i;
+	int ret;
 
 	do {
 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
 			return;
 		}
-		for (i = 0; i < ret; i++) {
-			/* Same logic as on regular Rx path */
-			vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
-						   buf_array[i]);
-			dma_unmap_single(dev, buf_array[i],
-					 DPAA2_ETH_RX_BUF_SIZE,
-					 DMA_FROM_DEVICE);
-			skb_free_frag(vaddr);
-		}
+		free_bufs(priv, buf_array, ret);
 	} while (ret);
 }
 
 static void drain_pool(struct dpaa2_eth_priv *priv)
 {
-	int i;
-
+	preempt_disable();
 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
 	drain_bufs(priv, 1);
-
-	for (i = 0; i < priv->num_channels; i++)
-		priv->channel[i]->buf_count = 0;
+	preempt_enable();
 }
 
 /* Function is called from softirq context only, so we don't need to guard
@@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
 {
 	int new_count;
 
-	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+	if (likely(ch->buf_count >= priv->refill_thresh))
 		return 0;
 
 	do {
-		new_count = add_bufs(priv, bpid);
+		new_count = add_bufs(priv, ch, bpid);
 		if (unlikely(!new_count)) {
 			/* Out of memory; abort for now, we'll try later on */
 			break;
 		}
 		ch->buf_count += new_count;
-	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+	} while (ch->buf_count < priv->max_bufs_per_ch);
 
-	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+	if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
		return -ENOMEM;
 
 	return 0;
@@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
 
 	/* Retry while portal is busy */
 	do {
-		err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+						    ch->store);
 		dequeues++;
 		cpu_relax();
 	} while (err == -EBUSY);
@@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct dpaa2_eth_channel *ch;
-	int cleaned = 0, store_cleaned;
 	struct dpaa2_eth_priv *priv;
+	int rx_cleaned = 0, txconf_cleaned = 0;
+	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+	struct netdev_queue *nq;
+	int store_cleaned, work_done;
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
 	priv = ch->priv;
 
-	while (cleaned < budget) {
+	do {
 		err = pull_channel(ch);
 		if (unlikely(err))
 			break;
@@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
 		/* Refill pool if appropriate */
 		refill_pool(priv, ch, priv->bpid);
 
-		store_cleaned = consume_frames(ch);
-		cleaned += store_cleaned;
+		store_cleaned = consume_frames(ch, &fq);
+		if (!store_cleaned)
+			break;
+		if (fq->type == DPAA2_RX_FQ) {
+			rx_cleaned += store_cleaned;
+			/* If these are XDP_REDIRECT frames, flush them now */
+			/* TODO: Do we need this? */
+			if (ch->flush) {
+				xdp_do_flush_map();
+				ch->flush = false;
+			}
+		} else {
+			txconf_cleaned += store_cleaned;
+			/* We have a single Tx conf FQ on this channel */
+			txc_fq = fq;
+		}
 
-		/* If we have enough budget left for a full store,
-		 * try a new pull dequeue, otherwise we're done here
+		/* If we either consumed the whole NAPI budget with Rx frames
+		 * or we reached the Tx confirmations threshold, we're done.
 		 */
-		if (store_cleaned == 0 ||
-		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
-			break;
-	}
+		if (rx_cleaned >= budget ||
+		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+			work_done = budget;
+			goto out;
+		}
+	} while (store_cleaned);
 
-	if (cleaned < budget) {
-		napi_complete_done(napi, cleaned);
-		/* Re-enable data available notifications */
-		do {
-			err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-			cpu_relax();
-		} while (err == -EBUSY);
-	}
+	/* We didn't consume the entire budget, so finish napi and
+	 * re-enable data availability notifications
	 */
+	napi_complete_done(napi, rx_cleaned);
+	do {
+		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+		cpu_relax();
+	} while (err == -EBUSY);
+	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+		  ch->nctx.desired_cpu);
 
-	ch->stats.frames += cleaned;
+	work_done = max(rx_cleaned, 1);
 
-	return cleaned;
+out:
+	if (txc_fq) {
+		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+					  txc_fq->dq_bytes);
+		txc_fq->dq_frames = 0;
+		txc_fq->dq_bytes = 0;
+	}
+
+	return work_done;
 }
 
 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
 	}
 }
 
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
+static void update_pf(struct dpaa2_eth_priv *priv,
+		      struct dpni_link_state *state)
+{
+	bool pause_frames;
+
+	pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
+	if (priv->tx_pause_frames != pause_frames) {
+		priv->tx_pause_frames = pause_frames;
+		set_rx_taildrop(priv);
+	}
+}
+
 static int link_state_update(struct dpaa2_eth_priv *priv)
 {
-	struct dpni_link_state state;
+	struct dpni_link_state state = {0};
 	int err;
 
 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
 
 	priv->link_state = state;
 	if (state.up) {
+		update_tx_fqids(priv);
+		update_pf(priv, &state);
 		netif_carrier_on(priv->net_dev);
 		netif_tx_start_all_queues(priv->net_dev);
 	} else {
@@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	int err;
 
-	err = seed_pool(priv, priv->bpid);
-	if (err) {
-		/* Not much to do; the buffer pool, though not filled up,
-		 * may still contain some buffers which would enable us
-		 * to limp on.
-		 */
-		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-			   priv->dpbp_dev->obj_desc.id, priv->bpid);
-	}
-
 	/* We'll only start the txqs when the link is actually ready; make sure
 	 * we don't race against the link up notification, which may come
 	 * immediately after dpni_enable();
 	 */
 	netif_tx_stop_all_queues(net_dev);
-	enable_ch_napi(priv);
+
 	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
 	 * return true and cause 'ip link show' to report the LOWER_UP flag,
 	 * even though the link notification wasn't even received.
 	 */
 	netif_carrier_off(net_dev);
 
+	err = seed_pool(priv, priv->bpid);
+	if (err) {
+		/* Not much to do; the buffer pool, though not filled up,
+		 * may still contain some buffers which would enable us
+		 * to limp on.
+		 */
+		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+			   priv->dpbp_dev->obj_desc.id, priv->bpid);
+	}
+
+	priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
 	if (err < 0) {
 		netdev_err(net_dev, "dpni_enable() failed\n");
@@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev
 
 link_state_err:
 enable_err:
-	disable_ch_napi(priv);
+	priv->refill_thresh = 0;
 	drain_pool(priv);
 	return err;
 }
 
-/* The DPIO store must be empty when we call this,
- * at the end of every NAPI cycle.
- */
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
-			 struct dpaa2_eth_channel *ch)
 | |
| -{
 | |
| -	u32 drained = 0, total = 0;
 | |
| -
 | |
| -	do {
 | |
| -		pull_channel(ch);
 | |
| -		drained = consume_frames(ch);
 | |
| -		total += drained;
 | |
| -	} while (drained);
 | |
| -
 | |
| -	return total;
 | |
| -}
 | |
| -
 | |
| -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
 | |
| -{
 | |
| -	struct dpaa2_eth_channel *ch;
 | |
| -	int i;
 | |
| -	u32 drained = 0;
 | |
| -
 | |
| -	for (i = 0; i < priv->num_channels; i++) {
 | |
| -		ch = priv->channel[i];
 | |
| -		drained += drain_channel(priv, ch);
 | |
| -	}
 | |
| -
 | |
| -	return drained;
 | |
| -}
 | |
| -
 | |
|  static int dpaa2_eth_stop(struct net_device *net_dev)
 | |
|  {
 | |
|  	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| -	int dpni_enabled;
 | |
| -	int retries = 10;
 | |
| -	u32 drained;
 | |
| +	int dpni_enabled = 0;
 | |
| +	int retries = 10, i;
 | |
| +	int err = 0;
 | |
|  
 | |
|  	netif_tx_stop_all_queues(net_dev);
 | |
|  	netif_carrier_off(net_dev);
 | |
| @@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
 | |
|  	} while (dpni_enabled && --retries);
 | |
|  	if (!retries) {
 | |
|  		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
 | |
| -		/* Must go on and disable NAPI nonetheless, so we don't crash at
 | |
| -		 * the next "ifconfig up"
 | |
| +		/* Must go on and finish processing pending frames, so we don't
 | |
| +		 * crash at the next "ifconfig up"
 | |
|  		 */
 | |
| +		err = -ETIMEDOUT;
 | |
|  	}
 | |
|  
 | |
| -	/* Wait for NAPI to complete on every core and disable it.
 | |
| -	 * In particular, this will also prevent NAPI from being rescheduled if
 | |
| -	 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
 | |
| -	 * don't even need to disarm the channels, except perhaps for the case
 | |
| -	 * of a huge coalescing value.
 | |
| -	 */
 | |
| -	disable_ch_napi(priv);
 | |
| +	priv->refill_thresh = 0;
 | |
|  
 | |
| -	 /* Manually drain the Rx and TxConf queues */
 | |
| -	drained = drain_ingress_frames(priv);
 | |
| -	if (drained)
 | |
| -		netdev_dbg(net_dev, "Drained %d frames.\n", drained);
 | |
| +	/* Wait for all running napi poll routines to finish, so that no
 | |
| +	 * new refill operations are started
 | |
| +	 */
 | |
| +	for (i = 0; i < priv->num_channels; i++)
 | |
| +		napi_synchronize(&priv->channel[i]->napi);
 | |
|  
 | |
|  	/* Empty the buffer pool */
 | |
|  	drain_pool(priv);
 | |
|  
 | |
| -	return 0;
 | |
| -}
 | |
| -
 | |
| -static int dpaa2_eth_init(struct net_device *net_dev)
 | |
| -{
 | |
| -	u64 supported = 0;
 | |
| -	u64 not_supported = 0;
 | |
| -	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| -	u32 options = priv->dpni_attrs.options;
 | |
| -
 | |
| -	/* Capabilities listing */
 | |
| -	supported |= IFF_LIVE_ADDR_CHANGE;
 | |
| -
 | |
| -	if (options & DPNI_OPT_NO_MAC_FILTER)
 | |
| -		not_supported |= IFF_UNICAST_FLT;
 | |
| -	else
 | |
| -		supported |= IFF_UNICAST_FLT;
 | |
| -
 | |
| -	net_dev->priv_flags |= supported;
 | |
| -	net_dev->priv_flags &= ~not_supported;
 | |
| -
 | |
| -	/* Features */
 | |
| -	net_dev->features = NETIF_F_RXCSUM |
 | |
| -			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 | |
| -			    NETIF_F_SG | NETIF_F_HIGHDMA |
 | |
| -			    NETIF_F_LLTX;
 | |
| -	net_dev->hw_features = net_dev->features;
 | |
| -
 | |
| -	return 0;
 | |
| +	return err;
 | |
|  }
 | |
|  
 | |
|  static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
 | |
| @@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
 | |
|  	}
 | |
|  }
 | |
|  
 | |
| -static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
 | |
| -{
 | |
| -	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| -	int err;
 | |
| -
 | |
| -	/* Set the maximum Rx frame length to match the transmit side;
 | |
| -	 * account for L2 headers when computing the MFL
 | |
| -	 */
 | |
| -	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
 | |
| -					(u16)DPAA2_ETH_L2_MAX_FRM(mtu));
 | |
| -	if (err) {
 | |
| -		netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
 | |
| -		return err;
 | |
| -	}
 | |
| -
 | |
| -	net_dev->mtu = mtu;
 | |
| -	return 0;
 | |
| -}
 | |
| -
 | |
|  /* Copy mac unicast addresses from @net_dev to @priv.
 | |
|   * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 | |
|   */
 | |
| @@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(dev);
 | |
| +	struct hwtstamp_config config;
 | |
| +
 | |
| +	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
 | |
| +		return -EFAULT;
 | |
| +
 | |
| +	switch (config.tx_type) {
 | |
| +	case HWTSTAMP_TX_OFF:
 | |
| +		priv->ts_tx_en = false;
 | |
| +		break;
 | |
| +	case HWTSTAMP_TX_ON:
 | |
| +		priv->ts_tx_en = true;
 | |
| +		break;
 | |
| +	default:
 | |
| +		return -ERANGE;
 | |
| +	}
 | |
| +
 | |
| +	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
 | |
| +		priv->ts_rx_en = false;
 | |
| +	} else {
 | |
| +		priv->ts_rx_en = true;
 | |
| +		/* TS is set for all frame types, not only those requested */
 | |
| +		config.rx_filter = HWTSTAMP_FILTER_ALL;
 | |
| +	}
 | |
| +
 | |
| +	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
 | |
| +			-EFAULT : 0;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 | |
| +{
 | |
| +	if (cmd == SIOCSHWTSTAMP)
 | |
| +		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
 | |
| +
 | |
| +	return -EINVAL;
 | |
| +}
 | |
| +
 | |
| +static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_buffer_layout buf_layout = {0};
 | |
| +	u16 rx_buf_align;
 | |
| +	int err;
 | |
| +
 | |
| +	/* We need to check for WRIOP version 1.0.0, but depending on the MC
 | |
| +	 * version, this number is not always provided correctly on rev1.
 | |
| +	 * We need to check for both alternatives in this situation.
 | |
| +	 */
 | |
| +	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
 | |
| +	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
 | |
| +		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
 | |
| +	else
 | |
| +		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 | |
| +
 | |
| +	/* tx buffer */
 | |
| +	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
 | |
| +	buf_layout.pass_timestamp = true;
 | |
| +	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
 | |
| +			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
 | |
| +	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| +				     DPNI_QUEUE_TX, &buf_layout);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	/* tx-confirm buffer */
 | |
| +	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
 | |
| +	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| +				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	/* Now that we've set our tx buffer layout, retrieve the minimum
 | |
| +	 * required tx data offset.
 | |
| +	 */
 | |
| +	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
 | |
| +				      &priv->tx_data_offset);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	if ((priv->tx_data_offset % 64) != 0)
 | |
| +		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
 | |
| +			 priv->tx_data_offset);
 | |
| +
 | |
| +	/* rx buffer */
 | |
| +	buf_layout.pass_frame_status = true;
 | |
| +	buf_layout.pass_parser_result = true;
 | |
| +	buf_layout.data_align = rx_buf_align;
 | |
| +	buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
 | |
| +	buf_layout.private_data_size = 0;
 | |
| +	/* If XDP program is attached, reserve extra space for
 | |
| +	 * potential header expansions
 | |
| +	 */
 | |
| +	if (priv->has_xdp_prog)
 | |
| +		buf_layout.data_head_room += XDP_PACKET_HEADROOM;
 | |
| +	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
 | |
| +			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
 | |
| +			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
 | |
| +			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
 | |
| +			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
 | |
| +	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| +				     DPNI_QUEUE_RX, &buf_layout);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
 | |
| +#define DPNI_ENQUEUE_FQID_VER_MINOR	9
 | |
| +
 | |
| +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
 | |
| +				       struct dpaa2_eth_fq *fq,
 | |
| +				       struct dpaa2_fd *fd, u8 prio)
 | |
| +{
 | |
| +	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
 | |
| +					   priv->tx_qdid, prio,
 | |
| +					   fq->tx_qdbin, fd);
 | |
| +}
 | |
| +
 | |
| +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
 | |
| +				       struct dpaa2_eth_fq *fq,
 | |
| +				       struct dpaa2_fd *fd,
 | |
| +				       u8 prio __always_unused)
 | |
| +{
 | |
| +	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
 | |
| +					   fq->tx_fqid, fd);
 | |
| +}
 | |
| +
 | |
| +static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
 | |
| +				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
 | |
| +		priv->enqueue = dpaa2_eth_enqueue_qd;
 | |
| +	else
 | |
| +		priv->enqueue = dpaa2_eth_enqueue_fq;
 | |
| +}
 | |
| +
 | |
| +static void update_tx_fqids(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	struct dpaa2_eth_fq *fq;
 | |
| +	struct dpni_queue queue;
 | |
| +	struct dpni_queue_id qid = {0};
 | |
| +	int i, err;
 | |
| +
 | |
| +	/* We only use Tx FQIDs for FQID-based enqueue, so check
 | |
| +	 * if DPNI version supports it before updating FQIDs
 | |
| +	 */
 | |
| +	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
 | |
| +				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
 | |
| +		return;
 | |
| +
 | |
| +	for (i = 0; i < priv->num_fqs; i++) {
 | |
| +		fq = &priv->fq[i];
 | |
| +		if (fq->type != DPAA2_TX_CONF_FQ)
 | |
| +			continue;
 | |
| +		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 | |
| +				     DPNI_QUEUE_TX, 0, fq->flowid,
 | |
| +				     &queue, &qid);
 | |
| +		if (err)
 | |
| +			goto out_err;
 | |
| +
 | |
| +		fq->tx_fqid = qid.fqid;
 | |
| +		if (fq->tx_fqid == 0)
 | |
| +			goto out_err;
 | |
| +	}
 | |
| +
 | |
| +	return;
 | |
| +
 | |
| +out_err:
 | |
| +	netdev_info(priv->net_dev,
 | |
| +		    "Error reading Tx FQID, fallback to QDID-based enqueue");
 | |
| +	priv->enqueue = dpaa2_eth_enqueue_qd;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	struct dpaa2_eth_channel *ch;
 | |
| +	struct bpf_prog *old_prog = NULL;
 | |
| +	int i, err;
 | |
| +
 | |
| +	/* No support for SG frames */
 | |
| +	if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	if (netif_running(net_dev)) {
 | |
| +		err = dpaa2_eth_stop(net_dev);
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +	}
 | |
| +
 | |
| +	if (prog) {
 | |
| +		prog = bpf_prog_add(prog, priv->num_channels - 1);
 | |
| +		if (IS_ERR(prog))
 | |
| +			return PTR_ERR(prog);
 | |
| +	}
 | |
| +
 | |
| +	priv->has_xdp_prog = !!prog;
 | |
| +
 | |
| +	for (i = 0; i < priv->num_channels; i++) {
 | |
| +		ch = priv->channel[i];
 | |
| +		old_prog = xchg(&ch->xdp_prog, prog);
 | |
| +		if (old_prog)
 | |
| +			bpf_prog_put(old_prog);
 | |
| +	}
 | |
| +
 | |
| +	/* When turning XDP on/off we need to do some reconfiguring
 | |
| +	 * of the Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
 | |
| +	 * so we are sure no old format buffers will be used from now on
 | |
| +	 */
 | |
| +	if (priv->has_xdp_prog != !!old_prog)
 | |
| +		set_buffer_layout(priv);
 | |
| +
 | |
| +	if (netif_running(net_dev)) {
 | |
| +		err = dpaa2_eth_open(net_dev);
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(dev);
 | |
| +
 | |
| +	switch (xdp->command) {
 | |
| +	case XDP_SETUP_PROG:
 | |
| +		return dpaa2_eth_set_xdp(dev, xdp->prog);
 | |
| +	case XDP_QUERY_PROG:
 | |
| +		xdp->prog_attached = priv->has_xdp_prog;
 | |
| +		return 0;
 | |
| +	default:
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	struct device *dev = net_dev->dev.parent;
 | |
| +	struct rtnl_link_stats64 *percpu_stats;
 | |
| +	struct dpaa2_eth_drv_stats *percpu_extras;
 | |
| +	unsigned int needed_headroom;
 | |
| +	struct dpaa2_eth_swa *swa;
 | |
| +	struct dpaa2_eth_fq *fq;
 | |
| +	struct dpaa2_fd fd;
 | |
| +	void *buffer_start, *aligned_start;
 | |
| +	dma_addr_t addr;
 | |
| +	int err, i;
 | |
| +
 | |
| +	if (!netif_running(net_dev))
 | |
| +		return -ENETDOWN;
 | |
| +
 | |
| +	/* We require a minimum headroom to be able to transmit the frame.
 | |
| +	 * Otherwise return an error and let the original net_device handle it
 | |
| +	 */
 | |
| +	/* TODO: Do we update i/f counters here or just on the Rx device? */
 | |
| +	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
 | |
| +	if (xdp->data < xdp->data_hard_start ||
 | |
| +	    xdp->data - xdp->data_hard_start < needed_headroom) {
 | |
| +		percpu_stats->tx_dropped++;
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	percpu_stats = this_cpu_ptr(priv->percpu_stats);
 | |
| +	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 | |
| +
 | |
| +	/* Setup the FD fields */
 | |
| +	memset(&fd, 0, sizeof(fd));
 | |
| +
 | |
| +	/* Align FD address, if possible */
 | |
| +	buffer_start = xdp->data - needed_headroom;
 | |
| +	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
 | |
| +				  DPAA2_ETH_TX_BUF_ALIGN);
 | |
| +	if (aligned_start >= xdp->data_hard_start)
 | |
| +		buffer_start = aligned_start;
 | |
| +
 | |
| +	swa = (struct dpaa2_eth_swa *)buffer_start;
 | |
| +	/* fill in necessary fields here */
 | |
| +	swa->type = DPAA2_ETH_SWA_XDP;
 | |
| +	swa->xdp.dma_size = xdp->data_end - buffer_start;
 | |
| +
 | |
| +	addr = dma_map_single(dev, buffer_start,
 | |
| +			      xdp->data_end - buffer_start,
 | |
| +			      DMA_BIDIRECTIONAL);
 | |
| +	if (unlikely(dma_mapping_error(dev, addr))) {
 | |
| +		percpu_stats->tx_dropped++;
 | |
| +		return -ENOMEM;
 | |
| +	}
 | |
| +
 | |
| +	dpaa2_fd_set_addr(&fd, addr);
 | |
| +	dpaa2_fd_set_offset(&fd, xdp->data - buffer_start);
 | |
| +	dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data);
 | |
| +	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
 | |
| +	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
 | |
| +
 | |
| +	fq = &priv->fq[smp_processor_id()];
 | |
| +	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
 | |
| +		err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
 | |
| +						  fq->tx_qdbin, &fd);
 | |
| +		if (err != -EBUSY)
 | |
| +			break;
 | |
| +	}
 | |
| +	percpu_extras->tx_portal_busy += i;
 | |
| +	if (unlikely(err < 0)) {
 | |
| +		percpu_stats->tx_errors++;
 | |
| +		/* let the Rx device handle the cleanup */
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	percpu_stats->tx_packets++;
 | |
| +	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static void dpaa2_eth_xdp_flush(struct net_device *net_dev)
 | |
| +{
 | |
| +	/* We don't have hardware support for Tx batching,
 | |
| +	 * so we do the actual frame enqueue in ndo_xdp_xmit
 | |
| +	 */
 | |
| +}
 | |
| +static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	struct net_device *net_dev = priv->net_dev;
 | |
| +	unsigned int i, num_queues;
 | |
| +	struct cpumask xps_mask;
 | |
| +	struct dpaa2_eth_fq *fq;
 | |
| +	int err = 0;
 | |
| +
 | |
| +	num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv);
 | |
| +	for (i = 0; i < num_queues; i++) {
 | |
| +		fq = &priv->fq[i % dpaa2_eth_queue_count(priv)];
 | |
| +		cpumask_clear(&xps_mask);
 | |
| +		cpumask_set_cpu(fq->target_cpu, &xps_mask);
 | |
| +		err = netif_set_xps_queue(net_dev, &xps_mask, i);
 | |
| +		if (err) {
 | |
| +			dev_info_once(net_dev->dev.parent,
 | |
| +				      "Error setting XPS queue\n");
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_setup_tc(struct net_device *net_dev,
 | |
| +			      enum tc_setup_type type,
 | |
| +			      void *type_data)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data;
 | |
| +	int i, err = 0;
 | |
| +
 | |
| +	if (type != TC_SETUP_MQPRIO)
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) {
 | |
| +		netdev_err(net_dev, "Max %d traffic classes supported\n",
 | |
| +			   dpaa2_eth_tc_count(priv));
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (mqprio->num_tc == net_dev->num_tc)
 | |
| +		return 0;
 | |
| +
 | |
| +	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 | |
| +
 | |
| +	if (!mqprio->num_tc) {
 | |
| +		netdev_reset_tc(net_dev);
 | |
| +		err = netif_set_real_num_tx_queues(net_dev,
 | |
| +						   dpaa2_eth_queue_count(priv));
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +
 | |
| +		goto update_xps;
 | |
| +	}
 | |
| +
 | |
| +	err = netdev_set_num_tc(net_dev, mqprio->num_tc);
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc *
 | |
| +					   dpaa2_eth_queue_count(priv));
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	for (i = 0; i < mqprio->num_tc; i++) {
 | |
| +		err = netdev_set_tc_queue(net_dev, i,
 | |
| +					  dpaa2_eth_queue_count(priv),
 | |
| +					  i * dpaa2_eth_queue_count(priv));
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +	}
 | |
| +
 | |
| +update_xps:
 | |
| +	err = dpaa2_eth_update_xps(priv);
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
|  static const struct net_device_ops dpaa2_eth_ops = {
 | |
|  	.ndo_open = dpaa2_eth_open,
 | |
|  	.ndo_start_xmit = dpaa2_eth_tx,
 | |
|  	.ndo_stop = dpaa2_eth_stop,
 | |
| -	.ndo_init = dpaa2_eth_init,
 | |
|  	.ndo_set_mac_address = dpaa2_eth_set_addr,
 | |
|  	.ndo_get_stats64 = dpaa2_eth_get_stats,
 | |
| -	.ndo_change_mtu = dpaa2_eth_change_mtu,
 | |
|  	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
 | |
|  	.ndo_set_features = dpaa2_eth_set_features,
 | |
| +	.ndo_do_ioctl = dpaa2_eth_ioctl,
 | |
| +	.ndo_xdp = dpaa2_eth_xdp,
 | |
| +	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
 | |
| +	.ndo_xdp_flush = dpaa2_eth_xdp_flush,
 | |
| +	.ndo_setup_tc = dpaa2_eth_setup_tc,
 | |
|  };
 | |
|  
 | |
|  static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
 | |
| @@ -1422,34 +2062,32 @@ static struct fsl_mc_device *setup_dpcon
 | |
|  	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpcon_open() failed\n");
 | |
| -		goto err_open;
 | |
| +		goto free;
 | |
|  	}
 | |
|  
 | |
|  	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpcon_reset() failed\n");
 | |
| -		goto err_reset;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
|  	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpcon_get_attributes() failed\n");
 | |
| -		goto err_get_attr;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
|  	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpcon_enable() failed\n");
 | |
| -		goto err_enable;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
|  	return dpcon;
 | |
|  
 | |
| -err_enable:
 | |
| -err_get_attr:
 | |
| -err_reset:
 | |
| +close:
 | |
|  	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
 | |
| -err_open:
 | |
| +free:
 | |
|  	fsl_mc_object_free(dpcon);
 | |
|  
 | |
|  	return NULL;
 | |
| @@ -1502,7 +2140,14 @@ err_setup:
 | |
|  static void free_channel(struct dpaa2_eth_priv *priv,
 | |
|  			 struct dpaa2_eth_channel *channel)
 | |
|  {
 | |
| +	struct bpf_prog *prog;
 | |
| +
 | |
|  	free_dpcon(priv, channel->dpcon);
 | |
| +
 | |
| +	prog = READ_ONCE(channel->xdp_prog);
 | |
| +	if (prog)
 | |
| +		bpf_prog_put(prog);
 | |
| +
 | |
|  	kfree(channel);
 | |
|  }
 | |
|  
 | |
| @@ -1546,7 +2191,8 @@ static int setup_dpio(struct dpaa2_eth_p
 | |
|  		nctx->desired_cpu = i;
 | |
|  
 | |
|  		/* Register the new context */
 | |
| -		err = dpaa2_io_service_register(NULL, nctx);
 | |
| +		channel->dpio = dpaa2_io_service_select(i);
 | |
| +		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
 | |
|  		if (err) {
 | |
|  			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
 | |
|  			/* If no affine DPIO for this core, there's probably
 | |
| @@ -1579,14 +2225,14 @@ static int setup_dpio(struct dpaa2_eth_p
 | |
|  		/* Stop if we already have enough channels to accommodate all
 | |
|  		 * RX and TX conf queues
 | |
|  		 */
 | |
| -		if (priv->num_channels == dpaa2_eth_queue_count(priv))
 | |
| +		if (priv->num_channels == priv->dpni_attrs.num_queues)
 | |
|  			break;
 | |
|  	}
 | |
|  
 | |
|  	return 0;
 | |
|  
 | |
|  err_set_cdan:
 | |
| -	dpaa2_io_service_deregister(NULL, nctx);
 | |
| +	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
 | |
|  err_service_reg:
 | |
|  	free_channel(priv, channel);
 | |
|  err_alloc_ch:
 | |
| @@ -1603,13 +2249,14 @@ err_alloc_ch:
 | |
|  
 | |
|  static void free_dpio(struct dpaa2_eth_priv *priv)
 | |
|  {
 | |
| -	int i;
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
|  	struct dpaa2_eth_channel *ch;
 | |
| +	int i;
 | |
|  
 | |
|  	/* deregister CDAN notifications and free channels */
 | |
|  	for (i = 0; i < priv->num_channels; i++) {
 | |
|  		ch = priv->channel[i];
 | |
| -		dpaa2_io_service_deregister(NULL, &ch->nctx);
 | |
| +		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
 | |
|  		free_channel(priv, ch);
 | |
|  	}
 | |
|  }
 | |
| @@ -1636,8 +2283,7 @@ static void set_fq_affinity(struct dpaa2
 | |
|  {
 | |
|  	struct device *dev = priv->net_dev->dev.parent;
 | |
|  	struct dpaa2_eth_fq *fq;
 | |
| -	int rx_cpu, txc_cpu;
 | |
| -	int i;
 | |
| +	int rx_cpu, txc_cpu, i;
 | |
|  
 | |
|  	/* For each FQ, pick one channel/CPU to deliver frames to.
 | |
|  	 * This may well change at runtime, either through irqbalance or
 | |
| @@ -1649,6 +2295,7 @@ static void set_fq_affinity(struct dpaa2
 | |
|  		fq = &priv->fq[i];
 | |
|  		switch (fq->type) {
 | |
|  		case DPAA2_RX_FQ:
 | |
| +		case DPAA2_RX_ERR_FQ:
 | |
|  			fq->target_cpu = rx_cpu;
 | |
|  			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
 | |
|  			if (rx_cpu >= nr_cpu_ids)
 | |
| @@ -1665,11 +2312,13 @@ static void set_fq_affinity(struct dpaa2
 | |
|  		}
 | |
|  		fq->channel = get_affine_channel(priv, fq->target_cpu);
 | |
|  	}
 | |
| +
 | |
| +	dpaa2_eth_update_xps(priv);
 | |
|  }
 | |
|  
 | |
|  static void setup_fqs(struct dpaa2_eth_priv *priv)
 | |
|  {
 | |
| -	int i;
 | |
| +	int i, j;
 | |
|  
 | |
|  	/* We have one TxConf FQ per Tx flow.
 | |
|  	 * The number of Tx and Rx queues is the same.
 | |
| @@ -1681,11 +2330,19 @@ static void setup_fqs(struct dpaa2_eth_p
 | |
|  		priv->fq[priv->num_fqs++].flowid = (u16)i;
 | |
|  	}
 | |
|  
 | |
| -	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
 | |
| -		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
 | |
| -		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
 | |
| -		priv->fq[priv->num_fqs++].flowid = (u16)i;
 | |
| -	}
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
 | |
| +		for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
 | |
| +			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
 | |
| +			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
 | |
| +			priv->fq[priv->num_fqs].tc = (u8)i;
 | |
| +			priv->fq[priv->num_fqs++].flowid = (u16)j;
 | |
| +		}
 | |
| +
 | |
| +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
 | |
| +	/* We have exactly one Rx error queue per DPNI */
 | |
| +	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
 | |
| +	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
 | |
| +#endif
 | |
|  
 | |
|  	/* For each FQ, decide on which core to process incoming frames */
 | |
|  	set_fq_affinity(priv);
 | |
| @@ -1735,6 +2392,9 @@ static int setup_dpbp(struct dpaa2_eth_p
 | |
|  	}
 | |
|  	priv->bpid = dpbp_attrs.bpid;
 | |
|  
 | |
| +	/* By default we start with flow control enabled */
 | |
| +	priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
 | |
| +
 | |
|  	return 0;
 | |
|  
 | |
|  err_get_attr:
 | |
| @@ -1762,7 +2422,7 @@ static int setup_dpni(struct fsl_mc_devi
 | |
|  	struct device *dev = &ls_dev->dev;
 | |
|  	struct dpaa2_eth_priv *priv;
 | |
|  	struct net_device *net_dev;
 | |
| -	struct dpni_buffer_layout buf_layout = {0};
 | |
| +	struct dpni_link_cfg cfg = {0};
 | |
|  	int err;
 | |
|  
 | |
|  	net_dev = dev_get_drvdata(dev);
 | |
| @@ -1772,7 +2432,22 @@ static int setup_dpni(struct fsl_mc_devi
 | |
|  	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_open() failed\n");
 | |
| -		goto err_open;
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	/* Check if we can work with this DPNI object */
 | |
| +	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
 | |
| +				   &priv->dpni_ver_minor);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_get_api_version() failed\n");
 | |
| +		goto close;
 | |
| +	}
 | |
| +	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
 | |
| +		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
 | |
| +			priv->dpni_ver_major, priv->dpni_ver_minor,
 | |
| +			DPNI_VER_MAJOR, DPNI_VER_MINOR);
 | |
| +		err = -ENOTSUPP;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
|  	ls_dev->mc_io = priv->mc_io;
 | |
| @@ -1781,77 +2456,41 @@ static int setup_dpni(struct fsl_mc_devi
 | |
|  	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_reset() failed\n");
 | |
| -		goto err_reset;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
|  	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
 | |
|  				  &priv->dpni_attrs);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
 | |
| -		goto err_get_attr;
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
| -	/* Configure buffer layouts */
 | |
| -	/* rx buffer */
 | |
| -	buf_layout.pass_parser_result = true;
 | |
| -	buf_layout.pass_frame_status = true;
 | |
| -	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
 | |
| -	buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
 | |
| -	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
 | |
| -			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
 | |
| -			     DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
 | |
| -			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
 | |
| -	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| -				     DPNI_QUEUE_RX, &buf_layout);
 | |
| -	if (err) {
 | |
| -		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
 | |
| -		goto err_buf_layout;
 | |
| -	}
 | |
| +	err = set_buffer_layout(priv);
 | |
| +	if (err)
 | |
| +		goto close;
 | |
|  
 | |
| -	/* tx buffer */
 | |
| -	buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
 | |
| -			     DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
 | |
| -	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| -				     DPNI_QUEUE_TX, &buf_layout);
 | |
| -	if (err) {
 | |
| -		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
 | |
| -		goto err_buf_layout;
 | |
| -	}
 | |
| +	set_enqueue_mode(priv);
 | |
|  
 | |
| -	/* tx-confirm buffer */
 | |
| -	buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
 | |
| -	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
 | |
| -				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
 | |
| -	if (err) {
 | |
| -		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
 | |
| -		goto err_buf_layout;
 | |
| -	}
 | |
| +	priv->cls_rule = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
 | |
| +				      dpaa2_eth_fs_count(priv), GFP_KERNEL);
 | |
| +	if (!priv->cls_rule)
 | |
| +		goto close;
 | |
|  
 | |
| -	/* Now that we've set our tx buffer layout, retrieve the minimum
 | |
| -	 * required tx data offset.
 | |
| -	 */
 | |
| -	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
 | |
| -				      &priv->tx_data_offset);
 | |
| +	/* Enable flow control */
 | |
| +	cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
 | |
| +	priv->tx_pause_frames = true;
 | |
| +	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
 | |
|  	if (err) {
 | |
| -		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
 | |
| -		goto err_data_offset;
 | |
| +		dev_err(dev, "dpni_set_link_cfg() failed\n");
 | |
| +		goto close;
 | |
|  	}
 | |
|  
 | |
| -	if ((priv->tx_data_offset % 64) != 0)
 | |
| -		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
 | |
| -			 priv->tx_data_offset);
 | |
| -
 | |
| -	/* Accommodate software annotation space (SWA) */
 | |
| -	priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
 | |
| -
 | |
|  	return 0;
 | |
|  
 | |
| -err_data_offset:
 | |
| -err_buf_layout:
 | |
| -err_get_attr:
 | |
| -err_reset:
 | |
| +close:
 | |
|  	dpni_close(priv->mc_io, 0, priv->mc_token);
 | |
| -err_open:
 | |
| +
 | |
|  	return err;
 | |
|  }
 | |
|  
 | |
| @@ -1865,6 +2504,7 @@ static void free_dpni(struct dpaa2_eth_p
 | |
|  			    err);
 | |
|  
 | |
|  	dpni_close(priv->mc_io, 0, priv->mc_token);
 | |
| +
 | |
|  }
 | |
|  
 | |
|  static int setup_rx_flow(struct dpaa2_eth_priv *priv,
 | |
| @@ -1873,11 +2513,10 @@ static int setup_rx_flow(struct dpaa2_et
 | |
|  	struct device *dev = priv->net_dev->dev.parent;
 | |
|  	struct dpni_queue queue;
 | |
|  	struct dpni_queue_id qid;
 | |
| -	struct dpni_taildrop td;
 | |
|  	int err;
 | |
|  
 | |
|  	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 | |
| -			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
 | |
| +			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_get_queue(RX) failed\n");
 | |
|  		return err;
 | |
| @@ -1889,24 +2528,136 @@ static int setup_rx_flow(struct dpaa2_et
 | |
|  	queue.destination.type = DPNI_DEST_DPCON;
 | |
|  	queue.destination.priority = 1;
 | |
|  	queue.user_context = (u64)(uintptr_t)fq;
 | |
| +	queue.flc.stash_control = 1;
 | |
| +	queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
 | |
| +	/* 01 01 00 - data, annotation, flow context*/
 | |
| +	queue.flc.value |= 0x14;
 | |
| +
 | |
|  	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
 | |
| -			     DPNI_QUEUE_RX, 0, fq->flowid,
 | |
| -			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
 | |
| +			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
 | |
| +			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
 | |
| +			     DPNI_QUEUE_OPT_FLC,
 | |
|  			     &queue);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_set_queue(RX) failed\n");
 | |
|  		return err;
 | |
|  	}
 | |
|  
 | |
| -	td.enable = 1;
 | |
| -	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
 | |
| -	err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
 | |
| -				DPNI_QUEUE_RX, 0, fq->flowid, &td);
 | |
| -	if (err) {
 | |
| -		dev_err(dev, "dpni_set_threshold() failed\n");
 | |
| -		return err;
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
 | |
| +			      struct dpni_taildrop *td)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	int i, err;
 | |
| +
 | |
| +	for (i = 0; i < priv->num_fqs; i++) {
 | |
| +		if (priv->fq[i].type != DPAA2_RX_FQ)
 | |
| +			continue;
 | |
| +
 | |
| +		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
 | |
| +					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
 | |
| +					priv->fq[i].tc, priv->fq[i].flowid,
 | |
| +					td);
 | |
| +		if (err) {
 | |
| +			dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
 | |
| +			return err;
 | |
| +		}
 | |
| +
 | |
| +		dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
 | |
| +			(td->enable ? "Enabled" : "Disabled"),
 | |
| +			priv->fq[i].flowid, priv->fq[i].tc);
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int set_group_taildrop(struct dpaa2_eth_priv *priv,
 | |
| +			      struct dpni_taildrop *td)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_taildrop disable_td, *tc_td;
 | |
| +	int i, err;
 | |
| +
 | |
| +	memset(&disable_td, 0, sizeof(struct dpni_taildrop));
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
 | |
| +		if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
 | |
| +			/* Do not set taildrop thresholds for PFC-enabled
 | |
| +			 * traffic classes. We will enable congestion
 | |
| +			 * notifications for them.
 | |
| +			 */
 | |
| +			tc_td = &disable_td;
 | |
| +		else
 | |
| +			tc_td = td;
 | |
| +
 | |
| +		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
 | |
| +					DPNI_CP_GROUP, DPNI_QUEUE_RX,
 | |
| +					i, 0, tc_td);
 | |
| +		if (err) {
 | |
| +			dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
 | |
| +			return err;
 | |
| +		}
 | |
| +
 | |
| +		dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
 | |
| +			(tc_td->enable ? "Enabled" : "Disabled"),
 | |
| +			i);
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/* Enable/disable Rx FQ taildrop
 | |
| + *
 | |
| + * Rx FQ taildrop is mutually exclusive with flow control and it only gets
 | |
| + * disabled when FC is active. Depending on FC status, we need to compute
 | |
| + * the maximum number of buffers in the pool differently, so use the
 | |
| + * opportunity to update max number of buffers as well.
 | |
| + */
 | |
| +int set_rx_taildrop(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
 | |
| +	struct dpni_taildrop td_queue, td_group;
 | |
| +	int err = 0;
 | |
| +
 | |
| +	switch (cfg) {
 | |
| +	case DPAA2_ETH_TD_NONE:
 | |
| +		memset(&td_queue, 0, sizeof(struct dpni_taildrop));
 | |
| +		memset(&td_group, 0, sizeof(struct dpni_taildrop));
 | |
| +		priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
 | |
| +					priv->num_channels;
 | |
| +		break;
 | |
| +	case DPAA2_ETH_TD_QUEUE:
 | |
| +		memset(&td_group, 0, sizeof(struct dpni_taildrop));
 | |
| +		td_queue.enable = 1;
 | |
| +		td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
 | |
| +		td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
 | |
| +				     dpaa2_eth_tc_count(priv);
 | |
| +		priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
 | |
| +		break;
 | |
| +	case DPAA2_ETH_TD_GROUP:
 | |
| +		memset(&td_queue, 0, sizeof(struct dpni_taildrop));
 | |
| +		td_group.enable = 1;
 | |
| +		td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
 | |
| +		td_group.threshold = NAPI_POLL_WEIGHT *
 | |
| +				     dpaa2_eth_queue_count(priv);
 | |
| +		priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
 | |
| +					dpaa2_eth_tc_count(priv);
 | |
| +		break;
 | |
| +	default:
 | |
| +		break;
 | |
|  	}
 | |
|  
 | |
| +	err = set_queue_taildrop(priv, &td_queue);
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	err = set_group_taildrop(priv, &td_group);
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
 | |
| +
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| @@ -1926,6 +2677,7 @@ static int setup_tx_flow(struct dpaa2_et
 | |
|  	}
 | |
|  
 | |
|  	fq->tx_qdbin = qid.qdbin;
 | |
| +	fq->tx_fqid = qid.fqid;
 | |
|  
 | |
|  	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 | |
|  			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
 | |
| @@ -1953,23 +2705,88 @@ static int setup_tx_flow(struct dpaa2_et
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| -/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
 | |
| -static const struct dpaa2_eth_hash_fields hash_fields[] = {
 | |
| +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
 | |
| +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
 | |
| +			     struct dpaa2_eth_fq *fq)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_queue q = { { 0 } };
 | |
| +	struct dpni_queue_id qid;
 | |
| +	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
 | |
| +	int err;
 | |
| +
 | |
| +	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 | |
| +			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	fq->fqid = qid.fqid;
 | |
| +
 | |
| +	q.destination.id = fq->channel->dpcon_id;
 | |
| +	q.destination.type = DPNI_DEST_DPCON;
 | |
| +	q.destination.priority = 1;
 | |
| +	q.user_context = (u64)fq;
 | |
| +	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
 | |
| +			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
 | |
| +	if (err) {
 | |
| +		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
 | |
| +		return err;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +/* Supported header fields for Rx hash distribution key */
 | |
| +static const struct dpaa2_eth_dist_fields dist_fields[] = {
 | |
|  	{
 | |
| +		/* L2 header */
 | |
| +		.rxnfc_field = RXH_L2DA,
 | |
| +		.cls_prot = NET_PROT_ETH,
 | |
| +		.cls_field = NH_FLD_ETH_DA,
 | |
| +		.id = DPAA2_ETH_DIST_ETHDST,
 | |
| +		.size = 6,
 | |
| +	}, {
 | |
| +		.cls_prot = NET_PROT_ETH,
 | |
| +		.cls_field = NH_FLD_ETH_SA,
 | |
| +		.id = DPAA2_ETH_DIST_ETHSRC,
 | |
| +		.size = 6,
 | |
| +	}, {
 | |
| +		/* This is the last ethertype field parsed:
 | |
| +		 * depending on frame format, it can be the MAC ethertype
 | |
| +		 * or the VLAN etype.
 | |
| +		 */
 | |
| +		.cls_prot = NET_PROT_ETH,
 | |
| +		.cls_field = NH_FLD_ETH_TYPE,
 | |
| +		.id = DPAA2_ETH_DIST_ETHTYPE,
 | |
| +		.size = 2,
 | |
| +	}, {
 | |
| +		/* VLAN header */
 | |
| +		.rxnfc_field = RXH_VLAN,
 | |
| +		.cls_prot = NET_PROT_VLAN,
 | |
| +		.cls_field = NH_FLD_VLAN_TCI,
 | |
| +		.id = DPAA2_ETH_DIST_VLAN,
 | |
| +		.size = 2,
 | |
| +	}, {
 | |
|  		/* IP header */
 | |
|  		.rxnfc_field = RXH_IP_SRC,
 | |
|  		.cls_prot = NET_PROT_IP,
 | |
|  		.cls_field = NH_FLD_IP_SRC,
 | |
| +		.id = DPAA2_ETH_DIST_IPSRC,
 | |
|  		.size = 4,
 | |
|  	}, {
 | |
|  		.rxnfc_field = RXH_IP_DST,
 | |
|  		.cls_prot = NET_PROT_IP,
 | |
|  		.cls_field = NH_FLD_IP_DST,
 | |
| +		.id = DPAA2_ETH_DIST_IPDST,
 | |
|  		.size = 4,
 | |
|  	}, {
 | |
|  		.rxnfc_field = RXH_L3_PROTO,
 | |
|  		.cls_prot = NET_PROT_IP,
 | |
|  		.cls_field = NH_FLD_IP_PROTO,
 | |
| +		.id = DPAA2_ETH_DIST_IPPROTO,
 | |
|  		.size = 1,
 | |
|  	}, {
 | |
|  		/* Using UDP ports, this is functionally equivalent to raw
 | |
| @@ -1978,41 +2795,170 @@ static const struct dpaa2_eth_hash_field
 | |
|  		.rxnfc_field = RXH_L4_B_0_1,
 | |
|  		.cls_prot = NET_PROT_UDP,
 | |
|  		.cls_field = NH_FLD_UDP_PORT_SRC,
 | |
| +		.id = DPAA2_ETH_DIST_L4SRC,
 | |
|  		.size = 2,
 | |
|  	}, {
 | |
|  		.rxnfc_field = RXH_L4_B_2_3,
 | |
|  		.cls_prot = NET_PROT_UDP,
 | |
|  		.cls_field = NH_FLD_UDP_PORT_DST,
 | |
| +		.id = DPAA2_ETH_DIST_L4DST,
 | |
|  		.size = 2,
 | |
|  	},
 | |
|  };
 | |
|  
 | |
| -/* Set RX hash options
 | |
| +/* Configure the Rx hash key using the legacy API */
 | |
| +static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_rx_tc_dist_cfg dist_cfg;
 | |
| +	int i, err = 0;
 | |
| +
 | |
| +	memset(&dist_cfg, 0, sizeof(dist_cfg));
 | |
| +
 | |
| +	dist_cfg.key_cfg_iova = key;
 | |
| +	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 | |
| +	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
 | |
| +
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
 | |
| +		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
 | |
| +					  i, &dist_cfg);
 | |
| +		if (err) {
 | |
| +			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Configure the Rx hash key using the new API */
 | |
| +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_rx_dist_cfg dist_cfg;
 | |
| +	int i, err = 0;
 | |
| +
 | |
| +	memset(&dist_cfg, 0, sizeof(dist_cfg));
 | |
| +
 | |
| +	dist_cfg.key_cfg_iova = key;
 | |
| +	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 | |
| +	dist_cfg.enable = 1;
 | |
| +
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
 | |
| +		dist_cfg.tc = i;
 | |
| +		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
 | |
| +					    &dist_cfg);
 | |
| +		if (err) {
 | |
| +			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Configure the Rx flow classification key */
 | |
| +static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	struct dpni_rx_dist_cfg dist_cfg;
 | |
| +	int i, err = 0;
 | |
| +
 | |
| +	memset(&dist_cfg, 0, sizeof(dist_cfg));
 | |
| +
 | |
| +	dist_cfg.key_cfg_iova = key;
 | |
| +	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 | |
| +	dist_cfg.enable = 1;
 | |
| +
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
 | |
| +		dist_cfg.tc = i;
 | |
| +		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
 | |
| +					  &dist_cfg);
 | |
| +		if (err) {
 | |
| +			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* Size of the Rx flow classification key */
 | |
| +int dpaa2_eth_cls_key_size(u64 fields)
 | |
| +{
 | |
| +	int i, size = 0;
 | |
| +
 | |
| +	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 | |
| +		if (!(fields & dist_fields[i].id))
 | |
| +			continue;
 | |
| +		size += dist_fields[i].size;
 | |
| +	}
 | |
| +
 | |
| +	return size;
 | |
| +}
 | |
| +
 | |
| +/* Offset of header field in Rx classification key */
 | |
| +int dpaa2_eth_cls_fld_off(int prot, int field)
 | |
| +{
 | |
| +	int i, off = 0;
 | |
| +
 | |
| +	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 | |
| +		if (dist_fields[i].cls_prot == prot &&
 | |
| +		    dist_fields[i].cls_field == field)
 | |
| +			return off;
 | |
| +		off += dist_fields[i].size;
 | |
| +	}
 | |
| +
 | |
| +	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/* Prune unused fields from the classification rule.
 | |
| + * Used when masking is not supported
 | |
| + */
 | |
| +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
 | |
| +{
 | |
| +	int off = 0, new_off = 0;
 | |
| +	int i, size;
 | |
| +
 | |
| +	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 | |
| +		size = dist_fields[i].size;
 | |
| +		if (dist_fields[i].id & fields) {
 | |
| +			memcpy(key_mem + new_off, key_mem + off, size);
 | |
| +			new_off += size;
 | |
| +		}
 | |
| +		off += size;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/* Set Rx distribution (hash or flow classification) key
 | |
|   * flags is a combination of RXH_ bits
 | |
|   */
 | |
| -static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 | |
| +static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
 | |
| +				  enum dpaa2_eth_rx_dist type, u64 flags)
 | |
|  {
 | |
|  	struct device *dev = net_dev->dev.parent;
 | |
|  	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
|  	struct dpkg_profile_cfg cls_cfg;
 | |
| -	struct dpni_rx_tc_dist_cfg dist_cfg;
 | |
| +	u32 rx_hash_fields = 0;
 | |
| +	dma_addr_t key_iova;
 | |
|  	u8 *dma_mem;
 | |
|  	int i;
 | |
|  	int err = 0;
 | |
|  
 | |
| -	if (!dpaa2_eth_hash_enabled(priv)) {
 | |
| -		dev_dbg(dev, "Hashing support is not enabled\n");
 | |
| -		return 0;
 | |
| -	}
 | |
| -
 | |
|  	memset(&cls_cfg, 0, sizeof(cls_cfg));
 | |
|  
 | |
| -	for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
 | |
| +	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 | |
|  		struct dpkg_extract *key =
 | |
|  			&cls_cfg.extracts[cls_cfg.num_extracts];
 | |
|  
 | |
| -		if (!(flags & hash_fields[i].rxnfc_field))
 | |
| +		/* For both Rx hashing and classification keys
 | |
| +		 * we set only the selected fields.
 | |
| +		 */
 | |
| +		if (!(flags & dist_fields[i].id))
 | |
|  			continue;
 | |
| +		if (type == DPAA2_ETH_RX_DIST_HASH)
 | |
| +			rx_hash_fields |= dist_fields[i].rxnfc_field;
 | |
|  
 | |
|  		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
 | |
|  			dev_err(dev, "error adding key extraction rule, too many rules?\n");
 | |
| @@ -2020,12 +2966,10 @@ static int dpaa2_eth_set_hash(struct net
 | |
|  		}
 | |
|  
 | |
|  		key->type = DPKG_EXTRACT_FROM_HDR;
 | |
| -		key->extract.from_hdr.prot = hash_fields[i].cls_prot;
 | |
| +		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
 | |
|  		key->extract.from_hdr.type = DPKG_FULL_FIELD;
 | |
| -		key->extract.from_hdr.field = hash_fields[i].cls_field;
 | |
| +		key->extract.from_hdr.field = dist_fields[i].cls_field;
 | |
|  		cls_cfg.num_extracts++;
 | |
| -
 | |
| -		priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
 | |
|  	}
 | |
|  
 | |
|  	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
 | |
| @@ -2035,36 +2979,96 @@ static int dpaa2_eth_set_hash(struct net
 | |
|  	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
 | |
|  	if (err) {
 | |
|  		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
 | |
| -		goto err_prep_key;
 | |
| +		goto free_key;
 | |
|  	}
 | |
|  
 | |
| -	memset(&dist_cfg, 0, sizeof(dist_cfg));
 | |
| -
 | |
|  	/* Prepare for setting the rx dist */
 | |
| -	dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
 | |
| -					       DPAA2_CLASSIFIER_DMA_SIZE,
 | |
| -					       DMA_TO_DEVICE);
 | |
| -	if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
 | |
| +	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
 | |
| +				  DMA_TO_DEVICE);
 | |
| +	if (dma_mapping_error(dev, key_iova)) {
 | |
|  		dev_err(dev, "DMA mapping failed\n");
 | |
|  		err = -ENOMEM;
 | |
| -		goto err_dma_map;
 | |
| +		goto free_key;
 | |
|  	}
 | |
|  
 | |
| -	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 | |
| -	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
 | |
| +	if (type == DPAA2_ETH_RX_DIST_HASH) {
 | |
| +		if (dpaa2_eth_has_legacy_dist(priv))
 | |
| +			err = config_legacy_hash_key(priv, key_iova);
 | |
| +		else
 | |
| +			err = config_hash_key(priv, key_iova);
 | |
| +	} else {
 | |
| +		err = config_cls_key(priv, key_iova);
 | |
| +	}
 | |
|  
 | |
| -	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
 | |
| -	dma_unmap_single(dev, dist_cfg.key_cfg_iova,
 | |
| -			 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
 | |
| -	if (err)
 | |
| -		dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
 | |
| +	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
 | |
| +			 DMA_TO_DEVICE);
 | |
| +	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
 | |
| +		priv->rx_hash_fields = rx_hash_fields;
 | |
|  
 | |
| -err_dma_map:
 | |
| -err_prep_key:
 | |
| +free_key:
 | |
|  	kfree(dma_mem);
 | |
|  	return err;
 | |
|  }
 | |
|  
 | |
| +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	u64 key = 0;
 | |
| +	int i;
 | |
| +
 | |
| +	if (!dpaa2_eth_hash_enabled(priv))
 | |
| +		return -EOPNOTSUPP;
 | |
| +
 | |
| +	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
 | |
| +		if (dist_fields[i].rxnfc_field & flags)
 | |
| +			key |= dist_fields[i].id;
 | |
| +
 | |
| +	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
 | |
| +}
 | |
| +
 | |
| +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
 | |
| +{
 | |
| +	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	struct device *dev = priv->net_dev->dev.parent;
 | |
| +	int err;
 | |
| +
 | |
| +	/* Check if we actually support Rx flow classification */
 | |
| +	if (dpaa2_eth_has_legacy_dist(priv)) {
 | |
| +		dev_dbg(dev, "Rx cls not supported by current MC version\n");
 | |
| +		return -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	if (!dpaa2_eth_fs_enabled(priv)) {
 | |
| +		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
 | |
| +		return -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	if (!dpaa2_eth_hash_enabled(priv)) {
 | |
| +		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
 | |
| +		return -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	/* If there is no support for masking in the classification table,
 | |
| +	 * we don't set a default key, as it will depend on the rules
 | |
| +	 * added by the user at runtime.
 | |
| +	 */
 | |
| +	if (!dpaa2_eth_fs_mask_enabled(priv))
 | |
| +		goto out;
 | |
| +
 | |
+	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+	if (err)
+		return err;
+
+out:
+	priv->rx_cls_enabled = 1;
+
+	return 0;
+}
+
 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
  * frame queues and channels
  */
@@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
 	pools_params.num_dpbp = 1;
 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
 	pools_params.pools[0].backup_pool = 0;
+	pools_params.pools[0].priority_mask = 0xff;
 	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
 	if (err) {
@@ -2087,17 +3092,28 @@ static int bind_dpni(struct dpaa2_eth_pr
 		return err;
 	}
 
-	/* have the interface implicitly distribute traffic based on supported
-	 * header fields
+	/* have the interface implicitly distribute traffic based on
+	 * the default hash key
 	 */
-	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
-	if (err)
-		netdev_err(net_dev, "Failed to configure hashing\n");
+	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
+	if (err && err != -EOPNOTSUPP)
+		dev_err(dev, "Failed to configure hashing\n");
+
+	/* Configure the flow classification key; it includes all
+	 * supported header fields and cannot be modified at runtime
+	 */
+	err = dpaa2_eth_set_default_cls(priv);
+	if (err && err != -EOPNOTSUPP)
+		dev_err(dev, "Failed to configure Rx classification key\n");
 
 	/* Configure handling of error frames */
 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
 	err_cfg.set_frame_annotation = 1;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+#else
 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+#endif
 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
 				       &err_cfg);
 	if (err) {
@@ -2114,6 +3130,11 @@ static int bind_dpni(struct dpaa2_eth_pr
 		case DPAA2_TX_CONF_FQ:
 			err = setup_tx_flow(priv, &priv->fq[i]);
 			break;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+		case DPAA2_RX_ERR_FQ:
+			err = setup_rx_err_flow(priv, &priv->fq[i]);
+			break;
+#endif
 		default:
 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
 			return -EINVAL;
@@ -2237,11 +3258,14 @@ static int netdev_init(struct net_device
 {
 	struct device *dev = net_dev->dev.parent;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u32 options = priv->dpni_attrs.options;
+	u64 supported = 0, not_supported = 0;
 	u8 bcast_addr[ETH_ALEN];
 	u8 num_queues;
 	int err;
 
 	net_dev->netdev_ops = &dpaa2_eth_ops;
+	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
 
 	err = set_mac_addr(priv);
 	if (err)
@@ -2255,14 +3279,14 @@ static int netdev_init(struct net_device
 		return err;
 	}
 
-	/* Reserve enough space to align buffer as per hardware requirement;
-	 * NOTE: priv->tx_data_offset MUST be initialized at this point.
-	 */
-	net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
-
-	/* Set MTU limits */
-	net_dev->min_mtu = 68;
+	/* Set MTU upper limit; lower limit is 68B (default value) */
 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+					DPAA2_ETH_MFL);
+	if (err) {
+		dev_err(dev, "dpni_set_max_frame_length() failed\n");
+		return err;
+	}
 
 	/* Set actual number of queues in the net device */
 	num_queues = dpaa2_eth_queue_count(priv);
@@ -2277,12 +3301,23 @@ static int netdev_init(struct net_device
 		return err;
 	}
 
-	/* Our .ndo_init will be called herein */
-	err = register_netdev(net_dev);
-	if (err < 0) {
-		dev_err(dev, "register_netdev() failed\n");
-		return err;
-	}
+	/* Capabilities listing */
+	supported |= IFF_LIVE_ADDR_CHANGE;
+
+	if (options & DPNI_OPT_NO_MAC_FILTER)
+		not_supported |= IFF_UNICAST_FLT;
+	else
+		supported |= IFF_UNICAST_FLT;
+
+	net_dev->priv_flags |= supported;
+	net_dev->priv_flags &= ~not_supported;
+
+	/* Features */
+	net_dev->features = NETIF_F_RXCSUM |
+			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			    NETIF_F_SG | NETIF_F_HIGHDMA |
+			    NETIF_F_LLTX;
+	net_dev->hw_features = net_dev->features;
 
 	return 0;
 }
@@ -2303,14 +3338,9 @@ static int poll_link_state(void *arg)
 	return 0;
 }
 
-static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
-{
-	return IRQ_WAKE_THREAD;
-}
-
 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
 {
-	u32 status = 0, clear = 0;
+	u32 status = ~0;
 	struct device *dev = (struct device *)arg;
 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
 	struct net_device *net_dev = dev_get_drvdata(dev);
@@ -2320,18 +3350,12 @@ static irqreturn_t dpni_irq0_handler_thr
 				  DPNI_IRQ_INDEX, &status);
 	if (unlikely(err)) {
 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
-		clear = 0xffffffff;
-		goto out;
+		return IRQ_HANDLED;
 	}
 
-	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-		clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
 		link_state_update(netdev_priv(net_dev));
-	}
 
-out:
-	dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-			      DPNI_IRQ_INDEX, clear);
 	return IRQ_HANDLED;
 }
 
@@ -2348,8 +3372,7 @@ static int setup_irqs(struct fsl_mc_devi
 
 	irq = ls_dev->irqs[0];
 	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-					dpni_irq0_handler,
-					dpni_irq0_handler_thread,
+					NULL, dpni_irq0_handler_thread,
 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					dev_name(&ls_dev->dev), &ls_dev->dev);
 	if (err < 0) {
@@ -2405,6 +3428,393 @@ static void del_ch_napi(struct dpaa2_eth
 	}
 }
 
+/* SysFS support */
+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
+	/* No MC API for getting the shaping config. We're stateful. */
+	struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
+
+	return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
+}
+
+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t count)
+{
+	int err, items;
+	struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
+	struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
+
+	items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
+	if (items != 2) {
+		pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
+		return -EINVAL;
+	}
+	/* Size restriction as per MC API documentation */
+	if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
+		pr_err("max_burst_size must be <= %d\n",
+		       DPAA2_ETH_MAX_BURST_SIZE);
+		return -EINVAL;
+	}
+
+	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
+				  &ercfg, 0);
+	if (err) {
+		dev_err(dev, "dpni_set_tx_shaping() failed\n");
+		return -EPERM;
+	}
+	/* If successful, save the current configuration for future inquiries */
+	priv->shaping_cfg = scfg;
+
+	return count;
+}
+
+static struct device_attribute dpaa2_eth_attrs[] = {
+	__ATTR(tx_shaping,
+	       0600,
+	       dpaa2_eth_show_tx_shaping,
+	       dpaa2_eth_write_tx_shaping),
+};
+
+static void dpaa2_eth_sysfs_init(struct device *dev)
+{
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
+		err = device_create_file(dev, &dpaa2_eth_attrs[i]);
+		if (err) {
+			dev_err(dev, "ERROR creating sysfs file\n");
+			goto undo;
+		}
+	}
+	return;
+
+undo:
+	while (i > 0)
+		device_remove_file(dev, &dpaa2_eth_attrs[--i]);
+}
+
+static void dpaa2_eth_sysfs_remove(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
+		device_remove_file(dev, &dpaa2_eth_attrs[i]);
+}
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+				       struct ieee_pfc *pfc)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_congestion_notification_cfg notification_cfg;
+	struct dpni_link_state state;
+	int err, i;
+
+	priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (err) {
+		netdev_err(net_dev, "ERROR %d getting link state", err);
+		return err;
+	}
+
+	if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
+		return 0;
+
+	priv->pfc.pfc_en = 0;
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_get_congestion_notification(priv->mc_io, 0,
+						       priv->mc_token,
+						       DPNI_QUEUE_RX,
+						       i, &notification_cfg);
+		if (err) {
+			netdev_err(net_dev, "Error %d getting congestion notif",
+				   err);
+			return err;
+		}
+
+		if (notification_cfg.threshold_entry)
+			priv->pfc.pfc_en |= 1 << i;
+	}
+
+	memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+
+	return 0;
+}
+
+/* Configure ingress classification based on VLAN PCP */
+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpkg_profile_cfg kg_cfg = {0};
+	struct dpni_qos_tbl_cfg qos_cfg = {0};
+	struct dpni_rule_cfg key_params;
+	u8 *params_iova, *key, *mask = NULL;
+	/* We only need the trailing 16 bits, without the TPID */
+	u8 key_size = VLAN_HLEN / 2;
+	int err = 0, i, j = 0;
+
+	if (priv->vlan_clsf_set)
+		return 0;
+
+	params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+	if (!params_iova)
+		return -ENOMEM;
+
+	kg_cfg.num_extracts = 1;
+	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+	err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
+	if (err) {
+		dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
+		goto out_free;
+	}
+
+	/* Set QoS table */
+	qos_cfg.default_tc = 0;
+	qos_cfg.discard_on_miss = 0;
+	qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
+					      DPAA2_CLASSIFIER_DMA_SIZE,
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+		dev_err(dev, "%s: DMA mapping failed\n", __func__);
+		err = -ENOMEM;
+		goto out_free;
+	}
+	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+	dma_unmap_single(dev, qos_cfg.key_cfg_iova,
+			 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
+	if (err) {
+		dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
+		goto out_free;
+	}
+
+	key_params.key_size = key_size;
+
+	if (dpaa2_eth_fs_mask_enabled(priv)) {
+		mask = kzalloc(key_size, GFP_KERNEL);
+		if (!mask)
+			goto out_free;
+
+		*mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+		key_params.mask_iova = dma_map_single(dev, mask, key_size,
+						      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, key_params.mask_iova)) {
+			dev_err(dev, "DMA mapping failed %s\n", __func__);
+			err = -ENOMEM;
+			goto out_free_mask;
+		}
+	} else {
+		key_params.mask_iova = 0;
+	}
+
+	key = kzalloc(key_size, GFP_KERNEL);
+	if (!key)
+		goto out_cleanup_mask;
+
+	key_params.key_iova = dma_map_single(dev, key, key_size,
+					     DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, key_params.key_iova)) {
+		dev_err(dev, "%s: DMA mapping failed\n", __func__);
+		err = -ENOMEM;
+		goto out_free_key;
+	}
+
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		*key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
+
+		dma_sync_single_for_device(dev, key_params.key_iova,
+					   key_size, DMA_TO_DEVICE);
+
+		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+					 &key_params, i, j++);
+		if (err) {
+			dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
+			goto out_remove;
+		}
+	}
+
+	priv->vlan_clsf_set = true;
+	dev_dbg(dev, "Vlan PCP QoS classification set\n");
+	goto out_cleanup;
+
+out_remove:
+	for (j = 0; j < i; j++) {
+		*key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
+
+		dma_sync_single_for_device(dev, key_params.key_iova, key_size,
+					   DMA_TO_DEVICE);
+
+		err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
+					    &key_params);
+		if (err)
+			dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
+	}
+
+out_cleanup:
+	dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
+out_free_key:
+	kfree(key);
+out_cleanup_mask:
+	if (key_params.mask_iova)
+		dma_unmap_single(dev, key_params.mask_iova, key_size,
+				 DMA_TO_DEVICE);
+out_free_mask:
+	kfree(mask);
+out_free:
+	kfree(params_iova);
+	return err;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+				       struct ieee_pfc *pfc)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_congestion_notification_cfg notification_cfg = {0};
+	struct dpni_link_state state = {0};
+	struct dpni_link_cfg cfg = {0};
+	struct ieee_pfc old_pfc;
+	int err = 0, i;
+
+	if (dpaa2_eth_tc_count(priv) == 1) {
+		netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
+		return 0;
+	}
+
+	/* Zero out pfc_enabled prios greater than tc_count */
+	pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
+
+	if (priv->pfc.pfc_en == pfc->pfc_en)
+		/* Same enabled mask, nothing to be done */
+		return 0;
+
+	err = set_vlan_qos(priv);
+	if (err)
+		return err;
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (err) {
+		netdev_err(net_dev, "ERROR %d getting link state", err);
+		return err;
+	}
+
+	cfg.rate = state.rate;
+	cfg.options = state.options;
+	if (pfc->pfc_en)
+		cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+	else
+		cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+
+	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+	if (err) {
+		netdev_err(net_dev, "ERROR %d setting link cfg", err);
+		return err;
+	}
+
+	memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
+	memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
+	err = set_rx_taildrop(priv);
+	if (err)
+		goto out_restore_config;
+
+	/* configure congestion notifications */
+	notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+	notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+	notification_cfg.message_iova = 0ULL;
+	notification_cfg.message_ctx = 0ULL;
+
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		if (dpaa2_eth_is_pfc_enabled(priv, i)) {
+			notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
+			notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
+		} else {
+			notification_cfg.threshold_entry = 0;
+			notification_cfg.threshold_exit = 0;
+		}
+
+		err = dpni_set_congestion_notification(priv->mc_io, 0,
+						       priv->mc_token,
+						       DPNI_QUEUE_RX,
+						       i, &notification_cfg);
+		if (err) {
+			netdev_err(net_dev, "Error %d setting congestion notif",
+				   err);
+			goto out_restore_config;
+		}
+
+		netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
+			   (notification_cfg.threshold_entry ?
+			    "Enabled" : "Disabled"), i);
+	}
+
+	return 0;
+
+out_restore_config:
+	memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
+	return err;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	priv->dcbx_mode = mode;
+	return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	switch (capid) {
+	case DCB_CAP_ATTR_PFC:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_PFC_TCS:
+		/* bitmap where each bit represents a number of traffic
+		 * classes the device can be configured to use for Priority
+		 * Flow Control
+		 */
+		*cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+		break;
+	case DCB_CAP_ATTR_DCBX:
+		*cap = priv->dcbx_mode;
+		break;
+	default:
+		*cap = false;
+		break;
+	}
+
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+	.ieee_getpfc	= dpaa2_eth_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= dpaa2_eth_dcbnl_ieee_setpfc,
+	.getdcbx	= dpaa2_eth_dcbnl_getdcbx,
+	.setdcbx	= dpaa2_eth_dcbnl_setdcbx,
+	.getcap		= dpaa2_eth_dcbnl_getcap,
+};
+#endif
+
 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 {
 	struct device *dev;
@@ -2415,7 +3825,7 @@ static int dpaa2_eth_probe(struct fsl_mc
 	dev = &dpni_dev->dev;
 
 	/* Net device */
-	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
 	if (!net_dev) {
 		dev_err(dev, "alloc_etherdev_mq() failed\n");
 		return -ENOMEM;
@@ -2433,7 +3843,10 @@ static int dpaa2_eth_probe(struct fsl_mc
 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
 				     &priv->mc_io);
 	if (err) {
-		dev_err(dev, "MC portal allocation failed\n");
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "MC portal allocation failed\n");
 		goto err_portal_alloc;
 	}
 
@@ -2456,9 +3869,6 @@ static int dpaa2_eth_probe(struct fsl_mc
 	if (err)
 		goto err_bind;
 
-	/* Add a NAPI context for each channel */
-	add_ch_napi(priv);
-
 	/* Percpu statistics */
 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
 	if (!priv->percpu_stats) {
@@ -2491,7 +3901,14 @@ static int dpaa2_eth_probe(struct fsl_mc
 	if (err)
 		goto err_alloc_rings;
 
-	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+	net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+	priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+#endif
+
+	/* Add a NAPI context for each channel */
+	add_ch_napi(priv);
+	enable_ch_napi(priv);
 
 	err = setup_irqs(dpni_dev);
 	if (err) {
@@ -2499,25 +3916,41 @@ static int dpaa2_eth_probe(struct fsl_mc
 		priv->poll_thread = kthread_run(poll_link_state, priv,
 						"%s_poll_link", net_dev->name);
 		if (IS_ERR(priv->poll_thread)) {
-			netdev_err(net_dev, "Error starting polling thread\n");
+			dev_err(dev, "Error starting polling thread\n");
 			goto err_poll_thread;
 		}
 		priv->do_link_poll = true;
 	}
 
+	err = register_netdev(net_dev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev() failed\n");
+		goto err_netdev_reg;
+	}
+
+	dpaa2_eth_sysfs_init(&net_dev->dev);
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+	dpaa2_dbg_add(priv);
+#endif
+
 	dev_info(dev, "Probed interface %s\n", net_dev->name);
 	return 0;
 
+err_netdev_reg:
+	if (priv->do_link_poll)
+		kthread_stop(priv->poll_thread);
+	else
+		fsl_mc_free_irqs(dpni_dev);
 err_poll_thread:
 	free_rings(priv);
 err_alloc_rings:
 err_csum:
-	unregister_netdev(net_dev);
 err_netdev_init:
 	free_percpu(priv->percpu_extras);
 err_alloc_percpu_extras:
 	free_percpu(priv->percpu_stats);
 err_alloc_percpu_stats:
+	disable_ch_napi(priv);
 	del_ch_napi(priv);
 err_bind:
 	free_dpbp(priv);
@@ -2544,8 +3977,15 @@ static int dpaa2_eth_remove(struct fsl_m
 	net_dev = dev_get_drvdata(dev);
 	priv = netdev_priv(net_dev);
 
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+	dpaa2_dbg_remove(priv);
+#endif
+	dpaa2_eth_sysfs_remove(&net_dev->dev);
+
 	unregister_netdev(net_dev);
-	dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+	disable_ch_napi(priv);
+	del_ch_napi(priv);
 
 	if (priv->do_link_poll)
 		kthread_stop(priv->poll_thread);
@@ -2555,17 +3995,16 @@ static int dpaa2_eth_remove(struct fsl_m
 	free_rings(priv);
 	free_percpu(priv->percpu_stats);
 	free_percpu(priv->percpu_extras);
-
-	del_ch_napi(priv);
 	free_dpbp(priv);
 	free_dpio(priv);
 	free_dpni(priv);
 
 	fsl_mc_portal_free(priv->mc_io);
 
-	dev_set_drvdata(dev, NULL);
 	free_netdev(net_dev);
 
+	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
 	return 0;
 }
 
@@ -2588,4 +4027,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
 	.match_id_table = dpaa2_eth_match_id_table
 };
 
-module_fsl_mc_driver(dpaa2_eth_driver);
+static int __init dpaa2_eth_driver_init(void)
+{
+	int err;
+
+	dpaa2_eth_dbg_init();
+	err = fsl_mc_driver_register(&dpaa2_eth_driver);
+	if (err)
+		goto out_debugfs_err;
+
+	err = dpaa2_ceetm_register();
+	if (err)
+		goto out_ceetm_err;
+
+	return 0;
+
+out_ceetm_err:
+	fsl_mc_driver_unregister(&dpaa2_eth_driver);
+out_debugfs_err:
+	dpaa2_eth_dbg_exit();
+	return err;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+	dpaa2_ceetm_unregister();
+	fsl_mc_driver_unregister(&dpaa2_eth_driver);
+	dpaa2_eth_dbg_exit();
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
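
The tx_shaping sysfs attribute added above takes its two values, "rate_limit(Mbps) max_burst_size(bytes)", as a single string written to the file, with the burst size capped at DPAA2_ETH_MAX_BURST_SIZE (0xF7FF). A minimal userspace sketch, not part of the patch; the interface name "eth0" and the 1000 Mbps / 60000 byte values are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* The attribute is created directly on the net device's sysfs node */
	const char *path = "/sys/class/net/eth0/tx_shaping";
	const char *cfg = "1000 60000";	/* rate_limit(Mbps) max_burst_size(bytes) */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cfg, strlen(cfg)) != (ssize_t)strlen(cfg))
		perror("write");
	close(fd);
	return 0;
}

Reading the same file back returns the last configuration accepted by dpni_set_tx_shaping(), since the driver keeps the state itself.
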
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -1,40 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
  * Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *	 names of its contributors may be used to endorse or promote products
- *	 derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef __DPAA2_ETH_H
 #define __DPAA2_ETH_H
 
+#include <linux/dcbnl.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/filter.h>
 
 #include "../../fsl-mc/include/dpaa2-io.h"
 #include "../../fsl-mc/include/dpaa2-fd.h"
@@ -44,6 +19,9 @@
 #include "dpni-cmd.h"
 
 #include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
 
 #define DPAA2_ETH_STORE_SIZE		16
 
@@ -60,43 +38,59 @@
 /* Convert L3 MTU to L2 MFL */
 #define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
 
-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
- */
-#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
-
-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
- */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH		DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE	0xF7FF
 
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
  */
 #define DPAA2_ETH_BUFS_PER_CMD		7
 
-/* Hardware requires alignment for ingress/egress buffer addresses
- * and ingress buffer lengths.
+/* Set the taildrop threshold to 1MB to allow the enqueue of a sufficiently
+ * large number of jumbo frames in the Rx queues (length of the current frame
+ * is not taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_TAILDROP_THRESH	(1024 * 1024)
+
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI	256
+
+/* Buffer quota per channel.
+ * We want to keep in check number of ingress frames in flight: for small
+ * sized frames, buffer pool depletion will kick in first; for large sizes,
+ * Rx FQ taildrop threshold will ensure only a reasonable number of frames
+ * will be pending at any given time.
  */
-#define DPAA2_ETH_RX_BUF_SIZE		2048
+#define DPAA2_ETH_NUM_BUFS_PER_CH	1024
+#define DPAA2_ETH_REFILL_THRESH(priv)	\
+	((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
+
+/* Global buffer quota in case flow control is enabled */
+#define DPAA2_ETH_NUM_BUFS_FC		256
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
 #define DPAA2_ETH_TX_BUF_ALIGN		64
-#define DPAA2_ETH_RX_BUF_ALIGN		256
-#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-	((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
-
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
- * buffers large enough to allow building an skb around them and also account
- * for alignment restrictions
- */
-#define DPAA2_ETH_BUF_RAW_SIZE \
-	(DPAA2_ETH_RX_BUF_SIZE + \
-	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
-	DPAA2_ETH_RX_BUF_ALIGN)
+
+#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE		64
+#define DPAA2_ETH_TX_HWA_SIZE		128
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
+ */
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
+#define DPAA2_ETH_RX_BUF_ALIGN		64
 
 /* We are accommodating a skb backpointer and some S/G info
  * in the frame's software annotation. The hardware
@@ -104,12 +98,32 @@
  */
 #define DPAA2_ETH_SWA_SIZE		64
 
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+	DPAA2_ETH_SWA_SINGLE,
+	DPAA2_ETH_SWA_SG,
+	DPAA2_ETH_SWA_XDP,
+};
+
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
 struct dpaa2_eth_swa {
-	struct sk_buff *skb;
-	struct scatterlist *scl;
-	int num_sg;
-	int num_dma_bufs;
+	enum dpaa2_eth_swa_type type;
+	union {
+		struct {
+			struct sk_buff *skb;
+		} single;
+		struct {
+			struct sk_buff *skb;
+			struct scatterlist *scl;
+			int num_sg;
+			int sgt_size;
+		} sg;
+		struct {
+			int dma_size;
+		} xdp;
+	};
 };
 
 /* Annotation valid bits in FD FRC */
@@ -121,22 +135,14 @@ struct dpaa2_eth_swa {
 #define DPAA2_FD_FRC_FAICFDV		0x0400
 
 /* Error bits in FD CTRL */
-#define DPAA2_FD_CTRL_UFD		0x00000004
-#define DPAA2_FD_CTRL_SBE		0x00000008
-#define DPAA2_FD_CTRL_FSE		0x00000020
-#define DPAA2_FD_CTRL_FAERR		0x00000040
-
-#define DPAA2_FD_RX_ERR_MASK		(DPAA2_FD_CTRL_SBE	| \
-					 DPAA2_FD_CTRL_FAERR)
-#define DPAA2_FD_TX_ERR_MASK		(DPAA2_FD_CTRL_UFD	| \
-					 DPAA2_FD_CTRL_SBE	| \
-					 DPAA2_FD_CTRL_FSE	| \
-					 DPAA2_FD_CTRL_FAERR)
+#define DPAA2_FD_RX_ERR_MASK		(FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK		(FD_CTRL_UFD	| \
+					 FD_CTRL_SBE	| \
+					 FD_CTRL_FSE	| \
+					 FD_CTRL_FAERR)
 
 /* Annotation bits in FD CTRL */
-#define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128 */
-#define DPAA2_FD_CTRL_PTA		0x00800000
-#define DPAA2_FD_CTRL_PTV1		0x00400000
+#define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128B */
 
 /* Frame annotation status */
 struct dpaa2_fas {
@@ -144,7 +150,7 @@ struct dpaa2_fas {
 	u8 ppid;
 	__le16 ifpid;
 	__le32 status;
-} __packed;
+};
 
 /* Frame annotation status word is located in the first 8 bytes
  * of the buffer's hardware annoatation area
@@ -152,11 +158,45 @@ struct dpaa2_fas {
 #define DPAA2_FAS_OFFSET		0
 #define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))
 
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET			0x8
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET		0x58
+
+struct dpaa2_faead {
+	__le32 conf_fqid;
+	__le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V			0x20000000
+#define DPAA2_FAEAD_A4V			0x08000000
+#define DPAA2_FAEAD_UPDV		0x00001000
+#define DPAA2_FAEAD_EBDDV		0x00002000
+#define DPAA2_FAEAD_UPD			0x00000010
+
 /* Accessors for the hardware annotation fields that we use */
-#define dpaa2_get_hwa(buf_addr) \
-	((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
-#define dpaa2_get_fas(buf_addr) \
-	(struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+	return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
 
 /* Error and status bits in the frame annotation status word */
 /* Debug frame, otherwise supposed to be discarded */
@@ -203,11 +243,6 @@ struct dpaa2_fas {
 					 DPAA2_FAS_BLE		| \
 					 DPAA2_FAS_L3CE		| \
 					 DPAA2_FAS_L4CE)
-/* Tx errors */
-#define DPAA2_FAS_TX_ERR_MASK		(DPAA2_FAS_KSE		| \
-					 DPAA2_FAS_EOFHE	| \
-					 DPAA2_FAS_MNLE		| \
-					 DPAA2_FAS_TIDE)
 
 /* Time in milliseconds between link state updates */
 #define DPAA2_ETH_LINK_STATE_REFRESH	1000
@@ -226,6 +261,7 @@ struct dpaa2_eth_drv_stats {
 	__u64	tx_conf_bytes;
 	__u64	tx_sg_frames;
 	__u64	tx_sg_bytes;
+	__u64	tx_reallocs;
 	__u64	rx_sg_frames;
 	__u64	rx_sg_bytes;
 	/* Enqueues retried due to portal busy */
@@ -250,17 +286,23 @@ struct dpaa2_eth_ch_stats {
 	__u64 pull_err;
 };
 
+#define DPAA2_ETH_MAX_TCS		8
+
 /* Maximum number of queues associated with a DPNI */
-#define DPAA2_ETH_MAX_RX_QUEUES		16
-#define DPAA2_ETH_MAX_TX_QUEUES		NR_CPUS
+#define DPAA2_ETH_MAX_RX_QUEUES		(DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
+#define DPAA2_ETH_MAX_TX_QUEUES		DPNI_MAX_SENDERS
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES	1
 #define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
-					DPAA2_ETH_MAX_TX_QUEUES)
+					DPAA2_ETH_MAX_TX_QUEUES + \
+					DPAA2_ETH_MAX_RX_ERR_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES	(DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
 
-#define DPAA2_ETH_MAX_DPCONS		NR_CPUS
+#define DPAA2_ETH_MAX_DPCONS		16
 
 enum dpaa2_eth_fq_type {
 	DPAA2_RX_FQ = 0,
 	DPAA2_TX_CONF_FQ,
+	DPAA2_RX_ERR_FQ
 };
 
 struct dpaa2_eth_priv;
@@ -268,15 +310,19 @@ struct dpaa2_eth_priv;
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
+	u32 tx_fqid;
 	u16 flowid;
+	u8 tc;
 	int target_cpu;
+	u32 dq_frames;
+	u32 dq_bytes;
 	struct dpaa2_eth_channel *channel;
 	enum dpaa2_eth_fq_type type;
 
-	void (*consume)(struct dpaa2_eth_priv *,
-			struct dpaa2_eth_channel *,
-			const struct dpaa2_fd *,
-			struct napi_struct *);
+	void (*consume)(struct dpaa2_eth_priv *priv,
+			struct dpaa2_eth_channel *ch,
+			const struct dpaa2_fd *fd,
+			struct dpaa2_eth_fq *fq);
 	struct dpaa2_eth_fq_stats stats;
 };
 
@@ -285,19 +331,29 @@ struct dpaa2_eth_channel {
 	struct fsl_mc_device *dpcon;
 	int dpcon_id;
 	int ch_id;
-	int dpio_id;
 	struct napi_struct napi;
+	struct dpaa2_io *dpio;
 	struct dpaa2_io_store *store;
 	struct dpaa2_eth_priv *priv;
 	int buf_count;
 	struct dpaa2_eth_ch_stats stats;
+	struct bpf_prog *xdp_prog;
+	u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	u8 rel_buf_cnt;
+	bool flush;
 };
 
-struct dpaa2_eth_hash_fields {
+struct dpaa2_eth_dist_fields {
 	u64 rxnfc_field;
 	enum net_prot cls_prot;
 	int cls_field;
 	int size;
+	u64 id;
+};
+
+struct dpaa2_eth_cls_rule {
+	struct ethtool_rx_flow_spec fs;
+	u8 in_use;
 };
 
 /* Driver private data */
@@ -306,17 +362,29 @@ struct dpaa2_eth_priv {
 
 	u8 num_fqs;
 	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+	int (*enqueue)(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
+		       struct dpaa2_fd *fd, u8 prio);
 
 	u8 num_channels;
 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+	int max_bufs_per_ch;
+	int refill_thresh;
+
+	bool has_xdp_prog;
 
 	struct dpni_attr dpni_attrs;
+	u16 dpni_ver_major;
+	u16 dpni_ver_minor;
 	u16 tx_data_offset;
 
 	struct fsl_mc_device *dpbp_dev;
 	u16 bpid;
 	struct iommu_domain *iommu_domain;
 
+	bool ts_tx_en; /* Tx timestamping enabled */
+	bool ts_rx_en; /* Rx timestamping enabled */
+
 	u16 tx_qdid;
 	struct fsl_mc_io *mc_io;
 	/* Cores which have an affine DPIO/DPCON.
@@ -337,13 +405,30 @@ struct dpaa2_eth_priv {
 
 	/* enabled ethtool hashing bits */
 	u64 rx_hash_fields;
+	u64 rx_cls_fields;
+	struct dpaa2_eth_cls_rule *cls_rule;
+	u8 rx_cls_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+	struct dpaa2_debugfs dbg;
+#endif
+	struct dpni_tx_shaping_cfg shaping_cfg;
+
+	u8 dcbx_mode;
+	struct ieee_pfc pfc;
+	bool vlan_clsf_set;
+	bool tx_pause_frames;
+
+	bool ceetm_en;
 };
 
-/* default Rx hash options, set during probing */
 #define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
 				| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
 				| RXH_L4_B_2_3)
 
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_DEFAULT	(RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
				 RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
 #define dpaa2_eth_hash_enabled(priv)	\
 	((priv)->dpni_attrs.num_queues > 1)
 
@@ -352,10 +437,127 @@ struct dpaa2_eth_priv {
 
 extern const struct ethtool_ops dpaa2_ethtool_ops;
 extern const char dpaa2_eth_drv_version[];
+extern int dpaa2_phc_index;
+
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+					 u16 ver_major, u16 ver_minor)
+{
+	if (priv->dpni_ver_major == ver_major)
+		return priv->dpni_ver_minor - ver_minor;
+	return priv->dpni_ver_major - ver_major;
+}
+
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR	7
+#define DPNI_RX_DIST_KEY_VER_MINOR	5
+
+#define dpaa2_eth_has_legacy_dist(priv)					\
+	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,	\
+				DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_enabled(priv)	\
+	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv)	\
+	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
+#define dpaa2_eth_fs_count(priv)	\
+	((priv)->dpni_attrs.fs_entries)
+
+#define dpaa2_eth_queue_count(priv)	\
+	((priv)->num_channels)
+
+#define dpaa2_eth_tc_count(priv)	\
+	((priv)->dpni_attrs.num_tcs)
+
+enum dpaa2_eth_rx_dist {
+	DPAA2_ETH_RX_DIST_HASH,
+	DPAA2_ETH_RX_DIST_CLS
+};
+
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST		BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC		BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE		BIT(2)
+#define DPAA2_ETH_DIST_VLAN		BIT(3)
+#define DPAA2_ETH_DIST_IPSRC		BIT(4)
+#define DPAA2_ETH_DIST_IPDST		BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO		BIT(6)
+#define DPAA2_ETH_DIST_L4SRC		BIT(7)
+#define DPAA2_ETH_DIST_L4DST		BIT(8)
+#define DPAA2_ETH_DIST_ALL		(~0U)
+
+static inline
+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
+				       struct sk_buff *skb)
+{
+	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
+	/* If we don't have an skb (e.g. XDP buffer), we only need space for
+	 * the software annotation area
+	 */
+	if (!skb)
+		return headroom;
 
-static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+	/* For non-linear skbs we have no headroom requirement, as we build a
+	 * SG frame with a newly allocated SGT buffer
+	 */
+	if (skb_is_nonlinear(skb))
+		return 0;
+
+	/* If we have Tx timestamping, need 128B hardware annotation */
+	if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+	return headroom;
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
+{
+	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
+}
+
+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
+					    int traffic_class)
+{
+	return priv->pfc.pfc_en & (1 << traffic_class);
+}
+
+enum dpaa2_eth_td_cfg {
+	DPAA2_ETH_TD_NONE,
+	DPAA2_ETH_TD_QUEUE,
+	DPAA2_ETH_TD_GROUP
+};
+
+static inline enum dpaa2_eth_td_cfg
+dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
+{
+	bool pfc_enabled = !!(priv->pfc.pfc_en);
+
+	if (pfc_enabled)
+		return DPAA2_ETH_TD_GROUP;
+	else if (priv->tx_pause_frames)
+		return DPAA2_ETH_TD_NONE;
+	else
+		return DPAA2_ETH_TD_QUEUE;
+}
+
+static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
 {
-	return priv->dpni_attrs.num_queues;
+	return 1;
 }
 
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
+int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+
+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
+
 #endif	/* __DPAA2_H */
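
The dpaa2_eth_cmp_dpni_ver() helper above orders DPNI API versions lexicographically (major first, then minor), and the feature-gate macros such as dpaa2_eth_has_legacy_dist() compare its result against zero. A standalone C sketch of that logic, illustrative only; the 7.3 sample firmware version is an assumption:

#include <stdio.h>

struct dpni_ver {
	unsigned int major;
	unsigned int minor;
};

/* Mirrors dpaa2_eth_cmp_dpni_ver(): negative when cur precedes major.minor */
static int cmp_dpni_ver(struct dpni_ver cur, unsigned int major,
			unsigned int minor)
{
	if (cur.major == major)
		return (int)cur.minor - (int)minor;
	return (int)cur.major - (int)major;
}

int main(void)
{
	struct dpni_ver fw = { 7, 3 };	/* sample firmware version */

	/* Same test as dpaa2_eth_has_legacy_dist(): true before DPNI 7.5 */
	if (cmp_dpni_ver(fw, 7, 5) < 0)
		printf("legacy Rx distribution key handling\n");
	else
		printf("flexible Rx distribution key API available\n");
	return 0;
}
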
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *	 names of its contributors may be used to endorse or promote products
- *	 derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2016-2017 NXP
  */
 
+#include <linux/net_tstamp.h>
+
 #include "dpni.h"	/* DPNI_LINK_OPT_* */
 #include "dpaa2-eth.h"
 
@@ -52,6 +27,10 @@ static char dpaa2_ethtool_stats[][ETH_GS
 	"[hw] rx nobuffer discards",
 	"[hw] tx discarded frames",
 	"[hw] tx confirmed frames",
+	"[hw] tx dequeued bytes",
+	"[hw] tx dequeued frames",
+	"[hw] tx rejected bytes",
+	"[hw] tx rejected frames",
 };
 
 #define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
@@ -62,6 +41,7 @@ static char dpaa2_ethtool_extras[][ETH_G
 	"[drv] tx conf bytes",
 	"[drv] tx sg frames",
 	"[drv] tx sg bytes",
+	"[drv] tx realloc frames",
 	"[drv] rx sg frames",
 	"[drv] rx sg bytes",
 	"[drv] enqueue portal busy",
@@ -69,6 +49,12 @@ static char dpaa2_ethtool_extras[][ETH_G
 	"[drv] dequeue portal busy",
 	"[drv] channel pull errors",
 	"[drv] cdan",
+	/* FQ stats */
+	"rx pending frames",
+	"rx pending bytes",
+	"tx conf pending frames",
+	"tx conf pending bytes",
+	"buffer count"
 };
 
 #define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -76,14 +62,55 @@ static char dpaa2_ethtool_extras[][ETH_G
 static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
 				  struct ethtool_drvinfo *drvinfo)
 {
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-	strlcpy(drvinfo->version, dpaa2_eth_drv_version,
-		sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
 	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
 		sizeof(drvinfo->bus_info));
 }
 
+#define DPNI_LINK_AUTONEG_VER_MAJOR		7
+#define DPNI_LINK_AUTONEG_VER_MINOR		8
+
+struct dpaa2_eth_link_mode_map {
+	u64 dpni_lm;
+	u64 ethtool_lm;
+};
+
+static const struct dpaa2_eth_link_mode_map dpaa2_eth_lm_map[] = {
+	{DPNI_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
+	{DPNI_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
+	{DPNI_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+	{DPNI_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
+	{DPNI_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+	{DPNI_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
+};
+
+static void link_mode_dpni2ethtool(u64 dpni_lm, unsigned long *ethtool_lm)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
+		if (dpni_lm & dpaa2_eth_lm_map[i].dpni_lm)
+			__set_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm);
+	}
+}
+
+static void link_mode_ethtool2dpni(const unsigned long *ethtool_lm,
+				   u64 *dpni_lm)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
+		if (test_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm))
+			*dpni_lm |= dpaa2_eth_lm_map[i].dpni_lm;
+	}
+}
+
 static int
 dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
 			     struct ethtool_link_ksettings *link_settings)
@@ -92,17 +119,27 @@ dpaa2_eth_get_link_ksettings(struct net_
 	int err = 0;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 
-	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-	if (err) {
-		netdev_err(net_dev, "ERROR %d getting link state\n", err);
-		goto out;
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
+				   DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
+		err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token,
+					  &state);
+		if (err) {
+			netdev_err(net_dev, "dpni_get_link_state failed\n");
+			goto out;
+		}
+	} else {
+		err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token,
+					     &state);
+		if (err) {
+			netdev_err(net_dev, "dpni_get_link_state_v2 failed\n");
+			goto out;
+		}
+		link_mode_dpni2ethtool(state.supported,
+				       link_settings->link_modes.supported);
+		link_mode_dpni2ethtool(state.advertising,
+				       link_settings->link_modes.advertising);
 	}
 
-	/* At the moment, we have no way of interrogating the DPMAC
-	 * from the DPNI side - and for that matter there may exist
-	 * no DPMAC at all. So for now we just don't report anything
-	 * beyond the DPNI attributes.
-	 */
 	if (state.options & DPNI_LINK_OPT_AUTONEG)
 		link_settings->base.autoneg = AUTONEG_ENABLE;
 	if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
@@ -113,25 +150,37 @@ out:
 	return err;
 }
 
+#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR		7
+#define DPNI_DYNAMIC_LINK_SET_VER_MINOR		1
 static int
 dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
 			     const struct ethtool_link_ksettings *link_settings)
 {
-	struct dpni_link_cfg cfg = {0};
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_link_state state = {0};
+	struct dpni_link_cfg cfg = {0};
 	int err = 0;
 
-	netdev_dbg(net_dev, "Setting link parameters...");
+	/* If using an older MC version, the DPNI must be down
+	 * in order to be able to change link settings. Taking steps to let
+	 * the user know that.
+	 */
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
+				   DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
+		if (netif_running(net_dev)) {
+			netdev_info(net_dev, "Interface must be brought down first.\n");
+			return -EACCES;
+		}
+	}
 
-	/* Due to a temporary MC limitation, the DPNI must be down
-	 * in order to be able to change link settings. Taking steps to let
-	 * the user know that.
-	 */
-	if (netif_running(net_dev)) {
-		netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
-		return -EACCES;
+	/* Need to interrogate link state to get flow control params */
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (err) {
+		netdev_err(net_dev, "Error getting link state\n");
+		goto out;
 	}
 
+	cfg.options = state.options;
 	cfg.rate = link_settings->base.speed;
 	if (link_settings->base.autoneg == AUTONEG_ENABLE)
 		cfg.options |= DPNI_LINK_OPT_AUTONEG;
@@ -142,13 +191,92 @@ dpaa2_eth_set_link_ksettings(struct net_
 	else
 		cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
 
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
+				   DPNI_LINK_AUTONEG_VER_MINOR)) {
+		err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+	} else {
+		link_mode_ethtool2dpni(link_settings->link_modes.advertising,
+				       &cfg.advertising);
+		dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
+	}
+	if (err)
+		netdev_err(net_dev, "dpni_set_link_cfg failed");
+
+out:
+	return err;
+}
+
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+				     struct ethtool_pauseparam *pause)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_link_state state = {0};
+	int err;
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (err)
+		netdev_dbg(net_dev, "Error getting link state\n");
+
+	/* Report general port autonegotiation status */
+	pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
+	pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
+	pause->tx_pause = pause->rx_pause ^
+			  !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+				    struct ethtool_pauseparam *pause)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_link_state state = {0};
+	struct dpni_link_cfg cfg = {0};
+	u32 current_tx_pause;
+	int err = 0;
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (err) {
+		netdev_dbg(net_dev, "Error getting link state\n");
+		goto out;
+	}
+
+	cfg.rate = state.rate;
+	cfg.options = state.options;
+	current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
+			   !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
+
+	/* We don't support changing pause frame autonegotiation separately
+	 * from general port autoneg
+	 */
+	if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
+		netdev_warn(net_dev,
+			    "Cannot change pause frame autoneg separately\n");
+
+	if (pause->rx_pause)
+		cfg.options |= DPNI_LINK_OPT_PAUSE;
+	else
+		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+
+	if (pause->rx_pause ^ pause->tx_pause)
+		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+	else
+		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+	if (err) {
+		netdev_dbg(net_dev, "Error setting link\n");
+		goto out;
+	}
+
+	/* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
+	if (current_tx_pause == pause->tx_pause)
+		goto out;
+
+	priv->tx_pause_frames = pause->tx_pause;
+	err = set_rx_taildrop(priv);
 	if (err)
-		/* ethtool will be loud enough if we return an error; no point
-		 * in putting our own error message on the console by default
-		 */
-		netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+		netdev_dbg(net_dev, "Error configuring taildrop\n");
 
+out:
 	return err;
 }
 
@@ -192,6 +320,10 @@ static void dpaa2_eth_get_ethtool_stats(
 	int j, k, err;
 	int num_cnt;
 	union dpni_statistics dpni_stats;
+	u32 fcnt, bcnt;
+	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+	u32 buf_cnt;
 	u64 cdan = 0;
 	u64 portal_busy = 0, pull_err = 0;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -202,9 +334,9 @@ static void dpaa2_eth_get_ethtool_stats(
 	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
 
 	/* Print standard counters, from DPNI statistics */
-	for (j = 0; j <= 2; j++) {
+	for (j = 0; j <= 3; j++) {
 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-					  j, &dpni_stats);
+					  j, 0, &dpni_stats);
 		if (err != 0)
 			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
 		switch (j) {
@@ -217,6 +349,9 @@ static void dpaa2_eth_get_ethtool_stats(
 		case 2:
 			num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
 			break;
+		case 3:
+			num_cnt = sizeof(dpni_stats.page_3) / sizeof(u64);
+			break;
 		}
 		for (k = 0; k < num_cnt; k++)
 			*(data + i++) = dpni_stats.raw.counter[k];
@@ -240,12 +375,410 @@ static void dpaa2_eth_get_ethtool_stats(
 	*(data + i++) = portal_busy;
 	*(data + i++) = pull_err;
 	*(data + i++) = cdan;
+
+	for (j = 0; j < priv->num_fqs; j++) {
+		/* Print FQ instantaneous counts */
+		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+					      &fcnt, &bcnt);
+		if (err) {
+			netdev_warn(net_dev, "FQ query error %d", err);
+			return;
+		}
+
+		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+			fcnt_tx_total += fcnt;
+			bcnt_tx_total += bcnt;
+		} else {
+			fcnt_rx_total += fcnt;
+			bcnt_rx_total += bcnt;
+		}
+	}
+
+	*(data + i++) = fcnt_rx_total;
+	*(data + i++) = bcnt_rx_total;
+	*(data + i++) = fcnt_tx_total;
+	*(data + i++) = bcnt_tx_total;
+
+	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+	if (err) {
+		netdev_warn(net_dev, "Buffer count query error %d\n", err);
+		return;
+	}
+	*(data + i++) = buf_cnt;
+}
+
+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+			 void *key, void *mask, u64 *fields)
+{
+	int off;
+
+	if (eth_mask->h_proto) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		*(__be16 *)(key + off) = eth_value->h_proto;
+		*(__be16 *)(mask + off) = eth_mask->h_proto;
+		*fields |= DPAA2_ETH_DIST_ETHTYPE;
+	}
+
+	if (!is_zero_ether_addr(eth_mask->h_source)) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+		ether_addr_copy(key + off, eth_value->h_source);
+		ether_addr_copy(mask + off, eth_mask->h_source);
+		*fields |= DPAA2_ETH_DIST_ETHSRC;
+	}
+
+	if (!is_zero_ether_addr(eth_mask->h_dest)) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+		ether_addr_copy(key + off, eth_value->h_dest);
+		ether_addr_copy(mask + off, eth_mask->h_dest);
+		*fields |= DPAA2_ETH_DIST_ETHDST;
+	}
+
+	return 0;
+}
+
+static int prep_user_ip_rule(struct ethtool_usrip4_spec *uip_value,
+			     struct ethtool_usrip4_spec *uip_mask,
+			     void *key, void *mask, u64 *fields)
+{
+	int off;
+	u32 tmp_value, tmp_mask;
+
+	if (uip_mask->tos || uip_mask->ip_ver)
+		return -EOPNOTSUPP;
+
+	if (uip_mask->ip4src) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+		*(__be32 *)(key + off) = uip_value->ip4src;
+		*(__be32 *)(mask + off) = uip_mask->ip4src;
+		*fields |= DPAA2_ETH_DIST_IPSRC;
+	}
+
+	if (uip_mask->ip4dst) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
 | |
| +		*(__be32 *)(key + off) = uip_value->ip4dst;
 | |
| +		*(__be32 *)(mask + off) = uip_mask->ip4dst;
 | |
| +		*fields |= DPAA2_ETH_DIST_IPDST;
 | |
| +	}
 | |
| +
 | |
| +	if (uip_mask->proto) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
 | |
| +		*(u8 *)(key + off) = uip_value->proto;
 | |
| +		*(u8 *)(mask + off) = uip_mask->proto;
 | |
| +		*fields |= DPAA2_ETH_DIST_IPPROTO;
 | |
| +	}
 | |
| +
 | |
| +	if (uip_mask->l4_4_bytes) {
 | |
| +		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
 | |
| +		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
 | |
| +
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
 | |
| +		*(__be16 *)(key + off) = htons(tmp_value >> 16);
 | |
| +		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
 | |
| +		*fields |= DPAA2_ETH_DIST_L4SRC;
 | |
| +
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
 | |
| +		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
 | |
| +		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
 | |
| +		*fields |= DPAA2_ETH_DIST_L4DST;
 | |
| +	}
 | |
| +
 | |
| +	/* Only apply the rule for IPv4 frames */
 | |
| +	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
 | |
| +	*(__be16 *)(key + off) = htons(ETH_P_IP);
 | |
| +	*(__be16 *)(mask + off) = htons(0xFFFF);
 | |
| +	*fields |= DPAA2_ETH_DIST_ETHTYPE;
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
 | |
| +			struct ethtool_tcpip4_spec *l4_mask,
 | |
| +			void *key, void *mask, u8 l4_proto, u64 *fields)
 | |
| +{
 | |
| +	int off;
 | |
| +
 | |
| +	if (l4_mask->tos)
 | |
| +		return -EOPNOTSUPP;
 | |
| +	if (l4_mask->ip4src) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
 | |
| +		*(__be32 *)(key + off) = l4_value->ip4src;
 | |
| +		*(__be32 *)(mask + off) = l4_mask->ip4src;
 | |
| +		*fields |= DPAA2_ETH_DIST_IPSRC;
 | |
| +	}
 | |
| +
 | |
| +	if (l4_mask->ip4dst) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
 | |
| +		*(__be32 *)(key + off) = l4_value->ip4dst;
 | |
| +		*(__be32 *)(mask + off) = l4_mask->ip4dst;
 | |
| +		*fields |= DPAA2_ETH_DIST_IPDST;
 | |
| +	}
 | |
| +
 | |
| +	if (l4_mask->psrc) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
 | |
| +		*(__be16 *)(key + off) = l4_value->psrc;
 | |
| +		*(__be16 *)(mask + off) = l4_mask->psrc;
 | |
| +		*fields |= DPAA2_ETH_DIST_L4SRC;
 | |
| +	}
 | |
| +
 | |
| +	if (l4_mask->pdst) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
 | |
| +		*(__be16 *)(key + off) = l4_value->pdst;
 | |
| +		*(__be16 *)(mask + off) = l4_mask->pdst;
 | |
| +		*fields |= DPAA2_ETH_DIST_L4DST;
 | |
| +	}
 | |
| +
 | |
| +	/* Only apply the rule for the user-specified L4 protocol
 | |
| +	 * and if ethertype matches IPv4
 | |
| +	 */
 | |
| +	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
 | |
| +	*(__be16 *)(key + off) = htons(ETH_P_IP);
 | |
| +	*(__be16 *)(mask + off) = htons(0xFFFF);
 | |
| +	*fields |= DPAA2_ETH_DIST_ETHTYPE;
 | |
| +
 | |
| +	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
 | |
| +	*(u8 *)(key + off) = l4_proto;
 | |
| +	*(u8 *)(mask + off) = 0xFF;
 | |
| +	*fields |= DPAA2_ETH_DIST_IPPROTO;
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
 | |
| +			 struct ethtool_flow_ext *ext_mask,
 | |
| +			 void *key, void *mask, u64 *fields)
 | |
| +{
 | |
| +	int off;
 | |
| +
 | |
| +	if (ext_mask->vlan_etype)
 | |
| +		return -EOPNOTSUPP;
 | |
| +
 | |
| +	if (ext_mask->vlan_tci) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
 | |
| +		*(__be16 *)(key + off) = ext_value->vlan_tci;
 | |
| +		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
 | |
| +		*fields |= DPAA2_ETH_DIST_VLAN;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
 | |
| +			     struct ethtool_flow_ext *ext_mask,
 | |
| +			     void *key, void *mask, u64 *fields)
 | |
| +{
 | |
| +	int off;
 | |
| +
 | |
| +	if (!is_zero_ether_addr(ext_mask->h_dest)) {
 | |
| +		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
 | |
| +		ether_addr_copy(key + off, ext_value->h_dest);
 | |
| +		ether_addr_copy(mask + off, ext_mask->h_dest);
 | |
| +		*fields |= DPAA2_ETH_DIST_ETHDST;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
 | |
| +			 u64 *fields)
 | |
| +{
 | |
| +	int err;
 | |
| +
 | |
| +	switch (fs->flow_type & 0xFF) {
 | |
| +	case ETHER_FLOW:
 | |
| +		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
 | |
| +				    key, mask, fields);
 | |
| +		break;
 | |
| +	case IP_USER_FLOW:
 | |
| +		err = prep_user_ip_rule(&fs->h_u.usr_ip4_spec,
 | |
| +				    &fs->m_u.usr_ip4_spec, key, mask, fields);
 | |
| +		break;
 | |
| +	case TCP_V4_FLOW:
 | |
| +		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
 | |
| +				   key, mask, IPPROTO_TCP, fields);
 | |
| +		break;
 | |
| +	case UDP_V4_FLOW:
 | |
| +		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
 | |
| +				   key, mask, IPPROTO_UDP, fields);
 | |
| +		break;
 | |
| +	case SCTP_V4_FLOW:
 | |
| +		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec,
 | |
| +				   key, mask, IPPROTO_SCTP, fields);
 | |
| +		break;
 | |
| +	default:
 | |
| +		return -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	if (fs->flow_type & FLOW_EXT) {
 | |
| +		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +	}
 | |
| +
 | |
| +	if (fs->flow_type & FLOW_MAC_EXT) {
 | |
| +		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
 | |
| +					fields);
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int do_cls_rule(struct net_device *net_dev,
 | |
| +		       struct ethtool_rx_flow_spec *fs,
 | |
| +		       bool add)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	struct device *dev = net_dev->dev.parent;
 | |
| +	struct dpni_rule_cfg rule_cfg = { 0 };
 | |
| +	struct dpni_fs_action_cfg fs_act = { 0 };
 | |
| +	dma_addr_t key_iova;
 | |
| +	u64 fields = 0;
 | |
| +	void *key_buf;
 | |
| +	int i, err = 0;
 | |
| +
 | |
| +	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
 | |
| +	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
 | |
| +
 | |
| +	/* allocate twice the key size, for the actual key and for mask */
 | |
| +	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
 | |
| +	if (!key_buf)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	/* Fill the key and mask memory areas */
 | |
| +	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
 | |
| +	if (err)
 | |
| +		goto free_mem;
 | |
| +
 | |
| +	if (!dpaa2_eth_fs_mask_enabled(priv)) {
 | |
| +		/* Masking allows us to configure a maximal key during init and
 | |
| +		 * use it for all flow steering rules. Without it, we include
 | |
| +		 * in the key only the fields actually used, so we need to
 | |
| +		 * extract the others from the final key buffer.
 | |
| +		 *
 | |
| +		 * Program the FS key if needed, or return error if previously
 | |
| +		 * set key can't be used for the current rule. User needs to
 | |
| +		 * delete existing rules in this case to allow for the new one.
 | |
| +		 */
 | |
| +		if (!priv->rx_cls_fields) {
 | |
| +			err = dpaa2_eth_set_cls(net_dev, fields);
 | |
| +			if (err)
 | |
| +				goto free_mem;
 | |
| +
 | |
| +			priv->rx_cls_fields = fields;
 | |
| +		} else if (priv->rx_cls_fields != fields) {
 | |
| +			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
 | |
| +			err = -EOPNOTSUPP;
 | |
| +			goto free_mem;
 | |
| +		}
 | |
| +
 | |
| +		dpaa2_eth_cls_trim_rule(key_buf, fields);
 | |
| +		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
 | |
| +	}
 | |
| +
 | |
| +	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
 | |
| +				  DMA_TO_DEVICE);
 | |
| +	if (dma_mapping_error(dev, key_iova)) {
 | |
| +		err = -ENOMEM;
 | |
| +		goto free_mem;
 | |
| +	}
 | |
| +
 | |
| +	rule_cfg.key_iova = key_iova;
 | |
| +	if (dpaa2_eth_fs_mask_enabled(priv))
 | |
| +		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
 | |
| +
 | |
| +	if (add) {
 | |
| +		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
 | |
| +			fs_act.options |= DPNI_FS_OPT_DISCARD;
 | |
| +		else
 | |
| +			fs_act.flow_id = fs->ring_cookie;
 | |
| +	}
 | |
| +	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
 | |
| +		if (add)
 | |
| +			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
 | |
| +						i, fs->location, &rule_cfg,
 | |
| +						&fs_act);
 | |
| +		else
 | |
| +			err = dpni_remove_fs_entry(priv->mc_io, 0,
 | |
| +						   priv->mc_token, i,
 | |
| +						   &rule_cfg);
 | |
| +		if (err)
 | |
| +			break;
 | |
| +	}
 | |
| +
 | |
| +	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
 | |
| +
 | |
| +free_mem:
 | |
| +	kfree(key_buf);
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static int num_rules(struct dpaa2_eth_priv *priv)
 | |
| +{
 | |
| +	int i, rules = 0;
 | |
| +
 | |
| +	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
 | |
| +		if (priv->cls_rule[i].in_use)
 | |
| +			rules++;
 | |
| +
 | |
| +	return rules;
 | |
| +}
 | |
| +
 | |
| +static int update_cls_rule(struct net_device *net_dev,
 | |
| +			   struct ethtool_rx_flow_spec *new_fs,
 | |
| +			   int location)
 | |
| +{
 | |
| +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	struct dpaa2_eth_cls_rule *rule;
 | |
| +	int err = -EINVAL;
 | |
| +
 | |
| +	if (!priv->rx_cls_enabled)
 | |
| +		return -EOPNOTSUPP;
 | |
| +
 | |
| +	if (location >= dpaa2_eth_fs_count(priv))
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	rule = &priv->cls_rule[location];
 | |
| +
 | |
| +	/* If a rule is present at the specified location, delete it. */
 | |
| +	if (rule->in_use) {
 | |
| +		err = do_cls_rule(net_dev, &rule->fs, false);
 | |
| +		if (err)
 | |
| +			return err;
 | |
| +
 | |
| +		rule->in_use = 0;
 | |
| +
 | |
| +		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
 | |
| +			priv->rx_cls_fields = 0;
 | |
| +	}
 | |
| +
 | |
| +	/* If no new entry to add, return here */
 | |
| +	if (!new_fs)
 | |
| +		return err;
 | |
| +
 | |
| +	err = do_cls_rule(net_dev, new_fs, true);
 | |
| +	if (err)
 | |
| +		return err;
 | |
| +
 | |
| +	rule->in_use = 1;
 | |
| +	rule->fs = *new_fs;
 | |
| +
 | |
| +	return 0;
 | |
|  }
 | |
|  
 | |
|  static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
 | |
|  			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
 | |
|  {
 | |
|  	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 | |
| +	int rule_cnt = dpaa2_eth_fs_count(priv);
 | |
| +	int i, j = 0;
 | |
|  
 | |
|  	switch (rxnfc->cmd) {
 | |
|  	case ETHTOOL_GRXFH:
 | |
| @@ -258,6 +791,29 @@ static int dpaa2_eth_get_rxnfc(struct ne
 | |
|  	case ETHTOOL_GRXRINGS:
 | |
|  		rxnfc->data = dpaa2_eth_queue_count(priv);
 | |
|  		break;
 | |
| +	case ETHTOOL_GRXCLSRLCNT:
 | |
| +		rxnfc->rule_cnt = 0;
 | |
| +		rxnfc->rule_cnt = num_rules(priv);
 | |
| +		rxnfc->data = rule_cnt;
 | |
| +		break;
 | |
| +	case ETHTOOL_GRXCLSRULE:
 | |
| +		if (rxnfc->fs.location >= rule_cnt)
 | |
| +			return -EINVAL;
 | |
| +		if (!priv->cls_rule[rxnfc->fs.location].in_use)
 | |
| +			return -EINVAL;
 | |
| +		rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
 | |
| +		break;
 | |
| +	case ETHTOOL_GRXCLSRLALL:
 | |
| +		for (i = 0; i < rule_cnt; i++) {
 | |
| +			if (!priv->cls_rule[i].in_use)
 | |
| +				continue;
 | |
| +			if (j == rxnfc->rule_cnt)
 | |
| +				return -EMSGSIZE;
 | |
| +			rule_locs[j++] = i;
 | |
| +		}
 | |
| +		rxnfc->rule_cnt = j;
 | |
| +		rxnfc->data = rule_cnt;
 | |
| +		break;
 | |
|  	default:
 | |
|  		return -EOPNOTSUPP;
 | |
|  	}
 | |
| @@ -265,13 +821,61 @@ static int dpaa2_eth_get_rxnfc(struct ne
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| +int dpaa2_phc_index = -1;
 | |
| +EXPORT_SYMBOL(dpaa2_phc_index);
 | |
| +
 | |
| +static int dpaa2_eth_get_ts_info(struct net_device *dev,
 | |
| +				 struct ethtool_ts_info *info)
 | |
| +{
 | |
| +	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
 | |
| +				SOF_TIMESTAMPING_RX_HARDWARE |
 | |
| +				SOF_TIMESTAMPING_RAW_HARDWARE;
 | |
| +
 | |
| +	info->phc_index = dpaa2_phc_index;
 | |
| +
 | |
| +	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
 | |
| +			 (1 << HWTSTAMP_TX_ON);
 | |
| +
 | |
| +	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 | |
| +			   (1 << HWTSTAMP_FILTER_ALL);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
 | |
| +			       struct ethtool_rxnfc *rxnfc)
 | |
| +{
 | |
| +	int err = 0;
 | |
| +
 | |
| +	switch (rxnfc->cmd) {
 | |
| +	case ETHTOOL_SRXFH:
 | |
| +		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
 | |
| +			return -EOPNOTSUPP;
 | |
| +		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
 | |
| +		break;
 | |
| +	case ETHTOOL_SRXCLSRLINS:
 | |
| +		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
 | |
| +		break;
 | |
| +	case ETHTOOL_SRXCLSRLDEL:
 | |
| +		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
 | |
| +		break;
 | |
| +	default:
 | |
| +		err = -EOPNOTSUPP;
 | |
| +	}
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
|  const struct ethtool_ops dpaa2_ethtool_ops = {
 | |
|  	.get_drvinfo = dpaa2_eth_get_drvinfo,
 | |
|  	.get_link = ethtool_op_get_link,
 | |
|  	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
 | |
|  	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
 | |
| +	.get_pauseparam = dpaa2_eth_get_pauseparam,
 | |
| +	.set_pauseparam = dpaa2_eth_set_pauseparam,
 | |
|  	.get_sset_count = dpaa2_eth_get_sset_count,
 | |
|  	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
 | |
|  	.get_strings = dpaa2_eth_get_strings,
 | |
|  	.get_rxnfc = dpaa2_eth_get_rxnfc,
 | |
| +	.set_rxnfc = dpaa2_eth_set_rxnfc,
 | |
| +	.get_ts_info = dpaa2_eth_get_ts_info,
 | |
|  };
 | |
--- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
@@ -1,39 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 #ifndef __FSL_DPKG_H_
 #define __FSL_DPKG_H_
 
 #include <linux/types.h>
-#include "net.h"
 
 /* Data Path Key Generator API
  * Contains initialization APIs and runtime APIs for the Key Generator
@@ -86,6 +57,355 @@ struct dpkg_mask {
 	u8 offset;
 };
 
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA				BIT(0)
+#define NH_FLD_ETH_SA				BIT(1)
+#define NH_FLD_ETH_LENGTH			BIT(2)
+#define NH_FLD_ETH_TYPE				BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM			BIT(4)
+#define NH_FLD_ETH_PADDING			BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS			(BIT(6) - 1)
+
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI			BIT(0)
+#define NH_FLD_VLAN_CFI				BIT(1)
+#define NH_FLD_VLAN_VID				BIT(2)
+#define NH_FLD_VLAN_LENGTH			BIT(3)
+#define NH_FLD_VLAN_TYPE			BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS			(BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI				(NH_FLD_VLAN_VPRI | \
+						 NH_FLD_VLAN_CFI | \
+						 NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER				BIT(0)
+#define NH_FLD_IP_DSCP				BIT(2)
+#define NH_FLD_IP_ECN				BIT(3)
+#define NH_FLD_IP_PROTO				BIT(4)
+#define NH_FLD_IP_SRC				BIT(5)
+#define NH_FLD_IP_DST				BIT(6)
+#define NH_FLD_IP_TOS_TC			BIT(7)
+#define NH_FLD_IP_ID				BIT(8)
+#define NH_FLD_IP_ALL_FIELDS			(BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER				BIT(0)
+#define NH_FLD_IPV4_HDR_LEN			BIT(1)
+#define NH_FLD_IPV4_TOS				BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN			BIT(3)
+#define NH_FLD_IPV4_ID				BIT(4)
+#define NH_FLD_IPV4_FLAG_D			BIT(5)
+#define NH_FLD_IPV4_FLAG_M			BIT(6)
+#define NH_FLD_IPV4_OFFSET			BIT(7)
+#define NH_FLD_IPV4_TTL				BIT(8)
+#define NH_FLD_IPV4_PROTO			BIT(9)
+#define NH_FLD_IPV4_CKSUM			BIT(10)
+#define NH_FLD_IPV4_SRC_IP			BIT(11)
+#define NH_FLD_IPV4_DST_IP			BIT(12)
+#define NH_FLD_IPV4_OPTS			BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT			BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS			(BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER				BIT(0)
+#define NH_FLD_IPV6_TC				BIT(1)
+#define NH_FLD_IPV6_SRC_IP			BIT(2)
+#define NH_FLD_IPV6_DST_IP			BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR			BIT(4)
+#define NH_FLD_IPV6_FL				BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT			BIT(6)
+#define NH_FLD_IPV6_ID				BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS			(BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE			BIT(0)
+#define NH_FLD_ICMP_CODE			BIT(1)
+#define NH_FLD_ICMP_CKSUM			BIT(2)
+#define NH_FLD_ICMP_ID				BIT(3)
+#define NH_FLD_ICMP_SQ_NUM			BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS			(BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION			BIT(0)
+#define NH_FLD_IGMP_TYPE			BIT(1)
+#define NH_FLD_IGMP_CKSUM			BIT(2)
+#define NH_FLD_IGMP_DATA			BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS			(BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC			BIT(0)
+#define NH_FLD_TCP_PORT_DST			BIT(1)
+#define NH_FLD_TCP_SEQ				BIT(2)
+#define NH_FLD_TCP_ACK				BIT(3)
+#define NH_FLD_TCP_OFFSET			BIT(4)
+#define NH_FLD_TCP_FLAGS			BIT(5)
+#define NH_FLD_TCP_WINDOW			BIT(6)
+#define NH_FLD_TCP_CKSUM			BIT(7)
+#define NH_FLD_TCP_URGPTR			BIT(8)
+#define NH_FLD_TCP_OPTS				BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT			BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS			(BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC			BIT(0)
+#define NH_FLD_UDP_PORT_DST			BIT(1)
+#define NH_FLD_UDP_LEN				BIT(2)
+#define NH_FLD_UDP_CKSUM			BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS			(BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC		BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST		BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS		(BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC		BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST		BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN			BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM		BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI			BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM		BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS		(BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC			BIT(0)
+#define NH_FLD_SCTP_PORT_DST			BIT(1)
+#define NH_FLD_SCTP_VER_TAG			BIT(2)
+#define NH_FLD_SCTP_CKSUM			BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS			(BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC			BIT(0)
+#define NH_FLD_DCCP_PORT_DST			BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS			(BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID				BIT(0)
+#define NH_FLD_IPHC_CID_TYPE			BIT(1)
+#define NH_FLD_IPHC_HCINDEX			BIT(2)
+#define NH_FLD_IPHC_GEN				BIT(3)
+#define NH_FLD_IPHC_D_BIT			BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS			(BIT(5) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE		BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS		BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH		BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN		BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID	BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN	BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID	BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED	BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING	BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END		BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS	(BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT			BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT		BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT		BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT		BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT		BIT(4)
+#define NH_FLD_L2TPV2_VERSION			BIT(5)
+#define NH_FLD_L2TPV2_LEN			BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID			BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID		BIT(8)
+#define NH_FLD_L2TPV2_NS			BIT(9)
+#define NH_FLD_L2TPV2_NR			BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE		BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE		BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS		(BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT		BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT		BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT		BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION		BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH		BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL		BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT			BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV			BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE		BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS		(BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT		BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION		BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID			BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE		BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS		(BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID				BIT(0)
+#define NH_FLD_PPP_COMPRESSED			BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS			(BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER			BIT(0)
+#define NH_FLD_PPPOE_TYPE			BIT(1)
+#define NH_FLD_PPPOE_CODE			BIT(2)
+#define NH_FLD_PPPOE_SID			BIT(3)
+#define NH_FLD_PPPOE_LEN			BIT(4)
+#define NH_FLD_PPPOE_SESSION			BIT(5)
+#define NH_FLD_PPPOE_PID			BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS			(BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID			BIT(0)
+#define NH_FLD_PPPMUX_CKSUM			BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED		BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS		(BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF		BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT		BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN		BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID		BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID		BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS		(BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP				BIT(0)
+#define NH_FLD_LLC_SSAP				BIT(1)
+#define NH_FLD_LLC_CTRL				BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS			(BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID			BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS			(BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI				BIT(0)
+#define NH_FLD_SNAP_PID				BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS			(BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE			BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS		(BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE			BIT(0)
+#define NH_FLD_ARP_PTYPE			BIT(1)
+#define NH_FLD_ARP_HLEN				BIT(2)
+#define NH_FLD_ARP_PLEN				BIT(3)
+#define NH_FLD_ARP_OPER				BIT(4)
+#define NH_FLD_ARP_SHA				BIT(5)
+#define NH_FLD_ARP_SPA				BIT(6)
+#define NH_FLD_ARP_THA				BIT(7)
+#define NH_FLD_ARP_TPA				BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS			(BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC			BIT(0)
+#define NH_FLD_RFC2684_NLPID			BIT(1)
+#define NH_FLD_RFC2684_OUI			BIT(2)
+#define NH_FLD_RFC2684_PID			BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI			BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX			BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS		(BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT		BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID		BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS		(BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER			BIT(0)
+#define NH_FLD_PAYLOAD_SIZE			BIT(1)
+#define NH_FLD_MAX_FRM_SIZE			BIT(2)
+#define NH_FLD_MIN_FRM_SIZE			BIT(3)
+#define NH_FLD_PAYLOAD_TYPE			BIT(4)
+#define NH_FLD_FRAME_SIZE			BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS		(BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE				BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS			(BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP			BIT(0)
+#define NH_FLD_MINENCAP_DST_IP			BIT(1)
+#define NH_FLD_MINENCAP_TYPE			BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS		(BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI			BIT(0)
+#define NH_FLD_IPSEC_AH_NH			BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS		(BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI			BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM		BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS		(BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK			BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS	(BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG			BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS		(BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID				BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+	NET_PROT_NONE = 0,
+	NET_PROT_PAYLOAD,
+	NET_PROT_ETH,
+	NET_PROT_VLAN,
+	NET_PROT_IPV4,
+	NET_PROT_IPV6,
+	NET_PROT_IP,
+	NET_PROT_TCP,
+	NET_PROT_UDP,
+	NET_PROT_UDP_LITE,
+	NET_PROT_IPHC,
+	NET_PROT_SCTP,
+	NET_PROT_SCTP_CHUNK_DATA,
+	NET_PROT_PPPOE,
+	NET_PROT_PPP,
+	NET_PROT_PPPMUX,
+	NET_PROT_PPPMUX_SUBFRM,
+	NET_PROT_L2TPV2,
+	NET_PROT_L2TPV3_CTRL,
+	NET_PROT_L2TPV3_SESS,
+	NET_PROT_LLC,
+	NET_PROT_LLC_SNAP,
+	NET_PROT_NLPID,
+	NET_PROT_SNAP,
+	NET_PROT_MPLS,
+	NET_PROT_IPSEC_AH,
+	NET_PROT_IPSEC_ESP,
+	NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+	NET_PROT_MACSEC,
+	NET_PROT_GRE,
+	NET_PROT_MINENCAP,
+	NET_PROT_DCCP,
+	NET_PROT_ICMP,
+	NET_PROT_IGMP,
+	NET_PROT_ARP,
+	NET_PROT_CAPWAP_DATA,
+	NET_PROT_CAPWAP_CTRL,
+	NET_PROT_RFC2684,
+	NET_PROT_ICMPV6,
+	NET_PROT_FCOE,
+	NET_PROT_FIP,
+	NET_PROT_ISCSI,
+	NET_PROT_GTP,
+	NET_PROT_USER_DEFINED_L2,
+	NET_PROT_USER_DEFINED_L3,
+	NET_PROT_USER_DEFINED_L4,
+	NET_PROT_USER_DEFINED_L5,
+	NET_PROT_USER_DEFINED_SHIM1,
+	NET_PROT_USER_DEFINED_SHIM2,
+
+	NET_PROT_DUMMY_LAST
+};
+
 /**
  * struct dpkg_extract - A structure for defining a single extraction
  * @type: Determines how the union below is interpreted:
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 #ifndef _FSL_DPNI_CMD_H
 #define _FSL_DPNI_CMD_H
@@ -39,9 +11,11 @@
 #define DPNI_VER_MAJOR				7
 #define DPNI_VER_MINOR				0
 #define DPNI_CMD_BASE_VERSION			1
+#define DPNI_CMD_2ND_VERSION			2
 #define DPNI_CMD_ID_OFFSET			4
 
 #define DPNI_CMD(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
 
 #define DPNI_CMDID_OPEN					DPNI_CMD(0x801)
 #define DPNI_CMDID_CLOSE				DPNI_CMD(0x800)
@@ -64,16 +38,18 @@
 #define DPNI_CMDID_GET_IRQ_STATUS			DPNI_CMD(0x016)
 #define DPNI_CMDID_CLEAR_IRQ_STATUS			DPNI_CMD(0x017)
 
-#define DPNI_CMDID_SET_POOLS				DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS				DPNI_CMD_V2(0x200)
 #define DPNI_CMDID_SET_ERRORS_BEHAVIOR			DPNI_CMD(0x20B)
 
 #define DPNI_CMDID_GET_QDID				DPNI_CMD(0x210)
 #define DPNI_CMDID_GET_TX_DATA_OFFSET			DPNI_CMD(0x212)
 #define DPNI_CMDID_GET_LINK_STATE			DPNI_CMD(0x215)
+#define DPNI_CMDID_GET_LINK_STATE_V2			DPNI_CMD_V2(0x215)
 #define DPNI_CMDID_SET_MAX_FRAME_LENGTH			DPNI_CMD(0x216)
 #define DPNI_CMDID_GET_MAX_FRAME_LENGTH			DPNI_CMD(0x217)
 #define DPNI_CMDID_SET_LINK_CFG				DPNI_CMD(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING			DPNI_CMD(0x21B)
+#define DPNI_CMDID_SET_LINK_CFG_V2			DPNI_CMD_V2(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING			DPNI_CMD_V2(0x21B)
 
 #define DPNI_CMDID_SET_MCAST_PROMISC			DPNI_CMD(0x220)
 #define DPNI_CMDID_GET_MCAST_PROMISC			DPNI_CMD(0x221)
@@ -87,11 +63,16 @@
 
 #define DPNI_CMDID_SET_RX_TC_DIST			DPNI_CMD(0x235)
 
+#define DPNI_CMDID_SET_QOS_TBL				DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT				DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT			DPNI_CMD(0x242)
 #define DPNI_CMDID_ADD_FS_ENT				DPNI_CMD(0x244)
 #define DPNI_CMDID_REMOVE_FS_ENT			DPNI_CMD(0x245)
 #define DPNI_CMDID_CLR_FS_ENT				DPNI_CMD(0x246)
 
-#define DPNI_CMDID_GET_STATISTICS			DPNI_CMD(0x25D)
+#define DPNI_CMDID_SET_TX_PRIORITIES			DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_GET_STATISTICS			DPNI_CMD_V2(0x25D)
+#define DPNI_CMDID_RESET_STATISTICS			DPNI_CMD(0x25E)
 #define DPNI_CMDID_GET_QUEUE				DPNI_CMD(0x25F)
 #define DPNI_CMDID_SET_QUEUE				DPNI_CMD(0x260)
 #define DPNI_CMDID_GET_TAILDROP				DPNI_CMD(0x261)
@@ -110,6 +91,9 @@
 #define DPNI_CMDID_GET_OFFLOAD				DPNI_CMD(0x26B)
 #define DPNI_CMDID_SET_OFFLOAD				DPNI_CMD(0x26C)
 
+#define DPNI_CMDID_SET_RX_FS_DIST			DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST			DPNI_CMD(0x274)
+
 /* Macros for accessing command fields smaller than 1byte */
 #define DPNI_MASK(field)	\
 	GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -126,13 +110,14 @@ struct dpni_cmd_open {
 
 #define DPNI_BACKUP_POOL(val, order)	(((val) & 0x1) << (order))
 struct dpni_cmd_set_pools {
-	/* cmd word 0 */
 	u8 num_dpbp;
 	u8 backup_pool_mask;
 	__le16 pad;
-	/* cmd word 0..4 */
-	__le32 dpbp_id[DPNI_MAX_DPBP];
-	/* cmd word 4..6 */
+	struct {
+		__le16 dpbp_id;
+		u8 priority_mask;
+		u8 pad;
+	} pool[DPNI_MAX_DPBP];
 	__le16 buffer_size[DPNI_MAX_DPBP];
 };
 
@@ -303,6 +288,7 @@ struct dpni_rsp_get_tx_data_offset {
 
 struct dpni_cmd_get_statistics {
 	u8 page_number;
+	u8 param;
 };
 
 struct dpni_rsp_get_statistics {
@@ -319,8 +305,22 @@ struct dpni_cmd_set_link_cfg {
 	__le64 options;
 };
 
+struct dpni_cmd_set_link_cfg_v2 {
+	/* cmd word 0 */
+	__le64 pad0;
+	/* cmd word 1 */
+	__le32 rate;
+	__le32 pad1;
+	/* cmd word 2 */
+	__le64 options;
+	/* cmd word 3 */
+	__le64 advertising;
+};
+
 #define DPNI_LINK_STATE_SHIFT		0
 #define DPNI_LINK_STATE_SIZE		1
+#define DPNI_STATE_VALID_SHIFT		1
+#define DPNI_STATE_VALID_SIZE		1
 
 struct dpni_rsp_get_link_state {
 	/* response word 0 */
@@ -335,6 +335,39 @@ struct dpni_rsp_get_link_state {
 	__le64 options;
 };
 
+struct dpni_rsp_get_link_state_v2 {
+	/* response word 0 */
+	__le32 pad0;
+	/* from LSB: up:1, valid:1 */
+	u8 flags;
+	u8 pad1[3];
+	/* response word 1 */
+	__le32 rate;
+	__le32 pad2;
+	/* response word 2 */
+	__le64 options;
+	/* cmd word 3 */
+	__le64 supported;
+	/* cmd word 4 */
+	__le64 advertising;
+};
+
+#define DPNI_COUPLED_SHIFT	0
+#define DPNI_COUPLED_SIZE	1
+
+struct dpni_cmd_set_tx_shaping {
+	/* cmd word 0 */
+	__le16 tx_cr_max_burst_size;
+	__le16 tx_er_max_burst_size;
+	__le32 pad;
+	/* cmd word 1 */
+	__le32 tx_cr_rate_limit;
+	__le32 tx_er_rate_limit;
+	/* cmd word 2 */
+	/* from LSB: coupled:1 */
+	u8 coupled;
+};
+
 struct dpni_cmd_set_max_frame_length {
 	__le16 max_frame_length;
 };
@@ -394,6 +427,24 @@ struct dpni_cmd_clear_mac_filters {
 	u8 flags;
 };
 
+#define DPNI_SEPARATE_GRP_SHIFT 0
+#define DPNI_SEPARATE_GRP_SIZE  1
+#define DPNI_MODE_1_SHIFT		0
+#define DPNI_MODE_1_SIZE		4
+#define DPNI_MODE_2_SHIFT		4
+#define DPNI_MODE_2_SIZE		4
+
+struct dpni_cmd_set_tx_priorities {
+	__le16 flags;
+	u8 prio_group_A;
+	u8 prio_group_B;
+	__le32 pad0;
+	u8 modes[4];
+	__le32 pad1;
+	__le64 pad2;
+	__le16 delta_bandwidth[8];
+};
+
 #define DPNI_DIST_MODE_SHIFT		0
 #define DPNI_DIST_MODE_SIZE		4
 #define DPNI_MISS_ACTION_SHIFT		4
@@ -503,6 +554,63 @@ struct dpni_cmd_set_queue {
 	__le64 user_context;
 };
 
+#define DPNI_DISCARD_ON_MISS_SHIFT	0
+#define DPNI_DISCARD_ON_MISS_SIZE	1
+
+struct dpni_cmd_set_qos_table {
+	__le32 pad;
+	u8 default_tc;
+	/* only the LSB */
+	u8 discard_on_miss;
+	__le16 pad1[21];
+	__le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+	__le16 pad;
+	u8 tc_id;
+	u8 key_size;
+	__le16 index;
+	__le16 pad2;
+	__le64 key_iova;
+	__le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+	u8 pad1[3];
+	u8 key_size;
+	__le32 pad2;
+	__le64 key_iova;
+	__le64 mask_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+	/* cmd word 0 */
+	__le16 options;
+	u8 tc_id;
+	u8 key_size;
+	__le16 index;
+	__le16 flow_id;
+	/* cmd word 1 */
+	__le64 key_iova;
+	/* cmd word 2 */
+	__le64 mask_iova;
+	/* cmd word 3 */
+	__le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+	/* cmd word 0 */
+	__le16 pad0;
+	u8 tc_id;
+	u8 key_size;
+	__le32 pad1;
+	/* cmd word 1 */
+	__le64 key_iova;
+	/* cmd word 2 */
+	__le64 mask_iova;
+};
+
 struct dpni_cmd_set_taildrop {
 	/* cmd word 0 */
 	u8 congestion_point;
@@ -538,4 +646,79 @@ struct dpni_rsp_get_taildrop {
 	__le32 threshold;
 };
 
+struct dpni_rsp_get_api_version {
+	u16 major;
+	u16 minor;
+};
+
+#define DPNI_DEST_TYPE_SHIFT		0
+#define DPNI_DEST_TYPE_SIZE		4
+#define DPNI_CONG_UNITS_SHIFT		4
+#define DPNI_CONG_UNITS_SIZE		2
+
+struct dpni_cmd_set_congestion_notification {
+	/* cmd word 0 */
+	u8 qtype;
+	u8 tc;
+	u8 pad[6];
+	/* cmd word 1 */
+	__le32 dest_id;
+	__le16 notification_mode;
+	u8 dest_priority;
+	/* from LSB: dest_type: 4 units:2 */
+	u8 type_units;
+	/* cmd word 2 */
+	__le64 message_iova;
+	/* cmd word 3 */
+	__le64 message_ctx;
+	/* cmd word 4 */
+	__le32 threshold_entry;
+	__le32 threshold_exit;
+};
+
+struct dpni_cmd_get_congestion_notification {
+	/* cmd word 0 */
+	u8 qtype;
+	u8 tc;
+};
+
+struct dpni_rsp_get_congestion_notification {
+	/* cmd word 0 */
+	__le64 pad;
+	/* cmd word 1 */
+	__le32 dest_id;
+	__le16 notification_mode;
+	u8 dest_priority;
+	/* from LSB: dest_type: 4 units:2 */
+	u8 type_units;
+	/* cmd word 2 */
+	__le64 message_iova;
+	/* cmd word 3 */
+	__le64 message_ctx;
+	/* cmd word 4 */
+	__le32 threshold_entry;
+	__le32 threshold_exit;
+};
+
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT	0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE	1
+struct dpni_cmd_set_rx_fs_dist {
+	__le16 dist_size;
+	u8 enable;
+	u8 tc;
+	__le16 miss_flow_id;
+	__le16 pad;
+	__le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT	0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE	1
+struct dpni_cmd_set_rx_hash_dist {
+	__le16 dist_size;
+	u8 enable;
+	u8 tc;
+	__le32 pad;
+	__le64 key_cfg_iova;
+};
+
 #endif /* _FSL_DPNI_CMD_H */
| --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
 | |
| +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
 | |
| @@ -1,34 +1,6 @@
 | |
| +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 | |
|  /* Copyright 2013-2016 Freescale Semiconductor Inc.
 | |
|   * Copyright 2016 NXP
 | |
| - *
 | |
| - * Redistribution and use in source and binary forms, with or without
 | |
| - * modification, are permitted provided that the following conditions are met:
 | |
| - * * Redistributions of source code must retain the above copyright
 | |
| - * notice, this list of conditions and the following disclaimer.
 | |
| - * * Redistributions in binary form must reproduce the above copyright
 | |
| - * notice, this list of conditions and the following disclaimer in the
 | |
| - * documentation and/or other materials provided with the distribution.
 | |
| - * * Neither the name of the above-listed copyright holders nor the
 | |
| - * names of any contributors may be used to endorse or promote products
 | |
| - * derived from this software without specific prior written permission.
 | |
| - *
 | |
| - *
 | |
| - * ALTERNATIVELY, this software may be distributed under the terms of the
 | |
| - * GNU General Public License ("GPL") as published by the Free Software
 | |
| - * Foundation, either version 2 of that License or (at your option) any
 | |
| - * later version.
 | |
| - *
 | |
| - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 | |
| - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 | |
| - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 | |
| - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 | |
| - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 | |
| - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 | |
| - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 | |
| - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 | |
| - * POSSIBILITY OF SUCH DAMAGE.
 | |
|   */
 | |
|  #include <linux/kernel.h>
 | |
|  #include <linux/errno.h>
 | |
| @@ -122,7 +94,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
 | |
|  	      int dpni_id,
 | |
|  	      u16 *token)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_open *cmd_params;
 | |
|  
 | |
|  	int err;
 | |
| @@ -160,7 +132,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
 | |
|  	       u32 cmd_flags,
 | |
|  	       u16 token)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  
 | |
|  	/* prepare command */
 | |
|  	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
 | |
| @@ -188,7 +160,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
 | |
|  		   u16 token,
 | |
|  		   const struct dpni_pools_cfg *cfg)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_set_pools *cmd_params;
 | |
|  	int i;
 | |
|  
 | |
| @@ -199,7 +171,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
 | |
|  	cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
 | |
|  	cmd_params->num_dpbp = cfg->num_dpbp;
 | |
|  	for (i = 0; i < DPNI_MAX_DPBP; i++) {
 | |
| -		cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
 | |
| +		cmd_params->pool[i].dpbp_id =
 | |
| +			cpu_to_le16(cfg->pools[i].dpbp_id);
 | |
| +		cmd_params->pool[i].priority_mask =
 | |
| +			cfg->pools[i].priority_mask;
 | |
|  		cmd_params->buffer_size[i] =
 | |
|  			cpu_to_le16(cfg->pools[i].buffer_size);
 | |
|  		cmd_params->backup_pool_mask |=
 | |
| @@ -222,7 +197,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
 | |
|  		u32 cmd_flags,
 | |
|  		u16 token)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  
 | |
|  	/* prepare command */
 | |
|  	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
 | |
| @@ -245,7 +220,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
 | |
|  		 u32 cmd_flags,
 | |
|  		 u16 token)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  
 | |
|  	/* prepare command */
 | |
|  	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
 | |
| @@ -270,7 +245,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
 | |
|  		    u16 token,
 | |
|  		    int *en)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_rsp_is_enabled *rsp_params;
 | |
|  	int err;
 | |
|  
 | |
| @@ -303,7 +278,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
 | |
|  	       u32 cmd_flags,
 | |
|  	       u16 token)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  
 | |
|  	/* prepare command */
 | |
|  	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
 | |
| @@ -335,7 +310,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
 | |
|  			u8 irq_index,
 | |
|  			u8 en)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_set_irq_enable *cmd_params;
 | |
|  
 | |
|  	/* prepare command */
 | |
| @@ -366,7 +341,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
 | |
|  			u8 irq_index,
 | |
|  			u8 *en)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_get_irq_enable *cmd_params;
 | |
|  	struct dpni_rsp_get_irq_enable *rsp_params;
 | |
|  
 | |
| @@ -413,7 +388,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
 | |
|  		      u8 irq_index,
 | |
|  		      u32 mask)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_set_irq_mask *cmd_params;
 | |
|  
 | |
|  	/* prepare command */
 | |
| @@ -447,7 +422,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
 | |
|  		      u8 irq_index,
 | |
|  		      u32 *mask)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_get_irq_mask *cmd_params;
 | |
|  	struct dpni_rsp_get_irq_mask *rsp_params;
 | |
|  	int err;
 | |
| @@ -489,7 +464,7 @@ int dpni_get_irq_status(struct fsl_mc_io
 | |
|  			u8 irq_index,
 | |
|  			u32 *status)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_get_irq_status *cmd_params;
 | |
|  	struct dpni_rsp_get_irq_status *rsp_params;
 | |
|  	int err;
 | |
| @@ -532,7 +507,7 @@ int dpni_clear_irq_status(struct fsl_mc_
 | |
|  			  u8 irq_index,
 | |
|  			  u32 status)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_cmd_clear_irq_status *cmd_params;
 | |
|  
 | |
|  	/* prepare command */
 | |
| @@ -561,7 +536,7 @@ int dpni_get_attributes(struct fsl_mc_io
 | |
|  			u16 token,
 | |
|  			struct dpni_attr *attr)
 | |
|  {
 | |
| -	struct mc_command cmd = { 0 };
 | |
| +	struct fsl_mc_command cmd = { 0 };
 | |
|  	struct dpni_rsp_get_attr *rsp_params;
 | |
|  
 | |
|  	int err;
 | |
@@ -609,7 +584,7 @@ int dpni_set_errors_behavior(struct fsl_
 			     u16 token,
 			     struct dpni_error_cfg *cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_errors_behavior *cmd_params;
 
 	/* prepare command */
@@ -641,7 +616,7 @@ int dpni_get_buffer_layout(struct fsl_mc
 			   enum dpni_queue_type qtype,
 			   struct dpni_buffer_layout *layout)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_buffer_layout *cmd_params;
 	struct dpni_rsp_get_buffer_layout *rsp_params;
 	int err;
@@ -689,7 +664,7 @@ int dpni_set_buffer_layout(struct fsl_mc
 			   enum dpni_queue_type qtype,
 			   const struct dpni_buffer_layout *layout)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_buffer_layout *cmd_params;
 
 	/* prepare command */
@@ -731,7 +706,7 @@ int dpni_set_offload(struct fsl_mc_io *m
 		     enum dpni_offload type,
 		     u32 config)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_offload *cmd_params;
 
 	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
@@ -750,7 +725,7 @@ int dpni_get_offload(struct fsl_mc_io *m
 		     enum dpni_offload type,
 		     u32 *config)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_offload *cmd_params;
 	struct dpni_rsp_get_offload *rsp_params;
 	int err;
@@ -792,7 +767,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
 		  enum dpni_queue_type qtype,
 		  u16 *qdid)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_qdid *cmd_params;
 	struct dpni_rsp_get_qdid *rsp_params;
 	int err;
@@ -830,7 +805,7 @@ int dpni_get_tx_data_offset(struct fsl_m
 			    u16 token,
 			    u16 *data_offset)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_tx_data_offset *rsp_params;
 	int err;
 
@@ -865,7 +840,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
 		      u16 token,
 		      const struct dpni_link_cfg *cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_link_cfg *cmd_params;
 
 	/* prepare command */
@@ -881,6 +856,36 @@ int dpni_set_link_cfg(struct fsl_mc_io *
 }
 
 /**
+ * dpni_set_link_cfg_v2() - Set the link configuration.
+ * @mc_io:      Pointer to MC portal's I/O object
+ * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:      Token of DPNI object
+ * @cfg:        Link configuration
+ *
+ * Return:      '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 const struct dpni_link_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_link_cfg_v2 *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG_V2,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_link_cfg_v2 *)cmd.params;
+	cmd_params->rate = cpu_to_le32(cfg->rate);
+	cmd_params->options = cpu_to_le64(cfg->options);
+	cmd_params->advertising = cpu_to_le64(cfg->advertising);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
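
For reference, a hedged usage sketch (editorial, not part of the patch): mc_io and token are assumed to come from fsl_mc_portal_allocate() and dpni_open() elsewhere, and the DPNI_LINK_OPT_*/DPNI_ADVERTISED_* flags are the ones this patch adds to dpni.h.

/* Usage sketch (editorial): advertise autoneg plus 1G full duplex. */
static int example_set_link_v2(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_link_cfg cfg = {
		.rate		= 1000,	/* Mbps */
		.options	= DPNI_LINK_OPT_AUTONEG,
		.advertising	= DPNI_ADVERTISED_1000BASET_FULL |
				  DPNI_ADVERTISED_AUTONEG,
	};

	return dpni_set_link_cfg_v2(mc_io, 0, token, &cfg);
}
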
+
+/**
  * dpni_get_link_state() - Return the link state (either up or down)
  * @mc_io:	Pointer to MC portal's I/O object
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
@@ -894,7 +899,7 @@ int dpni_get_link_state(struct fsl_mc_io
 			u16 token,
 			struct dpni_link_state *state)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_link_state *rsp_params;
 	int err;
 
@@ -918,6 +923,84 @@ int dpni_get_link_state(struct fsl_mc_io
 }
 
 /**
+ * dpni_get_link_state_v2() - Return the link state (either up or down)
+ * @mc_io:      Pointer to MC portal's I/O object
+ * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:      Token of DPNI object
+ * @state:      Returned link state;
+ *
+ * Return:      '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   struct dpni_link_state *state)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_link_state_v2 *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE_V2,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_link_state_v2 *)cmd.params;
+	state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+	state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
+	state->rate = le32_to_cpu(rsp_params->rate);
+	state->options = le64_to_cpu(rsp_params->options);
+	state->supported = le64_to_cpu(rsp_params->supported);
+	state->advertising = le64_to_cpu(rsp_params->advertising);
+
+	return 0;
+}
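
The read side pairs with it. Unlike v1, callers should check the new state_valid flag before trusting the other fields; a hedged sketch under the same assumptions as above:

/* Usage sketch (editorial): poll the v2 link state. */
static int example_show_link_v2(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_link_state state = { 0 };
	int err;

	err = dpni_get_link_state_v2(mc_io, 0, token, &state);
	if (err)
		return err;

	if (!state.state_valid)
		return -EAGAIN;	/* MC has not latched a valid state yet */

	pr_info("link %s, %u Mbps\n", state.up ? "up" : "down", state.rate);
	return 0;
}
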
+
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io:		Pointer to MC portal's I/O object
+ * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPNI object
+ * @tx_cr_shaper:	TX committed rate shaping configuration
+ * @tx_er_shaper:	TX excess rate shaping configuration
+ * @coupled:		Committed and excess rate shapers are coupled
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+			const struct dpni_tx_shaping_cfg *tx_er_shaper,
+			int coupled)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_tx_shaping *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+	cmd_params->tx_cr_max_burst_size =
+				cpu_to_le16(tx_cr_shaper->max_burst_size);
+	cmd_params->tx_er_max_burst_size =
+				cpu_to_le16(tx_er_shaper->max_burst_size);
+	cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
+	cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
+	dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
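
Both shapers are programmed in one command; coupled=1 marks the committed and excess shapers as coupled, as the kernel-doc above notes. A hedged sketch with illustrative numbers (editorial, not part of the patch):

/* Usage sketch (editorial): 100 Mbps committed / 200 Mbps excess. */
static int example_shape_tx(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_tx_shaping_cfg cr = {
		.rate_limit	= 100,		/* Mbps */
		.max_burst_size	= 0x1000,	/* bytes, up to 64KB */
	};
	struct dpni_tx_shaping_cfg er = {
		.rate_limit	= 200,
		.max_burst_size	= 0x1000,
	};

	return dpni_set_tx_shaping(mc_io, 0, token, &cr, &er, 1 /* coupled */);
}
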
+
+/**
  * dpni_set_max_frame_length() - Set the maximum received frame length.
  * @mc_io:	Pointer to MC portal's I/O object
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
@@ -933,7 +1016,7 @@ int dpni_set_max_frame_length(struct fsl
 			      u16 token,
 			      u16 max_frame_length)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_max_frame_length *cmd_params;
 
 	/* prepare command */
@@ -963,7 +1046,7 @@ int dpni_get_max_frame_length(struct fsl
 			      u16 token,
 			      u16 *max_frame_length)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_max_frame_length *rsp_params;
 	int err;
 
@@ -998,7 +1081,7 @@ int dpni_set_multicast_promisc(struct fs
 			       u16 token,
 			       int en)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_multicast_promisc *cmd_params;
 
 	/* prepare command */
@@ -1026,7 +1109,7 @@ int dpni_get_multicast_promisc(struct fs
 			       u16 token,
 			       int *en)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_multicast_promisc *rsp_params;
 	int err;
 
@@ -1061,7 +1144,7 @@ int dpni_set_unicast_promisc(struct fsl_
 			     u16 token,
 			     int en)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_unicast_promisc *cmd_params;
 
 	/* prepare command */
@@ -1089,7 +1172,7 @@ int dpni_get_unicast_promisc(struct fsl_
 			     u16 token,
 			     int *en)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_unicast_promisc *rsp_params;
 	int err;
 
@@ -1124,7 +1207,7 @@ int dpni_set_primary_mac_addr(struct fsl
 			      u16 token,
 			      const u8 mac_addr[6])
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_primary_mac_addr *cmd_params;
 	int i;
 
@@ -1154,7 +1237,7 @@ int dpni_get_primary_mac_addr(struct fsl
 			      u16 token,
 			      u8 mac_addr[6])
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_primary_mac_addr *rsp_params;
 	int i, err;
 
@@ -1193,7 +1276,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
 			   u16 token,
 			   u8 mac_addr[6])
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_rsp_get_port_mac_addr *rsp_params;
 	int i, err;
 
@@ -1229,7 +1312,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
 		      u16 token,
 		      const u8 mac_addr[6])
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_add_mac_addr *cmd_params;
 	int i;
 
@@ -1259,7 +1342,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
 			 u16 token,
 			 const u8 mac_addr[6])
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_remove_mac_addr *cmd_params;
 	int i;
 
@@ -1293,7 +1376,7 @@ int dpni_clear_mac_filters(struct fsl_mc
 			   int unicast,
 			   int multicast)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_clear_mac_filters *cmd_params;
 
 	/* prepare command */
@@ -1309,6 +1392,55 @@ int dpni_clear_mac_filters(struct fsl_mc
 }
 
 /**
+ * dpni_set_tx_priorities() - Set transmission TC priority configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	Transmission selection configuration
+ *
+ * warning:	Allowed only when DPNI is disabled
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   const struct dpni_tx_priorities_cfg *cfg)
+{
+	struct dpni_cmd_set_tx_priorities *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+	dpni_set_field(cmd_params->flags,
+		       SEPARATE_GRP,
+		       cfg->separate_groups);
+	cmd_params->prio_group_A = cfg->prio_group_A;
+	cmd_params->prio_group_B = cfg->prio_group_B;
+
+	for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
+		dpni_set_field(cmd_params->modes[i / 2],
+			       MODE_1,
+			       cfg->tc_sched[i].mode);
+		dpni_set_field(cmd_params->modes[i / 2],
+			       MODE_2,
+			       cfg->tc_sched[i + 1].mode);
+	}
+
+	for (i = 0; i < DPNI_MAX_TC; i++) {
+		cmd_params->delta_bandwidth[i] =
+				cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
+	}
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
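
The first loop above packs two TC scheduling modes per byte, hence the step of two. A hedged sketch configuring one strict-priority TC plus two weighted TCs (editorial, illustrative values; weights range 100-10000 per the dpni.h comment below, and the call is only allowed while the DPNI is disabled):

/* Usage sketch (editorial): TC0 strict priority, TC1/TC2 weighted 1:2. */
static int example_tx_prios(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_tx_priorities_cfg cfg = { 0 };

	cfg.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
	cfg.tc_sched[1].mode = DPNI_TX_SCHED_WEIGHTED_A;
	cfg.tc_sched[1].delta_bandwidth = 100;
	cfg.tc_sched[2].mode = DPNI_TX_SCHED_WEIGHTED_A;
	cfg.tc_sched[2].delta_bandwidth = 200;
	cfg.prio_group_A = 1;
	cfg.separate_groups = 0;

	return dpni_set_tx_priorities(mc_io, 0, token, &cfg);
}
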
+
+/**
  * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
  * @mc_io:	Pointer to MC portal's I/O object
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1327,7 +1459,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
 			u8 tc_id,
 			const struct dpni_rx_tc_dist_cfg *cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_rx_tc_dist *cmd_params;
 
 	/* prepare command */
@@ -1346,6 +1478,215 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
 	return mc_send_command(mc_io, &cmd);
 }
 
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ *			prepare the key_cfg_iova parameter
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_qos_tbl_cfg *cfg)
+{
+	struct dpni_cmd_set_qos_table *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+	cmd_params->default_tc = cfg->default_tc;
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+	dpni_set_field(cmd_params->discard_on_miss,
+		       ENABLE,
+		       cfg->discard_on_miss);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
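
key_cfg_iova must be the DMA address of a 256-byte buffer already filled by the key-profile helper (dpni_prepare_key_cfg(), referred to as dpkg_prepare_key_cfg() in the comment above). A hedged sketch with that preparation elided; key_iova is a hypothetical, pre-mapped address:

/* Usage sketch (editorial): QoS table that falls back to TC 0 on miss.
 * key_iova: DMA address of a 256-byte key-profile buffer (not shown).
 */
static int example_set_qos(struct fsl_mc_io *mc_io, u16 token, u64 key_iova)
{
	struct dpni_qos_tbl_cfg cfg = {
		.key_cfg_iova	 = key_iova,
		.discard_on_miss = 0,	/* miss -> default_tc, not drop */
		.default_tc	 = 0,
	};

	return dpni_set_qos_table(mc_io, 0, token, &cfg);
}
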
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	QoS rule to add
+ * @tc_id:	Traffic class selection (0-7)
+ * @index:	Location in the QoS table where to insert the entry.
+ *		Only relevant if MASKING is enabled for QoS classification on
+ *		this DPNI, it is ignored for exact match.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_rule_cfg *cfg,
+		       u8 tc_id,
+		       u16 index)
+{
+	struct dpni_cmd_add_qos_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->index = cpu_to_le16(index);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	QoS rule to remove
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rule_cfg *cfg)
+{
+	struct dpni_cmd_remove_qos_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ *					notification configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id:	Traffic class selection (0-7)
+ * @cfg:	Congestion notification configuration
+ *
+ * Return:	'0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+			struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			enum dpni_queue_type qtype,
+			u8 tc_id,
+			const struct dpni_congestion_notification_cfg *cfg)
+{
+	struct dpni_cmd_set_congestion_notification *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(
+			DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+			cmd_flags,
+			token);
+	cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc_id;
+	cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+	cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+	cmd_params->dest_priority = cfg->dest_cfg.priority;
+	dpni_set_field(cmd_params->type_units, DEST_TYPE,
+		       cfg->dest_cfg.dest_type);
+	dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+	cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+	cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+	cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+	cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ *	notification configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id:	bits 7-4 contain ceetm channel index (valid only for TX);
+ *		bits 3-0 contain traffic class.
+ *		Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *		tc_id parameter.
+ * @cfg:	congestion notification configuration
+ *
+ * Return:	'0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(
+			struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			enum dpni_queue_type qtype,
+			u8 tc_id,
+			struct dpni_congestion_notification_cfg *cfg)
+{
+	struct dpni_rsp_get_congestion_notification *rsp_params;
+	struct dpni_cmd_get_congestion_notification *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(
+				DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+				cmd_flags,
+				token);
+	cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc_id;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
+	cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
+	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+	cfg->dest_cfg.priority = rsp_params->dest_priority;
+	cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
+						 DEST_TYPE);
+
+	return 0;
+}
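
A hedged read-modify-write sketch (editorial, not part of the patch): fetch the current Tx congestion configuration for a channel/TC pair built with the DPNI_BUILD_CH_TC() macro this patch adds to dpni.h, then double the entry threshold:

/* Usage sketch (editorial): bump the Tx congestion-entry threshold for
 * ceetm channel 0, TC 1, leaving the rest of the config untouched.
 */
static int example_bump_cong(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_congestion_notification_cfg cfg;
	u8 ch_tc = DPNI_BUILD_CH_TC(0, 1);
	int err;

	err = dpni_get_congestion_notification(mc_io, 0, token,
					       DPNI_QUEUE_TX, ch_tc, &cfg);
	if (err)
		return err;

	cfg.threshold_entry *= 2;
	return dpni_set_congestion_notification(mc_io, 0, token,
						DPNI_QUEUE_TX, ch_tc, &cfg);
}
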
+
 /**
  * dpni_set_queue() - Set queue parameters
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1371,7 +1712,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
 		   u8 options,
 		   const struct dpni_queue *queue)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_queue *cmd_params;
 
 	/* prepare command */
@@ -1419,7 +1760,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
 		   struct dpni_queue *queue,
 		   struct dpni_queue_id *qid)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_queue *cmd_params;
 	struct dpni_rsp_get_queue *rsp_params;
 	int err;
@@ -1463,6 +1804,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
  * @token:	Token of DPNI object
  * @page:	Selects the statistics page to retrieve, see
  *		DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * @param:	Custom parameter for some pages used to select a certain
+ *		statistic source, for example the TC.
  * @stat:	Structure containing the statistics
  *
  * Return:	'0' on Success; Error code otherwise.
@@ -1471,9 +1814,10 @@ int dpni_get_statistics(struct fsl_mc_io
 			u32 cmd_flags,
 			u16 token,
 			u8 page,
+			u8 param,
 			union dpni_statistics *stat)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_statistics *cmd_params;
 	struct dpni_rsp_get_statistics *rsp_params;
 	int i, err;
@@ -1484,6 +1828,7 @@ int dpni_get_statistics(struct fsl_mc_io
 					  token);
 	cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
 	cmd_params->page_number = page;
+	cmd_params->param = param;
 
 	/* send command to mc */
 	err = mc_send_command(mc_io, &cmd);
@@ -1499,6 +1844,29 @@ int dpni_get_statistics(struct fsl_mc_io
 }
 
 /**
+ * dpni_reset_statistics() - Clears DPNI statistics
+ * @mc_io:		Pointer to MC portal's I/O object
+ * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPNI object
+ *
+ * Return:  '0' on Success; Error code otherwise.
+ */
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
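
The new param argument selects a statistics source on pages that need one; page 3 carries the per-TC CEETM counters added to union dpni_statistics in dpni.h below. A hedged sketch (editorial) reading TC 2 and then clearing all counters:

/* Usage sketch (editorial): page-3 CEETM counters for TC 2, then reset.
 * For pages 0-2 param is simply passed as 0.
 */
static int example_tc_stats(struct fsl_mc_io *mc_io, u16 token)
{
	union dpni_statistics stats;
	int err;

	err = dpni_get_statistics(mc_io, 0, token, 3, 2 /* TC */, &stats);
	if (err)
		return err;

	pr_info("TC2: %llu frames dequeued, %llu rejected\n",
		stats.page_3.ceetm_dequeue_frames,
		stats.page_3.ceetm_reject_frames);

	return dpni_reset_statistics(mc_io, 0, token);
}
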
+
+/**
  * dpni_set_taildrop() - Set taildrop per queue or TC
  * @mc_io:	Pointer to MC portal's I/O object
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1506,7 +1874,10 @@ int dpni_get_statistics(struct fsl_mc_io
  * @cg_point:	Congestion point
  * @q_type:	Queue type on which the taildrop is configured.
  *		Only Rx queues are supported for now
- * @tc:		Traffic class to apply this taildrop to
+ * @tc:		bits 7-4 contain ceetm channel index (valid only for TX);
+ *		bits 3-0 contain traffic class.
+ *		Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *		tc parameter.
  * @q_index:	Index of the queue if the DPNI supports multiple queues for
  *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:	Taildrop structure
@@ -1522,7 +1893,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
 		      u8 index,
 		      struct dpni_taildrop *taildrop)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_set_taildrop *cmd_params;
 
 	/* prepare command */
@@ -1550,7 +1921,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
  * @cg_point:	Congestion point
  * @q_type:	Queue type on which the taildrop is configured.
  *		Only Rx queues are supported for now
- * @tc:		Traffic class to apply this taildrop to
+ * @tc:		bits 7-4 contain ceetm channel index (valid only for TX);
+ *		bits 3-0 contain traffic class.
+ *		Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *		tc parameter.
  * @q_index:	Index of the queue if the DPNI supports multiple queues for
  *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:	Taildrop structure
@@ -1566,7 +1940,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
 		      u8 index,
 		      struct dpni_taildrop *taildrop)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpni_cmd_get_taildrop *cmd_params;
 	struct dpni_rsp_get_taildrop *rsp_params;
 	int err;
@@ -1594,3 +1968,187 @@ int dpni_get_taildrop(struct fsl_mc_io *
 
 	return 0;
 }
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of data path network interface API
+ * @minor_ver:	Minor version of data path network interface API
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct dpni_rsp_get_api_version *rsp_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+	*major_ver = le16_to_cpu(rsp_params->major);
+	*minor_ver = le16_to_cpu(rsp_params->minor);
+
+	return 0;
+}
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg: Distribution configuration
+ * If the FS is already enabled with a previous call the classification
+ * key will be changed but all the table rules are kept. If the
+ * existing rules do not match the key the results will not be
+ * predictable. It is the user's responsibility to keep key integrity.
+ * If cfg.enable is set to 1 the command will create a flow steering table
+ * and will classify packets according to this table. The packets that
+ * miss all the table rules will be classified according to settings
+ * made in dpni_set_rx_hash_dist()
+ * If cfg.enable is set to 0 the command will clear flow steering table.
+ * The packets will be classified according to settings made in
+ * dpni_set_rx_hash_dist()
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_rx_dist_cfg *cfg)
+{
+	struct dpni_cmd_set_rx_fs_dist *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+	cmd_params->tc = cfg->tc;
+	cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg: Distribution configuration
+ * If cfg.enable is set to 1 the packets will be classified using a hash
+ * function based on the key received in cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0 the packets will be sent to the queue configured
+ * in dpni_set_rx_dist_default_queue() call
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rx_dist_cfg *cfg)
+{
+	struct dpni_cmd_set_rx_hash_dist *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+	cmd_params->tc = cfg->tc;
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
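
As the comments above describe, FS rules are matched first and misses fall back to the hash distribution (or to fs_miss_flow_id / DPNI_FS_MISS_DROP). A hedged sketch (editorial) enabling an 8-queue hash for TC 0, with key preparation elided as before:

/* Usage sketch (editorial): spread TC 0 across 8 Rx queues by hash.
 * key_iova points to a prepared key profile, as for dpni_set_qos_table().
 */
static int example_hash_dist(struct fsl_mc_io *mc_io, u16 token, u64 key_iova)
{
	struct dpni_rx_dist_cfg cfg = {
		.enable		= 1,
		.tc		= 0,
		.dist_size	= 8,
		.key_cfg_iova	= key_iova,
	};

	return dpni_set_rx_hash_dist(mc_io, 0, token, &cfg);
}
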
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *			(to select a flow ID)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @index:	Location in the FS table where to insert the entry.
+ *		Only relevant if MASKING is enabled for FS
+ *		classification on this DPNI, it is ignored for exact match.
+ * @cfg:	Flow steering rule to add
+ * @action:	Action to be taken as result of a classification hit
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 tc_id,
+		      u16 index,
+		      const struct dpni_rule_cfg *cfg,
+		      const struct dpni_fs_action_cfg *action)
+{
+	struct dpni_cmd_add_fs_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->index = cpu_to_le16(index);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+	cmd_params->options = cpu_to_le16(action->options);
+	cmd_params->flow_id = cpu_to_le16(action->flow_id);
+	cmd_params->flc = cpu_to_le64(action->flc);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *			    traffic class
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @cfg:	Flow steering rule to remove
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 tc_id,
+			 const struct dpni_rule_cfg *cfg)
+{
+	struct dpni_cmd_remove_fs_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
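
That closes the dpni.c additions. A last hedged sketch (editorial) ties the flow-steering pieces together by steering matches of a pre-built rule to a fixed Rx queue; the rule's key/mask buffers are assumed to be DMA-mapped already:

/* Usage sketch (editorial): pin matching flows of TC 0 to Rx queue 3. */
static int example_steer(struct fsl_mc_io *mc_io, u16 token,
			 const struct dpni_rule_cfg *rule)
{
	struct dpni_fs_action_cfg action = {
		.flow_id = 3,	/* target Rx queue */
		.options = 0,	/* no FLC override, no discard */
	};

	return dpni_add_fs_entry(mc_io, 0, token, 0 /* tc_id */,
				 0 /* index */, rule, &action);
}
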
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 #ifndef __FSL_DPNI_H
 #define __FSL_DPNI_H
@@ -52,6 +24,14 @@ struct fsl_mc_io;
  * Maximum number of buffer pools per DPNI
  */
 #define DPNI_MAX_DPBP				8
+/**
+ * Maximum number of senders
+ */
+#define DPNI_MAX_SENDERS			16
+/**
+ * Maximum distribution size
+ */
+#define DPNI_MAX_DIST_SIZE			16
 
 /**
  * All traffic classes considered; see dpni_set_queue()
@@ -123,13 +103,15 @@ struct dpni_pools_cfg {
 	/**
 	 * struct pools - Buffer pools parameters
 	 * @dpbp_id: DPBP object ID
+	 * @priority_mask: priorities served by DPBP
	 * @buffer_size: Buffer size
 	 * @backup_pool: Backup pool
 	 */
 	struct {
-		int	dpbp_id;
+		u16	dpbp_id;
+		u8	priority_mask;
 		u16	buffer_size;
-		int	backup_pool;
+		u8	backup_pool;
 	} pools[DPNI_MAX_DPBP];
 };
 
@@ -476,6 +458,24 @@ union dpni_statistics {
 		u64 egress_confirmed_frames;
 	} page_2;
 	/**
+	 * struct page_3 - Page_3 statistics structure with values for the
+	 *		   selected TC
+	 * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
+	 *			 dequeued
+	 * @ceetm_dequeue_frames: Cumulative count of the number of frames
+	 *			  dequeued
+	 * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
+	 *			frames whose enqueue was rejected
+	 * @ceetm_reject_frames: Cumulative count of all frame enqueues
+	 *			 rejected
+	 */
+	struct {
+		u64 ceetm_dequeue_bytes;
+		u64 ceetm_dequeue_frames;
+		u64 ceetm_reject_bytes;
+		u64 ceetm_reject_frames;
+	} page_3;
+	/**
 	 * struct raw - raw statistics structure
 	 */
 	struct {
@@ -487,8 +487,13 @@ int dpni_get_statistics(struct fsl_mc_io
 			u32			cmd_flags,
 			u16			token,
 			u8			page,
+			u8			param,
 			union dpni_statistics	*stat);
 
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token);
+
 /**
  * Enable auto-negotiation
  */
@@ -505,6 +510,23 @@ int dpni_get_statistics(struct fsl_mc_io
  * Enable a-symmetric pause frames
  */
 #define DPNI_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE		0x0000000000000010ULL
+/**
+ * Advertised link speeds
+ */
+#define DPNI_ADVERTISED_10BASET_FULL           0x0000000000000001ULL
+#define DPNI_ADVERTISED_100BASET_FULL          0x0000000000000002ULL
+#define DPNI_ADVERTISED_1000BASET_FULL         0x0000000000000004ULL
+#define DPNI_ADVERTISED_10000BASET_FULL        0x0000000000000010ULL
+#define DPNI_ADVERTISED_2500BASEX_FULL         0x0000000000000020ULL
+
+/**
+ * Advertise auto-negotiation enabled
+ */
+#define DPNI_ADVERTISED_AUTONEG                0x0000000000000008ULL
 
 /**
  * struct - Structure representing DPNI link configuration
@@ -514,6 +536,7 @@ int dpni_get_statistics(struct fsl_mc_io
 struct dpni_link_cfg {
 	u32 rate;
 	u64 options;
+	u64 advertising;
 };
 
 int dpni_set_link_cfg(struct fsl_mc_io			*mc_io,
@@ -521,6 +544,11 @@ int dpni_set_link_cfg(struct fsl_mc_io
 		      u16				token,
 		      const struct dpni_link_cfg	*cfg);
 
+int dpni_set_link_cfg_v2(struct fsl_mc_io		*mc_io,
+			 u32				cmd_flags,
+			 u16				token,
+			 const struct dpni_link_cfg	*cfg);
+
 /**
  * struct dpni_link_state - Structure representing DPNI link state
  * @rate: Rate
@@ -530,7 +558,10 @@ int dpni_set_link_cfg(struct fsl_mc_io
 struct dpni_link_state {
 	u32	rate;
 	u64	options;
+	u64	supported;
+	u64	advertising;
 	int	up;
+	int	state_valid;
 };
 
 int dpni_get_link_state(struct fsl_mc_io	*mc_io,
@@ -538,6 +569,28 @@ int dpni_get_link_state(struct fsl_mc_io
 			u16			token,
 			struct dpni_link_state	*state);
 
+int dpni_get_link_state_v2(struct fsl_mc_io	*mc_io,
+			   u32			cmd_flags,
+			   u16			token,
+			   struct dpni_link_state	*state);
+
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
+ * @rate_limit: rate in Mbps
+ * @max_burst_size: burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+	u32	rate_limit;
+	u16	max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+			const struct dpni_tx_shaping_cfg *tx_er_shaper,
+			int coupled);
+
 int dpni_set_max_frame_length(struct fsl_mc_io	*mc_io,
 			      u32		cmd_flags,
 			      u16		token,
@@ -639,6 +692,70 @@ int dpni_prepare_key_cfg(const struct dp
 			 u8 *key_cfg_buf);
 
 /**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		key extractions to be used as the QoS criteria by calling
+ *		dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ *		'0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+	u64 key_cfg_iova;
+	int discard_on_miss;
+	u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
+ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
+ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
+ */
+enum dpni_tx_schedule_mode {
+	DPNI_TX_SCHED_STRICT_PRIORITY = 0,
+	DPNI_TX_SCHED_WEIGHTED_A,
+	DPNI_TX_SCHED_WEIGHTED_B,
+};
+
+/**
+ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
+ * @mode:		Scheduling mode
+ * @delta_bandwidth:	Bandwidth represented in weights from 100 to 10000;
+ *	not applicable for 'strict-priority' mode;
+ */
+struct dpni_tx_schedule_cfg {
+	enum dpni_tx_schedule_mode mode;
+	u16 delta_bandwidth;
+};
+
+/**
+ * struct dpni_tx_priorities_cfg - Structure representing transmission
+ *					priorities for DPNI TCs
+ * @tc_sched:	An array of traffic-classes
+ * @prio_group_A: Priority of group A
+ * @prio_group_B: Priority of group B
+ * @separate_groups: Treat A and B groups as separate
+ * @ceetm_ch_idx: ceetm channel index to apply the changes
+ */
+struct dpni_tx_priorities_cfg {
+	struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
+	u8 prio_group_A;
+	u8 prio_group_B;
+	u8 separate_groups;
+};
+
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   const struct dpni_tx_priorities_cfg *cfg);
+
+/**
  * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
  * @dist_size: Set the distribution size;
  *	supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
@@ -784,6 +901,108 @@ enum dpni_congestion_point {
 };
 
 /**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type:	Destination type
+ * @dest_id:	Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority:	Priority selection within the DPIO or DPCON channel; valid
+ *		values are 0-1 or 0-7, depending on the number of priorities
+ *		in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+	enum dpni_dest dest_type;
+	int dest_id;
+	u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER        0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT         0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE            0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER      0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT       0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED  0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL	0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ *					configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state.
+ *		set it to '0' to disable it
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ *		must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ *		is contained in 'options'
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+
+struct dpni_congestion_notification_cfg {
+	enum dpni_congestion_unit units;
+	u32 threshold_entry;
+	u32 threshold_exit;
+	u64 message_ctx;
+	u64 message_iova;
+	struct dpni_dest_cfg dest_cfg;
+	u16 notification_mode;
+};
+
+/** Compose TC parameter for function dpni_set_congestion_notification()
+ * and dpni_get_congestion_notification().
+ */
+#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
+	((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
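
Worked example of the macro (editorial): DPNI_BUILD_CH_TC(1, 3) evaluates to 0x13, that is, channel index 1 in bits 7-4 and traffic class 3 in bits 3-0.
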
+
+int dpni_set_congestion_notification(
+			struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			enum dpni_queue_type qtype,
+			u8 tc_id,
+			const struct dpni_congestion_notification_cfg *cfg);
+
+int dpni_get_congestion_notification(
+			struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			enum dpni_queue_type qtype,
+			u8 tc_id,
+			struct dpni_congestion_notification_cfg *cfg);
+
+/**
  * struct dpni_taildrop - Structure representing the taildrop
  * @enable:	Indicates whether the taildrop is active or not.
  * @units:	Indicates the unit of THRESHOLD. Queue taildrop only supports
@@ -829,4 +1048,124 @@ struct dpni_rule_cfg {
 	u8	key_size;
 };
 
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver);
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_rule_cfg *cfg,
+		       u8 tc_id,
+		       u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token);
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD            0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC            0x2
+
+/*
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *     stashed. FLC value is interpreted as a memory address in this case,
+ *     excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *     to be stashed. Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
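
Following the layout above, an FLC value for stash control is a 64-byte-aligned address with the three stash counts packed into its low six bits. A hedged sketch (editorial, not part of the patch); the result goes into dpni_fs_action_cfg.flc together with DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL in options:

/* Usage sketch (editorial): stash 1 unit (64B) of flow context, 1 unit
 * of annotation and 2 units of frame data. ctx_iova must be 64-byte
 * aligned so the count bits do not corrupt the address.
 */
static u64 example_build_flc(u64 ctx_iova)
{
	u64 flc = ctx_iova & ~0x3FULL;	/* address, low 6 bits cleared */

	flc |= 1 << 0;	/* bits 0-1: flow context units */
	flc |= 1 << 2;	/* bits 2-3: annotation units */
	flc |= 2 << 4;	/* bits 4-5: frame data units */

	return flc;
}
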
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc:	FLC value for traffic matching this rule. Please check the
+ *		Frame Descriptor section in the hardware documentation for
+ *		more information.
+ * @flow_id:	Identifies the Rx queue used for matching traffic. Supported
+ *		values are in range 0 to num_queue-1.
+ * @options:	Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+	u64 flc;
+	u16 flow_id;
+	u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 tc_id,
+		      u16 index,
+		      const struct dpni_rule_cfg *cfg,
+		      const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 tc_id,
+			 const struct dpni_rule_cfg *cfg);
+
+/**
+ * When used for queue_idx in function dpni_set_rx_dist_default_queue(),
+ * this value signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP		((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size:	distribution size; supported values: 1,2,3,4,6,7,8,
+ *		12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ *		512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		the extractions to be used for the distribution key by calling
+ *		dpkg_prepare_key_cfg(); relevant only when enable != 0,
+ *		otherwise it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ *		hash is disabled it will be put into this queue id; use
+ *		DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ *		used only when flow steering distribution is enabled and hash
+ *		distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+	u16 dist_size;
+	u64 key_cfg_iova;
+	u8 enable;
+	u8 tc;
+	u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rx_dist_cfg *cfg);
+
 #endif /* __FSL_DPNI_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/net.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
@@ -1,33 +1,5 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 #ifndef __FSL_NET_H
 #define __FSL_NET_H