/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#ifndef _TXGBE_ETHDEV_H_
#define _TXGBE_ETHDEV_H_

#include <stdint.h>

#include "base/txgbe.h"
#include "txgbe_ptypes.h"
#ifdef RTE_LIB_SECURITY
#include "txgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_time.h>
#include <rte_ethdev.h>
#include <rte_ethdev_core.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* interrupt event bit flags (stored in txgbe_interrupt.flags) */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define TXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
#define TXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
#define TXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
#define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)

/*
 * Defines that were not part of txgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define TXGBE_VFTA_SIZE 128
#define TXGBE_VLAN_TAG_SIZE 4
#define TXGBE_HKEY_MAX_INDEX 10
/* Default value of Max Rx Queue */
#define TXGBE_MAX_RX_QUEUE_NUM	128
#define TXGBE_VMDQ_DCB_NB_QUEUES     TXGBE_MAX_RX_QUEUE_NUM

#ifndef NBBY
#define NBBY	8	/* number of bits in a byte */
#endif
#define TXGBE_HWSTRIP_BITMAP_SIZE \
	(TXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
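/*
 * A minimal sketch of how this bitmap is indexed: one bit per Rx queue,
 * packed into uint32_t words. Variable names here are illustrative only;
 * txgbe_vlan_hw_strip_bitmap_set() is assumed to use the same arithmetic.
 *
 *	uint32_t idx = queue_id / (sizeof(uint32_t) * NBBY);
 *	uint32_t bit = queue_id % (sizeof(uint32_t) * NBBY);
 *	hwstrip->bitmap[idx] |= 1 << bit;	(enable stripping)
 *	hwstrip->bitmap[idx] &= ~(1 << bit);	(disable stripping)
 */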

#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */

#define TXGBE_MAX_QUEUE_NUM_PER_VF  8

#define TXGBE_5TUPLE_MAX_PRI            7
#define TXGBE_5TUPLE_MIN_PRI            1

#define TXGBE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX)

#define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET

#define TXGBE_MAX_FDIR_FILTER_NUM       (1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM      128

/*
 * Information about the fdir mode.
 */
struct txgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct txgbe_fdir_filter {
	TAILQ_ENTRY(txgbe_fdir_filter) entries;
	struct txgbe_atr_input input; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash; /* hash value for fdir */
	uint8_t queue; /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);

struct txgbe_fdir_rule {
	struct txgbe_hw_fdir_mask mask;
	struct txgbe_atr_input input; /* key of fdir filter */
	bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
	bool b_mask; /* If TRUE, mask has meaning. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id; /* a unique value for this rule */
	uint8_t queue; /* assigned rx queue */
	uint8_t flex_bytes_offset;
};

struct txgbe_hw_fdir_info {
	struct txgbe_hw_fdir_mask mask;
	uint8_t     flex_bytes_offset;
	uint16_t    collision;
	uint16_t    free;
	uint16_t    maxhash;
	uint8_t     maxlen;
	uint64_t    add;
	uint64_t    remove;
	uint64_t    f_add;
	uint64_t    f_remove;
	struct txgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct txgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handler */
	bool mask_added; /* true if the mask was already set by a consistent filter */
};
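/*
 * Sketch of the lookup pattern implied by the fields above (illustrative,
 * not the driver's exact code): the cuckoo hash maps an atr_input key to
 * a slot index, and hash_map[] turns that index back into the filter.
 *
 *	int ret = rte_hash_lookup(info->hash_handle, (const void *)input);
 *	if (ret < 0)
 *		return NULL;
 *	return info->hash_map[ret];
 */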

struct txgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key. */
	uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};

/* structure for interrupt-related data */
struct txgbe_interrupt {
	uint32_t flags;
	uint32_t mask_misc;
	/* to save original mask during delayed handler */
	uint32_t mask_misc_orig;
	uint32_t mask[2];
};

#define TXGBE_NB_STAT_MAPPING  32
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
struct txgbe_stat_mappings {
	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
};
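/*
 * Assumed register layout behind the QMAP defines above (illustrative
 * arithmetic only): each 32-bit tqsm/rqsm word packs
 * NB_QMAP_FIELDS_PER_QSM_REG fields of QSM_REG_NB_BITS_PER_QMAP_FIELD
 * bits, with QMAP_FIELD_RESERVED_BITS_MASK selecting the writable part
 * of one field. For a given queue_id:
 *
 *	uint8_t reg = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
 *	uint8_t off = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
 *	uint32_t shift = off * QSM_REG_NB_BITS_PER_QMAP_FIELD;
 *	uint32_t mask = QMAP_FIELD_RESERVED_BITS_MASK << shift;
 */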

struct txgbe_vfta {
	uint32_t vfta[TXGBE_VFTA_SIZE];
};

struct txgbe_hwstrip {
	uint32_t bitmap[TXGBE_HWSTRIP_BITMAP_SIZE];
};

/*
 * VF data used by the PF host only
 */
#define TXGBE_MAX_VF_MC_ENTRIES      30

struct txgbe_uta_info {
	uint8_t  uc_filter_type;
	uint16_t uta_in_use;
	uint32_t uta_shadow[TXGBE_MAX_UTA];
};

#define TXGBE_MAX_MIRROR_RULES 4  /* Maximum nb. of mirror rules. */

struct txgbe_mirror_info {
	struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
	/* store PF mirror rules configuration */
};

struct txgbe_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	bool clear_to_send;
	uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t vlan_count;
	uint8_t api_version;
	uint16_t switch_domain_id;
	uint16_t xcast_mode;
	uint16_t mac_count;
};

TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);

struct txgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum txgbe_5tuple_protocol proto;        /* l4 protocol. */
	uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
				  * used when more than one filter matches.
				  */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct txgbe_5tuple_filter {
	TAILQ_ENTRY(txgbe_5tuple_filter) entries;
	uint16_t index;       /* the index of 5tuple filter */
	struct txgbe_5tuple_filter_info filter_info;
	uint16_t queue;       /* rx queue the filter is assigned to */
};

#define TXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))

struct txgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool     conf;
};

/*
 * Structure to store filters' info.
 */
struct txgbe_filter_info {
	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
	struct txgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct txgbe_rte_flow_rss_conf rss_info;
};

struct txgbe_l2_tn_key {
	enum rte_eth_tunnel_type          l2_tn_type;
	uint32_t                          tn_id;
};

struct txgbe_l2_tn_filter {
	TAILQ_ENTRY(txgbe_l2_tn_filter)    entries;
	struct txgbe_l2_tn_key             key;
	uint32_t                           pool;
};

TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);

struct txgbe_l2_tn_info {
	struct txgbe_l2_tn_filter_list      l2_tn_list;
	struct txgbe_l2_tn_filter         **hash_map;
	struct rte_hash                    *hash_handle;
	bool e_tag_en; /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};
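/*
 * The generic flow object above is a tagged pointer: filter_type selects
 * the concrete rule structure that `rule` points to. A hypothetical
 * dispatch sketch:
 *
 *	switch (flow->filter_type) {
 *	case RTE_ETH_FILTER_FDIR:
 *		... treat rule as an fdir entry ...
 *		break;
 *	case RTE_ETH_FILTER_NTUPLE:
 *		... treat rule as a 5tuple entry ...
 *		break;
 *	default:
 *		break;
 *	}
 */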

/* The configuration of bandwidth */
struct txgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct txgbe_tm_shaper_profile {
	TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum txgbe_tm_node_type {
	TXGBE_TM_NODE_TYPE_PORT,
	TXGBE_TM_NODE_TYPE_TC,
	TXGBE_TM_NODE_TYPE_QUEUE,
	TXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct txgbe_tm_node {
	TAILQ_ENTRY(txgbe_tm_node) node;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint32_t reference_count;
	uint16_t no;
	struct txgbe_tm_node *parent;
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);

/* The configuration of Traffic Manager */
struct txgbe_tm_conf {
	struct txgbe_shaper_profile_list shaper_profile_list;
	struct txgbe_tm_node *root; /* root node - port */
	struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct txgbe_tm_node_list queue_list; /* node list for all the queues */
	/**
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/**
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/**
	 * This flag indicates whether the application may change the TM
	 * node configuration.
	 * When it is true, the configuration has been applied to HW and
	 * the application should not change it.
	 * As on-the-fly configuration is not supported, the application
	 * should call the hierarchy_commit API when starting the port to
	 * set this flag to true, and the flag should be set back to
	 * false when the port is stopped.
	 */
	bool committed;
};
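/*
 * Application-side sketch (hypothetical snippet, not driver code) of the
 * generic rte_tm call that is expected to set `committed` through this
 * driver's hierarchy_commit callback:
 *
 *	struct rte_tm_error error;
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error) != 0)
 *		printf("TM commit failed: %s\n", error.message);
 *	rte_eth_dev_start(port_id);
 */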

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct txgbe_adapter {
	struct txgbe_hw             hw;
	struct txgbe_hw_stats       stats;
	struct txgbe_hw_fdir_info   fdir;
	struct txgbe_interrupt      intr;
	struct txgbe_stat_mappings  stat_mappings;
	struct txgbe_vfta           shadow_vfta;
	struct txgbe_hwstrip        hwstrip;
	struct txgbe_dcb_config     dcb_config;
	struct txgbe_mirror_info    mr_data;
	struct txgbe_vf_info        *vfdata;
	struct txgbe_uta_info       uta_info;
	struct txgbe_filter_info    filter;
	struct txgbe_l2_tn_info     l2_tn;
	struct txgbe_bw_conf        bw_conf;
#ifdef RTE_LIB_SECURITY
	struct txgbe_ipsec          ipsec;
#endif
	bool rx_bulk_alloc_allowed;
	struct rte_timecounter      systime_tc;
	struct rte_timecounter      rx_tstamp_tc;
	struct rte_timecounter      tx_tstamp_tc;
	struct txgbe_tm_conf        tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;
};

#define TXGBE_DEV_ADAPTER(dev) \
	((struct txgbe_adapter *)(dev)->data->dev_private)

#define TXGBE_DEV_HW(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)

#define TXGBE_DEV_STATS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)

#define TXGBE_DEV_INTR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)

#define TXGBE_DEV_FDIR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)

#define TXGBE_DEV_STAT_MAPPINGS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)

#define TXGBE_DEV_VFTA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->shadow_vfta)

#define TXGBE_DEV_HWSTRIP(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)

#define TXGBE_DEV_DCB_CONFIG(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)

#define TXGBE_DEV_VFDATA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)

#define TXGBE_DEV_MR_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->mr_data)

#define TXGBE_DEV_UTA_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)

#define TXGBE_DEV_FILTER(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)

#define TXGBE_DEV_L2_TN(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)

#define TXGBE_DEV_BW_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)

#define TXGBE_DEV_TM_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)

#define TXGBE_DEV_IPSEC(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
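
/*
 * Typical use of the accessor macros above inside a dev_ops callback
 * (a minimal sketch; the function name is hypothetical):
 *
 *	static int
 *	txgbe_example_op(struct rte_eth_dev *dev)
 *	{
 *		struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 *		struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 *
 *		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
 *		return hw == NULL ? -EINVAL : 0;
 *	}
 */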

/*
 * RX/TX function prototypes
 */
void txgbe_dev_clear_queues(struct rte_eth_dev *dev);

void txgbe_dev_free_queues(struct rte_eth_dev *dev);

void txgbe_dev_rx_queue_release(void *rxq);

void txgbe_dev_tx_queue_release(void *txq);

int  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int  txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int txgbe_dev_rx_init(struct rte_eth_dev *dev);

void txgbe_dev_tx_init(struct rte_eth_dev *dev);

int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);

void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);

int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);

uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf);

int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				struct rte_eth_rss_conf *rss_conf);

bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);

int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
int txgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);

/**
 * L2 tunnel configuration.
 */
struct txgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id; /* port tag id for e-tag */
	uint16_t vf_id; /* VF id for tag insertion */
	uint32_t pool; /* destination pool for tag based forwarding */
};

int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore);
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
void txgbe_filterlist_flush(void);

void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);

/*
 * Flow director function prototypes
 */
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				    uint16_t offset);
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
			      struct txgbe_fdir_rule *rule,
			      bool del, bool update);

void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);

int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete);
int txgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops txgbe_flow_ops;

void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			    uint16_t tx_rate, uint64_t q_msk);
int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
			       uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
			const struct rte_flow_action_rss *in);
int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
			  const struct rte_flow_action_rss *with);
int txgbe_config_rss_filter(struct rte_eth_dev *dev,
		struct txgbe_rte_flow_rss_conf *conf, bool add);

static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
			      uint16_t ethertype)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
			      struct txgbe_ethertype_filter *ethertype_filter)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i))
			continue;

		filter_info->ethertype_mask |= 1 << i;
		filter_info->ethertype_filters[i].ethertype =
				ethertype_filter->ethertype;
		filter_info->ethertype_filters[i].etqf =
				ethertype_filter->etqf;
		filter_info->ethertype_filters[i].etqs =
				ethertype_filter->etqs;
		filter_info->ethertype_filters[i].conf =
				ethertype_filter->conf;
		break;
	}
	return (i < TXGBE_ETF_ID_MAX ? i : -1);
}

static inline int
txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
			      uint8_t idx)
{
	if (idx >= TXGBE_ETF_ID_MAX)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
	return idx;
}
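
/*
 * Sketch of how the three helpers above compose in an add path
 * (illustrative; assumed to mirror txgbe_add_del_ethertype_filter()):
 *
 *	if (txgbe_ethertype_filter_lookup(info, eth_type) >= 0)
 *		return -EEXIST;
 *	idx = txgbe_ethertype_filter_insert(info, &new_filter);
 *	if (idx < 0)
 *		return -ENOSPC;
 *	(and on deletion: txgbe_ethertype_filter_remove(info, idx);)
 */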

#ifdef RTE_LIB_SECURITY
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
#endif

/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH              128 /* KB */
/* Low threshold controlling when to start sending XON frames. */
#define TXGBE_FC_XON_LOTH               64 /* KB */

/* Timer value included in XOFF frames. */
#define TXGBE_FC_PAUSE_TIME 0x680

#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

/*
 *  Default values for RX/TX configuration
 */
#define TXGBE_DEFAULT_RX_FREE_THRESH  32
#define TXGBE_DEFAULT_RX_PTHRESH      8
#define TXGBE_DEFAULT_RX_HTHRESH      8
#define TXGBE_DEFAULT_RX_WTHRESH      0

#define TXGBE_DEFAULT_TX_FREE_THRESH  32
#define TXGBE_DEFAULT_TX_PTHRESH      32
#define TXGBE_DEFAULT_TX_HTHRESH      0
#define TXGBE_DEFAULT_TX_WTHRESH      0

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define TXGBE_INCVAL_10GB        0xCCCCCC
#define TXGBE_INCVAL_1GB         0x800000
#define TXGBE_INCVAL_100         0xA00000
#define TXGBE_INCVAL_10          0xC7F380
#define TXGBE_INCVAL_FPGA        0x800000
#define TXGBE_INCVAL_SHIFT_10GB  20
#define TXGBE_INCVAL_SHIFT_1GB   18
#define TXGBE_INCVAL_SHIFT_100   15
#define TXGBE_INCVAL_SHIFT_10    12
#define TXGBE_INCVAL_SHIFT_FPGA  17

#define TXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
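
/*
 * Sketch of how the INCVAL shift values are assumed to initialize the
 * rte_timecounter fields in struct txgbe_adapter (10G case shown;
 * illustrative, not the driver's exact init code):
 *
 *	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
 *	adapter->systime_tc.cc_shift = TXGBE_INCVAL_SHIFT_10GB;
 *	adapter->systime_tc.nsec_mask = (1ULL << TXGBE_INCVAL_SHIFT_10GB) - 1;
 */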

/* stores statistics names and their offsets in the stats structure */
struct rte_txgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
void txgbe_dev_setup_link_alarm_handler(void *param);
void txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats);

void txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
void txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
						  int mask);

#endif /* _TXGBE_ETHDEV_H_ */