2019-01-18 09:27:45 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
|
|
|
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2017-04-21 10:43:26 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <rte_eal.h>
|
|
|
|
|
|
|
|
#include <rte_common.h>
|
|
|
|
#include <rte_debug.h>
|
|
|
|
#include <rte_errno.h>
|
|
|
|
#include <rte_ethdev.h>
|
|
|
|
#include <rte_launch.h>
|
|
|
|
#include <rte_lcore.h>
|
|
|
|
#include <rte_log.h>
|
|
|
|
#include <rte_mbuf.h>
|
|
|
|
#include <rte_ring.h>
|
|
|
|
|
|
|
|
#include <rte_byteorder.h>
|
|
|
|
|
|
|
|
#include "args.h"
|
|
|
|
#include "main.h"
|
|
|
|
#include "init.h"
|
|
|
|
#include "../include/conf.h"
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Software flow control: when QW_SOFTWARE_FC is defined (see conf.h),
 * SEND_PAUSE_FRAME() emits an 802.3x PAUSE frame; otherwise it compiles
 * to a no-op so the fast path carries no cost.
 */
#ifdef QW_SOFTWARE_FC
#define SEND_PAUSE_FRAME(port_id, duration) send_pause_frame(port_id, duration)
#else
#define SEND_PAUSE_FRAME(port_id, duration) do { } while(0)
#endif

/* EtherType of IEEE 802.3x MAC control (flow control / PAUSE) frames */
#define ETHER_TYPE_FLOW_CONTROL 0x8808
|
|
|
|
|
|
|
|
/*
 * Payload of an IEEE 802.3x MAC control frame (follows the Ethernet
 * header). Both fields are transmitted in network byte order; the
 * struct is packed so it maps directly onto the wire format.
 */
struct ether_fc_frame {
	uint16_t opcode;	/* MAC control opcode; 0x0001 = PAUSE */
	uint16_t param;		/* PAUSE duration, in 512 bit-time quanta */
} __attribute__((__packed__));
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Runtime-tunable parameters, shared with the control process through
 * memory set up by setup_shared_variables() (see init.c). They are
 * pointers into that shared region, not plain globals.
 */
int *quota;			/* max packets moved per burst */
unsigned int *low_watermark;	/* ring level below which a port resumes */
unsigned int *high_watermark;	/* ring level above which a port pauses */

/* port_pairs[rx_port] = tx_port peer, filled in by pair_ports() */
uint16_t port_pairs[RTE_MAX_ETHPORTS];

/* rings[lcore][port]: per-stage ring carrying packets between cores */
struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
/* pool backing all received/transmitted mbufs */
struct rte_mempool *mbuf_pool;
|
|
|
|
|
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
static void send_pause_frame(uint16_t port_id, uint16_t duration)
|
2017-04-21 10:43:26 +00:00
|
|
|
{
|
2018-05-15 09:49:22 +00:00
|
|
|
struct rte_mbuf *mbuf;
|
|
|
|
struct ether_fc_frame *pause_frame;
|
|
|
|
struct ether_hdr *hdr;
|
|
|
|
struct ether_addr mac_addr;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
RTE_LOG_DP(DEBUG, USER1,
|
|
|
|
"Sending PAUSE frame (duration=%d) on port %d\n",
|
|
|
|
duration, port_id);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
/* Get a mbuf from the pool */
|
|
|
|
mbuf = rte_pktmbuf_alloc(mbuf_pool);
|
|
|
|
if (unlikely(mbuf == NULL))
|
|
|
|
return;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
/* Prepare a PAUSE frame */
|
|
|
|
hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
|
|
|
|
pause_frame = (struct ether_fc_frame *) &hdr[1];
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
rte_eth_macaddr_get(port_id, &mac_addr);
|
|
|
|
ether_addr_copy(&mac_addr, &hdr->s_addr);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
void *tmp = &hdr->d_addr.addr_bytes[0];
|
|
|
|
*((uint64_t *)tmp) = 0x010000C28001ULL;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
pause_frame->opcode = rte_cpu_to_be_16(0x0001);
|
|
|
|
pause_frame->param = rte_cpu_to_be_16(duration);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
mbuf->pkt_len = 60;
|
|
|
|
mbuf->data_len = 60;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
rte_eth_tx_burst(port_id, 0, &mbuf, 1);
|
2017-04-21 10:43:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Get the previous enabled lcore ID
 *
 * @param lcore_id
 *   The current lcore ID.
 * @return
 *   The previous enabled lcore_id or -1 if not found.
 *
 * NOTE(review): the return type is unsigned, so the -1 "not found"
 * sentinel comes back as UINT_MAX — callers must not use it blindly
 * as an array index. Kept as-is to preserve the existing interface.
 */
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
	int candidate = (int)lcore_id - 1;

	while (candidate >= 0) {
		if (rte_lcore_is_enabled(candidate))
			return candidate;
		candidate--;
	}

	return -1;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the last enabled lcore ID
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* The last enabled lcore_id.
|
|
|
|
*/
|
|
|
|
static unsigned int
|
|
|
|
get_last_lcore_id(void)
|
|
|
|
{
|
2018-05-15 09:49:22 +00:00
|
|
|
int i;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
for (i = RTE_MAX_LCORE; i >= 0; i--)
|
|
|
|
if (rte_lcore_is_enabled(i))
|
|
|
|
return i;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
return 0;
|
2017-04-21 10:43:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * First pipeline stage, run on the master lcore: poll every enabled port,
 * push received packets onto this core's per-port ring, and apply the
 * high/low watermark hysteresis. When a ring fills past *high_watermark
 * the port is marked RING_OVERLOADED and a PAUSE frame is sent; the port
 * is polled again only once the ring drains below *low_watermark.
 * Never returns.
 */
static void
receive_stage(__attribute__((unused)) void *args)
{
	int i, ret;

	uint16_t port_id;
	uint16_t nb_rx_pkts;

	unsigned int lcore_id;
	unsigned int free;

	struct rte_mbuf *pkts[MAX_PKT_QUOTA];
	struct rte_ring *ring;
	/*
	 * NOTE(review): only element [0] is explicitly set to RING_READY;
	 * the rest are zero-initialized — correct only if RING_READY is
	 * the enum's zero value (declared elsewhere; verify in main.h).
	 */
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();

	RTE_LOG(INFO, USER1,
			"%s() started on core %u\n", __func__, lcore_id);

	while (1) {

		/* Process each port round robin style */
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			ring = rings[lcore_id][port_id];

			/* Overloaded port: skip until ring drains below low watermark */
			if (ring_state[port_id] != RING_READY) {
				if (rte_ring_count(ring) > *low_watermark)
					continue;
				else
					ring_state[port_id] = RING_READY;
			}

			/* Enqueue received packets on the RX ring */
			nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
					(uint16_t) *quota);
			/* Bulk enqueue is all-or-nothing: ret == 0 means none fit */
			ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
					nb_rx_pkts, &free);
			/* free = remaining slots, so RING_SIZE - free = occupancy */
			if (RING_SIZE - free > *high_watermark) {
				ring_state[port_id] = RING_OVERLOADED;
				/* ask the peer to back off (duration in quanta) */
				send_pause_frame(port_id, 1337);
			}

			if (ret == 0) {

				/*
				 * Return mbufs to the pool,
				 * effectively dropping packets
				 */
				for (i = 0; i < nb_rx_pkts; i++)
					rte_pktmbuf_free(pkts[i]);
			}
		}
	}
}
|
|
|
|
|
2018-11-21 08:34:11 +00:00
|
|
|
/*
 * Intermediate pipeline stage, launched on every slave lcore except the
 * last: move packets from the previous core's per-port ring (rx) to this
 * core's ring (tx), applying the same high/low watermark hysteresis as
 * receive_stage() but without sending PAUSE frames. Loops forever; the
 * trailing return only satisfies the lcore-function signature.
 *
 * @param args  unused (rte_eal_remote_launch passes NULL).
 * @return      never reached in practice; 0 for the launch API.
 */
static int
pipeline_stage(__attribute__((unused)) void *args)
{
	int i, ret;
	int nb_dq_pkts;

	uint16_t port_id;

	unsigned int lcore_id, previous_lcore_id;
	unsigned int free;

	void *pkts[MAX_PKT_QUOTA];
	struct rte_ring *rx, *tx;
	/*
	 * NOTE(review): only element [0] is explicitly RING_READY; the
	 * rest are zero-initialized — correct only if RING_READY == 0.
	 */
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();
	previous_lcore_id = get_previous_lcore_id(lcore_id);

	RTE_LOG(INFO, USER1,
			"%s() started on core %u - processing packets from core %u\n",
			__func__, lcore_id, previous_lcore_id);

	while (1) {

		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			tx = rings[lcore_id][port_id];
			rx = rings[previous_lcore_id][port_id];

			/* Paused port: wait for tx ring to drain below low watermark */
			if (ring_state[port_id] != RING_READY) {
				if (rte_ring_count(tx) > *low_watermark)
					continue;
				else
					ring_state[port_id] = RING_READY;
			}

			/* Dequeue up to quota mbuf from rx */
			nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
					*quota, NULL);
			/*
			 * NOTE(review): rte_ring_dequeue_burst returns an
			 * unsigned count, so this check can never fire; kept
			 * for byte-identical behavior.
			 */
			if (unlikely(nb_dq_pkts < 0))
				continue;

			/* Enqueue them on tx */
			ret = rte_ring_enqueue_bulk(tx, pkts,
					nb_dq_pkts, &free);
			/* free = remaining slots, so RING_SIZE - free = occupancy */
			if (RING_SIZE - free > *high_watermark)
				ring_state[port_id] = RING_OVERLOADED;

			if (ret == 0) {

				/*
				 * Return mbufs to the pool,
				 * effectively dropping packets
				 */
				for (i = 0; i < nb_dq_pkts; i++)
					rte_pktmbuf_free(pkts[i]);
			}
		}
	}

	return 0;
}
|
|
|
|
|
2018-11-21 08:34:11 +00:00
|
|
|
static int
|
2017-04-21 10:43:26 +00:00
|
|
|
send_stage(__attribute__((unused)) void *args)
|
|
|
|
{
|
|
|
|
uint16_t nb_dq_pkts;
|
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
uint16_t port_id;
|
|
|
|
uint16_t dest_port_id;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
unsigned int lcore_id, previous_lcore_id;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
struct rte_ring *tx;
|
|
|
|
struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
lcore_id = rte_lcore_id();
|
|
|
|
previous_lcore_id = get_previous_lcore_id(lcore_id);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
RTE_LOG(INFO, USER1,
|
|
|
|
"%s() started on core %u - processing packets from core %u\n",
|
|
|
|
__func__, lcore_id, previous_lcore_id);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
while (1) {
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
/* Process each ring round robin style */
|
|
|
|
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
if (!is_bit_set(port_id, portmask))
|
|
|
|
continue;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
dest_port_id = port_pairs[port_id];
|
|
|
|
tx = rings[previous_lcore_id][port_id];
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
if (rte_ring_empty(tx))
|
|
|
|
continue;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
/* Dequeue packets from tx and send them */
|
|
|
|
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
|
|
|
|
(void *) tx_pkts, *quota, NULL);
|
|
|
|
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
2018-05-15 09:49:22 +00:00
|
|
|
/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
|
|
|
|
}
|
|
|
|
}
|
2018-11-21 08:34:11 +00:00
|
|
|
|
|
|
|
return 0;
|
2017-04-21 10:43:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Entry point: initialize EAL and the app's shared state, configure the
 * ports selected by the portmask, then wire the pipeline —
 * pipeline_stage() on every slave lcore but the last, send_stage() on
 * the last slave, and receive_stage() on the master core (which never
 * returns in normal operation).
 */
int
main(int argc, char **argv)
{
	int ret;
	unsigned int lcore_id, master_lcore_id, last_lcore_id;

	uint16_t port_id;

	rte_log_set_global_level(RTE_LOG_INFO);

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

	/* Skip the EAL arguments consumed by rte_eal_init() */
	argc -= ret;
	argv += ret;

	init_dpdk();
	setup_shared_variables();

	/*
	 * Defaults set before parse_qw_args() so command-line values can
	 * override them. NOTE(review): *high_watermark is not initialized
	 * here — presumably parse_qw_args() always sets it; verify in args.c.
	 */
	*quota = 32;
	*low_watermark = 60 * RING_SIZE / 100;

	last_lcore_id = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	/* Parse the application's arguments */
	ret = parse_qw_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");

	/* Create a pool of mbuf to store packets */
	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
			MBUF_DATA_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_panic("%s\n", rte_strerror(rte_errno));

	/* Bring up each selected port and its master-core ring */
	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
		if (is_bit_set(port_id, portmask)) {
			configure_eth_port(port_id);
			init_ring(master_lcore_id, port_id);
		}

	pair_ports();

	/*
	 * Start pipeline_connect() on all the available slave lcores
	 * but the last
	 */
	for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) &&
				lcore_id != master_lcore_id) {

			for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
				if (is_bit_set(port_id, portmask))
					init_ring(lcore_id, port_id);

			rte_eal_remote_launch(pipeline_stage,
					NULL, lcore_id);
		}
	}

	/* Start send_stage() on the last slave core */
	rte_eal_remote_launch(send_stage, NULL, last_lcore_id);

	/* Start receive_stage() on the master core */
	receive_stage(NULL);

	return 0;
}
|