F-Stack supports HPTS for rack/bbr. rack works correctly, but bbr still
has some problems when transferring large files; we will try to resolve
the bbr issue after FreeBSD is upgraded to 13.1 in the next release (1.23).
fengbojiang 2022-09-01 19:18:12 +08:00
parent fa1a923248
commit ae7ea12bc0
3 changed files with 33 additions and 32 deletions


@ -161,6 +161,7 @@ gateway=192.168.1.1
# FreeBSD network performance tuning configurations.
# Most native FreeBSD configurations are supported.
[freebsd.boot]
# If you use rack or bbr, which depend on HPTS, set a larger hz value; e.g. hz=100000 means one tick is 10us.
hz=100
# Block out a range of descriptors to avoid overlap
@ -210,5 +211,5 @@ net.inet.udp.blackhole=1
net.inet.ip.redirect=0
net.inet.ip.forwarding=0
#set default stacks:freebsd, rack or bbr
# Set the default stack: freebsd, rack or bbr. You may need to increase 'freebsd.boot.hz' when using rack or bbr.
net.inet.tcp.functions_default=freebsd
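
For reference, a minimal sketch of how the two knobs touched here would be set when actually enabling rack (bbr is configured the same way), assuming they live in the stock [freebsd.boot] and [freebsd.sysctl] sections of config.ini; the values are illustrative only:

    [freebsd.boot]
    # rack/bbr depend on HPTS, so the default 100 Hz tick is too coarse;
    # hz=100000 gives one tick every 10us.
    hz=100000

    [freebsd.sysctl]
    # switch the default TCP stack from freebsd to rack (or bbr)
    net.inet.tcp.functions_default=rack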


@ -358,7 +358,7 @@ init_mem_pool(void)
} else {
printf("create mbuf pool on socket %d\n", socketid);
}
#ifdef FF_USE_PAGE_ARRAY
nb_mbuf = RTE_ALIGN_CEIL (
nb_ports*nb_lcores*MAX_PKT_BURST +
@ -827,8 +827,8 @@ init_clock(void)
{
rte_timer_subsystem_init();
uint64_t hz = rte_get_timer_hz();
uint64_t intrs = MS_PER_S/ff_global_cfg.freebsd.hz;
uint64_t tsc = (hz + MS_PER_S - 1) / MS_PER_S*intrs;
uint64_t intrs = US_PER_S / ff_global_cfg.freebsd.hz;
uint64_t tsc = (hz + US_PER_S - 1) / US_PER_S * intrs;
rte_timer_init(&freebsd_clock);
rte_timer_reset(&freebsd_clock, tsc, PERIODICAL,
@ -866,7 +866,7 @@ port_flow_complain(struct rte_flow_error *error)
const char *errstr;
char buf[32];
int err = rte_errno;
if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
!errstrlist[error->type])
errstr = "unknown type";
@ -885,7 +885,7 @@ static int
port_flow_isolate(uint16_t port_id, int set)
{
struct rte_flow_error error;
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x66, sizeof(error));
if (rte_flow_isolate(port_id, set, &error))
@ -1088,8 +1088,8 @@ ff_dpdk_init(int argc, char **argv)
#ifdef FF_USE_PAGE_ARRAY
ff_mmap_init();
#endif
#ifdef FF_FLOW_ISOLATE
// run once in primary process
if (0 == lcore_conf.tx_queue_id[0]){
ret = port_flow_isolate(0, 1);
@ -1097,7 +1097,7 @@ ff_dpdk_init(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_port_isolate failed\n");
}
#endif
ret = init_port_start();
if (ret < 0) {
rte_exit(EXIT_FAILURE, "init_port_start failed\n");
@ -1105,8 +1105,8 @@ ff_dpdk_init(int argc, char **argv)
init_clock();
#ifdef FF_FLOW_ISOLATE
//Only gives an example usage: port_id=0, tcp_port=80.
//Recommendations:
//1. init_flow should replace `set_rss_table` in the `init_port_start` loop, so the flow rule is installed on every NIC in port_id_list instead of only device 0 (port_id).
//2. use a config option `tcp_port` instead of the magic number 80.
ret = init_flow(0, 80);
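
A hedged sketch of what recommendations 1 and 2 above could look like; the tcp_port option, the nb_ports/portid_list members and the loop placement are assumptions for illustration, not code from this commit:

    /* Hypothetical follow-up: take the TCP port from config and install the
     * isolate flow on every configured NIC instead of only port 0. */
    uint16_t i;
    uint16_t tcp_port = ff_global_cfg.freebsd.tcp_port;              /* assumed new option */
    for (i = 0; i < ff_global_cfg.dpdk.nb_ports; i++) {              /* assumed port count */
        ret = init_flow(ff_global_cfg.dpdk.portid_list[i], tcp_port); /* assumed port list  */
        if (ret < 0) {
            rte_exit(EXIT_FAILURE, "init_flow failed\n");
        }
    }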
@ -1505,7 +1505,7 @@ handle_ipfw_msg(struct ff_msg *msg)
case FF_IPFW_SET:
ret = ff_setsockopt_freebsd(fd, msg->ipfw.level,
msg->ipfw.optname, msg->ipfw.optval,
*(msg->ipfw.optlen));
break;
default:
ret = -1;
@ -1644,11 +1644,11 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
if (unlikely(ff_global_cfg.pcap.enable)) {
uint16_t i;
for (i = 0; i < n; i++) {
ff_dump_packets( ff_global_cfg.pcap.save_path, m_table[i],
ff_global_cfg.pcap.snap_len, ff_global_cfg.pcap.save_len);
}
}
ret = rte_eth_tx_burst(port, queueid, m_table, n);
ff_traffic.tx_packets += ret;
uint16_t i;
@ -1658,7 +1658,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
if (qconf->tx_mbufs[port].bsd_m_table[i])
ff_enq_tx_bsdmbuf(port, qconf->tx_mbufs[port].bsd_m_table[i], m_table[i]->nb_segs);
#endif
}
if (unlikely(ret < n)) {
do {
rte_pktmbuf_free(m_table[ret]);
@ -1700,7 +1700,7 @@ ff_dpdk_if_send(struct ff_dpdk_if_context *ctx, void *m,
#ifdef FF_USE_PAGE_ARRAY
struct lcore_conf *qconf = &lcore_conf;
int len = 0;
len = ff_if_send_onepkt(ctx, m,total);
if (unlikely(len == MAX_PKT_BURST)) {
send_burst(qconf, MAX_PKT_BURST, ctx->port_id);


@ -38,7 +38,7 @@
*
* From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
*
* Derived in part from libplebnet's pn_kern_timeout.c and libuinet's uinet_timecounter.c.
*
*/
@ -393,7 +393,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
struct lock_object *c_lock;
uintptr_t lock_status;
int c_iflags;
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
sbintime_t sbt1, sbt2;
struct timespec ts2;
static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
@ -420,7 +420,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
c->c_iflags = CALLOUT_LOCAL_ALLOC;
else
c->c_iflags &= ~CALLOUT_PENDING;
cc_exec_curr(cc, direct) = c;
cc_exec_cancel(cc, direct) = false;
cc_exec_drain(cc, direct) = NULL;
@ -491,7 +491,7 @@ skip:
cc_exec_curr(cc, direct) = NULL;
if (cc_exec_drain(cc, direct)) {
void (*drain)(void *);
drain = cc_exec_drain(cc, direct);
cc_exec_drain(cc, direct) = NULL;
CC_UNLOCK(cc);
@ -706,7 +706,7 @@ callout_reset_tick_on(struct callout *c, int to_ticks,
panic("Invalid CPU in callout %d", cpu);
}
/*
* This flag used to be added by callout_cc_add, but the
* first time you call this we could end up with the
* wrong direct flag if we don't do it before we add.
@ -722,7 +722,7 @@ callout_reset_tick_on(struct callout *c, int to_ticks,
/*
* Don't allow migration of pre-allocated callouts lest they
* become unbalanced or handle the case where the user does
* not care.
*/
if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
ignore_cpu) {
@ -924,7 +924,7 @@ again:
}
} else if (use_lock &&
!cc_exec_cancel(cc, direct) && (drain == NULL)) {
/*
* The current callout is waiting for its
* lock which we hold. Cancel the callout
@ -1039,18 +1039,18 @@ _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
}
#ifdef APM_FIXUP_CALLTODO
/*
* Adjust the kernel calltodo timeout list. This routine is used after
* an APM resume to recalculate the calltodo timer list values with the
* number of hz's we have been sleeping. The next hardclock() will detect
* that there are fired timers and run softclock() to execute them.
*
* Please note, I have not done an exhaustive analysis of what code this
* might break. I am motivated to have my select()'s and alarm()'s that
* have expired during suspend firing upon resume so that the applications
* which set the timer can do the maintanence the timer was for as close
* as possible to the originally intended time. Testing this code for a
* week showed that resuming from a suspend resulted in 22 to 25 timers
* firing, which seemed independent on whether the suspend was 2 hours or
* 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu>
*/
@ -1060,7 +1060,7 @@ adjust_timeout_calltodo(struct timeval *time_change)
register struct callout *p;
unsigned long delta_ticks;
/*
* How many ticks were we asleep?
* (stolen from tvtohz()).
*/
@ -1080,7 +1080,7 @@ adjust_timeout_calltodo(struct timeval *time_change)
if (delta_ticks > INT_MAX)
delta_ticks = INT_MAX;
/*
* Now rip through the timer calltodo list looking for timers
* to expire.
*/
@ -1205,7 +1205,7 @@ ff_hardclock(void)
{
atomic_add_int(&ticks, 1);
callout_tick();
tc_ticktock(1);
tc_ticktock((hz + 999)/1000);
cpu_tick_calibration();
#ifdef DEVICE_POLLING
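
For the tc_ticktock() change above, the new argument is just an integer ceiling of hz/1000; a few worked values (the behaviour of tc_ticktock() itself is untouched):

    /* (hz + 999) / 1000 == ceil(hz / 1000):
     *   hz = 100    -> 1   (same as the old hard-coded tc_ticktock(1))
     *   hz = 1000   -> 1
     *   hz = 100000 -> 100
     * so the per-call tick count now follows the configured freebsd.boot.hz
     * instead of being fixed at 1. */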