f-stack/dpdk/lib/vhost/socket.c

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/queue.h>
#include <errno.h>
#include <fcntl.h>
#include <rte_thread.h>
#include <rte_log.h>
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
#include "vhost_user.h"
TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
/*
* Every time rte_vhost_driver_register() is invoked, an associated
* vhost_user_socket struct will be created.
*/
struct vhost_user_socket {
struct vhost_user_connection_list conn_list;
pthread_mutex_t conn_mutex;
char *path;
int socket_fd;
struct sockaddr_un un;
bool is_server;
bool is_vduse;
bool reconnect;
bool iommu_support;
bool use_builtin_virtio_net;
bool extbuf;
bool linearbuf;
bool async_copy;
bool net_compliant_ol_flags;
bool stats_enabled;
/*
* "supported_features" holds the feature bits the vhost driver
* supports. "features" holds the feature bits remaining after
* rte_vhost_driver_disable_features()/rte_vhost_driver_enable_features()
* have been applied; these are also the final feature bits used for
* vhost-user feature negotiation.
*/
uint64_t supported_features;
uint64_t features;
uint64_t protocol_features;
uint32_t max_queue_pairs;
struct rte_vdpa_device *vdpa_dev;
struct rte_vhost_device_ops const *notify_ops;
};
struct vhost_user_connection {
struct vhost_user_socket *vsocket;
int connfd;
int vid;
TAILQ_ENTRY(vhost_user_connection) next;
};
#define MAX_VHOST_SOCKET 1024
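/*
* Process-wide vhost-user state: every socket registered through
* rte_vhost_driver_register(), plus the shared fdset polled by a single
* event-dispatch thread for all listen and connection fds.
*/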
struct vhost_user {
struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
struct fdset fdset;
int vsocket_cnt;
pthread_mutex_t mutex;
};
#define MAX_VIRTIO_BACKLOG 128
static void vhost_user_server_new_connection(int fd, void *data, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
static int create_unix_socket(struct vhost_user_socket *vsocket);
static int vhost_user_start_client(struct vhost_user_socket *vsocket);
static struct vhost_user vhost_user = {
.fdset = {
.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
.sync_mutex = PTHREAD_MUTEX_INITIALIZER,
.num = 0
},
.vsocket_cnt = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER,
};
/*
* Return the number of bytes read on success, or a negative value on
* failure. Update *fd_num with the number of fds received.
*/
int
read_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int max_fds,
int *fd_num)
{
struct iovec iov;
struct msghdr msgh;
char control[CMSG_SPACE(max_fds * sizeof(int))];
struct cmsghdr *cmsg;
int got_fds = 0;
int ret;
*fd_num = 0;
memset(&msgh, 0, sizeof(msgh));
iov.iov_base = buf;
iov.iov_len = buflen;
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
msgh.msg_control = control;
msgh.msg_controllen = sizeof(control);
ret = recvmsg(sockfd, &msgh, 0);
if (ret <= 0) {
if (ret)
VHOST_LOG_CONFIG(ifname, ERR, "recvmsg failed on fd %d (%s)\n",
sockfd, strerror(errno));
return ret;
}
if (msgh.msg_flags & MSG_TRUNC)
VHOST_LOG_CONFIG(ifname, ERR, "truncated msg (fd %d)\n", sockfd);
/* MSG_CTRUNC may be caused by LSM misconfiguration */
if (msgh.msg_flags & MSG_CTRUNC)
VHOST_LOG_CONFIG(ifname, ERR, "truncated control data (fd %d)\n", sockfd);
for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
if ((cmsg->cmsg_level == SOL_SOCKET) &&
(cmsg->cmsg_type == SCM_RIGHTS)) {
got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
*fd_num = got_fds;
memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
break;
}
}
/* Clear out unused file descriptors */
while (got_fds < max_fds)
fds[got_fds++] = -1;
return ret;
}
int
send_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
struct iovec iov;
struct msghdr msgh;
size_t fdsize = fd_num * sizeof(int);
char control[CMSG_SPACE(fdsize)];
struct cmsghdr *cmsg;
int ret;
memset(&msgh, 0, sizeof(msgh));
iov.iov_base = buf;
iov.iov_len = buflen;
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
if (fds && fd_num > 0) {
msgh.msg_control = control;
msgh.msg_controllen = sizeof(control);
cmsg = CMSG_FIRSTHDR(&msgh);
if (cmsg == NULL) {
VHOST_LOG_CONFIG(ifname, ERR, "cmsg == NULL\n");
errno = EINVAL;
return -1;
}
cmsg->cmsg_len = CMSG_LEN(fdsize);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
memcpy(CMSG_DATA(cmsg), fds, fdsize);
} else {
msgh.msg_control = NULL;
msgh.msg_controllen = 0;
}
do {
ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL);
} while (ret < 0 && errno == EINTR);
if (ret < 0) {
VHOST_LOG_CONFIG(ifname, ERR, "sendmsg error on fd %d (%s)\n",
sockfd, strerror(errno));
return ret;
}
return ret;
}
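/*
* Illustrative sketch (not called anywhere in this file): the two helpers
* above wrap SCM_RIGHTS ancillary data, which is how vhost-user passes
* file descriptors over the unix socket. A caller sending one fd (say, an
* fd backing guest memory) along with a message payload might look like:
*
*	if (send_fd_message(ifname, sockfd, payload, sizeof(payload),
*			&fd_to_share, 1) < 0)
*		handle_error();
*
* where payload, fd_to_share and handle_error() are hypothetical caller
* state. The peer recovers the fd with read_fd_message(), which fills
* fds[] and sets *fd_num to the number of descriptors actually received.
*/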
static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
int vid;
size_t size;
struct vhost_user_connection *conn;
int ret;
struct virtio_net *dev;
if (vsocket == NULL)
return;
conn = malloc(sizeof(*conn));
if (conn == NULL) {
close(fd);
return;
}
vid = vhost_user_new_device();
if (vid == -1) {
goto err;
}
size = strnlen(vsocket->path, PATH_MAX);
vhost_set_ifname(vid, vsocket->path, size);
vhost_setup_virtio_net(vid, vsocket->use_builtin_virtio_net,
vsocket->net_compliant_ol_flags, vsocket->stats_enabled,
vsocket->iommu_support);
vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
if (vsocket->extbuf)
vhost_enable_extbuf(vid);
if (vsocket->linearbuf)
vhost_enable_linearbuf(vid);
if (vsocket->async_copy) {
dev = get_device(vid);
if (dev)
dev->async_copy = 1;
}
VHOST_LOG_CONFIG(vsocket->path, INFO, "new device, handle is %d\n", vid);
if (vsocket->notify_ops->new_connection) {
ret = vsocket->notify_ops->new_connection(vid);
if (ret < 0) {
VHOST_LOG_CONFIG(vsocket->path, ERR,
"failed to add vhost user connection with fd %d\n",
fd);
goto err_cleanup;
}
}
conn->connfd = fd;
conn->vsocket = vsocket;
conn->vid = vid;
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
NULL, conn);
if (ret < 0) {
VHOST_LOG_CONFIG(vsocket->path, ERR,
"failed to add fd %d into vhost server fdset\n",
fd);
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
goto err_cleanup;
}
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
fdset_pipe_notify(&vhost_user.fdset);
return;
err_cleanup:
vhost_destroy_device(vid);
err:
free(conn);
close(fd);
}
/* Callback invoked when there is a new vhost-user connection from a client. */
static void
vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
{
struct vhost_user_socket *vsocket = dat;
fd = accept(fd, NULL, NULL);
if (fd < 0)
return;
VHOST_LOG_CONFIG(vsocket->path, INFO, "new vhost user connection is %d\n", fd);
vhost_user_add_connection(fd, vsocket);
}
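/*
* Per-connection read callback: dispatch incoming vhost-user messages and,
* on error or disconnect, tear the connection down. In client mode with
* reconnect enabled, a fresh socket is created and queued for reconnection.
*/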
static void
vhost_user_read_cb(int connfd, void *dat, int *remove)
{
struct vhost_user_connection *conn = dat;
struct vhost_user_socket *vsocket = conn->vsocket;
int ret;
ret = vhost_user_msg_handler(conn->vid, connfd);
if (ret < 0) {
struct virtio_net *dev = get_device(conn->vid);
close(connfd);
*remove = 1;
if (dev)
vhost_destroy_device_notify(dev);
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
vhost_destroy_device(conn->vid);
if (vsocket->reconnect) {
create_unix_socket(vsocket);
vhost_user_start_client(vsocket);
}
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
free(conn);
}
}
static int
create_unix_socket(struct vhost_user_socket *vsocket)
{
int fd;
struct sockaddr_un *un = &vsocket->un;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0)
return -1;
VHOST_LOG_CONFIG(vsocket->path, INFO, "vhost-user %s: socket created, fd: %d\n",
vsocket->is_server ? "server" : "client", fd);
if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
VHOST_LOG_CONFIG(vsocket->path, ERR,
"vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\n",
fd, strerror(errno));
close(fd);
return -1;
}
memset(un, 0, sizeof(*un));
un->sun_family = AF_UNIX;
strncpy(un->sun_path, vsocket->path, sizeof(un->sun_path));
un->sun_path[sizeof(un->sun_path) - 1] = '\0';
vsocket->socket_fd = fd;
return 0;
}
static int
vhost_user_start_server(struct vhost_user_socket *vsocket)
{
int ret;
int fd = vsocket->socket_fd;
const char *path = vsocket->path;
/*
* bind() may fail if a socket file with the same name already
* exists. But the library obviously should not delete the file
* provided by the user, since we cannot be sure that it is not
* being used by other applications. Moreover, many applications form
* socket names based on user input, which is prone to errors.
*
* The user must ensure that the socket does not exist before
* registering the vhost driver in server mode.
*/
ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
if (ret < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to bind: %s; remove it and try again\n",
strerror(errno));
goto err;
}
VHOST_LOG_CONFIG(path, INFO, "binding succeeded\n");
ret = listen(fd, MAX_VIRTIO_BACKLOG);
if (ret < 0)
goto err;
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
NULL, vsocket);
if (ret < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to add listen fd %d to vhost server fdset\n",
fd);
goto err;
}
return 0;
err:
close(fd);
return -1;
}
struct vhost_user_reconnect {
struct sockaddr_un un;
int fd;
struct vhost_user_socket *vsocket;
TAILQ_ENTRY(vhost_user_reconnect) next;
};
TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
struct vhost_user_reconnect_tailq_list head;
pthread_mutex_t mutex;
};
static struct vhost_user_reconnect_list reconn_list;
static rte_thread_t reconn_tid;
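/*
* Return 0 once the socket is connected, -1 when the connection is not yet
* established (the caller should retry later), and -2 on an unrecoverable
* error such as failing to clear O_NONBLOCK.
*/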
static int
vhost_user_connect_nonblock(char *path, int fd, struct sockaddr *un, size_t sz)
{
int ret, flags;
ret = connect(fd, un, sz);
if (ret < 0 && errno != EISCONN)
return -1;
flags = fcntl(fd, F_GETFL, 0);
if (flags < 0) {
VHOST_LOG_CONFIG(path, ERR, "can't get flags for connfd %d (%s)\n",
fd, strerror(errno));
return -2;
}
if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
VHOST_LOG_CONFIG(path, ERR, "can't disable nonblocking on fd %d\n", fd);
return -2;
}
return 0;
}
static uint32_t
vhost_user_client_reconnect(void *arg __rte_unused)
{
int ret;
struct vhost_user_reconnect *reconn, *next;
while (1) {
pthread_mutex_lock(&reconn_list.mutex);
/*
* An equivalent of TAILQ_FOREACH_SAFE, which does not
* exist on all platforms.
*/
for (reconn = TAILQ_FIRST(&reconn_list.head);
reconn != NULL; reconn = next) {
next = TAILQ_NEXT(reconn, next);
ret = vhost_user_connect_nonblock(reconn->vsocket->path, reconn->fd,
(struct sockaddr *)&reconn->un,
sizeof(reconn->un));
if (ret == -2) {
close(reconn->fd);
VHOST_LOG_CONFIG(reconn->vsocket->path, ERR,
"reconnection for fd %d failed\n",
reconn->fd);
goto remove_fd;
}
if (ret == -1)
continue;
VHOST_LOG_CONFIG(reconn->vsocket->path, INFO, "connected\n");
vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
TAILQ_REMOVE(&reconn_list.head, reconn, next);
free(reconn);
}
pthread_mutex_unlock(&reconn_list.mutex);
sleep(1);
}
return 0;
}
static int
vhost_user_reconnect_init(void)
{
int ret;
ret = pthread_mutex_init(&reconn_list.mutex, NULL);
if (ret < 0) {
VHOST_LOG_CONFIG("thread", ERR, "%s: failed to initialize mutex\n", __func__);
return ret;
}
TAILQ_INIT(&reconn_list.head);
ret = rte_thread_create_internal_control(&reconn_tid, "vhost-reco",
vhost_user_client_reconnect, NULL);
if (ret != 0) {
VHOST_LOG_CONFIG("thread", ERR, "failed to create reconnect thread\n");
if (pthread_mutex_destroy(&reconn_list.mutex))
VHOST_LOG_CONFIG("thread", ERR,
"%s: failed to destroy reconnect mutex\n",
__func__);
}
return ret;
}
static int
vhost_user_start_client(struct vhost_user_socket *vsocket)
{
int ret;
int fd = vsocket->socket_fd;
const char *path = vsocket->path;
struct vhost_user_reconnect *reconn;
ret = vhost_user_connect_nonblock(vsocket->path, fd, (struct sockaddr *)&vsocket->un,
sizeof(vsocket->un));
if (ret == 0) {
vhost_user_add_connection(fd, vsocket);
return 0;
}
VHOST_LOG_CONFIG(path, WARNING, "failed to connect: %s\n", strerror(errno));
if (ret == -2 || !vsocket->reconnect) {
close(fd);
return -1;
}
VHOST_LOG_CONFIG(path, INFO, "reconnecting...\n");
reconn = malloc(sizeof(*reconn));
if (reconn == NULL) {
VHOST_LOG_CONFIG(path, ERR, "failed to allocate memory for reconnect\n");
close(fd);
return -1;
}
reconn->un = vsocket->un;
reconn->fd = fd;
reconn->vsocket = vsocket;
pthread_mutex_lock(&reconn_list.mutex);
TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
pthread_mutex_unlock(&reconn_list.mutex);
return 0;
}
static struct vhost_user_socket *
find_vhost_user_socket(const char *path)
{
int i;
if (path == NULL)
return NULL;
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (!strcmp(vsocket->path, path))
return vsocket;
}
return NULL;
}
int
rte_vhost_driver_attach_vdpa_device(const char *path,
struct rte_vdpa_device *dev)
{
struct vhost_user_socket *vsocket;
if (dev == NULL || path == NULL)
return -1;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->vdpa_dev = dev;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
int
rte_vhost_driver_detach_vdpa_device(const char *path)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->vdpa_dev = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path)
{
struct vhost_user_socket *vsocket;
struct rte_vdpa_device *dev = NULL;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
dev = vsocket->vdpa_dev;
pthread_mutex_unlock(&vhost_user.mutex);
return dev;
}
int
rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type)
{
struct vhost_user_socket *vsocket;
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev) {
ret = -1;
goto unlock_exit;
}
*type = vdpa_dev->type;
unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
int
rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
/* Note that use_builtin_virtio_net is not affected by this function
* since callers may want to selectively disable features of the
* built-in vhost net device backend.
*/
if (vsocket)
vsocket->features &= ~features;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
int
rte_vhost_driver_enable_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket) {
if ((vsocket->supported_features & features) != features) {
/*
* trying to enable features the driver doesn't
* support.
*/
pthread_mutex_unlock(&vhost_user.mutex);
return -1;
}
vsocket->features |= features;
}
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
int
rte_vhost_driver_set_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket) {
vsocket->supported_features = features;
vsocket->features = features;
/* Anyone setting feature bits is implementing their own vhost
* device backend.
*/
vsocket->use_builtin_virtio_net = false;
}
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
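/*
* Illustrative sketch of how an application combines these calls (the
* socket path and feature choice below are hypothetical): the builtin net
* backend keeps its default supported_features, and individual bits can
* then be masked out before negotiation with the frontend:
*
*	rte_vhost_driver_register("/tmp/vhost-net.sock", 0);
*	rte_vhost_driver_disable_features("/tmp/vhost-net.sock",
*			1ULL << VIRTIO_NET_F_HOST_TSO4);
*
* A backend implementing its own device type would instead call
* rte_vhost_driver_set_features() with the full set of features it
* supports.
*/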
int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
struct vhost_user_socket *vsocket;
uint64_t vdpa_features;
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev) {
*features = vsocket->features;
goto unlock_exit;
}
if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa features for socket file.\n");
ret = -1;
goto unlock_exit;
}
*features = vsocket->features & vdpa_features;
unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
int
rte_vhost_driver_set_protocol_features(const char *path,
uint64_t protocol_features)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->protocol_features = protocol_features;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
int
rte_vhost_driver_get_protocol_features(const char *path,
uint64_t *protocol_features)
{
struct vhost_user_socket *vsocket;
uint64_t vdpa_protocol_features;
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev) {
*protocol_features = vsocket->protocol_features;
goto unlock_exit;
}
if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
&vdpa_protocol_features) < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa protocol features.\n");
ret = -1;
goto unlock_exit;
}
*protocol_features = vsocket->protocol_features
& vdpa_protocol_features;
unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
{
struct vhost_user_socket *vsocket;
uint32_t vdpa_queue_num;
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev) {
*queue_num = vsocket->max_queue_pairs;
goto unlock_exit;
}
if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa queue number.\n");
ret = -1;
goto unlock_exit;
}
*queue_num = RTE_MIN(vsocket->max_queue_pairs, vdpa_queue_num);
unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
int
rte_vhost_driver_set_max_queue_num(const char *path, uint32_t max_queue_pairs)
{
struct vhost_user_socket *vsocket;
int ret = 0;
VHOST_LOG_CONFIG(path, INFO, "Setting max queue pairs to %u\n", max_queue_pairs);
if (max_queue_pairs > VHOST_MAX_QUEUE_PAIRS) {
VHOST_LOG_CONFIG(path, ERR, "Library only supports up to %u queue pairs\n",
VHOST_MAX_QUEUE_PAIRS);
return -1;
}
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
/*
* This is only useful for VDUSE, for which the number of virtqueues is
* set by the backend. For Vhost-user, the number of virtqueues is
* defined by the frontend.
*/
if (!vsocket->is_vduse) {
VHOST_LOG_CONFIG(path, DEBUG, "Keeping %u max queue pairs for Vhost-user backend\n",
VHOST_MAX_QUEUE_PAIRS);
goto unlock_exit;
}
vsocket->max_queue_pairs = max_queue_pairs;
unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
static void
vhost_user_socket_mem_free(struct vhost_user_socket *vsocket)
{
if (vsocket == NULL)
return;
free(vsocket->path);
free(vsocket);
}
/*
* Register a new vhost-user socket; here we could act as server
* (the default case), or as client when the RTE_VHOST_USER_CLIENT
* flag is set.
*/
int
rte_vhost_driver_register(const char *path, uint64_t flags)
{
int ret = -1;
struct vhost_user_socket *vsocket;
if (!path)
return -1;
pthread_mutex_lock(&vhost_user.mutex);
if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
VHOST_LOG_CONFIG(path, ERR, "the number of vhost sockets reaches maximum\n");
goto out;
}
vsocket = malloc(sizeof(struct vhost_user_socket));
if (!vsocket)
goto out;
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
if (vsocket->path == NULL) {
VHOST_LOG_CONFIG(path, ERR, "failed to copy socket path string\n");
vhost_user_socket_mem_free(vsocket);
goto out;
}
TAILQ_INIT(&vsocket->conn_list);
ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
if (ret) {
VHOST_LOG_CONFIG(path, ERR, "failed to init connection mutex\n");
goto out_free;
}
if (!strncmp("/dev/vduse/", path, strlen("/dev/vduse/")))
vsocket->is_vduse = true;
vsocket->vdpa_dev = NULL;
vsocket->max_queue_pairs = VHOST_MAX_QUEUE_PAIRS;
vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
vsocket->net_compliant_ol_flags = flags & RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
vsocket->stats_enabled = flags & RTE_VHOST_USER_NET_STATS_ENABLE;
if (vsocket->is_vduse)
vsocket->iommu_support = true;
else
vsocket->iommu_support = flags & RTE_VHOST_USER_IOMMU_SUPPORT;
if (vsocket->async_copy && (vsocket->iommu_support ||
(flags & RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
VHOST_LOG_CONFIG(path, ERR, "async copy with IOMMU or post-copy not supported\n");
goto out_mutex;
}
/*
* Set the supported features correctly for the builtin vhost-user
* net driver.
*
* Applications know nothing about the features the builtin virtio-net
* driver (virtio_net.c) supports, so they cannot invoke
* rte_vhost_driver_set_features() themselves. To work around that, we
* set the features unconditionally here. If an application wants to
* implement another vhost-user backend (say SCSI), it should call
* rte_vhost_driver_set_features(), which will overwrite the following
* two values.
*/
vsocket->use_builtin_virtio_net = true;
if (vsocket->is_vduse) {
vsocket->supported_features = VDUSE_NET_SUPPORTED_FEATURES;
vsocket->features = VDUSE_NET_SUPPORTED_FEATURES;
} else {
vsocket->supported_features = VHOST_USER_NET_SUPPORTED_FEATURES;
vsocket->features = VHOST_USER_NET_SUPPORTED_FEATURES;
vsocket->protocol_features = VHOST_USER_PROTOCOL_FEATURES;
}
if (vsocket->async_copy) {
vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
VHOST_LOG_CONFIG(path, INFO, "logging feature is disabled in async copy mode\n");
}
/*
* In linear-buffer mode without external buffers, we cannot receive a
* buffer from the guest if it does not fit in a single mbuf, which is
* likely when segmentation offloading is enabled.
*/
if (vsocket->linearbuf && !vsocket->extbuf) {
uint64_t seg_offload_features =
(1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6) |
(1ULL << VIRTIO_NET_F_HOST_UFO);
VHOST_LOG_CONFIG(path, INFO, "Linear buffers requested without external buffers,\n");
VHOST_LOG_CONFIG(path, INFO, "disabling host segmentation offloading support\n");
vsocket->supported_features &= ~seg_offload_features;
vsocket->features &= ~seg_offload_features;
}
if (!vsocket->iommu_support) {
vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
}
if (!(flags & RTE_VHOST_USER_POSTCOPY_SUPPORT)) {
vsocket->protocol_features &=
~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
} else {
#ifndef RTE_LIBRTE_VHOST_POSTCOPY
VHOST_LOG_CONFIG(path, ERR, "Postcopy requested but not compiled\n");
ret = -1;
goto out_mutex;
#endif
}
if (!vsocket->is_vduse) {
if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid.opaque_id == 0) {
if (vhost_user_reconnect_init() != 0)
goto out_mutex;
}
} else {
vsocket->is_server = true;
}
ret = create_unix_socket(vsocket);
if (ret < 0)
goto out_mutex;
}
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
out_mutex:
if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
VHOST_LOG_CONFIG(path, ERR, "failed to destroy connection mutex\n");
}
out_free:
vhost_user_socket_mem_free(vsocket);
out:
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
}
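/*
* Illustrative sketch, assuming a hypothetical application socket path:
* registering in client mode with reconnection left enabled (the default
* when RTE_VHOST_USER_NO_RECONNECT is not passed):
*
*	if (rte_vhost_driver_register("/tmp/vhost-net.sock",
*			RTE_VHOST_USER_CLIENT) < 0)
*		handle_error();
*
* where handle_error() stands in for application error handling. Passing
* flags of 0 registers a server-mode socket instead, in which case the
* socket file must not already exist (see vhost_user_start_server()).
*/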
static bool
vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
{
int found = false;
struct vhost_user_reconnect *reconn, *next;
pthread_mutex_lock(&reconn_list.mutex);
for (reconn = TAILQ_FIRST(&reconn_list.head);
reconn != NULL; reconn = next) {
next = TAILQ_NEXT(reconn, next);
if (reconn->vsocket == vsocket) {
TAILQ_REMOVE(&reconn_list.head, reconn, next);
close(reconn->fd);
free(reconn);
found = true;
break;
}
}
pthread_mutex_unlock(&reconn_list.mutex);
return found;
}
/**
* Unregister the specified vhost socket
*/
int
rte_vhost_driver_unregister(const char *path)
{
int i;
int count;
struct vhost_user_connection *conn, *next;
if (path == NULL)
return -1;
again:
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (strcmp(vsocket->path, path))
continue;
if (vsocket->is_vduse) {
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
* If the read/write callback is executing, release the
* vhost_user mutex and try again, since the callback
* may itself take that mutex.
*/
if (fdset_try_del(&vhost_user.fdset, vsocket->socket_fd) == -1) {
pthread_mutex_unlock(&vhost_user.mutex);
goto again;
}
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
pthread_mutex_lock(&vsocket->conn_mutex);
for (conn = TAILQ_FIRST(&vsocket->conn_list);
conn != NULL;
conn = next) {
next = TAILQ_NEXT(conn, next);
/*
* If the read/write callback is executing, release the
* vsocket conn_mutex and the vhost_user mutex, and try
* again, since the callback may take both locks.
*/
if (fdset_try_del(&vhost_user.fdset,
conn->connfd) == -1) {
pthread_mutex_unlock(&vsocket->conn_mutex);
pthread_mutex_unlock(&vhost_user.mutex);
goto again;
}
VHOST_LOG_CONFIG(path, INFO, "free connfd %d\n", conn->connfd);
close(conn->connfd);
vhost_destroy_device(conn->vid);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
free(conn);
}
pthread_mutex_unlock(&vsocket->conn_mutex);
if (vsocket->is_server) {
close(vsocket->socket_fd);
unlink(path);
}
pthread_mutex_destroy(&vsocket->conn_mutex);
vhost_user_socket_mem_free(vsocket);
count = --vhost_user.vsocket_cnt;
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
return -1;
}
/*
* Register callback ops so that we can add/remove devices to/from a data core.
*/
int
rte_vhost_driver_callback_register(const char *path,
struct rte_vhost_device_ops const * const ops)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->notify_ops = ops;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
struct rte_vhost_device_ops const *
vhost_driver_callback_get(const char *path)
{
struct vhost_user_socket *vsocket;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? vsocket->notify_ops : NULL;
}
int
rte_vhost_driver_start(const char *path)
{
struct vhost_user_socket *vsocket;
static rte_thread_t fdset_tid;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
pthread_mutex_unlock(&vhost_user.mutex);
if (!vsocket)
return -1;
if (vsocket->is_vduse)
return vduse_device_create(path, vsocket->net_compliant_ol_flags);
if (fdset_tid.opaque_id == 0) {
/**
* Create a pipe that the poll loop waits on; writing to it notifies
* the loop so that it rebuilds its poll wait list.
*/
if (fdset_pipe_init(&vhost_user.fdset) < 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vhost fdset\n");
return -1;
}
int ret = rte_thread_create_internal_control(&fdset_tid,
"vhost-evt", fdset_event_dispatch, &vhost_user.fdset);
if (ret != 0) {
VHOST_LOG_CONFIG(path, ERR, "failed to create fdset handling thread\n");
fdset_pipe_uninit(&vhost_user.fdset);
return -1;
}
}
if (vsocket->is_server)
return vhost_user_start_server(vsocket);
else
return vhost_user_start_client(vsocket);
}
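/*
* Illustrative end-to-end flow (hypothetical application code): a backend
* typically registers the socket, installs its callbacks, then starts the
* driver; unregistering tears everything down again:
*
*	static const struct rte_vhost_device_ops app_ops = {
*		.new_device = app_new_device,
*		.destroy_device = app_destroy_device,
*	};
*
*	rte_vhost_driver_register(path, flags);
*	rte_vhost_driver_callback_register(path, &app_ops);
*	rte_vhost_driver_start(path);
*	...
*	rte_vhost_driver_unregister(path);
*
* app_ops, app_new_device and app_destroy_device are hypothetical; only
* the rte_vhost_driver_*() entry points are defined in this file.
*/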