feat: start virtio setup

This commit is contained in:
2022-09-01 20:45:40 +02:00
parent e850dabc8b
commit f037adcebd
7 changed files with 196 additions and 3 deletions

21
include/endian.h Normal file
View File

@@ -0,0 +1,21 @@
//
// Created by rick on 14-5-22.
//
// Byte-order helpers: convert little-endian-encoded integers (as stored
// on disk / on the wire) to the host's native byte order.
#ifndef NEW_KERNEL_ENDIAN_H
#define NEW_KERNEL_ENDIAN_H
#include <stdint.h>
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* Host is little-endian: values are already native. Parenthesize the
 * argument so the macro is safe inside larger expressions. */
#define leu16_to_native(x) (x)
#define leu32_to_native(x) (x)
#else
/* Host is big-endian: byte-swap.
 * The shifts operate on the integer-promoted value, so mask/cast the
 * result back to the intended width — otherwise leu16_to_native leaves
 * stale high bits set (e.g. 0x3412 -> 0x341234 instead of 0x1234). */
#define leu16_to_native(x) ((uint16_t)((((x) >> 8) & 0xff) | (((x) & 0xff) << 8)))
/* Outer parentheses make the whole expansion a single operand, and the
 * unsigned 0xff000000u constant avoids UB from left-shifting into the
 * sign bit of a promoted signed int. */
#define leu32_to_native(x) ((uint32_t)((((x) >> 24) & 0xff) | \
                            (((x) << 8) & 0xff0000) | \
                            (((x) >> 8) & 0xff00) | \
                            (((uint32_t)(x) << 24) & 0xff000000u)))
#endif
#endif //NEW_KERNEL_ENDIAN_H

View File

@@ -14,7 +14,8 @@
/* PCI base class code: bridge devices. */
#define PCI_CLASS_BRIDGE 0x06
// class MASS STORAGE 0x01
/* NOTE(review): PCI_SUB_CLASS_IDE duplicates PCI_SUB_CLASS_MASS_IDE below
 * (both 0x01) — consider removing one to avoid drift. */
#define PCI_SUB_CLASS_IDE 0x01
/* Mass-storage subclass: SCSI controller. */
#define PCI_SUB_CLASS_MASS_SCSI 0x00
/* Mass-storage subclass: IDE controller. */
#define PCI_SUB_CLASS_MASS_IDE 0x01
// class BRIDGE 0x06
/* Bridge subclass: PCI-to-PCI bridge. */
#define PCI_SUB_CLASS_PCI_PCI_BRIDGE_4 0x04

View File

@@ -0,0 +1,18 @@
//
// Created by rick on 14-5-22.
//
/* PCI vendor/device IDs for virtio devices, used to match virtio hardware
 * during PCI bus enumeration. */
#ifndef NEW_KERNEL_VIRTIO_H
#define NEW_KERNEL_VIRTIO_H
/* All virtio PCI devices share this vendor ID. */
#define PCI_VENDOR_VIRTIO 0x1af4
/* Per-device-type PCI device IDs (one per virtio device class). */
#define PCI_DEVICE_VIRTIO_NETWORK 0x1000
#define PCI_DEVICE_VIRTIO_BLOCK 0x1001
#define PCI_DEVICE_VIRTIO_BALLOON 0x1002
#define PCI_DEVICE_VIRTIO_CONSOLE 0x1003
#define PCI_DEVICE_VIRTIO_SCSI 0x1004
#define PCI_DEVICE_VIRTIO_ENTROPY 0x1005
#define PCI_DEVICE_VIRTIO_9P 0x1009
#endif //NEW_KERNEL_VIRTIO_H

103
include/virtio_queue.h Normal file
View File

@@ -0,0 +1,103 @@
#ifndef VIRTQUEUE_H
#define VIRTQUEUE_H
/*
 * Virtual I/O Device (VIRTIO) Version 1.2
 * Committee Specification Draft 01
 * 09 May 2022
 * Copyright (c) OASIS Open 2022. All Rights Reserved.
 * Source: http://docs.oasis-open.org/virtio/virtio/v1.2/csd01/listings/
 * Latest stage of narrative specification: http://docs.oasis-open.org/virtio/virtio/v1.2/virtio-v1.2.html
 * TC IPR Statement: https://github.com/oasis-tcs/virtio-admin/blob/master/IPR.md
 */
/*
 * An interface for efficient virtio implementation.
 */
#include <stdint.h>
/* Fixed-width little-endian types as used by the virtio specification.
 * The spec's listing assumes these typedefs exist; without them this
 * header does not compile. On-wire values are little-endian — convert
 * with the leu*_to_native() helpers from endian.h on big-endian hosts. */
typedef uint16_t le16;
typedef uint32_t le32;
typedef uint64_t le64;
/* This marks a buffer as continuing via the next field. */
#define VIRTQ_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VIRTQ_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VIRTQ_DESC_F_INDIRECT 4
/* The device uses this in used->flags to advise the driver: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VIRTQ_USED_F_NO_NOTIFY 1
/* The driver uses this in avail->flags to advise the device: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization. */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
/* Support for indirect descriptors */
#define VIRTIO_F_INDIRECT_DESC 28
/* Support for avail_event and used_event fields */
#define VIRTIO_F_EVENT_IDX 29
/* Arbitrary descriptor layouts. */
#define VIRTIO_F_ANY_LAYOUT 27
/* Virtqueue descriptors: 16 bytes.
 * These can chain together via "next". */
struct virtq_desc {
        /* Address (guest-physical). */
        le64 addr;
        /* Length. */
        le32 len;
        /* The flags as indicated above. */
        le16 flags;
        /* We chain unused descriptors via this, too */
        le16 next;
};
/* Driver-owned ring of available descriptor heads. */
struct virtq_avail {
        le16 flags;
        le16 idx;
        le16 ring[];
        /* Only if VIRTIO_F_EVENT_IDX: le16 used_event; */
};
/* le32 is used here for ids for padding reasons. */
struct virtq_used_elem {
        /* Index of start of used descriptor chain. */
        le32 id;
        /* Total length of the descriptor chain which was written to. */
        le32 len;
};
/* Device-owned ring of consumed descriptor chains. */
struct virtq_used {
        le16 flags;
        le16 idx;
        struct virtq_used_elem ring[];
        /* Only if VIRTIO_F_EVENT_IDX: le16 avail_event; */
};
/* Driver-side handle tying the three rings of one virtqueue together. */
struct virtq {
        /* Queue size: number of descriptors (and ring entries). */
        unsigned int num;
        struct virtq_desc *desc;
        struct virtq_avail *avail;
        struct virtq_used *used;
};
/* Returns non-zero iff a notification is needed after moving the ring
 * index from old_idx to new_idx, given the peer's published event_idx.
 * The uint16_t casts make the comparison wrap-around safe. */
static inline int virtq_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}
/* Get location of event indices (only with VIRTIO_F_EVENT_IDX) */
static inline le16 *virtq_used_event(struct virtq *vq)
{
        /* For backwards compat, used event index is at *end* of avail ring. */
        return &vq->avail->ring[vq->num];
}
static inline le16 *virtq_avail_event(struct virtq *vq)
{
        /* For backwards compat, avail event index is at *end* of used ring. */
        return (le16 *)&vq->used->ring[vq->num];
}
#endif /* VIRTQUEUE_H */